hash (string, 64 chars) | content (string, 0 to 1.51M chars)
---|---
7dc3f64ff52293a7ebb9c91d48b9f2ba54c63fe24eb7a9cb754054e7c085c94f | import sys
sys._running_pytest = True # type: ignore
from distutils.version import LooseVersion as V
import pytest
from sympy.core.cache import clear_cache
import re
sp = re.compile(r'([0-9]+)/([1-9][0-9]*)')
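# A sketch of the --split semantics, inferred from process_split below:
# "--split a/b" keeps only the a-th of b roughly equal slices of the
# collected test items, e.g. "--split 2/4" runs the second quarter.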
def process_split(config, items):
split = config.getoption("--split")
if not split:
return
m = sp.match(split)
if not m:
raise ValueError("split must be a string of the form a/b "
"where a and b are ints.")
i, t = map(int, m.groups())
start, end = (i-1)*len(items)//t, i*len(items)//t
if i < t:
# remove elements from end of list first
del items[end:]
del items[:start]
def pytest_report_header(config):
from sympy.utilities.misc import ARCH
s = "architecture: %s\n" % ARCH
from sympy.core.cache import USE_CACHE
s += "cache: %s\n" % USE_CACHE
from sympy.core.compatibility import GROUND_TYPES, HAS_GMPY
version = ''
if GROUND_TYPES =='gmpy':
if HAS_GMPY == 1:
import gmpy
elif HAS_GMPY == 2:
import gmpy2 as gmpy
version = gmpy.version()
s += "ground types: %s %s\n" % (GROUND_TYPES, version)
return s
def pytest_terminal_summary(terminalreporter):
if (terminalreporter.stats.get('error', None) or
terminalreporter.stats.get('failed', None)):
terminalreporter.write_sep(
' ', 'DO *NOT* COMMIT!', red=True, bold=True)
def pytest_addoption(parser):
parser.addoption("--split", action="store", default="",
help="split tests")
def pytest_collection_modifyitems(config, items):
""" pytest hook. """
# handle splits
process_split(config, items)
@pytest.fixture(autouse=True, scope='module')
def file_clear_cache():
clear_cache()
@pytest.fixture(autouse=True, scope='module')
def check_disabled(request):
if getattr(request.module, 'disabled', False):
pytest.skip("test requirements not met.")
elif getattr(request.module, 'ipython', False):
# need to check version and options for ipython tests
if (V(pytest.__version__) < '2.6.3' and
pytest.config.getvalue('-s') != 'no'):
pytest.skip("run py.test with -s or upgrade to newer version.")
|
3861d7ed5195f05f9d07f5161360ab694893a1a257001f9ef7325324f49c060f | __version__ = "1.8.dev"
|
c41b6ec7cd8316a22cefcbd855c0f976935539297e72afdaebb464ed84ee2d1a | """
This module exports all latin and greek letters as Symbols, so you can
conveniently do
>>> from sympy.abc import x, y
instead of the slightly more clunky-looking
>>> from sympy import symbols
>>> x, y = symbols('x y')
Caveats
=======
1. As of the time of writing this, the names ``C``, ``O``, ``S``, ``I``, ``N``,
``E``, and ``Q`` are colliding with names defined in SymPy. If you import them
from both ``sympy.abc`` and ``sympy``, the second import will "win".
This is an issue only for * imports, which should only be used for short-lived
code such as interactive sessions and throwaway scripts that do not survive
until the next SymPy upgrade, where ``sympy`` may contain a different set of
names.
2. This module does not define symbol names on demand, i.e.
``from sympy.abc import foo`` will be reported as an error because
``sympy.abc`` does not contain the name ``foo``. To get a symbol named ``foo``,
you still need to use ``Symbol('foo')`` or ``symbols('foo')``.
You can freely mix usage of ``sympy.abc`` and ``Symbol``/``symbols``, though
sticking with one and only one way to get the symbols does tend to make the code
more readable.
The module also defines some special names to help detect which names clash
with the default SymPy namespace.
``_clash1`` defines all the single letter variables that clash with
SymPy objects; ``_clash2`` defines the multi-letter clashing symbols;
and ``_clash`` is the union of both. These can be passed for ``locals``
during sympification if one desires Symbols rather than the non-Symbol
objects for those names.
Examples
========
>>> from sympy import S
>>> from sympy.abc import _clash1, _clash2, _clash
>>> S("Q & C", locals=_clash1)
C & Q
>>> S('pi(x)', locals=_clash2)
pi(x)
>>> S('pi(C, Q)', locals=_clash)
pi(C, Q)
"""
from typing import Any, Dict
import string
from .core import Symbol, symbols
from .core.alphabets import greeks
from .core.compatibility import exec_
##### Symbol definitions #####
# Implementation note: The easiest way to avoid typos in the symbols()
# parameter is to copy it from the left-hand side of the assignment.
a, b, c, d, e, f, g, h, i, j = symbols('a, b, c, d, e, f, g, h, i, j')
k, l, m, n, o, p, q, r, s, t = symbols('k, l, m, n, o, p, q, r, s, t')
u, v, w, x, y, z = symbols('u, v, w, x, y, z')
A, B, C, D, E, F, G, H, I, J = symbols('A, B, C, D, E, F, G, H, I, J')
K, L, M, N, O, P, Q, R, S, T = symbols('K, L, M, N, O, P, Q, R, S, T')
U, V, W, X, Y, Z = symbols('U, V, W, X, Y, Z')
alpha, beta, gamma, delta = symbols('alpha, beta, gamma, delta')
epsilon, zeta, eta, theta = symbols('epsilon, zeta, eta, theta')
iota, kappa, lamda, mu = symbols('iota, kappa, lamda, mu')
nu, xi, omicron, pi = symbols('nu, xi, omicron, pi')
rho, sigma, tau, upsilon = symbols('rho, sigma, tau, upsilon')
phi, chi, psi, omega = symbols('phi, chi, psi, omega')
##### Clashing-symbols diagnostics #####
# We want to know which names in SymPy collide with those in here.
# This is mostly for diagnosing SymPy's namespace during SymPy development.
_latin = list(string.ascii_letters)
# OSINEQ should not be imported as they clash; gamma, pi and zeta clash, too
_greek = list(greeks) # make a copy, so we can mutate it
# Note: We import lamda since lambda is a reserved keyword in Python
_greek.remove("lambda")
_greek.append("lamda")
ns = {} # type: Dict[str, Any]
exec_('from sympy import *', ns)
_clash1 = {}
_clash2 = {}
while ns:
_k, _ = ns.popitem()
if _k in _greek:
_clash2[_k] = Symbol(_k)
_greek.remove(_k)
elif _k in _latin:
_clash1[_k] = Symbol(_k)
_latin.remove(_k)
_clash = {}
_clash.update(_clash1)
_clash.update(_clash2)
del _latin, _greek, Symbol, _k
|
cd11e92bdc44b45374069b191a3ce010b0c0cd7bb98d9836b8900c8138101ecc | """
Continuous Random Variables - Prebuilt variables
Contains
========
Arcsin
Benini
Beta
BetaNoncentral
BetaPrime
BoundedPareto
Cauchy
Chi
ChiNoncentral
ChiSquared
Dagum
Erlang
ExGaussian
Exponential
ExponentialPower
FDistribution
FisherZ
Frechet
Gamma
GammaInverse
Gumbel
Gompertz
Kumaraswamy
Laplace
Levy
Logistic
LogLogistic
LogNormal
Lomax
Maxwell
Moyal
Nakagami
Normal
Pareto
PowerFunction
QuadraticU
RaisedCosine
Rayleigh
Reciprocal
ShiftedGompertz
StudentT
Trapezoidal
Triangular
Uniform
UniformSum
VonMises
Wald
Weibull
WignerSemicircle
"""
from sympy import beta as beta_fn
from sympy import cos, sin, tan, atan, exp, besseli, besselj, besselk
from sympy import (log, sqrt, pi, S, Dummy, Interval, sympify, gamma, sign,
Piecewise, And, Eq, binomial, factorial, Sum, floor, Abs,
Lambda, Basic, lowergamma, erf, erfc, erfi, erfinv, I, asin,
hyper, uppergamma, sinh, Ne, expint, Rational, integrate)
from sympy.matrices import MatrixBase, MatrixExpr
from sympy.stats.crv import SingleContinuousPSpace, SingleContinuousDistribution
from sympy.stats.rv import _value_check, is_random
oo = S.Infinity
__all__ = ['ContinuousRV',
'Arcsin',
'Benini',
'Beta',
'BetaNoncentral',
'BetaPrime',
'BoundedPareto',
'Cauchy',
'Chi',
'ChiNoncentral',
'ChiSquared',
'Dagum',
'Erlang',
'ExGaussian',
'Exponential',
'ExponentialPower',
'FDistribution',
'FisherZ',
'Frechet',
'Gamma',
'GammaInverse',
'Gompertz',
'Gumbel',
'Kumaraswamy',
'Laplace',
'Levy',
'Logistic',
'LogLogistic',
'LogNormal',
'Lomax',
'Maxwell',
'Moyal',
'Nakagami',
'Normal',
'GaussianInverse',
'Pareto',
'PowerFunction',
'QuadraticU',
'RaisedCosine',
'Rayleigh',
'Reciprocal',
'StudentT',
'ShiftedGompertz',
'Trapezoidal',
'Triangular',
'Uniform',
'UniformSum',
'VonMises',
'Wald',
'Weibull',
'WignerSemicircle',
]
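# A MatrixBase instance is treated as random when any of its entries is a
# random expression; this lets is_random work on matrices of random variables.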
@is_random.register(MatrixBase)
def _(x):
return any([is_random(i) for i in x])
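# Shared helper for the distribution constructors below: sympify the
# arguments, build the distribution (optionally running its check()), and
# wrap it in a single continuous pspace.  If any argument is itself random,
# the result becomes a compound distribution instead.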
def rv(symbol, cls, args, **kwargs):
args = list(map(sympify, args))
dist = cls(*args)
if kwargs.pop('check', True):
dist.check(*args)
pspace = SingleContinuousPSpace(symbol, dist)
if any(is_random(arg) for arg in args):
from sympy.stats.compound_rv import CompoundPSpace, CompoundDistribution
pspace = CompoundPSpace(symbol, CompoundDistribution(dist))
return pspace.value
class ContinuousDistributionHandmade(SingleContinuousDistribution):
_argnames = ('pdf',)
def __new__(cls, pdf, set=Interval(-oo, oo)):
return Basic.__new__(cls, pdf, set)
@property
def set(self):
return self.args[1]
@staticmethod
def check(pdf, set):
x = Dummy('x')
val = integrate(pdf(x), (x, set))
_value_check(Eq(val, 1) != S.false, "The pdf on the given set is incorrect.")
def ContinuousRV(symbol, density, set=Interval(-oo, oo), **kwargs):
"""
Create a Continuous Random Variable given the following:
Parameters
==========
symbol : Symbol
Represents name of the random variable.
density : Expression containing symbol
Represents probability density function.
set : set/Interval
Represents the region where the pdf is valid; by default it is the whole real line.
check : bool
If True, it will check whether the given density
integrates to 1 over the given set. If False, it
will not perform this check. Default is False.
Returns
=======
RandomSymbol
Many common continuous random variable types are already implemented.
This function should be necessary only very rarely.
Examples
========
>>> from sympy import Symbol, sqrt, exp, pi
>>> from sympy.stats import ContinuousRV, P, E
>>> x = Symbol("x")
>>> pdf = sqrt(2)*exp(-x**2/2)/(2*sqrt(pi)) # Normal distribution
>>> X = ContinuousRV(x, pdf)
>>> E(X)
0
>>> P(X>0)
1/2
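As a minimal sketch of the optional check, passing ``check=True`` asks
SymPy to verify that the density integrates to one over the given set
before the variable is created (this can be expensive, so it is skipped
here):
>>> Y = ContinuousRV(x, pdf, check=True) # doctest: +SKIP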
"""
pdf = Piecewise((density, set.as_relational(symbol)), (0, True))
pdf = Lambda(symbol, pdf)
# have a default of False while `rv` should have a default of True
kwargs['check'] = kwargs.pop('check', False)
return rv(symbol.name, ContinuousDistributionHandmade, (pdf, set), **kwargs)
########################################
# Continuous Probability Distributions #
########################################
#-------------------------------------------------------------------------------
# Arcsin distribution ----------------------------------------------------------
class ArcsinDistribution(SingleContinuousDistribution):
_argnames = ('a', 'b')
@property
def set(self):
return Interval(self.a, self.b)
def pdf(self, x):
a, b = self.a, self.b
return 1/(pi*sqrt((x - a)*(b - x)))
def _cdf(self, x):
a, b = self.a, self.b
return Piecewise(
(S.Zero, x < a),
(2*asin(sqrt((x - a)/(b - a)))/pi, x <= b),
(S.One, True))
def Arcsin(name, a=0, b=1):
r"""
Create a Continuous Random Variable with an arcsin distribution.
The density of the arcsin distribution is given by
.. math::
f(x) := \frac{1}{\pi\sqrt{(x-a)(b-x)}}
with :math:`x \in (a,b)`. It must hold that :math:`-\infty < a < b < \infty`.
Parameters
==========
a : Real number, the left interval boundary
b : Real number, the right interval boundary
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Arcsin, density, cdf
>>> from sympy import Symbol
>>> a = Symbol("a", real=True)
>>> b = Symbol("b", real=True)
>>> z = Symbol("z")
>>> X = Arcsin("x", a, b)
>>> density(X)(z)
1/(pi*sqrt((-a + z)*(b - z)))
>>> cdf(X)(z)
Piecewise((0, a > z),
(2*asin(sqrt((-a + z)/(-a + b)))/pi, b >= z),
(1, True))
References
==========
.. [1] https://en.wikipedia.org/wiki/Arcsine_distribution
"""
return rv(name, ArcsinDistribution, (a, b))
#-------------------------------------------------------------------------------
# Benini distribution ----------------------------------------------------------
class BeniniDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'beta', 'sigma')
@staticmethod
def check(alpha, beta, sigma):
_value_check(alpha > 0, "Shape parameter Alpha must be positive.")
_value_check(beta > 0, "Shape parameter Beta must be positive.")
_value_check(sigma > 0, "Scale parameter Sigma must be positive.")
@property
def set(self):
return Interval(self.sigma, oo)
def pdf(self, x):
alpha, beta, sigma = self.alpha, self.beta, self.sigma
return (exp(-alpha*log(x/sigma) - beta*log(x/sigma)**2)
*(alpha/x + 2*beta*log(x/sigma)/x))
def _moment_generating_function(self, t):
raise NotImplementedError('The moment generating function of the '
'Benini distribution does not exist.')
def Benini(name, alpha, beta, sigma):
r"""
Create a Continuous Random Variable with a Benini distribution.
The density of the Benini distribution is given by
.. math::
f(x) := e^{-\alpha\log{\frac{x}{\sigma}}
-\beta\log^2\left[{\frac{x}{\sigma}}\right]}
\left(\frac{\alpha}{x}+\frac{2\beta\log{\frac{x}{\sigma}}}{x}\right)
This is a heavy-tailed distribution and is also known as the log-Rayleigh
distribution.
Parameters
==========
alpha : Real number, `\alpha > 0`, a shape
beta : Real number, `\beta > 0`, a shape
sigma : Real number, `\sigma > 0`, a scale
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Benini, density, cdf
>>> from sympy import Symbol, pprint
>>> alpha = Symbol("alpha", positive=True)
>>> beta = Symbol("beta", positive=True)
>>> sigma = Symbol("sigma", positive=True)
>>> z = Symbol("z")
>>> X = Benini("x", alpha, beta, sigma)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
/ / z \\ / z \ 2/ z \
| 2*beta*log|-----|| - alpha*log|-----| - beta*log |-----|
|alpha \sigma/| \sigma/ \sigma/
|----- + -----------------|*e
\ z z /
>>> cdf(X)(z)
Piecewise((1 - exp(-alpha*log(z/sigma) - beta*log(z/sigma)**2), sigma <= z),
(0, True))
References
==========
.. [1] https://en.wikipedia.org/wiki/Benini_distribution
.. [2] http://reference.wolfram.com/legacy/v8/ref/BeniniDistribution.html
"""
return rv(name, BeniniDistribution, (alpha, beta, sigma))
#-------------------------------------------------------------------------------
# Beta distribution ------------------------------------------------------------
class BetaDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'beta')
set = Interval(0, 1)
@staticmethod
def check(alpha, beta):
_value_check(alpha > 0, "Shape parameter Alpha must be positive.")
_value_check(beta > 0, "Shape parameter Beta must be positive.")
def pdf(self, x):
alpha, beta = self.alpha, self.beta
return x**(alpha - 1) * (1 - x)**(beta - 1) / beta_fn(alpha, beta)
def _characteristic_function(self, t):
return hyper((self.alpha,), (self.alpha + self.beta,), I*t)
def _moment_generating_function(self, t):
return hyper((self.alpha,), (self.alpha + self.beta,), t)
def Beta(name, alpha, beta):
r"""
Create a Continuous Random Variable with a Beta distribution.
The density of the Beta distribution is given by
.. math::
f(x) := \frac{x^{\alpha-1}(1-x)^{\beta-1}} {\mathrm{B}(\alpha,\beta)}
with :math:`x \in [0,1]`.
Parameters
==========
alpha : Real number, `\alpha > 0`, a shape
beta : Real number, `\beta > 0`, a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Beta, density, E, variance
>>> from sympy import Symbol, simplify, pprint, factor
>>> alpha = Symbol("alpha", positive=True)
>>> beta = Symbol("beta", positive=True)
>>> z = Symbol("z")
>>> X = Beta("x", alpha, beta)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
alpha - 1 beta - 1
z *(1 - z)
--------------------------
B(alpha, beta)
>>> simplify(E(X))
alpha/(alpha + beta)
>>> factor(simplify(variance(X)))
alpha*beta/((alpha + beta)**2*(alpha + beta + 1))
References
==========
.. [1] https://en.wikipedia.org/wiki/Beta_distribution
.. [2] http://mathworld.wolfram.com/BetaDistribution.html
"""
return rv(name, BetaDistribution, (alpha, beta))
#-------------------------------------------------------------------------------
# Noncentral Beta distribution ------------------------------------------------------------
class BetaNoncentralDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'beta', 'lamda')
set = Interval(0, 1)
@staticmethod
def check(alpha, beta, lamda):
_value_check(alpha > 0, "Shape parameter Alpha must be positive.")
_value_check(beta > 0, "Shape parameter Beta must be positive.")
_value_check(lamda >= 0, "Noncentrality parameter Lambda must be non-negative.")
def pdf(self, x):
alpha, beta, lamda = self.alpha, self.beta, self.lamda
k = Dummy("k")
return Sum(exp(-lamda / 2) * (lamda / 2)**k * x**(alpha + k - 1) *(
1 - x)**(beta - 1) / (factorial(k) * beta_fn(alpha + k, beta)), (k, 0, oo))
def BetaNoncentral(name, alpha, beta, lamda):
r"""
Create a Continuous Random Variable with a Type I Noncentral Beta distribution.
The density of the Noncentral Beta distribution is given by
.. math::
f(x) := \sum_{k=0}^\infty e^{-\lambda/2}\frac{(\lambda/2)^k}{k!}
\frac{x^{\alpha+k-1}(1-x)^{\beta-1}}{\mathrm{B}(\alpha+k,\beta)}
with :math:`x \in [0,1]`.
Parameters
==========
alpha : Real number, `\alpha > 0`, a shape
beta : Real number, `\beta > 0`, a shape
lamda: Real number, `\lambda >= 0`, noncentrality parameter
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import BetaNoncentral, density, cdf
>>> from sympy import Symbol, pprint
>>> alpha = Symbol("alpha", positive=True)
>>> beta = Symbol("beta", positive=True)
>>> lamda = Symbol("lamda", nonnegative=True)
>>> z = Symbol("z")
>>> X = BetaNoncentral("x", alpha, beta, lamda)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
oo
_____
\ `
\ -lamda
\ k -------
\ k + alpha - 1 /lamda\ beta - 1 2
) z *|-----| *(1 - z) *e
/ \ 2 /
/ ------------------------------------------------
/ B(k + alpha, beta)*k!
/____,
k = 0
Compute the cdf with specific ``x``, ``alpha``, ``beta`` and ``lamda`` values as follows:
>>> cdf(BetaNoncentral("x", 1, 1, 1), evaluate=False)(2).doit()
2*exp(1/2)
The argument ``evaluate=False`` prevents an attempt to evaluate the sum
for general ``x`` before the argument 2 is passed.
References
==========
.. [1] https://en.wikipedia.org/wiki/Noncentral_beta_distribution
.. [2] https://reference.wolfram.com/language/ref/NoncentralBetaDistribution.html
"""
return rv(name, BetaNoncentralDistribution, (alpha, beta, lamda))
#-------------------------------------------------------------------------------
# Beta prime distribution ------------------------------------------------------
class BetaPrimeDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'beta')
@staticmethod
def check(alpha, beta):
_value_check(alpha > 0, "Shape parameter Alpha must be positive.")
_value_check(beta > 0, "Shape parameter Beta must be positive.")
set = Interval(0, oo)
def pdf(self, x):
alpha, beta = self.alpha, self.beta
return x**(alpha - 1)*(1 + x)**(-alpha - beta)/beta_fn(alpha, beta)
def BetaPrime(name, alpha, beta):
r"""
Create a continuous random variable with a Beta prime distribution.
The density of the Beta prime distribution is given by
.. math::
f(x) := \frac{x^{\alpha-1} (1+x)^{-\alpha -\beta}}{B(\alpha,\beta)}
with :math:`x > 0`.
Parameters
==========
alpha : Real number, `\alpha > 0`, a shape
beta : Real number, `\beta > 0`, a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import BetaPrime, density
>>> from sympy import Symbol, pprint
>>> alpha = Symbol("alpha", positive=True)
>>> beta = Symbol("beta", positive=True)
>>> z = Symbol("z")
>>> X = BetaPrime("x", alpha, beta)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
alpha - 1 -alpha - beta
z *(z + 1)
-------------------------------
B(alpha, beta)
References
==========
.. [1] https://en.wikipedia.org/wiki/Beta_prime_distribution
.. [2] http://mathworld.wolfram.com/BetaPrimeDistribution.html
"""
return rv(name, BetaPrimeDistribution, (alpha, beta))
#-------------------------------------------------------------------------------
# Bounded Pareto Distribution --------------------------------------------------
class BoundedParetoDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'left', 'right')
@property
def set(self):
return Interval(self.left , self.right)
@staticmethod
def check(alpha, left, right):
_value_check (alpha.is_positive, "Shape must be positive.")
_value_check (left.is_positive, "Left value should be positive.")
_value_check (right > left, "Right should be greater than left.")
def pdf(self, x):
alpha, left, right = self.alpha, self.left, self.right
num = alpha * (left**alpha) * x**(- alpha -1)
den = 1 - (left/right)**alpha
return num/den
def BoundedPareto(name, alpha, left, right):
r"""
Create a continuous random variable with a Bounded Pareto distribution.
The density of the Bounded Pareto distribution is given by
.. math::
f(x) := \frac{\alpha L^{\alpha}x^{-\alpha-1}}{1-(\frac{L}{H})^{\alpha}}
Parameters
==========
alpha : Real Number, `alpha > 0`
Shape parameter
left : Real Number, `left > 0`
Location parameter
right : Real Number, `right > left`
Location parameter
Examples
========
>>> from sympy.stats import BoundedPareto, density, cdf, E
>>> from sympy import symbols
>>> L, H = symbols('L, H', positive=True)
>>> X = BoundedPareto('X', 2, L, H)
>>> x = symbols('x')
>>> density(X)(x)
2*L**2/(x**3*(1 - L**2/H**2))
>>> cdf(X)(x)
Piecewise((-H**2*L**2/(x**2*(H**2 - L**2)) + H**2/(H**2 - L**2), L <= x), (0, True))
>>> E(X).simplify()
2*H*L/(H + L)
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Pareto_distribution#Bounded_Pareto_distribution
"""
return rv (name, BoundedParetoDistribution, (alpha, left, right))
# ------------------------------------------------------------------------------
# Cauchy distribution ----------------------------------------------------------
class CauchyDistribution(SingleContinuousDistribution):
_argnames = ('x0', 'gamma')
@staticmethod
def check(x0, gamma):
_value_check(gamma > 0, "Scale parameter Gamma must be positive.")
_value_check(x0.is_real, "Location parameter must be real.")
def pdf(self, x):
return 1/(pi*self.gamma*(1 + ((x - self.x0)/self.gamma)**2))
def _cdf(self, x):
x0, gamma = self.x0, self.gamma
return (1/pi)*atan((x - x0)/gamma) + S.Half
def _characteristic_function(self, t):
return exp(self.x0 * I * t - self.gamma * Abs(t))
def _moment_generating_function(self, t):
raise NotImplementedError("The moment generating function for the "
"Cauchy distribution does not exist.")
def _quantile(self, p):
return self.x0 + self.gamma*tan(pi*(p - S.Half))
def Cauchy(name, x0, gamma):
r"""
Create a continuous random variable with a Cauchy distribution.
The density of the Cauchy distribution is given by
.. math::
f(x) := \frac{1}{\pi \gamma [1 + {(\frac{x-x_0}{\gamma})}^2]}
Parameters
==========
x0 : Real number, the location
gamma : Real number, `\gamma > 0`, a scale
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Cauchy, density
>>> from sympy import Symbol
>>> x0 = Symbol("x0")
>>> gamma = Symbol("gamma", positive=True)
>>> z = Symbol("z")
>>> X = Cauchy("x", x0, gamma)
>>> density(X)(z)
1/(pi*gamma*(1 + (-x0 + z)**2/gamma**2))
References
==========
.. [1] https://en.wikipedia.org/wiki/Cauchy_distribution
.. [2] http://mathworld.wolfram.com/CauchyDistribution.html
"""
return rv(name, CauchyDistribution, (x0, gamma))
#-------------------------------------------------------------------------------
# Chi distribution -------------------------------------------------------------
class ChiDistribution(SingleContinuousDistribution):
_argnames = ('k',)
@staticmethod
def check(k):
_value_check(k > 0, "Number of degrees of freedom (k) must be positive.")
_value_check(k.is_integer, "Number of degrees of freedom (k) must be an integer.")
set = Interval(0, oo)
def pdf(self, x):
return 2**(1 - self.k/2)*x**(self.k - 1)*exp(-x**2/2)/gamma(self.k/2)
def _characteristic_function(self, t):
k = self.k
part_1 = hyper((k/2,), (S.Half,), -t**2/2)
part_2 = I*t*sqrt(2)*gamma((k+1)/2)/gamma(k/2)
part_3 = hyper(((k+1)/2,), (Rational(3, 2),), -t**2/2)
return part_1 + part_2*part_3
def _moment_generating_function(self, t):
k = self.k
part_1 = hyper((k / 2,), (S.Half,), t ** 2 / 2)
part_2 = t * sqrt(2) * gamma((k + 1) / 2) / gamma(k / 2)
part_3 = hyper(((k + 1) / 2,), (S(3) / 2,), t ** 2 / 2)
return part_1 + part_2 * part_3
def Chi(name, k):
r"""
Create a continuous random variable with a Chi distribution.
The density of the Chi distribution is given by
.. math::
f(x) := \frac{2^{1-k/2}x^{k-1}e^{-x^2/2}}{\Gamma(k/2)}
with :math:`x \geq 0`.
Parameters
==========
k : Positive integer, The number of degrees of freedom
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Chi, density, E
>>> from sympy import Symbol, simplify
>>> k = Symbol("k", integer=True)
>>> z = Symbol("z")
>>> X = Chi("x", k)
>>> density(X)(z)
2**(1 - k/2)*z**(k - 1)*exp(-z**2/2)/gamma(k/2)
>>> simplify(E(X))
sqrt(2)*gamma(k/2 + 1/2)/gamma(k/2)
References
==========
.. [1] https://en.wikipedia.org/wiki/Chi_distribution
.. [2] http://mathworld.wolfram.com/ChiDistribution.html
"""
return rv(name, ChiDistribution, (k,))
#-------------------------------------------------------------------------------
# Non-central Chi distribution -------------------------------------------------
class ChiNoncentralDistribution(SingleContinuousDistribution):
_argnames = ('k', 'l')
@staticmethod
def check(k, l):
_value_check(k > 0, "Number of degrees of freedom (k) must be positive.")
_value_check(k.is_integer, "Number of degrees of freedom (k) must be an integer.")
_value_check(l > 0, "Shift parameter Lambda must be positive.")
set = Interval(0, oo)
def pdf(self, x):
k, l = self.k, self.l
return exp(-(x**2+l**2)/2)*x**k*l / (l*x)**(k/2) * besseli(k/2-1, l*x)
def ChiNoncentral(name, k, l):
r"""
Create a continuous random variable with a non-central Chi distribution.
The density of the non-central Chi distribution is given by
.. math::
f(x) := \frac{e^{-(x^2+\lambda^2)/2} x^k\lambda}
{(\lambda x)^{k/2}} I_{k/2-1}(\lambda x)
with `x \geq 0`. Here, `I_\nu (x)` is the
:ref:`modified Bessel function of the first kind <besseli>`.
Parameters
==========
k : A positive Integer, `k > 0`, the number of degrees of freedom
lambda : Real number, `\lambda > 0`, Shift parameter
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import ChiNoncentral, density
>>> from sympy import Symbol
>>> k = Symbol("k", integer=True)
>>> l = Symbol("l")
>>> z = Symbol("z")
>>> X = ChiNoncentral("x", k, l)
>>> density(X)(z)
l*z**k*(l*z)**(-k/2)*exp(-l**2/2 - z**2/2)*besseli(k/2 - 1, l*z)
References
==========
.. [1] https://en.wikipedia.org/wiki/Noncentral_chi_distribution
"""
return rv(name, ChiNoncentralDistribution, (k, l))
#-------------------------------------------------------------------------------
# Chi squared distribution -----------------------------------------------------
class ChiSquaredDistribution(SingleContinuousDistribution):
_argnames = ('k',)
@staticmethod
def check(k):
_value_check(k > 0, "Number of degrees of freedom (k) must be positive.")
_value_check(k.is_integer, "Number of degrees of freedom (k) must be an integer.")
set = Interval(0, oo)
def pdf(self, x):
k = self.k
return 1/(2**(k/2)*gamma(k/2))*x**(k/2 - 1)*exp(-x/2)
def _cdf(self, x):
k = self.k
return Piecewise(
(S.One/gamma(k/2)*lowergamma(k/2, x/2), x >= 0),
(0, True)
)
def _characteristic_function(self, t):
return (1 - 2*I*t)**(-self.k/2)
def _moment_generating_function(self, t):
return (1 - 2*t)**(-self.k/2)
def ChiSquared(name, k):
r"""
Create a continuous random variable with a Chi-squared distribution.
The density of the Chi-squared distribution is given by
.. math::
f(x) := \frac{1}{2^{\frac{k}{2}}\Gamma\left(\frac{k}{2}\right)}
x^{\frac{k}{2}-1} e^{-\frac{x}{2}}
with :math:`x \geq 0`.
Parameters
==========
k : Positive integer, The number of degrees of freedom
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import ChiSquared, density, E, variance, moment
>>> from sympy import Symbol
>>> k = Symbol("k", integer=True, positive=True)
>>> z = Symbol("z")
>>> X = ChiSquared("x", k)
>>> density(X)(z)
2**(-k/2)*z**(k/2 - 1)*exp(-z/2)/gamma(k/2)
>>> E(X)
k
>>> variance(X)
2*k
>>> moment(X, 3)
k**3 + 6*k**2 + 8*k
References
==========
.. [1] https://en.wikipedia.org/wiki/Chi_squared_distribution
.. [2] http://mathworld.wolfram.com/Chi-SquaredDistribution.html
"""
return rv(name, ChiSquaredDistribution, (k, ))
#-------------------------------------------------------------------------------
# Dagum distribution -----------------------------------------------------------
class DagumDistribution(SingleContinuousDistribution):
_argnames = ('p', 'a', 'b')
set = Interval(0, oo)
@staticmethod
def check(p, a, b):
_value_check(p > 0, "Shape parameter p must be positive.")
_value_check(a > 0, "Shape parameter a must be positive.")
_value_check(b > 0, "Scale parameter b must be positive.")
def pdf(self, x):
p, a, b = self.p, self.a, self.b
return a*p/x*((x/b)**(a*p)/(((x/b)**a + 1)**(p + 1)))
def _cdf(self, x):
p, a, b = self.p, self.a, self.b
return Piecewise(((S.One + (S(x)/b)**-a)**-p, x>=0),
(S.Zero, True))
def Dagum(name, p, a, b):
r"""
Create a continuous random variable with a Dagum distribution.
The density of the Dagum distribution is given by
.. math::
f(x) := \frac{a p}{x} \left( \frac{\left(\tfrac{x}{b}\right)^{a p}}
{\left(\left(\tfrac{x}{b}\right)^a + 1 \right)^{p+1}} \right)
with :math:`x > 0`.
Parameters
==========
p : Real number, `p > 0`, a shape
a : Real number, `a > 0`, a shape
b : Real number, `b > 0`, a scale
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Dagum, density, cdf
>>> from sympy import Symbol
>>> p = Symbol("p", positive=True)
>>> a = Symbol("a", positive=True)
>>> b = Symbol("b", positive=True)
>>> z = Symbol("z")
>>> X = Dagum("x", p, a, b)
>>> density(X)(z)
a*p*(z/b)**(a*p)*((z/b)**a + 1)**(-p - 1)/z
>>> cdf(X)(z)
Piecewise(((1 + (z/b)**(-a))**(-p), z >= 0), (0, True))
References
==========
.. [1] https://en.wikipedia.org/wiki/Dagum_distribution
"""
return rv(name, DagumDistribution, (p, a, b))
#-------------------------------------------------------------------------------
# Erlang distribution ----------------------------------------------------------
def Erlang(name, k, l):
r"""
Create a continuous random variable with an Erlang distribution.
The density of the Erlang distribution is given by
.. math::
f(x) := \frac{\lambda^k x^{k-1} e^{-\lambda x}}{(k-1)!}
with :math:`x \in [0, \infty)`.
Parameters
==========
k : Positive integer
l : Real number, `\lambda > 0`, the rate
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Erlang, density, cdf, E, variance
>>> from sympy import Symbol, simplify, pprint
>>> k = Symbol("k", integer=True, positive=True)
>>> l = Symbol("l", positive=True)
>>> z = Symbol("z")
>>> X = Erlang("x", k, l)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
k k - 1 -l*z
l *z *e
---------------
Gamma(k)
>>> C = cdf(X)(z)
>>> pprint(C, use_unicode=False)
/lowergamma(k, l*z)
|------------------ for z > 0
< Gamma(k)
|
\ 0 otherwise
>>> E(X)
k/l
>>> simplify(variance(X))
k/l**2
References
==========
.. [1] https://en.wikipedia.org/wiki/Erlang_distribution
.. [2] http://mathworld.wolfram.com/ErlangDistribution.html
"""
return rv(name, GammaDistribution, (k, S.One/l))
# -------------------------------------------------------------------------------
# ExGaussian distribution -----------------------------------------------------
class ExGaussianDistribution(SingleContinuousDistribution):
_argnames = ('mean', 'std', 'rate')
set = Interval(-oo, oo)
@staticmethod
def check(mean, std, rate):
_value_check(
std > 0, "Standard deviation of ExGaussian must be positive.")
_value_check(rate > 0, "Rate of ExGaussian must be positive.")
def pdf(self, x):
mean, std, rate = self.mean, self.std, self.rate
term1 = rate/2
term2 = exp(rate * (2 * mean + rate * std**2 - 2*x)/2)
term3 = erfc((mean + rate*std**2 - x)/(sqrt(2)*std))
return term1*term2*term3
def _cdf(self, x):
from sympy.stats import cdf
mean, std, rate = self.mean, self.std, self.rate
u = rate*(x - mean)
v = rate*std
GaussianCDF1 = cdf(Normal('x', 0, v))(u)
GaussianCDF2 = cdf(Normal('x', v**2, v))(u)
return GaussianCDF1 - exp(-u + (v**2/2) + log(GaussianCDF2))
def _characteristic_function(self, t):
mean, std, rate = self.mean, self.std, self.rate
term1 = (1 - I*t/rate)**(-1)
term2 = exp(I*mean*t - std**2*t**2/2)
return term1 * term2
def _moment_generating_function(self, t):
mean, std, rate = self.mean, self.std, self.rate
term1 = (1 - t/rate)**(-1)
term2 = exp(mean*t + std**2*t**2/2)
return term1*term2
def ExGaussian(name, mean, std, rate):
r"""
Create a continuous random variable with an Exponentially modified
Gaussian (EMG) distribution.
The density of the exponentially modified Gaussian distribution is given by
.. math::
f(x) := \frac{\lambda}{2}e^{\frac{\lambda}{2}(2\mu+\lambda\sigma^2-2x)}
\text{erfc}(\frac{\mu + \lambda\sigma^2 - x}{\sqrt{2}\sigma})
with `x \in (-\infty, \infty)`. Note that the expected value is `\mu + 1/\lambda`.
Parameters
==========
mean : A Real number, the mean of the Gaussian component
std : A positive Real number,
:math:`\sigma > 0`, the standard deviation of the Gaussian component
rate : A positive Real number,
:math:`\lambda > 0`, the rate of the Exponential component
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import ExGaussian, density, cdf, E
>>> from sympy.stats import variance, skewness
>>> from sympy import Symbol, pprint, simplify
>>> mean = Symbol("mu")
>>> std = Symbol("sigma", positive=True)
>>> rate = Symbol("lamda", positive=True)
>>> z = Symbol("z")
>>> X = ExGaussian("x", mean, std, rate)
>>> pprint(density(X)(z), use_unicode=False)
/ 2 \
lamda*\lamda*sigma + 2*mu - 2*z/
--------------------------------- / ___ / 2 \\
2 |\/ 2 *\lamda*sigma + mu - z/|
lamda*e *erfc|-----------------------------|
\ 2*sigma /
----------------------------------------------------------------------------
2
>>> cdf(X)(z)
-(erf(sqrt(2)*(-lamda**2*sigma**2 + lamda*(-mu + z))/(2*lamda*sigma))/2 + 1/2)*exp(lamda**2*sigma**2/2 - lamda*(-mu + z)) + erf(sqrt(2)*(-mu + z)/(2*sigma))/2 + 1/2
>>> E(X)
(lamda*mu + 1)/lamda
>>> simplify(variance(X))
sigma**2 + lamda**(-2)
>>> simplify(skewness(X))
2/(lamda**2*sigma**2 + 1)**(3/2)
References
==========
.. [1] https://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution
"""
return rv(name, ExGaussianDistribution, (mean, std, rate))
#-------------------------------------------------------------------------------
# Exponential distribution -----------------------------------------------------
class ExponentialDistribution(SingleContinuousDistribution):
_argnames = ('rate',)
set = Interval(0, oo)
@staticmethod
def check(rate):
_value_check(rate > 0, "Rate must be positive.")
def pdf(self, x):
return self.rate * exp(-self.rate*x)
def _cdf(self, x):
return Piecewise(
(S.One - exp(-self.rate*x), x >= 0),
(0, True),
)
def _characteristic_function(self, t):
rate = self.rate
return rate / (rate - I*t)
def _moment_generating_function(self, t):
rate = self.rate
return rate / (rate - t)
def _quantile(self, p):
return -log(1-p)/self.rate
def Exponential(name, rate):
r"""
Create a continuous random variable with an Exponential distribution.
The density of the exponential distribution is given by
.. math::
f(x) := \lambda \exp(-\lambda x)
with `x > 0`. Note that the expected value is `1/\lambda`.
Parameters
==========
rate : A positive Real number, `\lambda > 0`, the rate (or inverse scale/inverse mean)
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Exponential, density, cdf, E
>>> from sympy.stats import variance, std, skewness, quantile
>>> from sympy import Symbol
>>> l = Symbol("lambda", positive=True)
>>> z = Symbol("z")
>>> p = Symbol("p")
>>> X = Exponential("x", l)
>>> density(X)(z)
lambda*exp(-lambda*z)
>>> cdf(X)(z)
Piecewise((1 - exp(-lambda*z), z >= 0), (0, True))
>>> quantile(X)(p)
-log(1 - p)/lambda
>>> E(X)
1/lambda
>>> variance(X)
lambda**(-2)
>>> skewness(X)
2
>>> X = Exponential('x', 10)
>>> density(X)(z)
10*exp(-10*z)
>>> E(X)
1/10
>>> std(X)
1/10
References
==========
.. [1] https://en.wikipedia.org/wiki/Exponential_distribution
.. [2] http://mathworld.wolfram.com/ExponentialDistribution.html
"""
return rv(name, ExponentialDistribution, (rate, ))
# -------------------------------------------------------------------------------
# Exponential Power distribution -----------------------------------------------------
class ExponentialPowerDistribution(SingleContinuousDistribution):
_argnames = ('mu', 'alpha', 'beta')
set = Interval(-oo, oo)
@staticmethod
def check(mu, alpha, beta):
_value_check(alpha > 0, "Scale parameter alpha must be positive.")
_value_check(beta > 0, "Shape parameter beta must be positive.")
def pdf(self, x):
mu, alpha, beta = self.mu, self.alpha, self.beta
num = beta*exp(-(Abs(x - mu)/alpha)**beta)
den = 2*alpha*gamma(1/beta)
return num/den
def _cdf(self, x):
mu, alpha, beta = self.mu, self.alpha, self.beta
num = lowergamma(1/beta, (Abs(x - mu) / alpha)**beta)
den = 2*gamma(1/beta)
return sign(x - mu)*num/den + S.Half
def ExponentialPower(name, mu, alpha, beta):
r"""
Create a Continuous Random Variable with Exponential Power distribution.
This distribution is also known as the Generalized Normal
distribution, version 1.
The density of the Exponential Power distribution is given by
.. math::
f(x) := \frac{\beta}{2\alpha\Gamma(\frac{1}{\beta})}
e^{{-(\frac{|x - \mu|}{\alpha})^{\beta}}}
with :math:`x \in [ - \infty, \infty ]`.
Parameters
==========
mu : Real number, the location
alpha : Real number, `\alpha > 0`, a scale
beta : Real number, `\beta > 0`, a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import ExponentialPower, density, cdf
>>> from sympy import Symbol, pprint
>>> z = Symbol("z")
>>> mu = Symbol("mu")
>>> alpha = Symbol("alpha", positive=True)
>>> beta = Symbol("beta", positive=True)
>>> X = ExponentialPower("x", mu, alpha, beta)
>>> pprint(density(X)(z), use_unicode=False)
beta
/|mu - z|\
-|--------|
\ alpha /
beta*e
---------------------
/ 1 \
2*alpha*Gamma|----|
\beta/
>>> cdf(X)(z)
1/2 + lowergamma(1/beta, (Abs(mu - z)/alpha)**beta)*sign(-mu + z)/(2*gamma(1/beta))
References
==========
.. [1] https://reference.wolfram.com/language/ref/ExponentialPowerDistribution.html
.. [2] https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
"""
return rv(name, ExponentialPowerDistribution, (mu, alpha, beta))
#-------------------------------------------------------------------------------
# F distribution ---------------------------------------------------------------
class FDistributionDistribution(SingleContinuousDistribution):
_argnames = ('d1', 'd2')
set = Interval(0, oo)
@staticmethod
def check(d1, d2):
_value_check((d1 > 0, d1.is_integer),
"Degrees of freedom d1 must be positive integer.")
_value_check((d2 > 0, d2.is_integer),
"Degrees of freedom d2 must be positive integer.")
def pdf(self, x):
d1, d2 = self.d1, self.d2
return (sqrt((d1*x)**d1*d2**d2 / (d1*x+d2)**(d1+d2))
/ (x * beta_fn(d1/2, d2/2)))
def _moment_generating_function(self, t):
raise NotImplementedError('The moment generating function for the '
'F-distribution does not exist.')
def FDistribution(name, d1, d2):
r"""
Create a continuous random variable with a F distribution.
The density of the F distribution is given by
.. math::
f(x) := \frac{\sqrt{\frac{(d_1 x)^{d_1} d_2^{d_2}}
{(d_1 x + d_2)^{d_1 + d_2}}}}
{x \mathrm{B} \left(\frac{d_1}{2}, \frac{d_2}{2}\right)}
with :math:`x > 0`.
Parameters
==========
d1 : `d_1 > 0`, where d_1 is the degrees of freedom (n_1 - 1)
d2 : `d_2 > 0`, where d_2 is the degrees of freedom (n_2 - 1)
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import FDistribution, density
>>> from sympy import Symbol, pprint
>>> d1 = Symbol("d1", positive=True)
>>> d2 = Symbol("d2", positive=True)
>>> z = Symbol("z")
>>> X = FDistribution("x", d1, d2)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
d2
-- ______________________________
2 / d1 -d1 - d2
d2 *\/ (d1*z) *(d1*z + d2)
--------------------------------------
/d1 d2\
z*B|--, --|
\2 2 /
References
==========
.. [1] https://en.wikipedia.org/wiki/F-distribution
.. [2] http://mathworld.wolfram.com/F-Distribution.html
"""
return rv(name, FDistributionDistribution, (d1, d2))
#-------------------------------------------------------------------------------
# Fisher Z distribution --------------------------------------------------------
class FisherZDistribution(SingleContinuousDistribution):
_argnames = ('d1', 'd2')
set = Interval(-oo, oo)
@staticmethod
def check(d1, d2):
_value_check(d1 > 0, "Degree of freedom d1 must be positive.")
_value_check(d2 > 0, "Degree of freedom d2 must be positive.")
def pdf(self, x):
d1, d2 = self.d1, self.d2
return (2*d1**(d1/2)*d2**(d2/2) / beta_fn(d1/2, d2/2) *
exp(d1*x) / (d1*exp(2*x)+d2)**((d1+d2)/2))
def FisherZ(name, d1, d2):
r"""
Create a Continuous Random Variable with a Fisher's Z distribution.
The density of the Fisher's Z distribution is given by
.. math::
f(x) := \frac{2d_1^{d_1/2} d_2^{d_2/2}} {\mathrm{B}(d_1/2, d_2/2)}
\frac{e^{d_1z}}{\left(d_1e^{2z}+d_2\right)^{\left(d_1+d_2\right)/2}}
.. TODO - What is the difference between these degrees of freedom?
Parameters
==========
d1 : `d_1 > 0`, degree of freedom
d2 : `d_2 > 0`, degree of freedom
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import FisherZ, density
>>> from sympy import Symbol, pprint
>>> d1 = Symbol("d1", positive=True)
>>> d2 = Symbol("d2", positive=True)
>>> z = Symbol("z")
>>> X = FisherZ("x", d1, d2)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
d1 d2
d1 d2 - -- - --
-- -- 2 2
2 2 / 2*z \ d1*z
2*d1 *d2 *\d1*e + d2/ *e
-----------------------------------------
/d1 d2\
B|--, --|
\2 2 /
References
==========
.. [1] https://en.wikipedia.org/wiki/Fisher%27s_z-distribution
.. [2] http://mathworld.wolfram.com/Fishersz-Distribution.html
"""
return rv(name, FisherZDistribution, (d1, d2))
#-------------------------------------------------------------------------------
# Frechet distribution ---------------------------------------------------------
class FrechetDistribution(SingleContinuousDistribution):
_argnames = ('a', 's', 'm')
set = Interval(0, oo)
@staticmethod
def check(a, s, m):
_value_check(a > 0, "Shape parameter alpha must be positive.")
_value_check(s > 0, "Scale parameter s must be positive.")
def __new__(cls, a, s=1, m=0):
a, s, m = list(map(sympify, (a, s, m)))
return Basic.__new__(cls, a, s, m)
def pdf(self, x):
a, s, m = self.a, self.s, self.m
return a/s * ((x-m)/s)**(-1-a) * exp(-((x-m)/s)**(-a))
def _cdf(self, x):
a, s, m = self.a, self.s, self.m
return Piecewise((exp(-((x-m)/s)**(-a)), x >= m),
(S.Zero, True))
def Frechet(name, a, s=1, m=0):
r"""
Create a continuous random variable with a Frechet distribution.
The density of the Frechet distribution is given by
.. math::
f(x) := \frac{\alpha}{s} \left(\frac{x-m}{s}\right)^{-1-\alpha}
e^{-(\frac{x-m}{s})^{-\alpha}}
with :math:`x \geq m`.
Parameters
==========
a : Real number, :math:`a \in \left(0, \infty\right)` the shape
s : Real number, :math:`s \in \left(0, \infty\right)` the scale
m : Real number, :math:`m \in \left(-\infty, \infty\right)` the minimum
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Frechet, density, cdf
>>> from sympy import Symbol
>>> a = Symbol("a", positive=True)
>>> s = Symbol("s", positive=True)
>>> m = Symbol("m", real=True)
>>> z = Symbol("z")
>>> X = Frechet("x", a, s, m)
>>> density(X)(z)
a*((-m + z)/s)**(-a - 1)*exp(-((-m + z)/s)**(-a))/s
>>> cdf(X)(z)
Piecewise((exp(-((-m + z)/s)**(-a)), m <= z), (0, True))
References
==========
.. [1] https://en.wikipedia.org/wiki/Fr%C3%A9chet_distribution
"""
return rv(name, FrechetDistribution, (a, s, m))
#-------------------------------------------------------------------------------
# Gamma distribution -----------------------------------------------------------
class GammaDistribution(SingleContinuousDistribution):
_argnames = ('k', 'theta')
set = Interval(0, oo)
@staticmethod
def check(k, theta):
_value_check(k > 0, "k must be positive")
_value_check(theta > 0, "Theta must be positive")
def pdf(self, x):
k, theta = self.k, self.theta
return x**(k - 1) * exp(-x/theta) / (gamma(k)*theta**k)
def _cdf(self, x):
k, theta = self.k, self.theta
return Piecewise(
(lowergamma(k, S(x)/theta)/gamma(k), x > 0),
(S.Zero, True))
def _characteristic_function(self, t):
return (1 - self.theta*I*t)**(-self.k)
def _moment_generating_function(self, t):
return (1- self.theta*t)**(-self.k)
def Gamma(name, k, theta):
r"""
Create a continuous random variable with a Gamma distribution.
The density of the Gamma distribution is given by
.. math::
f(x) := \frac{1}{\Gamma(k) \theta^k} x^{k - 1} e^{-\frac{x}{\theta}}
with :math:`x \in [0, \infty)`.
Parameters
==========
k : Real number, `k > 0`, a shape
theta : Real number, `\theta > 0`, a scale
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Gamma, density, cdf, E, variance
>>> from sympy import Symbol, pprint, simplify
>>> k = Symbol("k", positive=True)
>>> theta = Symbol("theta", positive=True)
>>> z = Symbol("z")
>>> X = Gamma("x", k, theta)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
-z
-----
-k k - 1 theta
theta *z *e
---------------------
Gamma(k)
>>> C = cdf(X, meijerg=True)(z)
>>> pprint(C, use_unicode=False)
/ / z \
|k*lowergamma|k, -----|
| \ theta/
<---------------------- for z >= 0
| Gamma(k + 1)
|
\ 0 otherwise
>>> E(X)
k*theta
>>> V = simplify(variance(X))
>>> pprint(V, use_unicode=False)
2
k*theta
References
==========
.. [1] https://en.wikipedia.org/wiki/Gamma_distribution
.. [2] http://mathworld.wolfram.com/GammaDistribution.html
"""
return rv(name, GammaDistribution, (k, theta))
#-------------------------------------------------------------------------------
# Inverse Gamma distribution ---------------------------------------------------
class GammaInverseDistribution(SingleContinuousDistribution):
_argnames = ('a', 'b')
set = Interval(0, oo)
@staticmethod
def check(a, b):
_value_check(a > 0, "alpha must be positive")
_value_check(b > 0, "beta must be positive")
def pdf(self, x):
a, b = self.a, self.b
return b**a/gamma(a) * x**(-a-1) * exp(-b/x)
def _cdf(self, x):
a, b = self.a, self.b
return Piecewise((uppergamma(a,b/x)/gamma(a), x > 0),
(S.Zero, True))
def _characteristic_function(self, t):
a, b = self.a, self.b
return 2 * (-I*b*t)**(a/2) * besselk(a, sqrt(-4*I*b*t)) / gamma(a)
def _moment_generating_function(self, t):
raise NotImplementedError('The moment generating function for the '
'gamma inverse distribution does not exist.')
def GammaInverse(name, a, b):
r"""
Create a continuous random variable with an inverse Gamma distribution.
The density of the inverse Gamma distribution is given by
.. math::
f(x) := \frac{\beta^\alpha}{\Gamma(\alpha)} x^{-\alpha - 1}
\exp\left(\frac{-\beta}{x}\right)
with :math:`x > 0`.
Parameters
==========
a : Real number, `a > 0` a shape
b : Real number, `b > 0` a scale
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import GammaInverse, density, cdf
>>> from sympy import Symbol, pprint
>>> a = Symbol("a", positive=True)
>>> b = Symbol("b", positive=True)
>>> z = Symbol("z")
>>> X = GammaInverse("x", a, b)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
-b
---
a -a - 1 z
b *z *e
---------------
Gamma(a)
>>> cdf(X)(z)
Piecewise((uppergamma(a, b/z)/gamma(a), z > 0), (0, True))
References
==========
.. [1] https://en.wikipedia.org/wiki/Inverse-gamma_distribution
"""
return rv(name, GammaInverseDistribution, (a, b))
#-------------------------------------------------------------------------------
# Gumbel distribution (Maximum and Minimum) --------------------------------------------------------
class GumbelDistribution(SingleContinuousDistribution):
_argnames = ('beta', 'mu', 'minimum')
set = Interval(-oo, oo)
@staticmethod
def check(beta, mu, minimum):
_value_check(beta > 0, "Scale parameter beta must be positive.")
def pdf(self, x):
beta, mu = self.beta, self.mu
z = (x - mu)/beta
f_max = (1/beta)*exp(-z - exp(-z))
f_min = (1/beta)*exp(z - exp(z))
return Piecewise((f_min, self.minimum), (f_max, not self.minimum))
def _cdf(self, x):
beta, mu = self.beta, self.mu
z = (x - mu)/beta
F_max = exp(-exp(-z))
F_min = 1 - exp(-exp(z))
return Piecewise((F_min, self.minimum), (F_max, not self.minimum))
def _characteristic_function(self, t):
cf_max = gamma(1 - I*self.beta*t) * exp(I*self.mu*t)
cf_min = gamma(1 + I*self.beta*t) * exp(I*self.mu*t)
return Piecewise((cf_min, self.minimum), (cf_max, not self.minimum))
def _moment_generating_function(self, t):
mgf_max = gamma(1 - self.beta*t) * exp(self.mu*t)
mgf_min = gamma(1 + self.beta*t) * exp(self.mu*t)
return Piecewise((mgf_min, self.minimum), (mgf_max, not self.minimum))
def Gumbel(name, beta, mu, minimum=False):
r"""
Create a Continuous Random Variable with Gumbel distribution.
The density of the Gumbel distribution is given by
For Maximum
.. math::
f(x) := \dfrac{1}{\beta} \exp \left( -\dfrac{x-\mu}{\beta}
- \exp \left( -\dfrac{x - \mu}{\beta} \right) \right)
with :math:`x \in [ - \infty, \infty ]`.
For Minimum
.. math::
f(x) := \frac{e^{- e^{\frac{- \mu + x}{\beta}} + \frac{- \mu + x}{\beta}}}{\beta}
with :math:`x \in [ - \infty, \infty ]`.
Parameters
==========
mu : Real number, the location
beta : Real number, `\beta > 0`, a scale
minimum : Boolean, False by default; set to True to get the distribution of the minimum
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Gumbel, density, cdf
>>> from sympy import Symbol
>>> x = Symbol("x")
>>> mu = Symbol("mu")
>>> beta = Symbol("beta", positive=True)
>>> X = Gumbel("x", beta, mu)
>>> density(X)(x)
exp(-exp(-(-mu + x)/beta) - (-mu + x)/beta)/beta
>>> cdf(X)(x)
exp(-exp(-(-mu + x)/beta))
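An illustrative sketch: with ``minimum=True`` the same constructor gives
the Gumbel distribution of the minimum instead of the maximum.
>>> X_min = Gumbel("x_min", beta, mu, minimum=True)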
References
==========
.. [1] http://mathworld.wolfram.com/GumbelDistribution.html
.. [2] https://en.wikipedia.org/wiki/Gumbel_distribution
.. [3] http://www.mathwave.com/help/easyfit/html/analyses/distributions/gumbel_max.html
.. [4] http://www.mathwave.com/help/easyfit/html/analyses/distributions/gumbel_min.html
"""
return rv(name, GumbelDistribution, (beta, mu, minimum))
#-------------------------------------------------------------------------------
# Gompertz distribution --------------------------------------------------------
class GompertzDistribution(SingleContinuousDistribution):
_argnames = ('b', 'eta')
set = Interval(0, oo)
@staticmethod
def check(b, eta):
_value_check(b > 0, "b must be positive")
_value_check(eta > 0, "eta must be positive")
def pdf(self, x):
eta, b = self.eta, self.b
return b*eta*exp(b*x)*exp(eta)*exp(-eta*exp(b*x))
def _cdf(self, x):
eta, b = self.eta, self.b
return 1 - exp(eta)*exp(-eta*exp(b*x))
def _moment_generating_function(self, t):
eta, b = self.eta, self.b
return eta * exp(eta) * expint(t/b, eta)
def Gompertz(name, b, eta):
r"""
Create a Continuous Random Variable with Gompertz distribution.
The density of the Gompertz distribution is given by
.. math::
f(x) := b \eta e^{b x} e^{\eta} \exp \left(-\eta e^{bx} \right)
with :math:`x \in [0, \infty)`.
Parameters
==========
b : Real number, `b > 0`, a scale
eta : Real number, `\eta > 0`, a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Gompertz, density
>>> from sympy import Symbol
>>> b = Symbol("b", positive=True)
>>> eta = Symbol("eta", positive=True)
>>> z = Symbol("z")
>>> X = Gompertz("x", b, eta)
>>> density(X)(z)
b*eta*exp(eta)*exp(b*z)*exp(-eta*exp(b*z))
References
==========
.. [1] https://en.wikipedia.org/wiki/Gompertz_distribution
"""
return rv(name, GompertzDistribution, (b, eta))
#-------------------------------------------------------------------------------
# Kumaraswamy distribution -----------------------------------------------------
class KumaraswamyDistribution(SingleContinuousDistribution):
_argnames = ('a', 'b')
set = Interval(0, 1)  # the support of the Kumaraswamy distribution is [0, 1]
@staticmethod
def check(a, b):
_value_check(a > 0, "a must be positive")
_value_check(b > 0, "b must be positive")
def pdf(self, x):
a, b = self.a, self.b
return a * b * x**(a-1) * (1-x**a)**(b-1)
def _cdf(self, x):
a, b = self.a, self.b
return Piecewise(
(S.Zero, x < S.Zero),
(1 - (1 - x**a)**b, x <= S.One),
(S.One, True))
def Kumaraswamy(name, a, b):
r"""
Create a Continuous Random Variable with a Kumaraswamy distribution.
The density of the Kumaraswamy distribution is given by
.. math::
f(x) := a b x^{a-1} (1-x^a)^{b-1}
with :math:`x \in [0,1]`.
Parameters
==========
a : Real number, `a > 0` a shape
b : Real number, `b > 0` a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Kumaraswamy, density, cdf
>>> from sympy import Symbol, pprint
>>> a = Symbol("a", positive=True)
>>> b = Symbol("b", positive=True)
>>> z = Symbol("z")
>>> X = Kumaraswamy("x", a, b)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
b - 1
a - 1 / a\
a*b*z *\1 - z /
>>> cdf(X)(z)
Piecewise((0, z < 0), (1 - (1 - z**a)**b, z <= 1), (1, True))
References
==========
.. [1] https://en.wikipedia.org/wiki/Kumaraswamy_distribution
"""
return rv(name, KumaraswamyDistribution, (a, b))
#-------------------------------------------------------------------------------
# Laplace distribution ---------------------------------------------------------
class LaplaceDistribution(SingleContinuousDistribution):
_argnames = ('mu', 'b')
set = Interval(-oo, oo)
@staticmethod
def check(mu, b):
_value_check(b > 0, "Scale parameter b must be positive.")
_value_check(mu.is_real, "Location parameter mu should be real")
def pdf(self, x):
mu, b = self.mu, self.b
return 1/(2*b)*exp(-Abs(x - mu)/b)
def _cdf(self, x):
mu, b = self.mu, self.b
return Piecewise(
(S.Half*exp((x - mu)/b), x < mu),
(S.One - S.Half*exp(-(x - mu)/b), x >= mu)
)
def _characteristic_function(self, t):
return exp(self.mu*I*t) / (1 + self.b**2*t**2)
def _moment_generating_function(self, t):
return exp(self.mu*t) / (1 - self.b**2*t**2)
def Laplace(name, mu, b):
r"""
Create a continuous random variable with a Laplace distribution.
The density of the Laplace distribution is given by
.. math::
f(x) := \frac{1}{2 b} \exp \left(-\frac{|x-\mu|}b \right)
Parameters
==========
mu : Real number or a list/matrix, the location (mean) or the
location vector
b : Real number or a positive definite matrix, representing a scale
or the covariance matrix.
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Laplace, density, cdf
>>> from sympy import Symbol, pprint
>>> mu = Symbol("mu")
>>> b = Symbol("b", positive=True)
>>> z = Symbol("z")
>>> X = Laplace("x", mu, b)
>>> density(X)(z)
exp(-Abs(mu - z)/b)/(2*b)
>>> cdf(X)(z)
Piecewise((exp((-mu + z)/b)/2, mu > z), (1 - exp((mu - z)/b)/2, True))
>>> L = Laplace('L', [1, 2], [[1, 0], [0, 1]])
>>> pprint(density(L)(1, 2), use_unicode=False)
5 / ____\
e *besselk\0, \/ 35 /
---------------------
pi
References
==========
.. [1] https://en.wikipedia.org/wiki/Laplace_distribution
.. [2] http://mathworld.wolfram.com/LaplaceDistribution.html
"""
if isinstance(mu, (list, MatrixBase)) and\
isinstance(b, (list, MatrixBase)):
from sympy.stats.joint_rv_types import MultivariateLaplace
return MultivariateLaplace(name, mu, b)
return rv(name, LaplaceDistribution, (mu, b))
#-------------------------------------------------------------------------------
# Levy distribution ---------------------------------------------------------
class LevyDistribution(SingleContinuousDistribution):
_argnames = ('mu', 'c')
@property
def set(self):
return Interval(self.mu, oo)
@staticmethod
def check(mu, c):
_value_check(c > 0, "c (scale parameter) must be positive")
_value_check(mu.is_real, "mu (location parameter) must be real")
def pdf(self, x):
mu, c = self.mu, self.c
return sqrt(c/(2*pi))*exp(-c/(2*(x - mu)))/((x - mu)**(S.One + S.Half))
def _cdf(self, x):
mu, c = self.mu, self.c
return erfc(sqrt(c/(2*(x - mu))))
def _characteristic_function(self, t):
mu, c = self.mu, self.c
return exp(I * mu * t - sqrt(-2 * I * c * t))
def _moment_generating_function(self, t):
raise NotImplementedError('The moment generating function of Levy distribution does not exist.')
def Levy(name, mu, c):
r"""
Create a continuous random variable with a Levy distribution.
The density of the Levy distribution is given by
.. math::
f(x) := \sqrt{\frac{c}{2 \pi}} \frac{\exp\left(-\frac{c}{2 (x - \mu)}\right)}{(x - \mu)^{3/2}}
Parameters
==========
mu : Real number, the location parameter
c : Real number, `c > 0`, a scale parameter
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Levy, density, cdf
>>> from sympy import Symbol
>>> mu = Symbol("mu", real=True)
>>> c = Symbol("c", positive=True)
>>> z = Symbol("z")
>>> X = Levy("x", mu, c)
>>> density(X)(z)
sqrt(2)*sqrt(c)*exp(-c/(-2*mu + 2*z))/(2*sqrt(pi)*(-mu + z)**(3/2))
>>> cdf(X)(z)
erfc(sqrt(c)*sqrt(1/(-2*mu + 2*z)))
References
==========
.. [1] https://en.wikipedia.org/wiki/L%C3%A9vy_distribution
.. [2] http://mathworld.wolfram.com/LevyDistribution.html
"""
return rv(name, LevyDistribution, (mu, c))
#-------------------------------------------------------------------------------
# Logistic distribution --------------------------------------------------------
class LogisticDistribution(SingleContinuousDistribution):
_argnames = ('mu', 's')
set = Interval(-oo, oo)
@staticmethod
def check(mu, s):
_value_check(s > 0, "Scale parameter s must be positive.")
def pdf(self, x):
mu, s = self.mu, self.s
return exp(-(x - mu)/s)/(s*(1 + exp(-(x - mu)/s))**2)
def _cdf(self, x):
mu, s = self.mu, self.s
return S.One/(1 + exp(-(x - mu)/s))
def _characteristic_function(self, t):
return Piecewise((exp(I*t*self.mu) * pi*self.s*t / sinh(pi*self.s*t), Ne(t, 0)), (S.One, True))
def _moment_generating_function(self, t):
return exp(self.mu*t) * beta_fn(1 - self.s*t, 1 + self.s*t)
def _quantile(self, p):
return self.mu - self.s*log(-S.One + S.One/p)
def Logistic(name, mu, s):
r"""
Create a continuous random variable with a logistic distribution.
The density of the logistic distribution is given by
.. math::
f(x) := \frac{e^{-(x-\mu)/s}} {s\left(1+e^{-(x-\mu)/s}\right)^2}
Parameters
==========
mu : Real number, the location (mean)
s : Real number, `s > 0` a scale
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Logistic, density, cdf
>>> from sympy import Symbol
>>> mu = Symbol("mu", real=True)
>>> s = Symbol("s", positive=True)
>>> z = Symbol("z")
>>> X = Logistic("x", mu, s)
>>> density(X)(z)
exp((mu - z)/s)/(s*(exp((mu - z)/s) + 1)**2)
>>> cdf(X)(z)
1/(exp((mu - z)/s) + 1)
References
==========
.. [1] https://en.wikipedia.org/wiki/Logistic_distribution
.. [2] http://mathworld.wolfram.com/LogisticDistribution.html
"""
return rv(name, LogisticDistribution, (mu, s))
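# Illustrative sketch (not part of the docstring above): the logistic
# distribution has a closed-form quantile function, exposed via
# sympy.stats.quantile (used the same way as in the LogLogistic docstring).
#
#     >>> from sympy import Symbol
#     >>> from sympy.stats import Logistic, quantile
#     >>> mu, s, p = Symbol("mu", real=True), Symbol("s", positive=True), Symbol("p")
#     >>> quantile(Logistic("x", mu, s))(p)
#     # expected: mu - s*log(-1 + 1/p)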
#-------------------------------------------------------------------------------
# Log-logistic distribution --------------------------------------------------------
class LogLogisticDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'beta')
set = Interval(0, oo)
@staticmethod
def check(alpha, beta):
_value_check(alpha > 0, "Scale parameter Alpha must be positive.")
_value_check(beta > 0, "Shape parameter Beta must be positive.")
def pdf(self, x):
a, b = self.alpha, self.beta
return ((b/a)*(x/a)**(b - 1))/(1 + (x/a)**b)**2
def _cdf(self, x):
a, b = self.alpha, self.beta
return 1/(1 + (x/a)**(-b))
def _quantile(self, p):
a, b = self.alpha, self.beta
return a*((p/(1 - p))**(1/b))
def expectation(self, expr, var, **kwargs):
a, b = self.args
return Piecewise((S.NaN, b <= 1), (pi*a/(b*sin(pi/b)), True))
def LogLogistic(name, alpha, beta):
r"""
Create a continuous random variable with a log-logistic distribution.
The distribution is unimodal when `beta > 1`.
The density of the log-logistic distribution is given by
.. math::
f(x) := \frac{(\frac{\beta}{\alpha})(\frac{x}{\alpha})^{\beta - 1}}
{(1 + (\frac{x}{\alpha})^{\beta})^2}
Parameters
==========
alpha : Real number, `\alpha > 0`, scale parameter and median of distribution
beta : Real number, `\beta > 0` a shape parameter
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import LogLogistic, density, cdf, quantile
>>> from sympy import Symbol, pprint
>>> alpha = Symbol("alpha", real=True, positive=True)
>>> beta = Symbol("beta", real=True, positive=True)
>>> p = Symbol("p")
>>> z = Symbol("z", positive=True)
>>> X = LogLogistic("x", alpha, beta)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
beta - 1
/ z \
beta*|-----|
\alpha/
------------------------
2
/ beta \
|/ z \ |
alpha*||-----| + 1|
\\alpha/ /
>>> cdf(X)(z)
1/(1 + (z/alpha)**(-beta))
>>> quantile(X)(p)
alpha*(p/(1 - p))**(1/beta)
References
==========
.. [1] https://en.wikipedia.org/wiki/Log-logistic_distribution
"""
return rv(name, LogLogisticDistribution, (alpha, beta))
#-------------------------------------------------------------------------------
# Log Normal distribution ------------------------------------------------------
class LogNormalDistribution(SingleContinuousDistribution):
_argnames = ('mean', 'std')
set = Interval(0, oo)
@staticmethod
def check(mean, std):
_value_check(std > 0, "Parameter std must be positive.")
def pdf(self, x):
mean, std = self.mean, self.std
return exp(-(log(x) - mean)**2 / (2*std**2)) / (x*sqrt(2*pi)*std)
def _cdf(self, x):
mean, std = self.mean, self.std
return Piecewise(
(S.Half + S.Half*erf((log(x) - mean)/sqrt(2)/std), x > 0),
(S.Zero, True)
)
def _moment_generating_function(self, t):
raise NotImplementedError('Moment generating function of the log-normal distribution is not defined.')
def LogNormal(name, mean, std):
r"""
Create a continuous random variable with a log-normal distribution.
The density of the log-normal distribution is given by
.. math::
f(x) := \frac{1}{x\sqrt{2\pi\sigma^2}}
e^{-\frac{\left(\ln x-\mu\right)^2}{2\sigma^2}}
with :math:`x \geq 0`.
Parameters
==========
mu : Real number, the log-scale
sigma : Real number, :math:`\sigma^2 > 0` a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import LogNormal, density
>>> from sympy import Symbol, pprint
>>> mu = Symbol("mu", real=True)
>>> sigma = Symbol("sigma", positive=True)
>>> z = Symbol("z")
>>> X = LogNormal("x", mu, sigma)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
2
-(-mu + log(z))
-----------------
2
___ 2*sigma
\/ 2 *e
------------------------
____
2*\/ pi *sigma*z
>>> X = LogNormal('x', 0, 1) # Mean 0, standard deviation 1
>>> density(X)(z)
sqrt(2)*exp(-log(z)**2/2)/(2*sqrt(pi)*z)
References
==========
.. [1] https://en.wikipedia.org/wiki/Lognormal
.. [2] http://mathworld.wolfram.com/LogNormalDistribution.html
"""
return rv(name, LogNormalDistribution, (mean, std))
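# Illustrative sketch (not part of the docstring above): the standard
# log-normal CDF is expressed through the error function.
#
#     >>> from sympy import Symbol
#     >>> from sympy.stats import LogNormal, cdf
#     >>> z = Symbol("z", positive=True)
#     >>> cdf(LogNormal("x", 0, 1))(z)
#     # expected: erf(sqrt(2)*log(z)/2)/2 + 1/2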
#-------------------------------------------------------------------------------
# Lomax Distribution -----------------------------------------------------------
class LomaxDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'lamda',)
set = Interval(0, oo)
@staticmethod
def check(alpha, lamda):
_value_check(alpha.is_real, "Shape parameter should be real.")
_value_check(lamda.is_real, "Scale parameter should be real.")
_value_check(alpha.is_positive, "Shape parameter should be positive.")
_value_check(lamda.is_positive, "Scale parameter should be positive.")
def pdf(self, x):
        lamda, alpha = self.lamda, self.alpha
        return (alpha/lamda) * (S.One + x/lamda)**(-alpha - 1)
def Lomax(name, alpha, lamda):
r"""
Create a continuous random variable with a Lomax distribution.
The density of the Lomax distribution is given by
.. math::
f(x) := \frac{\alpha}{\lambda}\left[1+\frac{x}{\lambda}\right]^{-(\alpha+1)}
Parameters
==========
alpha : Real Number, `alpha > 0`
Shape parameter
lamda : Real Number, `lamda > 0`
Scale parameter
Examples
========
>>> from sympy.stats import Lomax, density, cdf, E
>>> from sympy import symbols
>>> a, l = symbols('a, l', positive=True)
>>> X = Lomax('X', a, l)
>>> x = symbols('x')
>>> density(X)(x)
a*(1 + x/l)**(-a - 1)/l
>>> cdf(X)(x)
Piecewise((1 - (1 + x/l)**(-a), x >= 0), (0, True))
>>> a = 2
>>> X = Lomax('X', a, l)
>>> E(X)
l
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Lomax_distribution
"""
return rv(name, LomaxDistribution, (alpha, lamda))
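# Illustrative sketch (not part of the docstring above): for shape a > 1 the
# Lomax mean is l/(a - 1), so with a = 3 it should reduce to l/2.
#
#     >>> from sympy import symbols
#     >>> from sympy.stats import Lomax, E
#     >>> l = symbols('l', positive=True)
#     >>> E(Lomax('X', 3, l))    # expected: l/2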
#-------------------------------------------------------------------------------
# Maxwell distribution ---------------------------------------------------------
class MaxwellDistribution(SingleContinuousDistribution):
_argnames = ('a',)
set = Interval(0, oo)
@staticmethod
def check(a):
_value_check(a > 0, "Parameter a must be positive.")
def pdf(self, x):
a = self.a
return sqrt(2/pi)*x**2*exp(-x**2/(2*a**2))/a**3
def _cdf(self, x):
a = self.a
return erf(sqrt(2)*x/(2*a)) - sqrt(2)*x*exp(-x**2/(2*a**2))/(sqrt(pi)*a)
def Maxwell(name, a):
r"""
Create a continuous random variable with a Maxwell distribution.
The density of the Maxwell distribution is given by
.. math::
f(x) := \sqrt{\frac{2}{\pi}} \frac{x^2 e^{-x^2/(2a^2)}}{a^3}
with :math:`x \geq 0`.
    Parameters
    ==========
    a : Real number, `a > 0`, the scale parameter of the distribution
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Maxwell, density, E, variance
>>> from sympy import Symbol, simplify
>>> a = Symbol("a", positive=True)
>>> z = Symbol("z")
>>> X = Maxwell("x", a)
>>> density(X)(z)
sqrt(2)*z**2*exp(-z**2/(2*a**2))/(sqrt(pi)*a**3)
>>> E(X)
2*sqrt(2)*a/sqrt(pi)
>>> simplify(variance(X))
a**2*(-8 + 3*pi)/pi
References
==========
.. [1] https://en.wikipedia.org/wiki/Maxwell_distribution
.. [2] http://mathworld.wolfram.com/MaxwellDistribution.html
"""
return rv(name, MaxwellDistribution, (a, ))
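# Illustrative sketch (not part of the docstring above): the Maxwell CDF
# implemented above combines an error-function term with a Gaussian tail term.
#
#     >>> from sympy import Symbol
#     >>> from sympy.stats import Maxwell, cdf
#     >>> a, z = Symbol("a", positive=True), Symbol("z")
#     >>> cdf(Maxwell("x", a))(z)
#     # expected: erf(sqrt(2)*z/(2*a)) - sqrt(2)*z*exp(-z**2/(2*a**2))/(sqrt(pi)*a)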
#-------------------------------------------------------------------------------
# Moyal Distribution -----------------------------------------------------------
class MoyalDistribution(SingleContinuousDistribution):
_argnames = ('mu', 'sigma')
@staticmethod
def check(mu, sigma):
_value_check(mu.is_real, "Location parameter must be real.")
        _value_check(sigma.is_real and sigma > 0,
                     "Scale parameter must be real and positive.")
def pdf(self, x):
mu, sigma = self.mu, self.sigma
num = exp(-(exp(-(x - mu)/sigma) + (x - mu)/(sigma))/2)
den = (sqrt(2*pi) * sigma)
return num/den
def _characteristic_function(self, t):
mu, sigma = self.mu, self.sigma
term1 = exp(I*t*mu)
term2 = (2**(-I*sigma*t) * gamma(Rational(1, 2) - I*t*sigma))
return (term1 * term2)/sqrt(pi)
def _moment_generating_function(self, t):
mu, sigma = self.mu, self.sigma
term1 = exp(t*mu)
term2 = (2**(-1*sigma*t) * gamma(Rational(1, 2) - t*sigma))
return (term1 * term2)/sqrt(pi)
def Moyal(name, mu, sigma):
r"""
Create a continuous random variable with a Moyal distribution.
The density of the Moyal distribution is given by
.. math::
        f(x) := \frac{\exp\left(-\frac{1}{2}\left(\exp\left(-\frac{x-\mu}{\sigma}\right)+\frac{x-\mu}{\sigma}\right)\right)}{\sqrt{2\pi}\sigma}
with :math:`x \in \mathbb{R}`.
Parameters
==========
mu : Real number
Location parameter
sigma : Real positive number
Scale parameter
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Moyal, density, cdf
>>> from sympy import Symbol, simplify
>>> mu = Symbol("mu", real=True)
>>> sigma = Symbol("sigma", positive=True, real=True)
>>> z = Symbol("z")
>>> X = Moyal("x", mu, sigma)
>>> density(X)(z)
sqrt(2)*exp(-exp((mu - z)/sigma)/2 - (-mu + z)/(2*sigma))/(2*sqrt(pi)*sigma)
>>> simplify(cdf(X)(z))
1 - erf(sqrt(2)*exp((mu - z)/(2*sigma))/2)
References
==========
.. [1] https://reference.wolfram.com/language/ref/MoyalDistribution.html
.. [2] http://www.stat.rice.edu/~dobelman/textfiles/DistributionsHandbook.pdf
"""
return rv(name, MoyalDistribution, (mu, sigma))
#-------------------------------------------------------------------------------
# Nakagami distribution --------------------------------------------------------
class NakagamiDistribution(SingleContinuousDistribution):
_argnames = ('mu', 'omega')
set = Interval(0, oo)
@staticmethod
def check(mu, omega):
        _value_check(mu >= S.Half, "Shape parameter mu must be greater than or equal to 1/2.")
_value_check(omega > 0, "Spread parameter omega must be positive.")
def pdf(self, x):
mu, omega = self.mu, self.omega
return 2*mu**mu/(gamma(mu)*omega**mu)*x**(2*mu - 1)*exp(-mu/omega*x**2)
def _cdf(self, x):
mu, omega = self.mu, self.omega
return Piecewise(
(lowergamma(mu, (mu/omega)*x**2)/gamma(mu), x > 0),
(S.Zero, True))
def Nakagami(name, mu, omega):
r"""
Create a continuous random variable with a Nakagami distribution.
The density of the Nakagami distribution is given by
.. math::
f(x) := \frac{2\mu^\mu}{\Gamma(\mu)\omega^\mu} x^{2\mu-1}
\exp\left(-\frac{\mu}{\omega}x^2 \right)
with :math:`x > 0`.
Parameters
==========
mu : Real number, `\mu \geq \frac{1}{2}` a shape
omega : Real number, `\omega > 0`, the spread
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Nakagami, density, E, variance, cdf
>>> from sympy import Symbol, simplify, pprint
>>> mu = Symbol("mu", positive=True)
>>> omega = Symbol("omega", positive=True)
>>> z = Symbol("z")
>>> X = Nakagami("x", mu, omega)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
2
-mu*z
-------
mu -mu 2*mu - 1 omega
2*mu *omega *z *e
----------------------------------
Gamma(mu)
>>> simplify(E(X))
sqrt(mu)*sqrt(omega)*gamma(mu + 1/2)/gamma(mu + 1)
>>> V = simplify(variance(X))
>>> pprint(V, use_unicode=False)
2
omega*Gamma (mu + 1/2)
omega - -----------------------
Gamma(mu)*Gamma(mu + 1)
>>> cdf(X)(z)
Piecewise((lowergamma(mu, mu*z**2/omega)/gamma(mu), z > 0),
(0, True))
References
==========
.. [1] https://en.wikipedia.org/wiki/Nakagami_distribution
"""
return rv(name, NakagamiDistribution, (mu, omega))
#-------------------------------------------------------------------------------
# Normal distribution ----------------------------------------------------------
class NormalDistribution(SingleContinuousDistribution):
_argnames = ('mean', 'std')
@staticmethod
def check(mean, std):
_value_check(std > 0, "Standard deviation must be positive")
def pdf(self, x):
return exp(-(x - self.mean)**2 / (2*self.std**2)) / (sqrt(2*pi)*self.std)
def _cdf(self, x):
mean, std = self.mean, self.std
return erf(sqrt(2)*(-mean + x)/(2*std))/2 + S.Half
def _characteristic_function(self, t):
mean, std = self.mean, self.std
return exp(I*mean*t - std**2*t**2/2)
def _moment_generating_function(self, t):
mean, std = self.mean, self.std
return exp(mean*t + std**2*t**2/2)
def _quantile(self, p):
mean, std = self.mean, self.std
return mean + std*sqrt(2)*erfinv(2*p - 1)
def Normal(name, mean, std):
r"""
Create a continuous random variable with a Normal distribution.
The density of the Normal distribution is given by
.. math::
f(x) := \frac{1}{\sigma\sqrt{2\pi}} e^{ -\frac{(x-\mu)^2}{2\sigma^2} }
Parameters
==========
mu : Real number or a list representing the mean or the mean vector
sigma : Real number or a positive definite square matrix,
:math:`\sigma^2 > 0` the variance
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Normal, density, E, std, cdf, skewness, quantile, marginal_distribution
>>> from sympy import Symbol, simplify, pprint
>>> mu = Symbol("mu")
>>> sigma = Symbol("sigma", positive=True)
>>> z = Symbol("z")
>>> y = Symbol("y")
>>> p = Symbol("p")
>>> X = Normal("x", mu, sigma)
>>> density(X)(z)
sqrt(2)*exp(-(-mu + z)**2/(2*sigma**2))/(2*sqrt(pi)*sigma)
>>> C = simplify(cdf(X))(z) # it needs a little more help...
>>> pprint(C, use_unicode=False)
/ ___ \
|\/ 2 *(-mu + z)|
erf|---------------|
\ 2*sigma / 1
-------------------- + -
2 2
>>> quantile(X)(p)
mu + sqrt(2)*sigma*erfinv(2*p - 1)
>>> simplify(skewness(X))
0
>>> X = Normal("x", 0, 1) # Mean 0, standard deviation 1
>>> density(X)(z)
sqrt(2)*exp(-z**2/2)/(2*sqrt(pi))
>>> E(2*X + 1)
1
>>> simplify(std(2*X + 1))
2
>>> m = Normal('X', [1, 2], [[2, 1], [1, 2]])
>>> pprint(density(m)(y, z), use_unicode=False)
/1 y\ /2*y z\ / z\ / y 2*z \
|- - -|*|--- - -| + |1 - -|*|- - + --- - 1|
___ \2 2/ \ 3 3/ \ 2/ \ 3 3 /
\/ 3 *e
--------------------------------------------------
6*pi
>>> marginal_distribution(m, m[0])(1)
1/(2*sqrt(pi))
References
==========
.. [1] https://en.wikipedia.org/wiki/Normal_distribution
.. [2] http://mathworld.wolfram.com/NormalDistributionFunction.html
"""
if isinstance(mean, (list, MatrixBase, MatrixExpr)) and\
isinstance(std, (list, MatrixBase, MatrixExpr)):
from sympy.stats.joint_rv_types import MultivariateNormal
return MultivariateNormal(name, mean, std)
return rv(name, NormalDistribution, (mean, std))
#-------------------------------------------------------------------------------
# Inverse Gaussian distribution ----------------------------------------------------------
class GaussianInverseDistribution(SingleContinuousDistribution):
_argnames = ('mean', 'shape')
@property
def set(self):
return Interval(0, oo)
@staticmethod
def check(mean, shape):
_value_check(shape > 0, "Shape parameter must be positive")
_value_check(mean > 0, "Mean must be positive")
def pdf(self, x):
mu, s = self.mean, self.shape
return exp(-s*(x - mu)**2 / (2*x*mu**2)) * sqrt(s/(2*pi*x**3))
def _cdf(self, x):
from sympy.stats import cdf
mu, s = self.mean, self.shape
stdNormalcdf = cdf(Normal('x', 0, 1))
first_term = stdNormalcdf(sqrt(s/x) * ((x/mu) - S.One))
second_term = exp(2*s/mu) * stdNormalcdf(-sqrt(s/x)*(x/mu + S.One))
return first_term + second_term
def _characteristic_function(self, t):
mu, s = self.mean, self.shape
return exp((s/mu)*(1 - sqrt(1 - (2*mu**2*I*t)/s)))
def _moment_generating_function(self, t):
mu, s = self.mean, self.shape
return exp((s/mu)*(1 - sqrt(1 - (2*mu**2*t)/s)))
def GaussianInverse(name, mean, shape):
r"""
Create a continuous random variable with an Inverse Gaussian distribution.
    The Inverse Gaussian distribution is also known as the Wald distribution.
The density of the Inverse Gaussian distribution is given by
.. math::
f(x) := \sqrt{\frac{\lambda}{2\pi x^3}} e^{-\frac{\lambda(x-\mu)^2}{2x\mu^2}}
Parameters
==========
mu : Positive number representing the mean
lambda : Positive number representing the shape parameter
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import GaussianInverse, density, E, std, skewness
>>> from sympy import Symbol, pprint
>>> mu = Symbol("mu", positive=True)
>>> lamda = Symbol("lambda", positive=True)
>>> z = Symbol("z", positive=True)
>>> X = GaussianInverse("x", mu, lamda)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
2
-lambda*(-mu + z)
-------------------
2
___ ________ 2*mu *z
\/ 2 *\/ lambda *e
-------------------------------------
____ 3/2
2*\/ pi *z
>>> E(X)
mu
>>> std(X).expand()
mu**(3/2)/sqrt(lambda)
>>> skewness(X).expand()
3*sqrt(mu)/sqrt(lambda)
References
==========
.. [1] https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution
.. [2] http://mathworld.wolfram.com/InverseGaussianDistribution.html
"""
return rv(name, GaussianInverseDistribution, (mean, shape))
Wald = GaussianInverse
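# Illustrative sketch: `Wald` is just an alias for `GaussianInverse`, so both
# names construct random variables with the same density.
#
#     >>> from sympy import Symbol
#     >>> from sympy.stats import Wald, GaussianInverse, density
#     >>> z = Symbol("z", positive=True)
#     >>> density(Wald("w", 1, 1))(z) == density(GaussianInverse("g", 1, 1))(z)
#     # expected: True (identical density expressions)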
#-------------------------------------------------------------------------------
# Pareto distribution ----------------------------------------------------------
class ParetoDistribution(SingleContinuousDistribution):
_argnames = ('xm', 'alpha')
@property
def set(self):
return Interval(self.xm, oo)
@staticmethod
def check(xm, alpha):
_value_check(xm > 0, "Xm must be positive")
_value_check(alpha > 0, "Alpha must be positive")
def pdf(self, x):
xm, alpha = self.xm, self.alpha
return alpha * xm**alpha / x**(alpha + 1)
def _cdf(self, x):
xm, alpha = self.xm, self.alpha
return Piecewise(
(S.One - xm**alpha/x**alpha, x>=xm),
(0, True),
)
def _moment_generating_function(self, t):
xm, alpha = self.xm, self.alpha
return alpha * (-xm*t)**alpha * uppergamma(-alpha, -xm*t)
def _characteristic_function(self, t):
xm, alpha = self.xm, self.alpha
return alpha * (-I * xm * t) ** alpha * uppergamma(-alpha, -I * xm * t)
def Pareto(name, xm, alpha):
r"""
Create a continuous random variable with the Pareto distribution.
The density of the Pareto distribution is given by
.. math::
f(x) := \frac{\alpha\,x_m^\alpha}{x^{\alpha+1}}
with :math:`x \in [x_m,\infty]`.
Parameters
==========
xm : Real number, `x_m > 0`, a scale
alpha : Real number, `\alpha > 0`, a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Pareto, density
>>> from sympy import Symbol
>>> xm = Symbol("xm", positive=True)
>>> beta = Symbol("beta", positive=True)
>>> z = Symbol("z")
>>> X = Pareto("x", xm, beta)
>>> density(X)(z)
beta*xm**beta*z**(-beta - 1)
References
==========
.. [1] https://en.wikipedia.org/wiki/Pareto_distribution
.. [2] http://mathworld.wolfram.com/ParetoDistribution.html
"""
return rv(name, ParetoDistribution, (xm, alpha))
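# Illustrative sketch (not part of the docstring above): the Pareto CDF is
# 1 - (xm/x)**alpha on [xm, oo) and 0 below xm.
#
#     >>> from sympy import Symbol
#     >>> from sympy.stats import Pareto, cdf
#     >>> xm, beta, z = Symbol("xm", positive=True), Symbol("beta", positive=True), Symbol("z")
#     >>> cdf(Pareto("x", xm, beta))(z)
#     # expected: Piecewise((1 - xm**beta/z**beta, z >= xm), (0, True))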
#-------------------------------------------------------------------------------
# PowerFunction distribution ---------------------------------------------------
class PowerFunctionDistribution(SingleContinuousDistribution):
    _argnames = ('alpha', 'a', 'b')
@property
def set(self):
return Interval(self.a, self.b)
@staticmethod
def check(alpha, a, b):
_value_check(a.is_real, "Continuous Boundary parameter should be real.")
_value_check(b.is_real, "Continuous Boundary parameter should be real.")
        _value_check(a < b, "'a', the left boundary, must be smaller than 'b', the right boundary.")
_value_check(alpha.is_positive, "Continuous Shape parameter should be positive.")
def pdf(self, x):
alpha, a, b = self.alpha, self.a, self.b
num = alpha*(x - a)**(alpha - 1)
den = (b - a)**alpha
return num/den
def PowerFunction(name, alpha, a, b):
r"""
    Create a continuous random variable with a Power Function distribution.
    The density of the PowerFunction distribution is given by
.. math::
f(x) := \frac{{\alpha}(x - a)^{\alpha - 1}}{(b - a)^{\alpha}}
with :math:`x \in [a,b]`.
Parameters
==========
    alpha : Positive number, :math:`0 < \alpha`, the shape parameter
a : Real number, :math:`-\infty < a` the left boundary
b : Real number, :math:`a < b < \infty` the right boundary
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import PowerFunction, density, cdf, E, variance
>>> from sympy import Symbol
>>> alpha = Symbol("alpha", positive=True)
>>> a = Symbol("a", real=True)
>>> b = Symbol("b", real=True)
>>> z = Symbol("z")
>>> X = PowerFunction("X", 2, a, b)
>>> density(X)(z)
(-2*a + 2*z)/(-a + b)**2
>>> cdf(X)(z)
Piecewise((a**2/(a**2 - 2*a*b + b**2) - 2*a*z/(a**2 - 2*a*b + b**2) +
z**2/(a**2 - 2*a*b + b**2), a <= z), (0, True))
>>> alpha = 2
>>> a = 0
>>> b = 1
>>> Y = PowerFunction("Y", alpha, a, b)
>>> E(Y)
2/3
>>> variance(Y)
1/18
References
==========
.. [1] http://www.mathwave.com/help/easyfit/html/analyses/distributions/power_func.html
"""
return rv(name, PowerFunctionDistribution, (alpha, a, b))
#-------------------------------------------------------------------------------
# QuadraticU distribution ------------------------------------------------------
class QuadraticUDistribution(SingleContinuousDistribution):
_argnames = ('a', 'b')
@property
def set(self):
return Interval(self.a, self.b)
@staticmethod
def check(a, b):
_value_check(b > a, "Parameter b must be in range (%s, oo)."%(a))
def pdf(self, x):
a, b = self.a, self.b
alpha = 12 / (b-a)**3
beta = (a+b) / 2
return Piecewise(
(alpha * (x-beta)**2, And(a<=x, x<=b)),
(S.Zero, True))
def _moment_generating_function(self, t):
a, b = self.a, self.b
return -3 * (exp(a*t) * (4 + (a**2 + 2*a*(-2 + b) + b**2) * t) \
- exp(b*t) * (4 + (-4*b + (a + b)**2) * t)) / ((a-b)**3 * t**2)
def _characteristic_function(self, t):
a, b = self.a, self.b
return -3*I*(exp(I*a*t*exp(I*b*t)) * (4*I - (-4*b + (a+b)**2)*t)) \
/ ((a-b)**3 * t**2)
def QuadraticU(name, a, b):
r"""
Create a Continuous Random Variable with a U-quadratic distribution.
The density of the U-quadratic distribution is given by
.. math::
f(x) := \alpha (x-\beta)^2
with :math:`x \in [a,b]`.
Parameters
==========
a : Real number
b : Real number, :math:`a < b`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import QuadraticU, density
>>> from sympy import Symbol, pprint
>>> a = Symbol("a", real=True)
>>> b = Symbol("b", real=True)
>>> z = Symbol("z")
>>> X = QuadraticU("x", a, b)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
/ 2
| / a b \
|12*|- - - - + z|
| \ 2 2 /
<----------------- for And(b >= z, a <= z)
| 3
| (-a + b)
|
\ 0 otherwise
References
==========
.. [1] https://en.wikipedia.org/wiki/U-quadratic_distribution
"""
return rv(name, QuadraticUDistribution, (a, b))
#-------------------------------------------------------------------------------
# RaisedCosine distribution ----------------------------------------------------
class RaisedCosineDistribution(SingleContinuousDistribution):
_argnames = ('mu', 's')
@property
def set(self):
return Interval(self.mu - self.s, self.mu + self.s)
@staticmethod
def check(mu, s):
_value_check(s > 0, "s must be positive")
def pdf(self, x):
mu, s = self.mu, self.s
return Piecewise(
((1+cos(pi*(x-mu)/s)) / (2*s), And(mu-s<=x, x<=mu+s)),
(S.Zero, True))
def _characteristic_function(self, t):
mu, s = self.mu, self.s
return Piecewise((exp(-I*pi*mu/s)/2, Eq(t, -pi/s)),
(exp(I*pi*mu/s)/2, Eq(t, pi/s)),
(pi**2*sin(s*t)*exp(I*mu*t) / (s*t*(pi**2 - s**2*t**2)), True))
def _moment_generating_function(self, t):
mu, s = self.mu, self.s
return pi**2 * sinh(s*t) * exp(mu*t) / (s*t*(pi**2 + s**2*t**2))
def RaisedCosine(name, mu, s):
r"""
Create a Continuous Random Variable with a raised cosine distribution.
The density of the raised cosine distribution is given by
.. math::
f(x) := \frac{1}{2s}\left(1+\cos\left(\frac{x-\mu}{s}\pi\right)\right)
with :math:`x \in [\mu-s,\mu+s]`.
Parameters
==========
mu : Real number
s : Real number, `s > 0`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import RaisedCosine, density
>>> from sympy import Symbol, pprint
>>> mu = Symbol("mu", real=True)
>>> s = Symbol("s", positive=True)
>>> z = Symbol("z")
>>> X = RaisedCosine("x", mu, s)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
/ /pi*(-mu + z)\
|cos|------------| + 1
| \ s /
<--------------------- for And(z >= mu - s, z <= mu + s)
| 2*s
|
\ 0 otherwise
References
==========
.. [1] https://en.wikipedia.org/wiki/Raised_cosine_distribution
"""
return rv(name, RaisedCosineDistribution, (mu, s))
#-------------------------------------------------------------------------------
# Rayleigh distribution --------------------------------------------------------
class RayleighDistribution(SingleContinuousDistribution):
_argnames = ('sigma',)
set = Interval(0, oo)
@staticmethod
def check(sigma):
_value_check(sigma > 0, "Scale parameter sigma must be positive.")
def pdf(self, x):
sigma = self.sigma
return x/sigma**2*exp(-x**2/(2*sigma**2))
def _cdf(self, x):
sigma = self.sigma
return 1 - exp(-(x**2/(2*sigma**2)))
def _characteristic_function(self, t):
sigma = self.sigma
return 1 - sigma*t*exp(-sigma**2*t**2/2) * sqrt(pi/2) * (erfi(sigma*t/sqrt(2)) - I)
def _moment_generating_function(self, t):
sigma = self.sigma
return 1 + sigma*t*exp(sigma**2*t**2/2) * sqrt(pi/2) * (erf(sigma*t/sqrt(2)) + 1)
def Rayleigh(name, sigma):
r"""
Create a continuous random variable with a Rayleigh distribution.
The density of the Rayleigh distribution is given by
.. math ::
f(x) := \frac{x}{\sigma^2} e^{-x^2/2\sigma^2}
with :math:`x > 0`.
Parameters
==========
sigma : Real number, `\sigma > 0`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Rayleigh, density, E, variance
>>> from sympy import Symbol
>>> sigma = Symbol("sigma", positive=True)
>>> z = Symbol("z")
>>> X = Rayleigh("x", sigma)
>>> density(X)(z)
z*exp(-z**2/(2*sigma**2))/sigma**2
>>> E(X)
sqrt(2)*sqrt(pi)*sigma/2
>>> variance(X)
-pi*sigma**2/2 + 2*sigma**2
References
==========
.. [1] https://en.wikipedia.org/wiki/Rayleigh_distribution
.. [2] http://mathworld.wolfram.com/RayleighDistribution.html
"""
return rv(name, RayleighDistribution, (sigma, ))
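# Illustrative sketch (not part of the docstring above): the Rayleigh CDF.
#
#     >>> from sympy import Symbol
#     >>> from sympy.stats import Rayleigh, cdf
#     >>> sigma, z = Symbol("sigma", positive=True), Symbol("z")
#     >>> cdf(Rayleigh("x", sigma))(z)
#     # expected: 1 - exp(-z**2/(2*sigma**2))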
#-------------------------------------------------------------------------------
# Reciprocal distribution --------------------------------------------------------
class ReciprocalDistribution(SingleContinuousDistribution):
_argnames = ('a', 'b')
@property
def set(self):
return Interval(self.a, self.b)
@staticmethod
def check(a, b):
        _value_check(a > 0, "Parameter a must be positive. a = %s" % a)
_value_check((a < b),
"Parameter b must be in range (%s, +oo]. b = %s"%(a, b))
def pdf(self, x):
a, b = self.a, self.b
return 1/(x*(log(b) - log(a)))
def Reciprocal(name, a, b):
r"""Creates a continuous random variable with a reciprocal distribution.
Parameters
==========
a : Real number, :math:`0 < a`
b : Real number, :math:`a < b`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Reciprocal, density, cdf
>>> from sympy import symbols
>>> a, b, x = symbols('a, b, x', positive=True)
>>> R = Reciprocal('R', a, b)
>>> density(R)(x)
1/(x*(-log(a) + log(b)))
>>> cdf(R)(x)
Piecewise((log(a)/(log(a) - log(b)) - log(x)/(log(a) - log(b)), a <= x), (0, True))
    References
    ==========
.. [1] https://en.wikipedia.org/wiki/Reciprocal_distribution
"""
return rv(name, ReciprocalDistribution, (a, b))
#-------------------------------------------------------------------------------
# Shifted Gompertz distribution ------------------------------------------------
class ShiftedGompertzDistribution(SingleContinuousDistribution):
_argnames = ('b', 'eta')
set = Interval(0, oo)
@staticmethod
def check(b, eta):
_value_check(b > 0, "b must be positive")
_value_check(eta > 0, "eta must be positive")
def pdf(self, x):
b, eta = self.b, self.eta
return b*exp(-b*x)*exp(-eta*exp(-b*x))*(1+eta*(1-exp(-b*x)))
def ShiftedGompertz(name, b, eta):
r"""
Create a continuous random variable with a Shifted Gompertz distribution.
The density of the Shifted Gompertz distribution is given by
.. math::
        f(x) := b e^{-b x} e^{-\eta \exp(-b x)} \left[1 + \eta(1 - e^{-b x}) \right]
    with :math:`x \in [0, \infty)`.
Parameters
==========
    b : Real number, `b > 0`, a scale
    eta : Real number, `\eta > 0`, a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import ShiftedGompertz, density
>>> from sympy import Symbol
>>> b = Symbol("b", positive=True)
>>> eta = Symbol("eta", positive=True)
>>> x = Symbol("x")
>>> X = ShiftedGompertz("x", b, eta)
>>> density(X)(x)
b*(eta*(1 - exp(-b*x)) + 1)*exp(-b*x)*exp(-eta*exp(-b*x))
References
==========
.. [1] https://en.wikipedia.org/wiki/Shifted_Gompertz_distribution
"""
return rv(name, ShiftedGompertzDistribution, (b, eta))
#-------------------------------------------------------------------------------
# StudentT distribution --------------------------------------------------------
class StudentTDistribution(SingleContinuousDistribution):
_argnames = ('nu',)
set = Interval(-oo, oo)
@staticmethod
def check(nu):
_value_check(nu > 0, "Degrees of freedom nu must be positive.")
def pdf(self, x):
nu = self.nu
return 1/(sqrt(nu)*beta_fn(S.Half, nu/2))*(1 + x**2/nu)**(-(nu + 1)/2)
def _cdf(self, x):
nu = self.nu
return S.Half + x*gamma((nu+1)/2)*hyper((S.Half, (nu+1)/2),
(Rational(3, 2),), -x**2/nu)/(sqrt(pi*nu)*gamma(nu/2))
def _moment_generating_function(self, t):
raise NotImplementedError('The moment generating function for the Student-T distribution is undefined.')
def StudentT(name, nu):
r"""
Create a continuous random variable with a student's t distribution.
The density of the student's t distribution is given by
.. math::
f(x) := \frac{\Gamma \left(\frac{\nu+1}{2} \right)}
{\sqrt{\nu\pi}\Gamma \left(\frac{\nu}{2} \right)}
\left(1+\frac{x^2}{\nu} \right)^{-\frac{\nu+1}{2}}
Parameters
==========
nu : Real number, `\nu > 0`, the degrees of freedom
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import StudentT, density, cdf
>>> from sympy import Symbol, pprint
>>> nu = Symbol("nu", positive=True)
>>> z = Symbol("z")
>>> X = StudentT("x", nu)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
nu 1
- -- - -
2 2
/ 2\
| z |
|1 + --|
\ nu/
-----------------
____ / nu\
\/ nu *B|1/2, --|
\ 2 /
>>> cdf(X)(z)
1/2 + z*gamma(nu/2 + 1/2)*hyper((1/2, nu/2 + 1/2), (3/2,),
-z**2/nu)/(sqrt(pi)*sqrt(nu)*gamma(nu/2))
References
==========
.. [1] https://en.wikipedia.org/wiki/Student_t-distribution
.. [2] http://mathworld.wolfram.com/Studentst-Distribution.html
"""
return rv(name, StudentTDistribution, (nu, ))
#-------------------------------------------------------------------------------
# Trapezoidal distribution ------------------------------------------------------
class TrapezoidalDistribution(SingleContinuousDistribution):
_argnames = ('a', 'b', 'c', 'd')
@property
def set(self):
return Interval(self.a, self.d)
@staticmethod
def check(a, b, c, d):
_value_check(a < d, "Lower bound parameter a < %s. a = %s"%(d, a))
_value_check((a <= b, b < c),
"Level start parameter b must be in range [%s, %s). b = %s"%(a, c, b))
_value_check((b < c, c <= d),
"Level end parameter c must be in range (%s, %s]. c = %s"%(b, d, c))
_value_check(d >= c, "Upper bound parameter d > %s. d = %s"%(c, d))
def pdf(self, x):
a, b, c, d = self.a, self.b, self.c, self.d
return Piecewise(
(2*(x-a) / ((b-a)*(d+c-a-b)), And(a <= x, x < b)),
(2 / (d+c-a-b), And(b <= x, x < c)),
(2*(d-x) / ((d-c)*(d+c-a-b)), And(c <= x, x <= d)),
(S.Zero, True))
def Trapezoidal(name, a, b, c, d):
r"""
Create a continuous random variable with a trapezoidal distribution.
The density of the trapezoidal distribution is given by
.. math::
f(x) := \begin{cases}
0 & \mathrm{for\ } x < a, \\
\frac{2(x-a)}{(b-a)(d+c-a-b)} & \mathrm{for\ } a \le x < b, \\
\frac{2}{d+c-a-b} & \mathrm{for\ } b \le x < c, \\
\frac{2(d-x)}{(d-c)(d+c-a-b)} & \mathrm{for\ } c \le x < d, \\
0 & \mathrm{for\ } d < x.
\end{cases}
Parameters
==========
a : Real number, :math:`a < d`
    b : Real number, :math:`a \le b < c`
    c : Real number, :math:`b < c \le d`
d : Real number
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Trapezoidal, density
>>> from sympy import Symbol, pprint
>>> a = Symbol("a")
>>> b = Symbol("b")
>>> c = Symbol("c")
>>> d = Symbol("d")
>>> z = Symbol("z")
>>> X = Trapezoidal("x", a,b,c,d)
>>> pprint(density(X)(z), use_unicode=False)
/ -2*a + 2*z
|------------------------- for And(a <= z, b > z)
|(-a + b)*(-a - b + c + d)
|
| 2
| -------------- for And(b <= z, c > z)
< -a - b + c + d
|
| 2*d - 2*z
|------------------------- for And(d >= z, c <= z)
|(-c + d)*(-a - b + c + d)
|
\ 0 otherwise
References
==========
.. [1] https://en.wikipedia.org/wiki/Trapezoidal_distribution
"""
return rv(name, TrapezoidalDistribution, (a, b, c, d))
#-------------------------------------------------------------------------------
# Triangular distribution ------------------------------------------------------
class TriangularDistribution(SingleContinuousDistribution):
_argnames = ('a', 'b', 'c')
@property
def set(self):
return Interval(self.a, self.b)
@staticmethod
def check(a, b, c):
_value_check(b > a, "Parameter b > %s. b = %s"%(a, b))
_value_check((a <= c, c <= b),
"Parameter c must be in range [%s, %s]. c = %s"%(a, b, c))
def pdf(self, x):
a, b, c = self.a, self.b, self.c
return Piecewise(
(2*(x - a)/((b - a)*(c - a)), And(a <= x, x < c)),
(2/(b - a), Eq(x, c)),
(2*(b - x)/((b - a)*(b - c)), And(c < x, x <= b)),
(S.Zero, True))
def _characteristic_function(self, t):
a, b, c = self.a, self.b, self.c
return -2 *((b-c) * exp(I*a*t) - (b-a) * exp(I*c*t) + (c-a) * exp(I*b*t)) / ((b-a)*(c-a)*(b-c)*t**2)
def _moment_generating_function(self, t):
a, b, c = self.a, self.b, self.c
return 2 * ((b - c) * exp(a * t) - (b - a) * exp(c * t) + (c - a) * exp(b * t)) / (
(b - a) * (c - a) * (b - c) * t ** 2)
def Triangular(name, a, b, c):
r"""
Create a continuous random variable with a triangular distribution.
The density of the triangular distribution is given by
.. math::
f(x) := \begin{cases}
0 & \mathrm{for\ } x < a, \\
\frac{2(x-a)}{(b-a)(c-a)} & \mathrm{for\ } a \le x < c, \\
\frac{2}{b-a} & \mathrm{for\ } x = c, \\
\frac{2(b-x)}{(b-a)(b-c)} & \mathrm{for\ } c < x \le b, \\
0 & \mathrm{for\ } b < x.
\end{cases}
Parameters
==========
a : Real number, :math:`a \in \left(-\infty, \infty\right)`
b : Real number, :math:`a < b`
c : Real number, :math:`a \leq c \leq b`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Triangular, density
>>> from sympy import Symbol, pprint
>>> a = Symbol("a")
>>> b = Symbol("b")
>>> c = Symbol("c")
>>> z = Symbol("z")
>>> X = Triangular("x", a,b,c)
>>> pprint(density(X)(z), use_unicode=False)
/ -2*a + 2*z
|----------------- for And(a <= z, c > z)
|(-a + b)*(-a + c)
|
| 2
| ------ for c = z
< -a + b
|
| 2*b - 2*z
|---------------- for And(b >= z, c < z)
|(-a + b)*(b - c)
|
\ 0 otherwise
References
==========
.. [1] https://en.wikipedia.org/wiki/Triangular_distribution
.. [2] http://mathworld.wolfram.com/TriangularDistribution.html
"""
return rv(name, TriangularDistribution, (a, b, c))
#-------------------------------------------------------------------------------
# Uniform distribution ---------------------------------------------------------
class UniformDistribution(SingleContinuousDistribution):
_argnames = ('left', 'right')
@property
def set(self):
return Interval(self.left, self.right)
@staticmethod
def check(left, right):
_value_check(left < right, "Lower limit should be less than Upper limit.")
def pdf(self, x):
left, right = self.left, self.right
return Piecewise(
(S.One/(right - left), And(left <= x, x <= right)),
(S.Zero, True)
)
def _cdf(self, x):
left, right = self.left, self.right
return Piecewise(
(S.Zero, x < left),
((x - left)/(right - left), x <= right),
(S.One, True)
)
def _characteristic_function(self, t):
left, right = self.left, self.right
return Piecewise(((exp(I*t*right) - exp(I*t*left)) / (I*t*(right - left)), Ne(t, 0)),
(S.One, True))
def _moment_generating_function(self, t):
left, right = self.left, self.right
return Piecewise(((exp(t*right) - exp(t*left)) / (t * (right - left)), Ne(t, 0)),
(S.One, True))
def expectation(self, expr, var, **kwargs):
from sympy import Max, Min
kwargs['evaluate'] = True
result = SingleContinuousDistribution.expectation(self, expr, var, **kwargs)
result = result.subs({Max(self.left, self.right): self.right,
Min(self.left, self.right): self.left})
return result
def Uniform(name, left, right):
r"""
Create a continuous random variable with a uniform distribution.
The density of the uniform distribution is given by
.. math::
f(x) := \begin{cases}
\frac{1}{b - a} & \text{for } x \in [a,b] \\
0 & \text{otherwise}
\end{cases}
with :math:`x \in [a,b]`.
Parameters
==========
a : Real number, :math:`-\infty < a` the left boundary
b : Real number, :math:`a < b < \infty` the right boundary
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Uniform, density, cdf, E, variance
>>> from sympy import Symbol, simplify
>>> a = Symbol("a", negative=True)
>>> b = Symbol("b", positive=True)
>>> z = Symbol("z")
>>> X = Uniform("x", a, b)
>>> density(X)(z)
Piecewise((1/(-a + b), (b >= z) & (a <= z)), (0, True))
>>> cdf(X)(z)
Piecewise((0, a > z), ((-a + z)/(-a + b), b >= z), (1, True))
>>> E(X)
a/2 + b/2
>>> simplify(variance(X))
a**2/12 - a*b/6 + b**2/12
References
==========
.. [1] https://en.wikipedia.org/wiki/Uniform_distribution_%28continuous%29
.. [2] http://mathworld.wolfram.com/UniformDistribution.html
"""
return rv(name, UniformDistribution, (left, right))
#-------------------------------------------------------------------------------
# UniformSum distribution ------------------------------------------------------
class UniformSumDistribution(SingleContinuousDistribution):
_argnames = ('n',)
@property
def set(self):
return Interval(0, self.n)
@staticmethod
def check(n):
_value_check((n > 0, n.is_integer),
"Parameter n must be positive integer.")
def pdf(self, x):
n = self.n
k = Dummy("k")
return 1/factorial(
n - 1)*Sum((-1)**k*binomial(n, k)*(x - k)**(n - 1), (k, 0, floor(x)))
def _cdf(self, x):
n = self.n
k = Dummy("k")
return Piecewise((S.Zero, x < 0),
(1/factorial(n)*Sum((-1)**k*binomial(n, k)*(x - k)**(n),
(k, 0, floor(x))), x <= n),
(S.One, True))
def _characteristic_function(self, t):
return ((exp(I*t) - 1) / (I*t))**self.n
def _moment_generating_function(self, t):
return ((exp(t) - 1) / t)**self.n
def UniformSum(name, n):
r"""
Create a continuous random variable with an Irwin-Hall distribution.
The probability distribution function depends on a single parameter
`n` which is an integer.
The density of the Irwin-Hall distribution is given by
.. math ::
f(x) := \frac{1}{(n-1)!}\sum_{k=0}^{\left\lfloor x\right\rfloor}(-1)^k
\binom{n}{k}(x-k)^{n-1}
Parameters
==========
n : A positive Integer, `n > 0`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import UniformSum, density, cdf
>>> from sympy import Symbol, pprint
>>> n = Symbol("n", integer=True)
>>> z = Symbol("z")
>>> X = UniformSum("x", n)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
floor(z)
___
\ `
\ k n - 1 /n\
) (-1) *(-k + z) *| |
/ \k/
/__,
k = 0
--------------------------------
(n - 1)!
>>> cdf(X)(z)
Piecewise((0, z < 0), (Sum((-1)**_k*(-_k + z)**n*binomial(n, _k),
(_k, 0, floor(z)))/factorial(n), n >= z), (1, True))
    Compute the cdf with specific 'x' and 'n' values as follows:
>>> cdf(UniformSum("x", 5), evaluate=False)(2).doit()
9/40
The argument evaluate=False prevents an attempt at evaluation
of the sum for general n, before the argument 2 is passed.
References
==========
.. [1] https://en.wikipedia.org/wiki/Uniform_sum_distribution
.. [2] http://mathworld.wolfram.com/UniformSumDistribution.html
"""
return rv(name, UniformSumDistribution, (n, ))
#-------------------------------------------------------------------------------
# VonMises distribution --------------------------------------------------------
class VonMisesDistribution(SingleContinuousDistribution):
_argnames = ('mu', 'k')
set = Interval(0, 2*pi)
@staticmethod
def check(mu, k):
_value_check(k > 0, "k must be positive")
def pdf(self, x):
mu, k = self.mu, self.k
return exp(k*cos(x-mu)) / (2*pi*besseli(0, k))
def VonMises(name, mu, k):
r"""
Create a Continuous Random Variable with a von Mises distribution.
The density of the von Mises distribution is given by
.. math::
f(x) := \frac{e^{\kappa\cos(x-\mu)}}{2\pi I_0(\kappa)}
with :math:`x \in [0,2\pi]`.
Parameters
==========
mu : Real number, measure of location
k : Real number, measure of concentration
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import VonMises, density
>>> from sympy import Symbol, pprint
>>> mu = Symbol("mu")
>>> k = Symbol("k", positive=True)
>>> z = Symbol("z")
>>> X = VonMises("x", mu, k)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
k*cos(mu - z)
e
------------------
2*pi*besseli(0, k)
References
==========
.. [1] https://en.wikipedia.org/wiki/Von_Mises_distribution
.. [2] http://mathworld.wolfram.com/vonMisesDistribution.html
"""
return rv(name, VonMisesDistribution, (mu, k))
#-------------------------------------------------------------------------------
# Weibull distribution ---------------------------------------------------------
class WeibullDistribution(SingleContinuousDistribution):
_argnames = ('alpha', 'beta')
set = Interval(0, oo)
@staticmethod
def check(alpha, beta):
_value_check(alpha > 0, "Alpha must be positive")
_value_check(beta > 0, "Beta must be positive")
def pdf(self, x):
alpha, beta = self.alpha, self.beta
return beta * (x/alpha)**(beta - 1) * exp(-(x/alpha)**beta) / alpha
def Weibull(name, alpha, beta):
r"""
Create a continuous random variable with a Weibull distribution.
The density of the Weibull distribution is given by
.. math::
f(x) := \begin{cases}
\frac{k}{\lambda}\left(\frac{x}{\lambda}\right)^{k-1}
e^{-(x/\lambda)^{k}} & x\geq0\\
0 & x<0
\end{cases}
Parameters
==========
lambda : Real number, :math:`\lambda > 0` a scale
k : Real number, `k > 0` a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Weibull, density, E, variance
>>> from sympy import Symbol, simplify
>>> l = Symbol("lambda", positive=True)
>>> k = Symbol("k", positive=True)
>>> z = Symbol("z")
>>> X = Weibull("x", l, k)
>>> density(X)(z)
k*(z/lambda)**(k - 1)*exp(-(z/lambda)**k)/lambda
>>> simplify(E(X))
lambda*gamma(1 + 1/k)
>>> simplify(variance(X))
lambda**2*(-gamma(1 + 1/k)**2 + gamma(1 + 2/k))
References
==========
.. [1] https://en.wikipedia.org/wiki/Weibull_distribution
.. [2] http://mathworld.wolfram.com/WeibullDistribution.html
"""
return rv(name, WeibullDistribution, (alpha, beta))
#-------------------------------------------------------------------------------
# Wigner semicircle distribution -----------------------------------------------
class WignerSemicircleDistribution(SingleContinuousDistribution):
_argnames = ('R',)
@property
def set(self):
return Interval(-self.R, self.R)
@staticmethod
def check(R):
_value_check(R > 0, "Radius R must be positive.")
def pdf(self, x):
R = self.R
return 2/(pi*R**2)*sqrt(R**2 - x**2)
def _characteristic_function(self, t):
return Piecewise((2 * besselj(1, self.R*t) / (self.R*t), Ne(t, 0)),
(S.One, True))
def _moment_generating_function(self, t):
return Piecewise((2 * besseli(1, self.R*t) / (self.R*t), Ne(t, 0)),
(S.One, True))
def WignerSemicircle(name, R):
r"""
Create a continuous random variable with a Wigner semicircle distribution.
The density of the Wigner semicircle distribution is given by
.. math::
f(x) := \frac2{\pi R^2}\,\sqrt{R^2-x^2}
with :math:`x \in [-R,R]`.
Parameters
==========
R : Real number, `R > 0`, the radius
Returns
=======
A `RandomSymbol`.
Examples
========
>>> from sympy.stats import WignerSemicircle, density, E
>>> from sympy import Symbol
>>> R = Symbol("R", positive=True)
>>> z = Symbol("z")
>>> X = WignerSemicircle("x", R)
>>> density(X)(z)
2*sqrt(R**2 - z**2)/(pi*R**2)
>>> E(X)
0
References
==========
.. [1] https://en.wikipedia.org/wiki/Wigner_semicircle_distribution
.. [2] http://mathworld.wolfram.com/WignersSemicircleLaw.html
"""
return rv(name, WignerSemicircleDistribution, (R,))
"""
Finite Discrete Random Variables - Prebuilt variable types
Contains
========
FiniteRV
DiscreteUniform
Die
Bernoulli
Coin
Binomial
BetaBinomial
Hypergeometric
Rademacher
"""
from sympy import (S, sympify, Rational, binomial, cacheit, Integer,
Dummy, Eq, Intersection, Interval,
Symbol, Lambda, Piecewise, Or, Gt, Lt, Ge, Le, Contains)
from sympy import beta as beta_fn
from sympy.stats.frv import (SingleFiniteDistribution,
SingleFinitePSpace)
from sympy.stats.rv import _value_check, Density, is_random
__all__ = ['FiniteRV',
'DiscreteUniform',
'Die',
'Bernoulli',
'Coin',
'Binomial',
'BetaBinomial',
'Hypergeometric',
'Rademacher'
]
def rv(name, cls, *args, **kwargs):
args = list(map(sympify, args))
dist = cls(*args)
if kwargs.pop('check', True):
dist.check(*args)
pspace = SingleFinitePSpace(name, dist)
if any(is_random(arg) for arg in args):
from sympy.stats.compound_rv import CompoundPSpace, CompoundDistribution
pspace = CompoundPSpace(name, CompoundDistribution(dist))
return pspace.value
class FiniteDistributionHandmade(SingleFiniteDistribution):
@property
def dict(self):
return self.args[0]
def pmf(self, x):
x = Symbol('x')
return Lambda(x, Piecewise(*(
[(v, Eq(k, x)) for k, v in self.dict.items()] + [(S.Zero, True)])))
@property
def set(self):
return set(self.dict.keys())
@staticmethod
def check(density):
for p in density.values():
_value_check((p >= 0, p <= 1),
"Probability at a point must be between 0 and 1.")
val = sum(density.values())
_value_check(Eq(val, 1) != S.false, "Total Probability must be 1.")
def FiniteRV(name, density, **kwargs):
r"""
Create a Finite Random Variable given a dict representing the density.
Parameters
==========
name : Symbol
Represents name of the random variable.
density: A dict
        Dictionary containing the pdf of the finite distribution
check : bool
If True, it will check whether the given density
        sums to 1 over the given set. If False, it
will not perform this check. Default is False.
Examples
========
>>> from sympy.stats import FiniteRV, P, E
>>> density = {0: .1, 1: .2, 2: .3, 3: .4}
>>> X = FiniteRV('X', density)
>>> E(X)
2.00000000000000
>>> P(X >= 2)
0.700000000000000
Returns
=======
RandomSymbol
"""
# have a default of False while `rv` should have a default of True
kwargs['check'] = kwargs.pop('check', False)
return rv(name, FiniteDistributionHandmade, density, **kwargs)
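# Illustrative sketch (not part of the docstring above): with check=True the
# density is validated, so a dict that does not sum to 1 should be rejected.
#
#     >>> from sympy.stats import FiniteRV
#     >>> FiniteRV('F', {1: 0.3, 2: 0.4}, check=True)
#     # expected: ValueError, since the probabilities sum to 0.7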
class DiscreteUniformDistribution(SingleFiniteDistribution):
@staticmethod
def check(*args):
# not using _value_check since there is a
# suggestion for the user
if len(set(args)) != len(args):
from sympy.utilities.iterables import multiset
from sympy.utilities.misc import filldedent
weights = multiset(args)
n = Integer(len(args))
for k in weights:
weights[k] /= n
raise ValueError(filldedent("""
Repeated args detected but set expected. For a
distribution having different weights for each
item use the following:""") + (
'\nS("FiniteRV(%s, %s)")' % ("'X'", weights)))
@property
def p(self):
return Rational(1, len(self.args))
@property # type: ignore
@cacheit
def dict(self):
return {k: self.p for k in self.set}
@property
def set(self):
return set(self.args)
def pmf(self, x):
if x in self.args:
return self.p
else:
return S.Zero
def DiscreteUniform(name, items):
r"""
Create a Finite Random Variable representing a uniform distribution over
the input set.
Parameters
==========
items: list/tuple
Items over which Uniform distribution is to be made
Examples
========
>>> from sympy.stats import DiscreteUniform, density
>>> from sympy import symbols
>>> X = DiscreteUniform('X', symbols('a b c')) # equally likely over a, b, c
>>> density(X).dict
{a: 1/3, b: 1/3, c: 1/3}
>>> Y = DiscreteUniform('Y', list(range(5))) # distribution over a range
>>> density(Y).dict
{0: 1/5, 1: 1/5, 2: 1/5, 3: 1/5, 4: 1/5}
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Discrete_uniform_distribution
.. [2] http://mathworld.wolfram.com/DiscreteUniformDistribution.html
"""
return rv(name, DiscreteUniformDistribution, *items)
class DieDistribution(SingleFiniteDistribution):
_argnames = ('sides',)
@staticmethod
def check(sides):
_value_check((sides.is_positive, sides.is_integer),
"number of sides must be a positive integer.")
@property
def is_symbolic(self):
return not self.sides.is_number
@property
def high(self):
return self.sides
@property
def low(self):
return S.One
@property
def set(self):
if self.is_symbolic:
return Intersection(S.Naturals0, Interval(0, self.sides))
return set(map(Integer, list(range(1, self.sides + 1))))
def pmf(self, x):
x = sympify(x)
if not (x.is_number or x.is_Symbol or is_random(x)):
            raise ValueError("'x' expected as an argument of type 'number', 'Symbol', or "
                             "'RandomSymbol', not %s" % (type(x)))
cond = Ge(x, 1) & Le(x, self.sides) & Contains(x, S.Integers)
return Piecewise((S.One/self.sides, cond), (S.Zero, True))
def Die(name, sides=6):
r"""
Create a Finite Random Variable representing a fair die.
Parameters
==========
sides: Integer
Represents the number of sides of the Die, by default is 6
Examples
========
>>> from sympy.stats import Die, density
>>> from sympy import Symbol
>>> D6 = Die('D6', 6) # Six sided Die
>>> density(D6).dict
{1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6}
>>> D4 = Die('D4', 4) # Four sided Die
>>> density(D4).dict
{1: 1/4, 2: 1/4, 3: 1/4, 4: 1/4}
>>> n = Symbol('n', positive=True, integer=True)
>>> Dn = Die('Dn', n) # n sided Die
>>> density(Dn).dict
Density(DieDistribution(n))
>>> density(Dn).dict.subs(n, 4).doit()
{1: 1/4, 2: 1/4, 3: 1/4, 4: 1/4}
Returns
=======
RandomSymbol
"""
return rv(name, DieDistribution, sides)
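# Illustrative sketch (not part of the docstring above): events on a fair die
# can be queried directly with sympy.stats.P.
#
#     >>> from sympy.stats import Die, P
#     >>> D6 = Die('D6', 6)
#     >>> P(D6 > 3)    # expected: 1/2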
class BernoulliDistribution(SingleFiniteDistribution):
_argnames = ('p', 'succ', 'fail')
@staticmethod
def check(p, succ, fail):
_value_check((p >= 0, p <= 1),
"p should be in range [0, 1].")
@property
def set(self):
return {self.succ, self.fail}
def pmf(self, x):
if isinstance(self.succ, Symbol) and isinstance(self.fail, Symbol):
return Piecewise((self.p, x == self.succ),
(1 - self.p, x == self.fail),
(S.Zero, True))
return Piecewise((self.p, Eq(x, self.succ)),
(1 - self.p, Eq(x, self.fail)),
(S.Zero, True))
def Bernoulli(name, p, succ=1, fail=0):
r"""
Create a Finite Random Variable representing a Bernoulli process.
Parameters
==========
p : Rational number between 0 and 1
Represents probability of success
succ : Integer/symbol/string
Represents event of success
fail : Integer/symbol/string
Represents event of failure
Examples
========
>>> from sympy.stats import Bernoulli, density
>>> from sympy import S
>>> X = Bernoulli('X', S(3)/4) # 1-0 Bernoulli variable, probability = 3/4
>>> density(X).dict
{0: 1/4, 1: 3/4}
>>> X = Bernoulli('X', S.Half, 'Heads', 'Tails') # A fair coin toss
>>> density(X).dict
{Heads: 1/2, Tails: 1/2}
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Bernoulli_distribution
.. [2] http://mathworld.wolfram.com/BernoulliDistribution.html
"""
return rv(name, BernoulliDistribution, p, succ, fail)
def Coin(name, p=S.Half):
r"""
Create a Finite Random Variable representing a Coin toss.
Parameters
==========
    p : Rational Number between 0 and 1
Represents probability of getting "Heads", by default is Half
Examples
========
>>> from sympy.stats import Coin, density
>>> from sympy import Rational
>>> C = Coin('C') # A fair coin toss
>>> density(C).dict
{H: 1/2, T: 1/2}
>>> C2 = Coin('C2', Rational(3, 5)) # An unfair coin
>>> density(C2).dict
{H: 3/5, T: 2/5}
Returns
=======
RandomSymbol
See Also
========
sympy.stats.Binomial
References
==========
.. [1] https://en.wikipedia.org/wiki/Coin_flipping
"""
return rv(name, BernoulliDistribution, p, 'H', 'T')
class BinomialDistribution(SingleFiniteDistribution):
_argnames = ('n', 'p', 'succ', 'fail')
@staticmethod
def check(n, p, succ, fail):
_value_check((n.is_integer, n.is_nonnegative),
"'n' must be nonnegative integer.")
_value_check((p <= 1, p >= 0),
"p should be in range [0, 1].")
@property
def high(self):
return self.n
@property
def low(self):
return S.Zero
@property
def is_symbolic(self):
return not self.n.is_number
@property
def set(self):
if self.is_symbolic:
return Intersection(S.Naturals0, Interval(0, self.n))
return set(self.dict.keys())
def pmf(self, x):
n, p = self.n, self.p
x = sympify(x)
if not (x.is_number or x.is_Symbol or is_random(x)):
            raise ValueError("'x' expected as an argument of type 'number', 'Symbol', or "
                             "'RandomSymbol', not %s" % (type(x)))
cond = Ge(x, 0) & Le(x, n) & Contains(x, S.Integers)
return Piecewise((binomial(n, x) * p**x * (1 - p)**(n - x), cond), (S.Zero, True))
@property # type: ignore
@cacheit
def dict(self):
if self.is_symbolic:
return Density(self)
return {k*self.succ + (self.n-k)*self.fail: self.pmf(k)
for k in range(0, self.n + 1)}
def Binomial(name, n, p, succ=1, fail=0):
r"""
Create a Finite Random Variable representing a binomial distribution.
Parameters
==========
n : Positive Integer
Represents number of trials
p : Rational Number between 0 and 1
Represents probability of success
succ : Integer/symbol/string
Represents event of success, by default is 1
fail : Integer/symbol/string
Represents event of failure, by default is 0
Examples
========
>>> from sympy.stats import Binomial, density
>>> from sympy import S, Symbol
>>> X = Binomial('X', 4, S.Half) # Four "coin flips"
>>> density(X).dict
{0: 1/16, 1: 1/4, 2: 3/8, 3: 1/4, 4: 1/16}
>>> n = Symbol('n', positive=True, integer=True)
>>> p = Symbol('p', positive=True)
>>> X = Binomial('X', n, S.Half) # n "coin flips"
>>> density(X).dict
Density(BinomialDistribution(n, 1/2, 1, 0))
>>> density(X).dict.subs(n, 4).doit()
{0: 1/16, 1: 1/4, 2: 3/8, 3: 1/4, 4: 1/16}
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Binomial_distribution
.. [2] http://mathworld.wolfram.com/BinomialDistribution.html
"""
return rv(name, BinomialDistribution, n, p, succ, fail)
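# Illustrative sketch (not part of the docstring above): the mean of a
# Binomial(n, p) variable is n*p, e.g. 2 for four fair coin flips.
#
#     >>> from sympy import S
#     >>> from sympy.stats import Binomial, E
#     >>> E(Binomial('X', 4, S.Half))    # expected: 2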
#-------------------------------------------------------------------------------
# Beta-binomial distribution ----------------------------------------------------------
class BetaBinomialDistribution(SingleFiniteDistribution):
_argnames = ('n', 'alpha', 'beta')
@staticmethod
def check(n, alpha, beta):
_value_check((n.is_integer, n.is_nonnegative),
"'n' must be nonnegative integer. n = %s." % str(n))
_value_check((alpha > 0),
"'alpha' must be: alpha > 0 . alpha = %s" % str(alpha))
_value_check((beta > 0),
"'beta' must be: beta > 0 . beta = %s" % str(beta))
@property
def high(self):
return self.n
@property
def low(self):
return S.Zero
@property
def is_symbolic(self):
return not self.n.is_number
@property
def set(self):
if self.is_symbolic:
return Intersection(S.Naturals0, Interval(0, self.n))
return set(map(Integer, list(range(0, self.n + 1))))
def pmf(self, k):
n, a, b = self.n, self.alpha, self.beta
return binomial(n, k) * beta_fn(k + a, n - k + b) / beta_fn(a, b)
def BetaBinomial(name, n, alpha, beta):
r"""
Create a Finite Random Variable representing a Beta-binomial distribution.
Parameters
==========
n : Positive Integer
Represents number of trials
alpha : Real positive number
beta : Real positive number
Examples
========
>>> from sympy.stats import BetaBinomial, density
>>> X = BetaBinomial('X', 2, 1, 1)
>>> density(X).dict
{0: 1/3, 1: 2*beta(2, 2), 2: 1/3}
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Beta-binomial_distribution
.. [2] http://mathworld.wolfram.com/BetaBinomialDistribution.html
"""
return rv(name, BetaBinomialDistribution, n, alpha, beta)
class HypergeometricDistribution(SingleFiniteDistribution):
_argnames = ('N', 'm', 'n')
@staticmethod
    def check(N, m, n):
        _value_check((N.is_integer, N.is_nonnegative),
                     "'N' must be nonnegative integer. N = %s." % str(N))
        _value_check((n.is_integer, n.is_nonnegative),
                     "'n' must be nonnegative integer. n = %s." % str(n))
        _value_check((m.is_integer, m.is_nonnegative),
                     "'m' must be nonnegative integer. m = %s." % str(m))
@property
def is_symbolic(self):
return any(not x.is_number for x in (self.N, self.m, self.n))
@property
def high(self):
return Piecewise((self.n, Lt(self.n, self.m) != False), (self.m, True))
@property
def low(self):
return Piecewise((0, Gt(0, self.n + self.m - self.N) != False), (self.n + self.m - self.N, True))
@property
def set(self):
N, m, n = self.N, self.m, self.n
if self.is_symbolic:
return Intersection(S.Naturals0, Interval(self.low, self.high))
return {i for i in range(max(0, n + m - N), min(n, m) + 1)}
def pmf(self, k):
N, m, n = self.N, self.m, self.n
return S(binomial(m, k) * binomial(N - m, n - k))/binomial(N, n)
def Hypergeometric(name, N, m, n):
r"""
Create a Finite Random Variable representing a hypergeometric distribution.
Parameters
==========
N : Positive Integer
Represents finite population of size N.
m : Positive Integer
Represents number of objects in the population with the required feature.
n : Positive Integer
Represents number of draws.
Examples
========
>>> from sympy.stats import Hypergeometric, density
>>> X = Hypergeometric('X', 10, 5, 3) # 10 marbles, 5 white (success), 3 draws
>>> density(X).dict
{0: 1/12, 1: 5/12, 2: 5/12, 3: 1/12}
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Hypergeometric_distribution
.. [2] http://mathworld.wolfram.com/HypergeometricDistribution.html
"""
return rv(name, HypergeometricDistribution, N, m, n)
class RademacherDistribution(SingleFiniteDistribution):
@property
def set(self):
return {-1, 1}
@property
def pmf(self):
k = Dummy('k')
return Lambda(k, Piecewise((S.Half, Or(Eq(k, -1), Eq(k, 1))), (S.Zero, True)))
def Rademacher(name):
r"""
Create a Finite Random Variable representing a Rademacher distribution.
Examples
========
>>> from sympy.stats import Rademacher, density
>>> X = Rademacher('X')
>>> density(X).dict
{-1: 1/2, 1: 1/2}
Returns
=======
RandomSymbol
See Also
========
sympy.stats.Bernoulli
References
==========
.. [1] https://en.wikipedia.org/wiki/Rademacher_distribution
"""
return rv(name, RademacherDistribution)
0b964834b4ced5429f410f052ba38d6c628f7a05b5bda2bfde7d0edf169b50a1
import random
import itertools
from typing import Sequence as tSequence, Union as tUnion, List as tList, Tuple as tTuple
from sympy import (Matrix, MatrixSymbol, S, Indexed, Basic, Tuple, Range,
Set, And, Eq, FiniteSet, ImmutableMatrix, Integer, igcd,
Lambda, Mul, Dummy, IndexedBase, Add, Interval, oo,
linsolve, eye, Or, Not, Intersection, factorial, Contains,
Union, Expr, Function, exp, cacheit, sqrt, pi, gamma,
Ge, Piecewise, Symbol, NonSquareMatrixError, EmptySet,
ceiling, MatrixBase, ConditionSet, ones, zeros, Identity,
Rational, Lt, Gt, Ne, BlockMatrix)
from sympy.core.relational import Relational
from sympy.logic.boolalg import Boolean
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import strongly_connected_components
from sympy.stats.joint_rv import JointDistribution
from sympy.stats.joint_rv_types import JointDistributionHandmade
from sympy.stats.rv import (RandomIndexedSymbol, random_symbols, RandomSymbol,
_symbol_converter, _value_check, pspace, given,
dependent, is_random, sample_iter)
from sympy.stats.stochastic_process import StochasticPSpace
from sympy.stats.symbolic_probability import Probability, Expectation
from sympy.stats.frv_types import Bernoulli, BernoulliDistribution, FiniteRV
from sympy.stats.drv_types import Poisson, PoissonDistribution
from sympy.stats.crv_types import Normal, NormalDistribution, Gamma, GammaDistribution
from sympy.core.sympify import _sympify, sympify
__all__ = [
'StochasticProcess',
'DiscreteTimeStochasticProcess',
'DiscreteMarkovChain',
'TransitionMatrixOf',
'StochasticStateSpaceOf',
'GeneratorMatrixOf',
'ContinuousMarkovChain',
'BernoulliProcess',
'PoissonProcess',
'WienerProcess',
'GammaProcess'
]
@is_random.register(Indexed)
def _(x):
return is_random(x.base)
@is_random.register(RandomIndexedSymbol) # type: ignore
def _(x):
return True
def _set_converter(itr):
"""
Helper function for converting list/tuple/set to Set.
If the parameter is already an instance of Set then
no conversion is performed.
Returns
=======
Set
The argument converted to Set.
Raises
======
TypeError
If the argument is not an instance of list/tuple/set.
"""
if isinstance(itr, (list, tuple, set)):
itr = FiniteSet(*itr)
if not isinstance(itr, Set):
raise TypeError("%s is not an instance of list/tuple/set."%(itr))
return itr
def _state_converter(itr: tSequence) -> tUnion[Tuple, Range]:
"""
Helper function for converting list/tuple/set/Range/Tuple/FiniteSet
to Tuple/Range.
"""
if isinstance(itr, (Tuple, set, FiniteSet)):
itr = Tuple(*(sympify(i) if isinstance(i, str) else i for i in itr))
elif isinstance(itr, (list, tuple)):
# check if states are unique
if len(set(itr)) != len(itr):
raise ValueError('The state space must have unique elements.')
itr = Tuple(*(sympify(i) if isinstance(i, str) else i for i in itr))
elif isinstance(itr, Range):
# the only ordered set in sympy I know of
# try to convert to tuple
try:
itr = Tuple(*(sympify(i) if isinstance(i, str) else i for i in itr))
except ValueError:
pass
else:
raise TypeError("%s is not an instance of list/tuple/set/Range/Tuple/FiniteSet." % (itr))
return itr
def _sym_sympify(arg):
"""
Converts an arbitrary expression to a type that can be used inside SymPy.
Since bare strings are generally unwise to use inside expressions,
a string argument is converted to a Symbol.
Parameters
==========
arg: The parameter to be converted for use in SymPy.
Returns
=======
The converted parameter.
"""
if isinstance(arg, str):
return Symbol(arg)
else:
return _sympify(arg)
def _matrix_checks(matrix):
if not isinstance(matrix, (Matrix, MatrixSymbol, ImmutableMatrix)):
raise TypeError("Transition probabilities either should "
"be a Matrix or a MatrixSymbol.")
if matrix.shape[0] != matrix.shape[1]:
raise NonSquareMatrixError("%s is not a square matrix"%(matrix))
if isinstance(matrix, Matrix):
matrix = ImmutableMatrix(matrix.tolist())
return matrix
class StochasticProcess(Basic):
"""
Base class for all the stochastic processes whether
discrete or continuous.
Parameters
==========
sym: Symbol or str
state_space: Set
The state space of the stochastic process, by default S.Reals.
For discrete sets it is zero indexed.
See Also
========
DiscreteTimeStochasticProcess
"""
index_set = S.Reals
def __new__(cls, sym, state_space=S.Reals, **kwargs):
sym = _symbol_converter(sym)
state_space = _set_converter(state_space)
return Basic.__new__(cls, sym, state_space)
@property
def symbol(self):
return self.args[0]
@property
def state_space(self) -> tUnion[FiniteSet, Range]:
if not isinstance(self.args[1], (FiniteSet, Range)):
return FiniteSet(*self.args[1])
return self.args[1]
@property
def distribution(self):
return None
def __call__(self, time):
"""
Overridden in ContinuousTimeStochasticProcess.
"""
raise NotImplementedError("Use [] for indexing discrete time stochastic process.")
def __getitem__(self, time):
"""
Overridden in DiscreteTimeStochasticProcess.
"""
raise NotImplementedError("Use () for indexing continuous time stochastic process.")
def probability(self, condition):
raise NotImplementedError()
def joint_distribution(self, *args):
"""
Computes the joint distribution of the random indexed variables.
Parameters
==========
args: iterable
The finite list of random indexed variables/the key of a stochastic
process whose joint distribution has to be computed.
Returns
=======
JointDistribution
The joint distribution of the list of random indexed variables.
An unevaluated object is returned if it is not possible to
compute the joint distribution.
Raises
======
ValueError: When the arguments passed are not of type RandomIndexedSymbol
or Number.
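Examples
========
A short example, mirroring the ``BernoulliProcess`` docstring below:
>>> from sympy.stats import BernoulliProcess
>>> B = BernoulliProcess("B", p=0.7, success=1, failure=0)
>>> B.joint_distribution(B[1], B[2])
JointDistributionHandmade(Lambda((B[1], B[2]), Piecewise((0.7, Eq(B[1], 1)),
(0.3, Eq(B[1], 0)), (0, True))*Piecewise((0.7, Eq(B[2], 1)), (0.3, Eq(B[2], 0)),
(0, True))))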
"""
args = list(args)
for i, arg in enumerate(args):
if S(arg).is_Number:
if self.index_set.is_subset(S.Integers):
args[i] = self.__getitem__(arg)
else:
args[i] = self.__call__(arg)
elif not isinstance(arg, RandomIndexedSymbol):
raise ValueError("Expected a RandomIndexedSymbol or "
"key not %s"%(type(arg)))
if args[0].pspace.distribution == None: # checks if there is any distribution available
return JointDistribution(*args)
pdf = Lambda(tuple(args),
expr=Mul.fromiter(arg.pspace.process.density(arg) for arg in args))
return JointDistributionHandmade(pdf)
def expectation(self, condition, given_condition):
raise NotImplementedError("Abstract method for expectation queries.")
def sample(self):
raise NotImplementedError("Abstract method for sampling queries.")
class DiscreteTimeStochasticProcess(StochasticProcess):
"""
Base class for all discrete stochastic processes.
"""
def __getitem__(self, time):
"""
For indexing discrete time stochastic processes.
Returns
=======
RandomIndexedSymbol
"""
if time not in self.index_set:
raise IndexError("%s is not in the index set of %s"%(time, self.symbol))
idx_obj = Indexed(self.symbol, time)
pspace_obj = StochasticPSpace(self.symbol, self, self.distribution)
return RandomIndexedSymbol(idx_obj, pspace_obj)
class ContinuousTimeStochasticProcess(StochasticProcess):
"""
Base class for all continuous time stochastic process.
"""
def __call__(self, time):
"""
For indexing continuous time stochastic processes.
Returns
=======
RandomIndexedSymbol
"""
if time not in self.index_set:
raise IndexError("%s is not in the index set of %s"%(time, self.symbol))
func_obj = Function(self.symbol)(time)
pspace_obj = StochasticPSpace(self.symbol, self, self.distribution)
return RandomIndexedSymbol(func_obj, pspace_obj)
class TransitionMatrixOf(Boolean):
"""
Assumes that the matrix is the transition matrix
of the process.
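Examples
========
A short sketch, mirroring the ``DiscreteMarkovChain`` docstring below:
>>> from sympy.stats import DiscreteMarkovChain, TransitionMatrixOf, P
>>> from sympy import MatrixSymbol, Eq
>>> YS = DiscreteMarkovChain("Y")
>>> TS = MatrixSymbol('T', 3, 3)
>>> P(Eq(YS[3], 2), Eq(YS[1], 1) & TransitionMatrixOf(YS, TS))
T[0, 2]*T[1, 0] + T[1, 1]*T[1, 2] + T[1, 2]*T[2, 2]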
"""
def __new__(cls, process, matrix):
if not isinstance(process, DiscreteMarkovChain):
raise ValueError("Currently only DiscreteMarkovChain "
"support TransitionMatrixOf.")
matrix = _matrix_checks(matrix)
return Basic.__new__(cls, process, matrix)
process = property(lambda self: self.args[0])
matrix = property(lambda self: self.args[1])
class GeneratorMatrixOf(TransitionMatrixOf):
"""
Assumes that the matrix is the generator matrix
of the process.
"""
def __new__(cls, process, matrix):
if not isinstance(process, ContinuousMarkovChain):
raise ValueError("Currently only ContinuousMarkovChain "
"support GeneratorMatrixOf.")
matrix = _matrix_checks(matrix)
return Basic.__new__(cls, process, matrix)
class StochasticStateSpaceOf(Boolean):
def __new__(cls, process, state_space):
if not isinstance(process, (DiscreteMarkovChain, ContinuousMarkovChain)):
raise ValueError("Currently only DiscreteMarkovChain and ContinuousMarkovChain "
"support StochasticStateSpaceOf.")
state_space = _state_converter(state_space)
if isinstance(state_space, Range):
ss_size = ceiling((state_space.stop - state_space.start) / state_space.step)
else:
ss_size = len(state_space)
state_index = Range(ss_size)
return Basic.__new__(cls, process, state_index)
process = property(lambda self: self.args[0])
state_index = property(lambda self: self.args[1])
class MarkovProcess(StochasticProcess):
"""
Contains methods that handle queries
common to Markov processes.
"""
@property
def number_of_states(self) -> tUnion[Integer, Symbol]:
"""
The number of states in the Markov Chain.
"""
return _sympify(self.args[2].shape[0])
@property
def _state_index(self) -> Range:
"""
Returns state index as Range.
"""
return self.args[1]
@classmethod
def _sanity_checks(cls, state_space, trans_probs):
# Try to never have None as state_space or trans_probs.
# This helps a lot if we get it done at the start.
if (state_space is None) and (trans_probs is None):
_n = Dummy('n', integer=True, nonnegative=True)
state_space = _state_converter(Range(_n))
trans_probs = _matrix_checks(MatrixSymbol('_T', _n, _n))
elif state_space is None:
trans_probs = _matrix_checks(trans_probs)
state_space = _state_converter(Range(trans_probs.shape[0]))
elif trans_probs is None:
state_space = _state_converter(state_space)
if isinstance(state_space, Range):
_n = ceiling((state_space.stop - state_space.start) / state_space.step)
else:
_n = len(state_space)
trans_probs = MatrixSymbol('_T', _n, _n)
else:
state_space = _state_converter(state_space)
trans_probs = _matrix_checks(trans_probs)
# Range object doesn't want to give a symbolic size
# so we do it ourselves.
if isinstance(state_space, Range):
ss_size = ceiling((state_space.stop - state_space.start) / state_space.step)
else:
ss_size = len(state_space)
if ss_size != trans_probs.shape[0]:
raise ValueError('The size of the state space and the number of '
'rows of the transition matrix must be the same.')
return state_space, trans_probs
def _extract_information(self, given_condition):
"""
Helper function to extract information, like,
transition matrix/generator matrix, state space, etc.
"""
if isinstance(self, DiscreteMarkovChain):
trans_probs = self.transition_probabilities
state_index = self._state_index
elif isinstance(self, ContinuousMarkovChain):
trans_probs = self.generator_matrix
state_index = self._state_index
if isinstance(given_condition, And):
gcs = given_condition.args
given_condition = S.true
for gc in gcs:
if isinstance(gc, TransitionMatrixOf):
trans_probs = gc.matrix
if isinstance(gc, StochasticStateSpaceOf):
state_index = gc.state_index
if isinstance(gc, Relational):
given_condition = given_condition & gc
if isinstance(given_condition, TransitionMatrixOf):
trans_probs = given_condition.matrix
given_condition = S.true
if isinstance(given_condition, StochasticStateSpaceOf):
state_index = given_condition.state_index
given_condition = S.true
return trans_probs, state_index, given_condition
def _check_trans_probs(self, trans_probs, row_sum=1):
"""
Helper function for checking the validity of transition
probabilities.
"""
if not isinstance(trans_probs, MatrixSymbol):
rows = trans_probs.tolist()
for row in rows:
if (sum(row) - row_sum) != 0:
raise ValueError("Values in a row must sum to %s. "
"If you are using Float or floats then please use Rational."%(row_sum))
def _work_out_state_index(self, state_index, given_condition, trans_probs):
"""
Helper function to extract state space if there
is a random symbol in the given condition.
"""
# if given condition is None, then there is no need to work out
# state_space from random variables
if given_condition != None:
rand_var = list(given_condition.atoms(RandomSymbol) -
given_condition.atoms(RandomIndexedSymbol))
if len(rand_var) == 1:
state_index = rand_var[0].pspace.set
# `not None` is `True`. So the old test fails for symbolic sizes.
# Need to build the statement differently.
sym_cond = not isinstance(self.number_of_states, (int, Integer))
cond1 = not sym_cond and len(state_index) != trans_probs.shape[0]
if cond1:
raise ValueError("state space is not compatible with the transition probabilities.")
if not isinstance(trans_probs.shape[0], Symbol):
state_index = FiniteSet(*[i for i in range(trans_probs.shape[0])])
return state_index
@cacheit
def _preprocess(self, given_condition, evaluate):
"""
Helper function for pre-processing the information.
"""
is_insufficient = False
if not evaluate: # avoid pre-processing if the result is not to be evaluated
return (True, None, None, None)
# extracting transition matrix and state space
trans_probs, state_index, given_condition = self._extract_information(given_condition)
# given_condition does not have sufficient information
# for computations
if trans_probs == None or \
given_condition == None:
is_insufficient = True
else:
# checking transition probabilities
if isinstance(self, DiscreteMarkovChain):
self._check_trans_probs(trans_probs, row_sum=1)
elif isinstance(self, ContinuousMarkovChain):
self._check_trans_probs(trans_probs, row_sum=0)
# working out state space
state_index = self._work_out_state_index(state_index, given_condition, trans_probs)
return is_insufficient, trans_probs, state_index, given_condition
def replace_with_index(self, condition):
if isinstance(condition, Relational):
lhs, rhs = condition.lhs, condition.rhs
if not isinstance(lhs, RandomIndexedSymbol):
lhs, rhs = rhs, lhs
condition = type(condition)(self.index_of.get(lhs, lhs),
self.index_of.get(rhs, rhs))
return condition
def probability(self, condition, given_condition=None, evaluate=True, **kwargs):
"""
Handles probability queries for Markov process.
Parameters
==========
condition: Relational
given_condition: Relational/And
Returns
=======
Probability
If the information is not sufficient.
Expr
In all other cases.
Note
====
Any information passed at the time of query overrides
any information passed at the time of object creation like
transition probabilities, state space.
Pass the transition matrix using TransitionMatrixOf,
generator matrix using GeneratorMatrixOf and state space
using StochasticStateSpaceOf in given_condition using & or And.
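Examples
========
A short numeric example, mirroring the ``DiscreteMarkovChain`` docstring:
>>> from sympy.stats import DiscreteMarkovChain, P
>>> from sympy import Matrix, Eq
>>> T = Matrix([[0.5, 0.2, 0.3],[0.2, 0.5, 0.3],[0.2, 0.3, 0.5]])
>>> Y = DiscreteMarkovChain("Y", [0, 1, 2], T)
>>> P(Eq(Y[3], 2), Eq(Y[1], 1)).round(2)
0.36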
"""
check, mat, state_index, new_given_condition = \
self._preprocess(given_condition, evaluate)
if check:
return Probability(condition, new_given_condition)
if isinstance(self, ContinuousMarkovChain):
trans_probs = self.transition_probabilities(mat)
elif isinstance(self, DiscreteMarkovChain):
trans_probs = mat
condition = self.replace_with_index(condition)
given_condition = self.replace_with_index(given_condition)
new_given_condition = self.replace_with_index(new_given_condition)
if isinstance(condition, Relational):
if isinstance(new_given_condition, And):
gcs = new_given_condition.args
else:
gcs = (new_given_condition, )
min_key_rv = list(new_given_condition.atoms(RandomIndexedSymbol))
rv = list(condition.atoms(RandomIndexedSymbol))
if len(min_key_rv):
min_key_rv = min_key_rv[0]
for r in rv:
if min_key_rv.key > r.key:
return Probability(condition)
else:
min_key_rv = None
return Probability(condition)
if len(rv) > 1:
rv = rv[:2]
if rv[0].key < rv[1].key:
rv[0], rv[1] = rv[1], rv[0]
s = Rational(0, 1)
n = len(self.state_space)
if isinstance(condition, Eq) or isinstance(condition, Ne):
for i in range(0, n):
s += self.probability(Eq(rv[0], i), Eq(rv[1], i)) * self.probability(Eq(rv[1], i), new_given_condition)
return s if isinstance(condition, Eq) else 1 - s
else:
upper = 0
greater = False
if isinstance(condition, Ge) or isinstance(condition, Lt):
upper = 1
if isinstance(condition, Gt) or isinstance(condition, Ge):
greater = True
for i in range(0, n):
if i <= n//2:
for j in range(0, i + upper):
s += self.probability(Eq(rv[0], i), Eq(rv[1], j)) * self.probability(Eq(rv[1], j), new_given_condition)
else:
s += self.probability(Eq(rv[0], i), new_given_condition)
for j in range(i + upper, n):
s -= self.probability(Eq(rv[0], i), Eq(rv[1], j)) * self.probability(Eq(rv[1], j), new_given_condition)
return s if greater else 1 - s
rv = rv[0]
states = condition.as_set()
prob, gstate = dict(), None
for gc in gcs:
if gc.has(min_key_rv):
if gc.has(Probability):
p, gp = (gc.rhs, gc.lhs) if isinstance(gc.lhs, Probability) \
else (gc.lhs, gc.rhs)
gr = gp.args[0]
gset = Intersection(gr.as_set(), state_index)
gstate = list(gset)[0]
prob[gset] = p
else:
_, gstate = (gc.lhs.key, gc.rhs) if isinstance(gc.lhs, RandomIndexedSymbol) \
else (gc.rhs.key, gc.lhs)
if any((k not in self.index_set) for k in (rv.key, min_key_rv.key)):
raise IndexError("The timestamps of the process are not in it's index set.")
states = Intersection(states, state_index) if not isinstance(self.number_of_states, Symbol) else states
for state in Union(states, FiniteSet(gstate)):
if not isinstance(state, (int, Integer)) or Ge(state, mat.shape[0]) is True:
raise IndexError("No information is available for (%s, %s) in "
"transition probabilities of shape, (%s, %s). "
"State space is zero indexed."
%(gstate, state, mat.shape[0], mat.shape[1]))
if prob:
gstates = Union(*prob.keys())
if len(gstates) == 1:
gstate = list(gstates)[0]
gprob = list(prob.values())[0]
prob[gstates] = gprob
elif len(gstates) == len(state_index) - 1:
gstate = list(state_index - gstates)[0]
gprob = S.One - sum(prob.values())
prob[state_index - gstates] = gprob
else:
raise ValueError("Conflicting information.")
else:
gprob = S.One
if min_key_rv == rv:
return sum([prob[FiniteSet(state)] for state in states])
if isinstance(self, ContinuousMarkovChain):
return gprob * sum([trans_probs(rv.key - min_key_rv.key).__getitem__((gstate, state))
for state in states])
if isinstance(self, DiscreteMarkovChain):
return gprob * sum([(trans_probs**(rv.key - min_key_rv.key)).__getitem__((gstate, state))
for state in states])
if isinstance(condition, Not):
expr = condition.args[0]
return S.One - self.probability(expr, given_condition, evaluate, **kwargs)
if isinstance(condition, And):
compute_later, state2cond, conds = [], dict(), condition.args
for expr in conds:
if isinstance(expr, Relational):
ris = list(expr.atoms(RandomIndexedSymbol))[0]
if state2cond.get(ris, None) is None:
state2cond[ris] = S.true
state2cond[ris] &= expr
else:
compute_later.append(expr)
ris = []
for ri in state2cond:
ris.append(ri)
cset = Intersection(state2cond[ri].as_set(), state_index)
if len(cset) == 0:
return S.Zero
state2cond[ri] = cset.as_relational(ri)
sorted_ris = sorted(ris, key=lambda ri: ri.key)
prod = self.probability(state2cond[sorted_ris[0]], given_condition, evaluate, **kwargs)
for i in range(1, len(sorted_ris)):
ri, prev_ri = sorted_ris[i], sorted_ris[i-1]
if not isinstance(state2cond[ri], Eq):
raise ValueError("The process is in multiple states at %s, unable to determine the probability."%(ri))
mat_of = TransitionMatrixOf(self, mat) if isinstance(self, DiscreteMarkovChain) else GeneratorMatrixOf(self, mat)
prod *= self.probability(state2cond[ri], state2cond[prev_ri]
& mat_of
& StochasticStateSpaceOf(self, state_index),
evaluate, **kwargs)
for expr in compute_later:
prod *= self.probability(expr, given_condition, evaluate, **kwargs)
return prod
if isinstance(condition, Or):
return sum([self.probability(expr, given_condition, evaluate, **kwargs)
for expr in condition.args])
raise NotImplementedError("Mechanism for handling (%s, %s) queries hasn't been "
"implemented yet."%(condition, given_condition))
def expectation(self, expr, condition=None, evaluate=True, **kwargs):
"""
Handles expectation queries for Markov processes.
Parameters
==========
expr: RandomIndexedSymbol, Relational, Logic
Condition for which expectation has to be computed. Must
contain a RandomIndexedSymbol of the process.
condition: Relational, Logic
The given conditions under which computations should be done.
Returns
=======
Expectation
Unevaluated object if computations cannot be done due to
insufficient information.
Expr
In all other cases when the computations are successful.
Note
====
Any information passed at the time of query overrides
any information passed at the time of object creation like
transition probabilities, state space.
Pass the transition matrix using TransitionMatrixOf,
generator matrix using GeneratorMatrixOf and state space
using StochasticStateSpaceOf in given_condition using & or And.
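Examples
========
A short example with symbolic state names, mirroring the
``DiscreteMarkovChain`` docstring:
>>> from sympy.stats import DiscreteMarkovChain, E
>>> from sympy import Matrix, Eq, symbols
>>> T = Matrix([[0.5, 0.2, 0.3],[0.2, 0.5, 0.3],[0.2, 0.3, 0.5]])
>>> sunny, cloudy, rainy = symbols('Sunny, Cloudy, Rainy')
>>> Y = DiscreteMarkovChain("Y", [sunny, cloudy, rainy], T)
>>> E(Y[3], Eq(Y[1], cloudy))
0.38*Cloudy + 0.36*Rainy + 0.26*Sunny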
"""
check, mat, state_index, condition = \
self._preprocess(condition, evaluate)
if check:
return Expectation(expr, condition)
rvs = random_symbols(expr)
if isinstance(expr, Expr) and isinstance(condition, Eq) \
and len(rvs) == 1:
# handle queries similar to E(f(X[i]), Eq(X[i-m], <some-state>))
condition=self.replace_with_index(condition)
state_index=self.replace_with_index(state_index)
rv = list(rvs)[0]
lhsg, rhsg = condition.lhs, condition.rhs
if not isinstance(lhsg, RandomIndexedSymbol):
lhsg, rhsg = (rhsg, lhsg)
if rhsg not in state_index:
raise ValueError("%s state is not in the state space."%(rhsg))
if rv.key < lhsg.key:
raise ValueError("Incorrect given condition is given, expectation "
"time %s < time %s"%(rv.key, rv.key))
mat_of = TransitionMatrixOf(self, mat) if isinstance(self, DiscreteMarkovChain) else GeneratorMatrixOf(self, mat)
cond = condition & mat_of & \
StochasticStateSpaceOf(self, state_index)
func = lambda s: self.probability(Eq(rv, s), cond) * expr.subs(rv, self._state_index[s])
return sum([func(s) for s in state_index])
raise NotImplementedError("Mechanism for handling (%s, %s) queries hasn't been "
"implemented yet."%(expr, condition))
class DiscreteMarkovChain(DiscreteTimeStochasticProcess, MarkovProcess):
"""
Represents a finite discrete time-homogeneous Markov chain.
This type of Markov Chain can be uniquely characterised by
its (ordered) state space and its one-step transition probability
matrix.
Parameters
==========
sym:
The name given to the Markov Chain
state_space:
Optional, by default, Range(n)
trans_probs:
Optional, by default, MatrixSymbol('_T', n, n)
Examples
========
>>> from sympy.stats import DiscreteMarkovChain, TransitionMatrixOf, P, E
>>> from sympy import Matrix, MatrixSymbol, Eq, symbols
>>> T = Matrix([[0.5, 0.2, 0.3],[0.2, 0.5, 0.3],[0.2, 0.3, 0.5]])
>>> Y = DiscreteMarkovChain("Y", [0, 1, 2], T)
>>> YS = DiscreteMarkovChain("Y")
>>> Y.state_space
FiniteSet(0, 1, 2)
>>> Y.transition_probabilities
Matrix([
[0.5, 0.2, 0.3],
[0.2, 0.5, 0.3],
[0.2, 0.3, 0.5]])
>>> TS = MatrixSymbol('T', 3, 3)
>>> P(Eq(YS[3], 2), Eq(YS[1], 1) & TransitionMatrixOf(YS, TS))
T[0, 2]*T[1, 0] + T[1, 1]*T[1, 2] + T[1, 2]*T[2, 2]
>>> P(Eq(Y[3], 2), Eq(Y[1], 1)).round(2)
0.36
Probabilities will be calculated based on indexes rather
than state names. For example, with the Sunny-Cloudy-Rainy
model with string state names:
>>> from sympy.core.symbol import Str
>>> Y = DiscreteMarkovChain("Y", [Str('Sunny'), Str('Cloudy'), Str('Rainy')], T)
>>> P(Eq(Y[3], 2), Eq(Y[1], 1)).round(2)
0.36
This gives the same answer as the ``[0, 1, 2]`` state space.
Currently, there is no support for state names within probability
and expectation statements. Here is a work-around using ``Str``:
>>> P(Eq(Str('Rainy'), Y[3]), Eq(Y[1], Str('Cloudy'))).round(2)
0.36
Symbol state names can also be used:
>>> sunny, cloudy, rainy = symbols('Sunny, Cloudy, Rainy')
>>> Y = DiscreteMarkovChain("Y", [sunny, cloudy, rainy], T)
>>> P(Eq(Y[3], rainy), Eq(Y[1], cloudy)).round(2)
0.36
Expectations will be calculated as follows:
>>> E(Y[3], Eq(Y[1], cloudy))
0.38*Cloudy + 0.36*Rainy + 0.26*Sunny
Probability of expressions with multiple RandomIndexedSymbols
can also be calculated provided there is only 1 RandomIndexedSymbol
in the given condition. It is always better to use Rational instead
of floating point numbers for the probabilities in the
transition matrix to avoid errors.
>>> from sympy import Gt, Le, Rational
>>> T = Matrix([[Rational(5, 10), Rational(3, 10), Rational(2, 10)], [Rational(2, 10), Rational(7, 10), Rational(1, 10)], [Rational(3, 10), Rational(3, 10), Rational(4, 10)]])
>>> Y = DiscreteMarkovChain("Y", [0, 1, 2], T)
>>> P(Eq(Y[3], Y[1]), Eq(Y[0], 0)).round(3)
0.409
>>> P(Gt(Y[3], Y[1]), Eq(Y[0], 0)).round(2)
0.36
>>> P(Le(Y[15], Y[10]), Eq(Y[8], 2)).round(7)
0.6963328
There is limited support for arbitrarily sized states:
>>> n = symbols('n', nonnegative=True, integer=True)
>>> T = MatrixSymbol('T', n, n)
>>> Y = DiscreteMarkovChain("Y", trans_probs=T)
>>> Y.state_space
Range(0, n, 1)
References
==========
.. [1] https://en.wikipedia.org/wiki/Markov_chain#Discrete-time_Markov_chain
.. [2] https://www.dartmouth.edu/~chance/teaching_aids/books_articles/probability_book/Chapter11.pdf
"""
index_set = S.Naturals0
def __new__(cls, sym, state_space=None, trans_probs=None):
# type: (Basic, tUnion[str, Symbol], tSequence, tUnion[MatrixBase, MatrixSymbol]) -> DiscreteMarkovChain
sym = _symbol_converter(sym)
state_space, trans_probs = MarkovProcess._sanity_checks(state_space, trans_probs)
obj = Basic.__new__(cls, sym, state_space, trans_probs)
indices = dict()
if isinstance(obj.number_of_states, Integer):
for index, state in enumerate(obj._state_index):
indices[state] = index
obj.index_of = indices
return obj
@property
def transition_probabilities(self) -> tUnion[MatrixBase, MatrixSymbol]:
"""
Transition probabilities of discrete Markov chain,
either an instance of Matrix or MatrixSymbol.
"""
return self.args[2]
def _transient2transient(self):
"""
Computes the one step probabilities of transient
states to transient states. Used in finding
fundamental matrix, absorbing probabilities.
"""
trans_probs = self.transition_probabilities
if not isinstance(trans_probs, ImmutableMatrix):
return None
m = trans_probs.shape[0]
trans_states = [i for i in range(m) if trans_probs[i, i] != 1]
t2t = [[trans_probs[si, sj] for sj in trans_states] for si in trans_states]
return ImmutableMatrix(t2t)
def _transient2absorbing(self):
"""
Computes the one step probabilities of transient
states to absorbing states. Used in finding
fundamental matrix, absorbing probabilities.
"""
trans_probs = self.transition_probabilities
if not isinstance(trans_probs, ImmutableMatrix):
return None
m, trans_states, absorb_states = \
trans_probs.shape[0], [], []
for i in range(m):
if trans_probs[i, i] == 1:
absorb_states.append(i)
else:
trans_states.append(i)
if not absorb_states or not trans_states:
return None
t2a = [[trans_probs[si, sj] for sj in absorb_states]
for si in trans_states]
return ImmutableMatrix(t2a)
def communication_classes(self) -> tList[tTuple[tList[Basic], Boolean, Integer]]:
"""
Returns the list of communication classes that partition
the states of the Markov chain.
A communication class is defined to be a set of states
such that every state in that set is reachable from
every other state in that set. Due to its properties
this forms a class in the mathematical sense.
Communication classes are also known as recurrence
classes.
Returns
=======
classes
The ``classes`` are a list of tuples. Each
tuple represents a single communication class
with its properties. The first element in the
tuple is the list of states in the class, the
second element is whether the class is recurrent
and the third element is the period of the
communication class.
Examples
========
>>> from sympy.stats import DiscreteMarkovChain
>>> from sympy import Matrix
>>> T = Matrix([[0, 1, 0],
... [1, 0, 0],
... [1, 0, 0]])
>>> X = DiscreteMarkovChain('X', [1, 2, 3], T)
>>> classes = X.communication_classes()
>>> for states, is_recurrent, period in classes:
... states, is_recurrent, period
([1, 2], True, 2)
([3], False, 1)
From this we can see that states ``1`` and ``2``
communicate, are recurrent and have a period
of 2. We can also see state ``3`` is transient
with a period of 1.
Notes
=====
The algorithm used is of order ``O(n**2)`` where
``n`` is the number of states in the Markov chain.
It uses Tarjan's algorithm to find the classes
themselves and then it uses a breadth-first search
algorithm to find each class's periodicity.
Most of the algorithm's components approach ``O(n)``
as the matrix becomes more and more sparse.
References
==========
.. [1] http://www.columbia.edu/~ww2040/4701Sum07/4701-06-Notes-MCII.pdf
.. [2] http://cecas.clemson.edu/~shierd/Shier/markov.pdf
.. [3] https://ujcontent.uj.ac.za/vital/access/services/Download/uj:7506/CONTENT1
.. [4] https://www.mathworks.com/help/econ/dtmc.classify.html
"""
n = self.number_of_states
T = self.transition_probabilities
if isinstance(T, MatrixSymbol):
raise NotImplementedError("Cannot perform the operation with a symbolic matrix.")
# begin Tarjan's algorithm
V = Range(n)
# don't use state names. Rather use state
# indexes since we use them for matrix
# indexing here and later onward
E = [(i, j) for i in V for j in V if T[i, j] != 0]
classes = strongly_connected_components((V, E))
# end Tarjan's algorithm
recurrence = []
periods = []
for class_ in classes:
# begin recurrent check (similar to self._check_trans_probs())
submatrix = T[class_, class_] # get the submatrix with those states
is_recurrent = S.true
rows = submatrix.tolist()
for row in rows:
if (sum(row) - 1) != 0:
is_recurrent = S.false
break
recurrence.append(is_recurrent)
# end recurrent check
# begin breadth-first search
non_tree_edge_values = set()
visited = {class_[0]}
newly_visited = {class_[0]}
level = {class_[0]: 0}
current_level = 0
done = False # imitate a do-while loop
while not done: # runs at most len(class_) times
done = len(visited) == len(class_)
current_level += 1
# this loop and the while loop above run a combined len(class_) number of times.
# so this triple nested loop runs through each of the n states once.
for i in newly_visited:
# the loop below runs len(class_) number of times
# complexity is around about O(n * avg(len(class_)))
newly_visited = {j for j in class_ if T[i, j] != 0}
new_tree_edges = newly_visited.difference(visited)
for j in new_tree_edges:
level[j] = current_level
new_non_tree_edges = newly_visited.intersection(visited)
new_non_tree_edge_values = {level[i]-level[j]+1 for j in new_non_tree_edges}
non_tree_edge_values = non_tree_edge_values.union(new_non_tree_edge_values)
visited = visited.union(new_tree_edges)
# igcd needs at least 2 arguments
positive_ntev = {val_e for val_e in non_tree_edge_values if val_e > 0}
if len(positive_ntev) == 0:
periods.append(len(class_))
elif len(positive_ntev) == 1:
periods.append(positive_ntev.pop())
else:
periods.append(igcd(*positive_ntev))
# end breadth-first search
# convert back to the user's state names
classes = [[self._state_index[i] for i in class_] for class_ in classes]
return sympify(list(zip(classes, recurrence, periods)))
def fundamental_matrix(self):
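"""
Computes the fundamental matrix ``N = (I - Q)**-1`` of an absorbing
chain, where ``Q`` is the transient-to-transient submatrix, or
returns ``None`` for a symbolic transition matrix.
A minimal sketch, assuming the small absorbing chain below (the
printed output is indicative only, hence the skip):
>>> from sympy.stats import DiscreteMarkovChain
>>> from sympy import Matrix, S
>>> T = Matrix([[1, 0, 0],
...             [S(1)/2, 0, S(1)/2],
...             [S(1)/4, S(1)/4, S(1)/2]])
>>> X = DiscreteMarkovChain('X', [0, 1, 2], T)
>>> X.fundamental_matrix() # doctest: +SKIP
Matrix([
[4/3, 4/3],
[2/3, 8/3]])
"""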
Q = self._transient2transient()
if Q == None:
return None
I = eye(Q.shape[0])
if (I - Q).det() == 0:
raise ValueError("Fundamental matrix doesn't exists.")
return ImmutableMatrix((I - Q).inv().tolist())
def absorbing_probabilities(self):
"""
Computes the absorbing probabilities, i.e.,
the ij-th entry of the matrix denotes the
probability of Markov chain being absorbed
in state j starting from state i.
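Examples
========
A minimal sketch, reusing the assumed absorbing chain from
``fundamental_matrix`` above (output indicative only, hence the skip):
>>> from sympy.stats import DiscreteMarkovChain
>>> from sympy import Matrix, S
>>> T = Matrix([[1, 0, 0],
...             [S(1)/2, 0, S(1)/2],
...             [S(1)/4, S(1)/4, S(1)/2]])
>>> X = DiscreteMarkovChain('X', [0, 1, 2], T)
>>> X.absorbing_probabilities() # doctest: +SKIP
Matrix([
[1],
[1]])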
"""
R = self._transient2absorbing()
N = self.fundamental_matrix()
if R == None or N == None:
return None
return N*R
def absorbing_probabilites(self):
SymPyDeprecationWarning(
feature="absorbing_probabilites",
useinstead="absorbing_probabilities",
issue=20042,
deprecated_since_version="1.7"
).warn()
return self.absorbing_probabilities()
def is_regular(self):
tuples = self.communication_classes()
if len(tuples) == 0:
return S.false # not defined for a 0x0 matrix
classes, _, periods = list(zip(*tuples))
return And(len(classes) == 1, periods[0] == 1)
def is_ergodic(self):
tuples = self.communication_classes()
if len(tuples) == 0:
return S.false # not defined for a 0x0 matrix
classes, _, _ = list(zip(*tuples))
return S(len(classes) == 1)
def is_absorbing_state(self, state):
trans_probs = self.transition_probabilities
if isinstance(trans_probs, ImmutableMatrix) and \
state < trans_probs.shape[0]:
return S(trans_probs[state, state]) is S.One
def is_absorbing_chain(self):
states, A, B, C = self.decompose()
r = A.shape[0]
return And(r > 0, A == Identity(r).as_explicit())
def stationary_distribution(self, condition_set=False) -> tUnion[ImmutableMatrix, ConditionSet, Lambda]:
"""
The stationary distribution is any row vector, p, that solves p = pP,
is row stochastic and each element in p must be nonnegative.
That means in matrix form: :math:`(P-I)^T p^T = 0` and
:math:`(1, ..., 1) p^T = 1`
where ``P`` is the one-step transition matrix.
All time-homogeneous Markov Chains with a finite state space
have at least one stationary distribution. In addition, if
a finite time-homogeneous Markov Chain is irreducible, the
stationary distribution is unique.
Parameters
==========
condition_set : bool
If the chain has a symbolic size or transition matrix,
it will return a ``Lambda`` if ``False`` and return a
``ConditionSet`` if ``True``.
Examples
========
>>> from sympy.stats import DiscreteMarkovChain
>>> from sympy import Matrix, S
An irreducible Markov Chain
>>> T = Matrix([[S(1)/2, S(1)/2, 0],
... [S(4)/5, S(1)/5, 0],
... [1, 0, 0]])
>>> X = DiscreteMarkovChain('X', trans_probs=T)
>>> X.stationary_distribution()
Matrix([[8/13, 5/13, 0]])
A reducible Markov Chain
>>> T = Matrix([[S(1)/2, S(1)/2, 0],
... [S(4)/5, S(1)/5, 0],
... [0, 0, 1]])
>>> X = DiscreteMarkovChain('X', trans_probs=T)
>>> X.stationary_distribution()
Matrix([[8/13 - 8*tau0/13, 5/13 - 5*tau0/13, tau0]])
>>> Y = DiscreteMarkovChain('Y')
>>> Y.stationary_distribution()
Lambda((wm, _T), Eq(wm*_T, wm))
>>> Y.stationary_distribution(condition_set=True)
ConditionSet(wm, Eq(wm*_T, wm))
References
==========
.. [1] https://www.probabilitycourse.com/chapter11/11_2_6_stationary_and_limiting_distributions.php
.. [2] https://galton.uchicago.edu/~yibi/teaching/stat317/2014/Lectures/Lecture4_6up.pdf
See Also
========
sympy.stats.stochastic_process_types.DiscreteMarkovChain.limiting_distribution
"""
trans_probs = self.transition_probabilities
n = self.number_of_states
if n == 0:
return ImmutableMatrix(Matrix([[]]))
# symbolic matrix version
if isinstance(trans_probs, MatrixSymbol):
wm = MatrixSymbol('wm', 1, n)
if condition_set:
return ConditionSet(wm, Eq(wm * trans_probs, wm))
else:
return Lambda((wm, trans_probs), Eq(wm * trans_probs, wm))
# numeric matrix version
a = Matrix(trans_probs - Identity(n)).T
a[0, 0:n] = ones(1, n)
b = zeros(n, 1)
b[0, 0] = 1
soln = list(linsolve((a, b)))[0]
return ImmutableMatrix([[sol for sol in soln]])
def fixed_row_vector(self):
"""
A wrapper for ``stationary_distribution()``.
"""
return self.stationary_distribution()
@property
def limiting_distribution(self):
"""
The fixed row vector is the limiting
distribution of a discrete Markov chain.
"""
return self.fixed_row_vector()
def decompose(self) -> tTuple[tList[Basic], ImmutableMatrix, ImmutableMatrix, ImmutableMatrix]:
"""
Decomposes the transition matrix into submatrices with
special properties.
The transition matrix can be decomposed into 4 submatrices:
- A - the submatrix from recurrent states to recurrent states.
- B - the submatrix from transient to recurrent states.
- C - the submatrix from transient to transient states.
- O - the submatrix of zeros for recurrent to transient states.
Returns
=======
states, A, B, C
``states`` - a list of state names with the first being
the recurrent states and the last being
the transient states in the order
of the row names of A and then the row names of C.
``A`` - the submatrix from recurrent states to recurrent states.
``B`` - the submatrix from transient to recurrent states.
``C`` - the submatrix from transient to transient states.
Examples
========
>>> from sympy.stats import DiscreteMarkovChain
>>> from sympy import Matrix, S
One can decompose this chain for example:
>>> T = Matrix([[S(1)/2, S(1)/2, 0, 0, 0],
... [S(2)/5, S(1)/5, S(2)/5, 0, 0],
... [0, 0, 1, 0, 0],
... [0, 0, S(1)/2, S(1)/2, 0],
... [S(1)/2, 0, 0, 0, S(1)/2]])
>>> X = DiscreteMarkovChain('X', trans_probs=T)
>>> states, A, B, C = X.decompose()
>>> states
[2, 0, 1, 3, 4]
>>> A # recurrent to recurrent
Matrix([[1]])
>>> B # transient to recurrent
Matrix([
[ 0],
[2/5],
[1/2],
[ 0]])
>>> C # transient to transient
Matrix([
[1/2, 1/2, 0, 0],
[2/5, 1/5, 0, 0],
[ 0, 0, 1/2, 0],
[1/2, 0, 0, 1/2]])
This means that state 2 is the only absorbing state
(since A is a 1x1 matrix). B is a 4x1 matrix since
the 4 remaining transient states all merge into recurrent
state 2. And C is the 4x4 matrix that shows how the
transient states 0, 1, 3, 4 all interact.
See Also
========
sympy.stats.stochastic_process_types.DiscreteMarkovChain.communication_classes
sympy.stats.stochastic_process_types.DiscreteMarkovChain.canonical_form
References
==========
.. [1] https://en.wikipedia.org/wiki/Absorbing_Markov_chain
.. [2] http://people.brandeis.edu/~igusa/Math56aS08/Math56a_S08_notes015.pdf
"""
trans_probs = self.transition_probabilities
classes = self.communication_classes()
r_states = []
t_states = []
for states, recurrent, period in classes:
if recurrent:
r_states += states
else:
t_states += states
states = r_states + t_states
indexes = [self.index_of[state] for state in states]
A = Matrix(len(r_states), len(r_states),
lambda i, j: trans_probs[indexes[i], indexes[j]])
B = Matrix(len(t_states), len(r_states),
lambda i, j: trans_probs[indexes[len(r_states) + i], indexes[j]])
C = Matrix(len(t_states), len(t_states),
lambda i, j: trans_probs[indexes[len(r_states) + i], indexes[len(r_states) + j]])
return states, A.as_immutable(), B.as_immutable(), C.as_immutable()
def canonical_form(self) -> tTuple[tList[Basic], ImmutableMatrix]:
"""
Reorders the one-step transition matrix
so that recurrent states appear first and transient
states appear last. Other representations include inserting
transient states first and recurrent states last.
Returns
=======
states, P_new
``states`` is the list that describes the order of the
new states in the matrix
so that the ith element in ``states`` is the state of the
ith row of A.
``P_new`` is the new transition matrix in canonical form.
Examples
========
>>> from sympy.stats import DiscreteMarkovChain
>>> from sympy import Matrix, S
You can convert your chain into canonical form:
>>> T = Matrix([[S(1)/2, S(1)/2, 0, 0, 0],
... [S(2)/5, S(1)/5, S(2)/5, 0, 0],
... [0, 0, 1, 0, 0],
... [0, 0, S(1)/2, S(1)/2, 0],
... [S(1)/2, 0, 0, 0, S(1)/2]])
>>> X = DiscreteMarkovChain('X', list(range(1, 6)), trans_probs=T)
>>> states, new_matrix = X.canonical_form()
>>> states
[3, 1, 2, 4, 5]
>>> new_matrix
Matrix([
[ 1, 0, 0, 0, 0],
[ 0, 1/2, 1/2, 0, 0],
[2/5, 2/5, 1/5, 0, 0],
[1/2, 0, 0, 1/2, 0],
[ 0, 1/2, 0, 0, 1/2]])
The new states are [3, 1, 2, 4, 5] and you can
create a new chain with this and its canonical
form will remain the same (since it is already
in canonical form).
>>> X = DiscreteMarkovChain('X', states, new_matrix)
>>> states, new_matrix = X.canonical_form()
>>> states
[3, 1, 2, 4, 5]
>>> new_matrix
Matrix([
[ 1, 0, 0, 0, 0],
[ 0, 1/2, 1/2, 0, 0],
[2/5, 2/5, 1/5, 0, 0],
[1/2, 0, 0, 1/2, 0],
[ 0, 1/2, 0, 0, 1/2]])
This is not limited to absorbing chains:
>>> T = Matrix([[0, 5, 5, 0, 0],
... [0, 0, 0, 10, 0],
... [5, 0, 5, 0, 0],
... [0, 10, 0, 0, 0],
... [0, 3, 0, 3, 4]])/10
>>> X = DiscreteMarkovChain('X', trans_probs=T)
>>> states, new_matrix = X.canonical_form()
>>> states
[1, 3, 0, 2, 4]
>>> new_matrix
Matrix([
[ 0, 1, 0, 0, 0],
[ 1, 0, 0, 0, 0],
[ 1/2, 0, 0, 1/2, 0],
[ 0, 0, 1/2, 1/2, 0],
[3/10, 3/10, 0, 0, 2/5]])
See Also
========
sympy.stats.stochastic_process_types.DiscreteMarkovChain.communication_classes
sympy.stats.stochastic_process_types.DiscreteMarkovChain.decompose
References
==========
.. [1] https://onlinelibrary.wiley.com/doi/pdf/10.1002/9780470316887.app1
.. [2] http://www.columbia.edu/~ww2040/6711F12/lect1023big.pdf
"""
states, A, B, C = self.decompose()
O = zeros(A.shape[0], C.shape[1])
return states, BlockMatrix([[A, O], [B, C]]).as_explicit()
def sample(self):
"""
Returns
=======
sample: iterator object
iterator object containing the sample
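Examples
========
A rough sketch of usage; draws are random, so the values below are
only indicative:
>>> from sympy.stats import DiscreteMarkovChain
>>> from sympy import Matrix, S
>>> T = Matrix([[S(1)/2, S(1)/2], [S(1)/3, S(2)/3]])
>>> X = DiscreteMarkovChain('X', [0, 1], T)
>>> it = X.sample()
>>> [next(it) for _ in range(5)] # doctest: +SKIP
[1, 0, 0, 1, 1]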
"""
if not isinstance(self.transition_probabilities, (Matrix, ImmutableMatrix)):
raise ValueError("Transition Matrix must be provided for sampling")
Tlist = self.transition_probabilities.tolist()
samps = [random.choice(list(self.state_space))]
yield samps[0]
time = 1
densities = {}
for state in self.state_space:
states = list(self.state_space)
densities[state] = {states[i]: Tlist[state][i]
for i in range(len(states))}
while time < S.Infinity:
samps.append(next(sample_iter(FiniteRV("_", densities[samps[time - 1]]))))
yield samps[time]
time += 1
class ContinuousMarkovChain(ContinuousTimeStochasticProcess, MarkovProcess):
"""
Represents continuous time Markov chain.
Parameters
==========
sym: Symbol/str
state_space: Set
Optional, by default, S.Reals
gen_mat: Matrix/ImmutableMatrix/MatrixSymbol
Optional, by default, None
Examples
========
>>> from sympy.stats import ContinuousMarkovChain
>>> from sympy import Matrix, S
>>> G = Matrix([[-S(1), S(1)], [S(1), -S(1)]])
>>> C = ContinuousMarkovChain('C', state_space=[0, 1], gen_mat=G)
>>> C.limiting_distribution()
Matrix([[1/2, 1/2]])
References
==========
.. [1] https://en.wikipedia.org/wiki/Markov_chain#Continuous-time_Markov_chain
.. [2] http://u.math.biu.ac.il/~amirgi/CTMCnotes.pdf
"""
index_set = S.Reals
def __new__(cls, sym, state_space=None, gen_mat=None):
sym = _symbol_converter(sym)
state_space, gen_mat = MarkovProcess._sanity_checks(state_space, gen_mat)
obj = Basic.__new__(cls, sym, state_space, gen_mat)
indices = dict()
if isinstance(obj.number_of_states, Integer):
for index, state in enumerate(obj.state_space):
indices[state] = index
obj.index_of = indices
return obj
@property
def generator_matrix(self):
return self.args[2]
@cacheit
def transition_probabilities(self, gen_mat=None):
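"""
Returns the transition probability matrix as a function of time,
``P(t) = exp(t*G)`` for a generator matrix ``G`` (using the
diagonalization of ``G`` when it is available), or ``None`` if no
generator matrix is passed.
A minimal sketch, assuming the generator from the class docstring
above (the printed Lambda is indicative only, hence the skip):
>>> from sympy.stats import ContinuousMarkovChain
>>> from sympy import Matrix, S
>>> G = Matrix([[-S(1), S(1)], [S(1), -S(1)]])
>>> C = ContinuousMarkovChain('C', state_space=[0, 1], gen_mat=G)
>>> C.transition_probabilities(G) # doctest: +SKIP
Lambda(_t, Matrix(...)*exp(_t*Matrix(...))*Matrix(...))
"""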
t = Dummy('t')
if isinstance(gen_mat, (Matrix, ImmutableMatrix)) and \
gen_mat.is_diagonalizable():
# for faster computation use diagonalized generator matrix
Q, D = gen_mat.diagonalize()
return Lambda(t, Q*exp(t*D)*Q.inv())
if gen_mat != None:
return Lambda(t, exp(t*gen_mat))
def limiting_distribution(self):
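"""
Computes the limiting distribution ``w`` of the chain by solving
``w*G = 0`` together with ``sum(w) = 1``, where ``G`` is the
generator matrix. For a symbolic generator an unevaluated ``Lambda``
is returned.
Examples
========
Mirroring the class docstring above:
>>> from sympy.stats import ContinuousMarkovChain
>>> from sympy import Matrix, S
>>> G = Matrix([[-S(1), S(1)], [S(1), -S(1)]])
>>> C = ContinuousMarkovChain('C', state_space=[0, 1], gen_mat=G)
>>> C.limiting_distribution()
Matrix([[1/2, 1/2]])
"""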
gen_mat = self.generator_matrix
if gen_mat == None:
return None
if isinstance(gen_mat, MatrixSymbol):
wm = MatrixSymbol('wm', 1, gen_mat.shape[0])
return Lambda((wm, gen_mat), Eq(wm*gen_mat, wm))
w = IndexedBase('w')
wi = [w[i] for i in range(gen_mat.shape[0])]
wm = Matrix([wi])
eqs = (wm*gen_mat).tolist()[0]
eqs.append(sum(wi) - 1)
soln = list(linsolve(eqs, wi))[0]
return ImmutableMatrix([[sol for sol in soln]])
class BernoulliProcess(DiscreteTimeStochasticProcess):
"""
The Bernoulli process consists of repeated
independent Bernoulli trials with the same parameter `p`.
It's assumed that the probability `p` applies to every
trial and that the outcomes of each trial
are independent of all the rest. Therefore the Bernoulli process
is a discrete-state, discrete-time stochastic process.
Parameters
==========
sym: Symbol/str
success: Integer/str
The event which is considered to be success, by default is 1.
failure: Integer/str
The event which is considered to be failure, by default is 0.
p: Real Number between 0 and 1
Represents the probability of getting success.
Examples
========
>>> from sympy.stats import BernoulliProcess, P, E
>>> from sympy import Eq, Gt
>>> B = BernoulliProcess("B", p=0.7, success=1, failure=0)
>>> B.state_space
FiniteSet(0, 1)
>>> (B.p).round(2)
0.70
>>> B.success
1
>>> B.failure
0
>>> X = B[1] + B[2] + B[3]
>>> P(Eq(X, 0)).round(2)
0.03
>>> P(Eq(X, 2)).round(2)
0.44
>>> P(Eq(X, 4)).round(2)
0
>>> P(Gt(X, 1)).round(2)
0.78
>>> P(Eq(B[1], 0) & Eq(B[2], 1) & Eq(B[3], 0) & Eq(B[4], 1)).round(2)
0.04
>>> B.joint_distribution(B[1], B[2])
JointDistributionHandmade(Lambda((B[1], B[2]), Piecewise((0.7, Eq(B[1], 1)),
(0.3, Eq(B[1], 0)), (0, True))*Piecewise((0.7, Eq(B[2], 1)), (0.3, Eq(B[2], 0)),
(0, True))))
>>> E(2*B[1] + B[2]).round(2)
2.10
>>> P(B[1] < 1).round(2)
0.30
References
==========
.. [1] https://en.wikipedia.org/wiki/Bernoulli_process
.. [2] https://mathcs.clarku.edu/~djoyce/ma217/bernoulli.pdf
"""
index_set = S.Naturals0
def __new__(cls, sym, p, success=1, failure=0):
_value_check(p >= 0 and p <= 1, 'Value of p must be between 0 and 1.')
sym = _symbol_converter(sym)
p = _sympify(p)
success = _sym_sympify(success)
failure = _sym_sympify(failure)
return Basic.__new__(cls, sym, p, success, failure)
@property
def symbol(self):
return self.args[0]
@property
def p(self):
return self.args[1]
@property
def success(self):
return self.args[2]
@property
def failure(self):
return self.args[3]
@property
def state_space(self):
return _set_converter([self.success, self.failure])
@property
def distribution(self):
return BernoulliDistribution(self.p)
def simple_rv(self, rv):
return Bernoulli(rv.name, p=self.p,
succ=self.success, fail=self.failure)
def expectation(self, expr, condition=None, evaluate=True, **kwargs):
"""
Computes expectation.
Parameters
==========
expr: RandomIndexedSymbol, Relational, Logic
Condition for which expectation has to be computed. Must
contain a RandomIndexedSymbol of the process.
condition: Relational, Logic
The given conditions under which computations should be done.
Returns
=======
Expectation of the RandomIndexedSymbol.
"""
return _SubstituteRV._expectation(expr, condition, evaluate, **kwargs)
def probability(self, condition, given_condition=None, evaluate=True, **kwargs):
"""
Computes probability.
Parameters
==========
condition: Relational
Condition for which probability has to be computed. Must
contain a RandomIndexedSymbol of the process.
given_condition: Relational/And
The given conditions under which computations should be done.
Returns
=======
Probability of the condition.
"""
return _SubstituteRV._probability(condition, given_condition, evaluate, **kwargs)
def density(self, x):
return Piecewise((self.p, Eq(x, self.success)),
(1 - self.p, Eq(x, self.failure)),
(S.Zero, True))
class _SubstituteRV:
"""
Internal class to handle the queries of expectation and probability
by substitution.
"""
@staticmethod
def _rvindexed_subs(expr, condition=None):
"""
Substitutes the RandomIndexedSymbol with the RandomSymbol with
same name, distribution and probability as RandomIndexedSymbol.
Parameters
==========
expr: RandomIndexedSymbol, Relational, Logic
Condition for which expectation has to be computed. Must
contain a RandomIndexedSymbol of the process.
condition: Relational, Logic
The given conditions under which computations should be done.
"""
rvs_expr = random_symbols(expr)
if len(rvs_expr) != 0:
swapdict_expr = {}
for rv in rvs_expr:
if isinstance(rv, RandomIndexedSymbol):
newrv = rv.pspace.process.simple_rv(rv) # substitute with equivalent simple rv
swapdict_expr[rv] = newrv
expr = expr.subs(swapdict_expr)
rvs_cond = random_symbols(condition)
if len(rvs_cond)!=0:
swapdict_cond = {}
for rv in rvs_cond:
if isinstance(rv, RandomIndexedSymbol):
newrv = rv.pspace.process.simple_rv(rv)
swapdict_cond[rv] = newrv
condition = condition.subs(swapdict_cond)
return expr, condition
@classmethod
def _expectation(self, expr, condition=None, evaluate=True, **kwargs):
"""
Internal method for computing expectation of indexed RV.
Parameters
==========
expr: RandomIndexedSymbol, Relational, Logic
Condition for which expectation has to be computed. Must
contain a RandomIndexedSymbol of the process.
condition: Relational, Logic
The given conditions under which computations should be done.
Returns
=======
Expectation of the RandomIndexedSymbol.
"""
new_expr, new_condition = self._rvindexed_subs(expr, condition)
if not is_random(new_expr):
return new_expr
new_pspace = pspace(new_expr)
if new_condition is not None:
new_expr = given(new_expr, new_condition)
if new_expr.is_Add: # As E is Linear
return Add(*[new_pspace.compute_expectation(
expr=arg, evaluate=evaluate, **kwargs)
for arg in new_expr.args])
return new_pspace.compute_expectation(
new_expr, evaluate=evaluate, **kwargs)
@classmethod
def _probability(self, condition, given_condition=None, evaluate=True, **kwargs):
"""
Internal method for computing probability of indexed RV
Parameters
==========
condition: Relational
Condition for which probability has to be computed. Must
contain a RandomIndexedSymbol of the process.
given_condition: Relational/And
The given conditions under which computations should be done.
Returns
=======
Probability of the condition.
"""
new_condition, new_givencondition = self._rvindexed_subs(condition, given_condition)
if isinstance(new_givencondition, RandomSymbol):
condrv = random_symbols(new_condition)
if len(condrv) == 1 and condrv[0] == new_givencondition:
return BernoulliDistribution(self._probability(new_condition), 0, 1)
if any([dependent(rv, new_givencondition) for rv in condrv]):
return Probability(new_condition, new_givencondition)
else:
return self._probability(new_condition)
if new_givencondition is not None and \
not isinstance(new_givencondition, (Relational, Boolean)):
raise ValueError("%s is not a relational or combination of relationals"
% (new_givencondition))
if new_givencondition == False or new_condition == False:
return S.Zero
if new_condition == True:
return S.One
if not isinstance(new_condition, (Relational, Boolean)):
raise ValueError("%s is not a relational or combination of relationals"
% (new_condition))
if new_givencondition is not None: # If there is a condition
# Recompute on new conditional expr
return self._probability(given(new_condition, new_givencondition, **kwargs), **kwargs)
result = pspace(new_condition).probability(new_condition, **kwargs)
if evaluate and hasattr(result, 'doit'):
return result.doit()
else:
return result
def get_timerv_swaps(expr, condition):
"""
Finds the appropriate interval for each time stamp in expr by parsing
the given condition and returns the intervals for each timestamp and a
dictionary that maps each variable time-stamped RandomIndexedSymbol to its
corresponding RandomIndexedSymbol with a fixed time stamp.
Parameters
==========
expr: SymPy Expression
Expression containing Random Indexed Symbols with variable time stamps
condition: Relational/Boolean Expression
Expression containing time bounds of variable time stamps in expr
Examples
========
>>> from sympy.stats.stochastic_process_types import get_timerv_swaps, PoissonProcess
>>> from sympy import symbols, Contains, Interval
>>> x, t, d = symbols('x t d', positive=True)
>>> X = PoissonProcess("X", 3)
>>> get_timerv_swaps(x*X(t), Contains(t, Interval.Lopen(0, 1)))
([Interval.Lopen(0, 1)], {X(t): X(1)})
>>> get_timerv_swaps((X(t)**2 + X(d)**2), Contains(t, Interval.Lopen(0, 1))
... & Contains(d, Interval.Ropen(1, 4))) # doctest: +SKIP
([Interval.Ropen(1, 4), Interval.Lopen(0, 1)], {X(d): X(3), X(t): X(1)})
Returns
=======
intervals: list
List of Intervals/FiniteSet on which each time stamp is defined
rv_swap: dict
Dictionary mapping variable time Random Indexed Symbol to constant time
Random Indexed Variable
"""
if not isinstance(condition, (Relational, Boolean)):
raise ValueError("%s is not a relational or combination of relationals"
% (condition))
expr_syms = list(expr.atoms(RandomIndexedSymbol))
if isinstance(condition, (And, Or)):
given_cond_args = condition.args
else: # single condition
given_cond_args = (condition, )
rv_swap = {}
intervals = []
for expr_sym in expr_syms:
for arg in given_cond_args:
if arg.has(expr_sym.key) and isinstance(expr_sym.key, Symbol):
intv = _set_converter(arg.args[1])
diff_key = intv._sup - intv._inf
if diff_key == oo:
raise ValueError("%s should have finite bounds" % str(expr_sym.name))
elif diff_key == S.Zero: # has singleton set
diff_key = intv._sup
rv_swap[expr_sym] = expr_sym.subs({expr_sym.key: diff_key})
intervals.append(intv)
return intervals, rv_swap
class CountingProcess(ContinuousTimeStochasticProcess):
"""
This class handles the common methods of the Counting Processes
such as Poisson, Wiener and Gamma Processes
"""
index_set = _set_converter(Interval(0, oo))
@property
def symbol(self):
return self.args[0]
def expectation(self, expr, condition=None, evaluate=True, **kwargs):
"""
Computes expectation
Parameters
==========
expr: RandomIndexedSymbol, Relational, Logic
Condition for which expectation has to be computed. Must
contain a RandomIndexedSymbol of the process.
condition: Relational, Boolean
The given conditions under which computations should be done, i.e.,
the intervals on which each variable time stamp in expr is defined
Returns
=======
Expectation of the given expr
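Examples
========
A rough sketch, assuming the ``PoissonProcess`` defined later in this
module (the value shown is indicative only, hence the skip):
>>> from sympy.stats import PoissonProcess
>>> from sympy import symbols, Contains, Interval
>>> t = symbols('t', positive=True)
>>> X = PoissonProcess("X", 3)
>>> X.expectation(X(t), Contains(t, Interval.Lopen(0, 1))) # doctest: +SKIP
3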
"""
if condition is not None:
intervals, rv_swap = get_timerv_swaps(expr, condition)
# they are independent when they have non-overlapping intervals
if len(intervals) == 1 or all(Intersection(*intv_comb) == EmptySet
for intv_comb in itertools.combinations(intervals, 2)):
if expr.is_Add:
return Add.fromiter(self.expectation(arg, condition)
for arg in expr.args)
expr = expr.subs(rv_swap)
else:
return Expectation(expr, condition)
return _SubstituteRV._expectation(expr, evaluate=evaluate, **kwargs)
def _solve_argwith_tworvs(self, arg):
if arg.args[0].key >= arg.args[1].key or isinstance(arg, Eq):
diff_key = abs(arg.args[0].key - arg.args[1].key)
rv = arg.args[0]
arg = arg.__class__(rv.pspace.process(diff_key), 0)
else:
diff_key = arg.args[1].key - arg.args[0].key
rv = arg.args[1]
arg = arg.__class__(rv.pspace.process(diff_key), 0)
return arg
def _solve_numerical(self, condition, given_condition=None):
if isinstance(condition, And):
args_list = list(condition.args)
else:
args_list = [condition]
if given_condition is not None:
if isinstance(given_condition, And):
args_list.extend(list(given_condition.args))
else:
args_list.extend([given_condition])
# sort the args based on timestamp to get the independent increments in
# each segment using all the condition args as well as given_condition args
args_list = sorted(args_list, key=lambda x: x.args[0].key)
result = []
cond_args = list(condition.args) if isinstance(condition, And) else [condition]
if args_list[0] in cond_args and not (is_random(args_list[0].args[0])
and is_random(args_list[0].args[1])):
result.append(_SubstituteRV._probability(args_list[0]))
if is_random(args_list[0].args[0]) and is_random(args_list[0].args[1]):
arg = self._solve_argwith_tworvs(args_list[0])
result.append(_SubstituteRV._probability(arg))
for i in range(len(args_list) - 1):
curr, nex = args_list[i], args_list[i + 1]
diff_key = nex.args[0].key - curr.args[0].key
working_set = curr.args[0].pspace.process.state_space
if curr.args[1] > nex.args[1]: #impossible condition so return 0
result.append(0)
break
if isinstance(curr, Eq):
working_set = Intersection(working_set, Interval.Lopen(curr.args[1], oo))
else:
working_set = Intersection(working_set, curr.as_set())
if isinstance(nex, Eq):
working_set = Intersection(working_set, Interval(-oo, nex.args[1]))
else:
working_set = Intersection(working_set, nex.as_set())
if working_set == EmptySet:
rv = Eq(curr.args[0].pspace.process(diff_key), 0)
result.append(_SubstituteRV._probability(rv))
else:
if working_set.is_finite_set:
if isinstance(curr, Eq) and isinstance(nex, Eq):
rv = Eq(curr.args[0].pspace.process(diff_key), len(working_set))
result.append(_SubstituteRV._probability(rv))
elif isinstance(curr, Eq) ^ isinstance(nex, Eq):
result.append(Add.fromiter(_SubstituteRV._probability(Eq(
curr.args[0].pspace.process(diff_key), x))
for x in range(len(working_set))))
else:
n = len(working_set)
result.append(Add.fromiter((n - x)*_SubstituteRV._probability(Eq(
curr.args[0].pspace.process(diff_key), x)) for x in range(n)))
else:
result.append(_SubstituteRV._probability(
curr.args[0].pspace.process(diff_key) <= working_set._sup - working_set._inf))
return Mul.fromiter(result)
def probability(self, condition, given_condition=None, evaluate=True, **kwargs):
"""
Computes probability
Parameters
==========
condition: Relational
Condition for which probability has to be computed. Must
contain a RandomIndexedSymbol of the process.
given_condition: Relational, Boolean
            The given conditions under which computations should be done, i.e.,
the intervals on which each variable time stamp in expr is defined
Returns
=======
Probability of the condition
"""
check_numeric = True
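        # `check_numeric` records whether every time stamp in the condition is
        # an explicit number; such queries go through _solve_numerical, while
        # symbolic time stamps need interval information via `Contains` below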
if isinstance(condition, (And, Or)):
cond_args = condition.args
else:
cond_args = (condition, )
# check that condition args are numeric or not
if not all(arg.args[0].key.is_number for arg in cond_args):
check_numeric = False
if given_condition is not None:
check_given_numeric = True
if isinstance(given_condition, (And, Or)):
given_cond_args = given_condition.args
else:
given_cond_args = (given_condition, )
# check that given condition args are numeric or not
if given_condition.has(Contains):
check_given_numeric = False
# Handle numerical queries
if check_numeric and check_given_numeric:
res = []
if isinstance(condition, Or):
res.append(Add.fromiter(self._solve_numerical(arg, given_condition)
for arg in condition.args))
if isinstance(given_condition, Or):
res.append(Add.fromiter(self._solve_numerical(condition, arg)
for arg in given_condition.args))
if res:
return Add.fromiter(res)
return self._solve_numerical(condition, given_condition)
            # No numeric query is possible here, so every argument of the given
            # condition must supply interval information through `Contains`
            if not all(arg.has(Contains) for arg in given_cond_args):
                raise ValueError("When the condition contains symbolic time stamps, "
                    "the given condition must specify, via `Contains`, the interval "
                    "for each time stamp appearing in the condition.")
intervals, rv_swap = get_timerv_swaps(condition, given_condition)
# they are independent when they have non-overlapping intervals
if len(intervals) == 1 or all(Intersection(*intv_comb) == EmptySet
for intv_comb in itertools.combinations(intervals, 2)):
if isinstance(condition, And):
return Mul.fromiter(self.probability(arg, given_condition)
for arg in condition.args)
elif isinstance(condition, Or):
return Add.fromiter(self.probability(arg, given_condition)
for arg in condition.args)
condition = condition.subs(rv_swap)
else:
return Probability(condition, given_condition)
if check_numeric:
return self._solve_numerical(condition)
return _SubstituteRV._probability(condition, evaluate=evaluate, **kwargs)
class PoissonProcess(CountingProcess):
"""
The Poisson process is a counting process. It is usually used in scenarios
where we are counting the occurrences of certain events that appear
to happen at a certain rate, but completely at random.
Parameters
==========
sym: Symbol/str
lamda: Positive number
Rate of the process, ``lamda > 0``
Examples
========
>>> from sympy.stats import PoissonProcess, P, E
>>> from sympy import symbols, Eq, Ne, Contains, Interval
>>> X = PoissonProcess("X", lamda=3)
>>> X.state_space
Naturals0
>>> X.lamda
3
>>> t1, t2 = symbols('t1 t2', positive=True)
>>> P(X(t1) < 4)
(9*t1**3/2 + 9*t1**2/2 + 3*t1 + 1)*exp(-3*t1)
>>> P(Eq(X(t1), 2) | Ne(X(t1), 4), Contains(t1, Interval.Ropen(2, 4)))
1 - 36*exp(-6)
>>> P(Eq(X(t1), 2) & Eq(X(t2), 3), Contains(t1, Interval.Lopen(0, 2))
... & Contains(t2, Interval.Lopen(2, 4)))
648*exp(-12)
>>> E(X(t1))
3*t1
>>> E(X(t1)**2 + 2*X(t2), Contains(t1, Interval.Lopen(0, 1))
... & Contains(t2, Interval.Lopen(1, 2)))
18
>>> P(X(3) < 1, Eq(X(1), 0))
exp(-6)
>>> P(Eq(X(4), 3), Eq(X(2), 3))
exp(-6)
>>> P(X(2) <= 3, X(1) > 1)
5*exp(-3)
Merging two Poisson Processes
>>> Y = PoissonProcess("Y", lamda=4)
>>> Z = X + Y
>>> Z.lamda
7
Splitting a Poisson Process into two independent Poisson Processes
>>> N, M = Z.split(l1=2, l2=5)
>>> N.lamda, M.lamda
(2, 5)
References
==========
.. [1] https://www.probabilitycourse.com/chapter11/11_0_0_intro.php
.. [2] https://en.wikipedia.org/wiki/Poisson_point_process
"""
def __new__(cls, sym, lamda):
_value_check(lamda > 0, 'lamda should be a positive number.')
sym = _symbol_converter(sym)
lamda = _sympify(lamda)
return Basic.__new__(cls, sym, lamda)
@property
def lamda(self):
return self.args[1]
@property
def state_space(self):
return S.Naturals0
def distribution(self, rv):
return PoissonDistribution(self.lamda*rv.key)
def density(self, x):
return (self.lamda*x.key)**x / factorial(x) * exp(-(self.lamda*x.key))
def simple_rv(self, rv):
return Poisson(rv.name, lamda=self.lamda*rv.key)
def __add__(self, other):
if not isinstance(other, PoissonProcess):
raise ValueError("Only instances of Poisson Process can be merged")
return PoissonProcess(Dummy(self.symbol.name + other.symbol.name),
self.lamda + other.lamda)
def split(self, l1, l2):
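        # Poisson thinning: splitting with rates l1 and l2 (l1 + l2 == lamda)
        # yields two independent Poisson processes of rates l1 and l2.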
if _sympify(l1 + l2) != self.lamda:
raise ValueError("Sum of l1 and l2 should be %s" % str(self.lamda))
return PoissonProcess(Dummy("l1"), l1), PoissonProcess(Dummy("l2"), l2)
class WienerProcess(CountingProcess):
"""
The Wiener process is a real valued continuous-time stochastic process.
    In physics it is used to study Brownian motion and is therefore also
    known as Brownian Motion.
Parameters
==========
sym: Symbol/str
Examples
========
>>> from sympy.stats import WienerProcess, P, E
>>> from sympy import symbols, Contains, Interval
>>> X = WienerProcess("X")
>>> X.state_space
Reals
>>> t1, t2 = symbols('t1 t2', positive=True)
>>> P(X(t1) < 7).simplify()
erf(7*sqrt(2)/(2*sqrt(t1)))/2 + 1/2
>>> P((X(t1) > 2) | (X(t1) < 4), Contains(t1, Interval.Ropen(2, 4))).simplify()
-erf(1)/2 + erf(2)/2 + 1
>>> E(X(t1))
0
>>> E(X(t1) + 2*X(t2), Contains(t1, Interval.Lopen(0, 1))
... & Contains(t2, Interval.Lopen(1, 2)))
0
References
==========
.. [1] https://www.probabilitycourse.com/chapter11/11_4_0_brownian_motion_wiener_process.php
.. [2] https://en.wikipedia.org/wiki/Wiener_process
"""
def __new__(cls, sym):
sym = _symbol_converter(sym)
return Basic.__new__(cls, sym)
@property
def state_space(self):
return S.Reals
def distribution(self, rv):
return NormalDistribution(0, sqrt(rv.key))
def density(self, x):
return exp(-x**2/(2*x.key)) / (sqrt(2*pi)*sqrt(x.key))
def simple_rv(self, rv):
return Normal(rv.name, 0, sqrt(rv.key))
class GammaProcess(CountingProcess):
"""
A Gamma process is a random process with independent gamma distributed
    increments. It is a pure-jump, increasing Lévy process.
Parameters
==========
sym: Symbol/str
lamda: Positive number
Jump size of the process, ``lamda > 0``
gamma: Positive number
Rate of jump arrivals, ``gamma > 0``
Examples
========
>>> from sympy.stats import GammaProcess, E, P, variance
>>> from sympy import symbols, Contains, Interval, Not
>>> t, d, x, l, g = symbols('t d x l g', positive=True)
>>> X = GammaProcess("X", l, g)
>>> E(X(t))
g*t/l
>>> variance(X(t)).simplify()
g*t/l**2
>>> X = GammaProcess('X', 1, 2)
>>> P(X(t) < 1).simplify()
lowergamma(2*t, 1)/gamma(2*t)
>>> P(Not((X(t) < 5) & (X(d) > 3)), Contains(t, Interval.Ropen(2, 4)) &
... Contains(d, Interval.Lopen(7, 8))).simplify()
-4*exp(-3) + 472*exp(-8)/3 + 1
>>> E(X(2) + x*E(X(5)))
10*x + 4
References
==========
.. [1] https://en.wikipedia.org/wiki/Gamma_process
"""
def __new__(cls, sym, lamda, gamma):
_value_check(lamda > 0, 'lamda should be a positive number')
_value_check(gamma > 0, 'gamma should be a positive number')
sym = _symbol_converter(sym)
gamma = _sympify(gamma)
lamda = _sympify(lamda)
return Basic.__new__(cls, sym, lamda, gamma)
@property
def lamda(self):
return self.args[1]
@property
def gamma(self):
return self.args[2]
@property
def state_space(self):
return _set_converter(Interval(0, oo))
def distribution(self, rv):
return GammaDistribution(self.gamma*rv.key, 1/self.lamda)
def density(self, x):
k = self.gamma*x.key
theta = 1/self.lamda
return x**(k - 1) * exp(-x/theta) / (gamma(k)*theta**k)
def simple_rv(self, rv):
return Gamma(rv.name, self.gamma*rv.key, 1/self.lamda)
|
11006d95d054685811a86705e3bedec66da29b1dd21a490cd39c1962ce6d08bf | from sympy import Basic
from sympy.stats.rv import PSpace, _symbol_converter, RandomMatrixSymbol
class RandomMatrixPSpace(PSpace):
"""
Represents probability space for
random matrices. It contains the mechanics
for handling the API calls for random matrices.
"""
def __new__(cls, sym, model=None):
sym = _symbol_converter(sym)
return Basic.__new__(cls, sym, model)
model = property(lambda self: self.args[1])
def compute_density(self, expr, *args):
rms = expr.atoms(RandomMatrixSymbol)
if len(rms) > 2 or (not isinstance(expr, RandomMatrixSymbol)):
raise NotImplementedError("Currently, no algorithm has been "
"implemented to handle general expressions containing "
"multiple random matrices.")
return self.model.density(expr)
|
a75326df053687003ca2dad0e6d5a64c94ffd90f58329f227a4b8a82bf185057 | """Tools for arithmetic error propagation."""
from itertools import repeat, combinations
from sympy import S, Symbol, Add, Mul, simplify, Pow, exp
from sympy.stats.symbolic_probability import RandomSymbol, Variance, Covariance
from sympy.stats.rv import is_random
_arg0_or_var = lambda var: var.args[0] if len(var.args) > 0 else var
def variance_prop(expr, consts=(), include_covar=False):
r"""Symbolically propagates variance (`\sigma^2`) for expressions.
    This is computed as seen in [1]_.
Parameters
==========
expr : Expr
A sympy expression to compute the variance for.
consts : sequence of Symbols, optional
Represents symbols that are known constants in the expr,
        and thus have zero variance. All symbols not in consts are
        assumed to have non-zero variance.
include_covar : bool, optional
Flag for whether or not to include covariances, default=False.
Returns
=======
var_expr : Expr
An expression for the total variance of the expr.
The variance for the original symbols (e.g. x) are represented
via instance of the Variance symbol (e.g. Variance(x)).
Examples
========
>>> from sympy import symbols, exp
>>> from sympy.stats.error_prop import variance_prop
>>> x, y = symbols('x y')
>>> variance_prop(x + y)
Variance(x) + Variance(y)
>>> variance_prop(x * y)
x**2*Variance(y) + y**2*Variance(x)
>>> variance_prop(exp(2*x))
4*exp(4*x)*Variance(x)
References
==========
.. [1] https://en.wikipedia.org/wiki/Propagation_of_uncertainty
"""
args = expr.args
if len(args) == 0:
if expr in consts:
return S.Zero
elif is_random(expr):
return Variance(expr).doit()
elif isinstance(expr, Symbol):
return Variance(RandomSymbol(expr)).doit()
else:
return S.Zero
nargs = len(args)
var_args = list(map(variance_prop, args, repeat(consts, nargs),
repeat(include_covar, nargs)))
if isinstance(expr, Add):
var_expr = Add(*var_args)
if include_covar:
terms = [2 * Covariance(_arg0_or_var(x), _arg0_or_var(y)).expand() \
for x, y in combinations(var_args, 2)]
var_expr += Add(*terms)
elif isinstance(expr, Mul):
terms = [v/a**2 for a, v in zip(args, var_args)]
var_expr = simplify(expr**2 * Add(*terms))
if include_covar:
terms = [2*Covariance(_arg0_or_var(x), _arg0_or_var(y)).expand()/(a*b) \
for (a, b), (x, y) in zip(combinations(args, 2),
combinations(var_args, 2))]
var_expr += Add(*terms)
elif isinstance(expr, Pow):
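        # first-order propagation for f = x**b:
        # Var(f) ~ (df/dx)**2 * Var(x) = (b*x**(b - 1))**2 * Var(x) = (b*f/x)**2 * Var(x)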
b = args[1]
v = var_args[0] * (expr * b / args[0])**2
var_expr = simplify(v)
elif isinstance(expr, exp):
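        # for f = exp(u), df/du = exp(u), so Var(f) ~ exp(u)**2 * Var(u) = f**2 * Var(u)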
var_expr = simplify(var_args[0] * expr**2)
else:
# unknown how to proceed, return variance of whole expr.
var_expr = Variance(expr)
return var_expr
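# Illustrative sketch (inferred from the code above, not a doctest): with
# include_covar=True the propagation also emits cross terms, e.g.
# variance_prop(x + y, include_covar=True) is expected to yield
# Variance(x) + Variance(y) + 2*Covariance(x, y).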
|
c2a7db439ebd009d6dcf09cedeb15172aad81627f4f11279bb73b229188d4eb0 | """
Contains
========
Geometric
Hermite
Logarithmic
NegativeBinomial
Poisson
Skellam
YuleSimon
Zeta
"""
from sympy import (Basic, factorial, exp, S, sympify, I, zeta, polylog, log, beta,
hyper, binomial, Piecewise, floor, besseli, sqrt, Sum, Dummy,
Lambda, Eq)
from sympy.stats.drv import SingleDiscreteDistribution, SingleDiscretePSpace
from sympy.stats.rv import _value_check, is_random
__all__ = ['Geometric',
'Hermite',
'Logarithmic',
'NegativeBinomial',
'Poisson',
'Skellam',
'YuleSimon',
'Zeta'
]
def rv(symbol, cls, *args, **kwargs):
args = list(map(sympify, args))
dist = cls(*args)
if kwargs.pop('check', True):
dist.check(*args)
pspace = SingleDiscretePSpace(symbol, dist)
if any(is_random(arg) for arg in args):
from sympy.stats.compound_rv import CompoundPSpace, CompoundDistribution
pspace = CompoundPSpace(symbol, CompoundDistribution(dist))
return pspace.value
class DiscreteDistributionHandmade(SingleDiscreteDistribution):
_argnames = ('pdf',)
def __new__(cls, pdf, set=S.Integers):
return Basic.__new__(cls, pdf, set)
@property
def set(self):
return self.args[1]
@staticmethod
def check(pdf, set):
x = Dummy('x')
val = Sum(pdf(x), (x, set._inf, set._sup)).doit()
_value_check(Eq(val, 1) != S.false, "The pdf is incorrect on the given set.")
def DiscreteRV(symbol, density, set=S.Integers, **kwargs):
"""
Create a Discrete Random Variable given the following:
Parameters
==========
symbol : Symbol
Represents name of the random variable.
density : Expression containing symbol
Represents probability density function.
set : set
        Represents the region where the pdf is valid; by default it is the set of integers.
check : bool
If True, it will check whether the given density
        sums to 1 over the given set. If False, it
will not perform this check. Default is False.
Examples
========
>>> from sympy.stats import DiscreteRV, P, E
>>> from sympy import Rational, Symbol
>>> x = Symbol('x')
>>> n = 10
>>> density = Rational(1, 10)
>>> X = DiscreteRV(x, density, set=set(range(n)))
>>> E(X)
9/2
>>> P(X>3)
3/5
Returns
=======
RandomSymbol
"""
set = sympify(set)
pdf = Piecewise((density, set.as_relational(symbol)), (0, True))
pdf = Lambda(symbol, pdf)
    # `check` defaults to False here, while `rv` defaults to True
kwargs['check'] = kwargs.pop('check', False)
return rv(symbol.name, DiscreteDistributionHandmade, pdf, set, **kwargs)
#-------------------------------------------------------------------------------
# Geometric distribution ------------------------------------------------------------
class GeometricDistribution(SingleDiscreteDistribution):
_argnames = ('p',)
set = S.Naturals
@staticmethod
def check(p):
_value_check((0 < p, p <= 1), "p must be between 0 and 1")
def pdf(self, k):
return (1 - self.p)**(k - 1) * self.p
def _characteristic_function(self, t):
p = self.p
return p * exp(I*t) / (1 - (1 - p)*exp(I*t))
def _moment_generating_function(self, t):
p = self.p
return p * exp(t) / (1 - (1 - p) * exp(t))
def Geometric(name, p):
r"""
Create a discrete random variable with a Geometric distribution.
The density of the Geometric distribution is given by
.. math::
f(k) := p (1 - p)^{k - 1}
Parameters
==========
p: A probability between 0 and 1
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Geometric, density, E, variance
>>> from sympy import Symbol, S
>>> p = S.One / 5
>>> z = Symbol("z")
>>> X = Geometric("x", p)
>>> density(X)(z)
(4/5)**(z - 1)/5
>>> E(X)
5
>>> variance(X)
20
References
==========
.. [1] https://en.wikipedia.org/wiki/Geometric_distribution
.. [2] http://mathworld.wolfram.com/GeometricDistribution.html
"""
return rv(name, GeometricDistribution, p)
#-------------------------------------------------------------------------------
# Hermite distribution ---------------------------------------------------------
class HermiteDistribution(SingleDiscreteDistribution):
_argnames = ('a1', 'a2')
set = S.Naturals0
@staticmethod
def check(a1, a2):
_value_check(a1.is_nonnegative, 'Parameter a1 must be >= 0.')
_value_check(a2.is_nonnegative, 'Parameter a2 must be >= 0.')
def pdf(self, k):
a1, a2 = self.a1, self.a2
term1 = exp(-(a1 + a2))
j = Dummy("j", integer=True)
num = a1**(k - 2*j) * a2**j
den = factorial(k - 2*j) * factorial(j)
return term1 * Sum(num/den, (j, 0, k//2)).doit()
def _moment_generating_function(self, t):
a1, a2 = self.a1, self.a2
term1 = a1 * (exp(t) - 1)
term2 = a2 * (exp(2*t) - 1)
return exp(term1 + term2)
def _characteristic_function(self, t):
a1, a2 = self.a1, self.a2
term1 = a1 * (exp(I*t) - 1)
term2 = a2 * (exp(2*I*t) - 1)
return exp(term1 + term2)
def Hermite(name, a1, a2):
r"""
Create a discrete random variable with a Hermite distribution.
The density of the Hermite distribution is given by
.. math::
f(x):= e^{-a_1 -a_2}\sum_{j=0}^{\left \lfloor x/2 \right \rfloor}
\frac{a_{1}^{x-2j}a_{2}^{j}}{(x-2j)!j!}
Parameters
==========
    a1: A non-negative number.
    a2: A non-negative number.
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Hermite, density, E, variance
>>> from sympy import Symbol
>>> a1 = Symbol("a1", positive=True)
>>> a2 = Symbol("a2", positive=True)
>>> x = Symbol("x")
>>> H = Hermite("H", a1=5, a2=4)
>>> density(H)(2)
33*exp(-9)/2
>>> E(H)
13
>>> variance(H)
21
References
==========
.. [1] https://en.wikipedia.org/wiki/Hermite_distribution
"""
return rv(name, HermiteDistribution, a1, a2)
#-------------------------------------------------------------------------------
# Logarithmic distribution ------------------------------------------------------------
class LogarithmicDistribution(SingleDiscreteDistribution):
_argnames = ('p',)
set = S.Naturals
@staticmethod
def check(p):
_value_check((p > 0, p < 1), "p should be between 0 and 1")
def pdf(self, k):
p = self.p
return (-1) * p**k / (k * log(1 - p))
def _characteristic_function(self, t):
p = self.p
return log(1 - p * exp(I*t)) / log(1 - p)
def _moment_generating_function(self, t):
p = self.p
return log(1 - p * exp(t)) / log(1 - p)
def Logarithmic(name, p):
r"""
Create a discrete random variable with a Logarithmic distribution.
The density of the Logarithmic distribution is given by
.. math::
f(k) := \frac{-p^k}{k \ln{(1 - p)}}
Parameters
==========
p: A value between 0 and 1
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Logarithmic, density, E, variance
>>> from sympy import Symbol, S
>>> p = S.One / 5
>>> z = Symbol("z")
>>> X = Logarithmic("x", p)
>>> density(X)(z)
-5**(-z)/(z*log(4/5))
>>> E(X)
-1/(-4*log(5) + 8*log(2))
>>> variance(X)
-1/((-4*log(5) + 8*log(2))*(-2*log(5) + 4*log(2))) + 1/(-64*log(2)*log(5) + 64*log(2)**2 + 16*log(5)**2) - 10/(-32*log(5) + 64*log(2))
References
==========
.. [1] https://en.wikipedia.org/wiki/Logarithmic_distribution
.. [2] http://mathworld.wolfram.com/LogarithmicDistribution.html
"""
return rv(name, LogarithmicDistribution, p)
#-------------------------------------------------------------------------------
# Negative binomial distribution ------------------------------------------------------------
class NegativeBinomialDistribution(SingleDiscreteDistribution):
_argnames = ('r', 'p')
set = S.Naturals0
@staticmethod
def check(r, p):
_value_check(r > 0, 'r should be positive')
_value_check((p > 0, p < 1), 'p should be between 0 and 1')
def pdf(self, k):
r = self.r
p = self.p
return binomial(k + r - 1, k) * (1 - p)**r * p**k
def _characteristic_function(self, t):
r = self.r
p = self.p
return ((1 - p) / (1 - p * exp(I*t)))**r
def _moment_generating_function(self, t):
r = self.r
p = self.p
return ((1 - p) / (1 - p * exp(t)))**r
def NegativeBinomial(name, r, p):
r"""
Create a discrete random variable with a Negative Binomial distribution.
The density of the Negative Binomial distribution is given by
.. math::
f(k) := \binom{k + r - 1}{k} (1 - p)^r p^k
Parameters
==========
r: A positive value
p: A value between 0 and 1
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import NegativeBinomial, density, E, variance
>>> from sympy import Symbol, S
>>> r = 5
>>> p = S.One / 5
>>> z = Symbol("z")
>>> X = NegativeBinomial("x", r, p)
>>> density(X)(z)
1024*5**(-z)*binomial(z + 4, z)/3125
>>> E(X)
5/4
>>> variance(X)
25/16
References
==========
.. [1] https://en.wikipedia.org/wiki/Negative_binomial_distribution
.. [2] http://mathworld.wolfram.com/NegativeBinomialDistribution.html
"""
return rv(name, NegativeBinomialDistribution, r, p)
#-------------------------------------------------------------------------------
# Poisson distribution ------------------------------------------------------------
class PoissonDistribution(SingleDiscreteDistribution):
_argnames = ('lamda',)
set = S.Naturals0
@staticmethod
def check(lamda):
_value_check(lamda > 0, "Lambda must be positive")
def pdf(self, k):
return self.lamda**k / factorial(k) * exp(-self.lamda)
def _characteristic_function(self, t):
return exp(self.lamda * (exp(I*t) - 1))
def _moment_generating_function(self, t):
return exp(self.lamda * (exp(t) - 1))
def Poisson(name, lamda):
r"""
Create a discrete random variable with a Poisson distribution.
The density of the Poisson distribution is given by
.. math::
f(k) := \frac{\lambda^{k} e^{- \lambda}}{k!}
Parameters
==========
lamda: Positive number, a rate
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Poisson, density, E, variance
>>> from sympy import Symbol, simplify
>>> rate = Symbol("lambda", positive=True)
>>> z = Symbol("z")
>>> X = Poisson("x", rate)
>>> density(X)(z)
lambda**z*exp(-lambda)/factorial(z)
>>> E(X)
lambda
>>> simplify(variance(X))
lambda
References
==========
.. [1] https://en.wikipedia.org/wiki/Poisson_distribution
.. [2] http://mathworld.wolfram.com/PoissonDistribution.html
"""
return rv(name, PoissonDistribution, lamda)
# -----------------------------------------------------------------------------
# Skellam distribution --------------------------------------------------------
class SkellamDistribution(SingleDiscreteDistribution):
_argnames = ('mu1', 'mu2')
set = S.Integers
@staticmethod
def check(mu1, mu2):
_value_check(mu1 >= 0, 'Parameter mu1 must be >= 0')
_value_check(mu2 >= 0, 'Parameter mu2 must be >= 0')
def pdf(self, k):
(mu1, mu2) = (self.mu1, self.mu2)
term1 = exp(-(mu1 + mu2)) * (mu1 / mu2) ** (k / 2)
term2 = besseli(k, 2 * sqrt(mu1 * mu2))
return term1 * term2
def _cdf(self, x):
raise NotImplementedError(
"Skellam doesn't have closed form for the CDF.")
def _characteristic_function(self, t):
(mu1, mu2) = (self.mu1, self.mu2)
return exp(-(mu1 + mu2) + mu1 * exp(I * t) + mu2 * exp(-I * t))
def _moment_generating_function(self, t):
(mu1, mu2) = (self.mu1, self.mu2)
return exp(-(mu1 + mu2) + mu1 * exp(t) + mu2 * exp(-t))
def Skellam(name, mu1, mu2):
r"""
Create a discrete random variable with a Skellam distribution.
The Skellam is the distribution of the difference N1 - N2
of two statistically independent random variables N1 and N2
each Poisson-distributed with respective expected values mu1 and mu2.
The density of the Skellam distribution is given by
.. math::
f(k) := e^{-(\mu_1+\mu_2)}(\frac{\mu_1}{\mu_2})^{k/2}I_k(2\sqrt{\mu_1\mu_2})
Parameters
==========
mu1: A non-negative value
mu2: A non-negative value
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Skellam, density, E, variance
>>> from sympy import Symbol, pprint
>>> z = Symbol("z", integer=True)
>>> mu1 = Symbol("mu1", positive=True)
>>> mu2 = Symbol("mu2", positive=True)
>>> X = Skellam("x", mu1, mu2)
>>> pprint(density(X)(z), use_unicode=False)
z
-
2
/mu1\ -mu1 - mu2 / _____ _____\
|---| *e *besseli\z, 2*\/ mu1 *\/ mu2 /
\mu2/
>>> E(X)
mu1 - mu2
>>> variance(X).expand()
mu1 + mu2
References
==========
.. [1] https://en.wikipedia.org/wiki/Skellam_distribution
"""
return rv(name, SkellamDistribution, mu1, mu2)
#-------------------------------------------------------------------------------
# Yule-Simon distribution ------------------------------------------------------------
class YuleSimonDistribution(SingleDiscreteDistribution):
_argnames = ('rho',)
set = S.Naturals
@staticmethod
def check(rho):
_value_check(rho > 0, 'rho should be positive')
def pdf(self, k):
rho = self.rho
return rho * beta(k, rho + 1)
def _cdf(self, x):
return Piecewise((1 - floor(x) * beta(floor(x), self.rho + 1), x >= 1), (0, True))
def _characteristic_function(self, t):
rho = self.rho
return rho * hyper((1, 1), (rho + 2,), exp(I*t)) * exp(I*t) / (rho + 1)
def _moment_generating_function(self, t):
rho = self.rho
return rho * hyper((1, 1), (rho + 2,), exp(t)) * exp(t) / (rho + 1)
def YuleSimon(name, rho):
r"""
Create a discrete random variable with a Yule-Simon distribution.
The density of the Yule-Simon distribution is given by
.. math::
f(k) := \rho B(k, \rho + 1)
Parameters
==========
rho: A positive value
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import YuleSimon, density, E, variance
>>> from sympy import Symbol, simplify
>>> p = 5
>>> z = Symbol("z")
>>> X = YuleSimon("x", p)
>>> density(X)(z)
5*beta(z, 6)
>>> simplify(E(X))
5/4
>>> simplify(variance(X))
25/48
References
==========
.. [1] https://en.wikipedia.org/wiki/Yule%E2%80%93Simon_distribution
"""
return rv(name, YuleSimonDistribution, rho)
#-------------------------------------------------------------------------------
# Zeta distribution ------------------------------------------------------------
class ZetaDistribution(SingleDiscreteDistribution):
_argnames = ('s',)
set = S.Naturals
@staticmethod
def check(s):
_value_check(s > 1, 's should be greater than 1')
def pdf(self, k):
s = self.s
return 1 / (k**s * zeta(s))
def _characteristic_function(self, t):
return polylog(self.s, exp(I*t)) / zeta(self.s)
def _moment_generating_function(self, t):
return polylog(self.s, exp(t)) / zeta(self.s)
def Zeta(name, s):
r"""
Create a discrete random variable with a Zeta distribution.
The density of the Zeta distribution is given by
.. math::
f(k) := \frac{1}{k^s \zeta{(s)}}
Parameters
==========
s: A value greater than 1
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Zeta, density, E, variance
>>> from sympy import Symbol
>>> s = 5
>>> z = Symbol("z")
>>> X = Zeta("x", s)
>>> density(X)(z)
1/(z**5*zeta(5))
>>> E(X)
pi**4/(90*zeta(5))
>>> variance(X)
-pi**8/(8100*zeta(5)**2) + zeta(3)/zeta(5)
References
==========
.. [1] https://en.wikipedia.org/wiki/Zeta_distribution
"""
return rv(name, ZetaDistribution, s)
|
853eeaa953001805a1b43ff9e77c108e4bb4a60cb7c1158a239a22286ae74e09 | from sympy.sets import FiniteSet
from sympy import (sqrt, log, exp, FallingFactorial, Rational, Eq, Dummy,
piecewise_fold, solveset, Integral)
from .rv import (probability, expectation, density, where, given, pspace, cdf, PSpace,
characteristic_function, sample, sample_iter, random_symbols, independent, dependent,
sampling_density, moment_generating_function, quantile, is_random,
sample_stochastic_process)
__all__ = ['P', 'E', 'H', 'density', 'where', 'given', 'sample', 'cdf',
'characteristic_function', 'pspace', 'sample_iter', 'variance', 'std',
'skewness', 'kurtosis', 'covariance', 'dependent', 'entropy', 'median',
'independent', 'random_symbols', 'correlation', 'factorial_moment',
'moment', 'cmoment', 'sampling_density', 'moment_generating_function',
'smoment', 'quantile', 'sample_stochastic_process']
def moment(X, n, c=0, condition=None, *, evaluate=True, **kwargs):
"""
Return the nth moment of a random expression about c.
.. math::
        moment(X, n, c) = E((X-c)^{n})
Default value of c is 0.
Examples
========
>>> from sympy.stats import Die, moment, E
>>> X = Die('X', 6)
>>> moment(X, 1, 6)
-5/2
>>> moment(X, 2)
91/6
>>> moment(X, 1) == E(X)
True
"""
from sympy.stats.symbolic_probability import Moment
if evaluate:
return Moment(X, n, c, condition).doit()
return Moment(X, n, c, condition).rewrite(Integral)
def variance(X, condition=None, **kwargs):
"""
Variance of a random expression
.. math::
variance(X) = E((X-E(X))^{2})
Examples
========
>>> from sympy.stats import Die, Bernoulli, variance
>>> from sympy import simplify, Symbol
>>> X = Die('X', 6)
>>> p = Symbol('p')
>>> B = Bernoulli('B', p, 1, 0)
>>> variance(2*X)
35/3
>>> simplify(variance(B))
p*(1 - p)
"""
if is_random(X) and pspace(X) == PSpace():
from sympy.stats.symbolic_probability import Variance
return Variance(X, condition)
return cmoment(X, 2, condition, **kwargs)
def standard_deviation(X, condition=None, **kwargs):
r"""
Standard Deviation of a random expression
.. math::
        std(X) = \sqrt{E((X-E(X))^{2})}
Examples
========
>>> from sympy.stats import Bernoulli, std
>>> from sympy import Symbol, simplify
>>> p = Symbol('p')
>>> B = Bernoulli('B', p, 1, 0)
>>> simplify(std(B))
sqrt(p*(1 - p))
"""
return sqrt(variance(X, condition, **kwargs))
std = standard_deviation
def entropy(expr, condition=None, **kwargs):
"""
    Calculates the entropy of a probability distribution
Parameters
==========
    expr : the random expression whose entropy is to be calculated
condition : optional, to specify conditions on random expression
b: base of the logarithm, optional
By default, it is taken as Euler's number
Returns
=======
result : Entropy of the expression, a constant
Examples
========
>>> from sympy.stats import Normal, Die, entropy
>>> X = Normal('X', 0, 1)
>>> entropy(X)
log(2)/2 + 1/2 + log(pi)/2
>>> D = Die('D', 4)
>>> entropy(D)
log(4)
References
==========
.. [1] https://en.wikipedia.org/wiki/Entropy_(information_theory)
.. [2] https://www.crmarsh.com/static/pdf/Charles_Marsh_Continuous_Entropy.pdf
.. [3] http://www.math.uconn.edu/~kconrad/blurbs/analysis/entropypost.pdf
"""
pdf = density(expr, condition, **kwargs)
base = kwargs.get('b', exp(1))
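    # finite distributions expose a dict of {value: probability}, so use the
    # Shannon entropy sum directly; otherwise compute E[-log(pdf(X))]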
if hasattr(pdf, 'dict'):
return sum([-prob*log(prob, base) for prob in pdf.dict.values()])
return expectation(-log(pdf(expr), base))
def covariance(X, Y, condition=None, **kwargs):
"""
Covariance of two random expressions
The expectation that the two variables will rise and fall together
.. math::
covariance(X,Y) = E((X-E(X)) (Y-E(Y)))
Examples
========
>>> from sympy.stats import Exponential, covariance
>>> from sympy import Symbol
>>> rate = Symbol('lambda', positive=True, real=True, finite=True)
>>> X = Exponential('X', rate)
>>> Y = Exponential('Y', rate)
>>> covariance(X, X)
lambda**(-2)
>>> covariance(X, Y)
0
>>> covariance(X, Y + rate*X)
1/lambda
"""
if (is_random(X) and pspace(X) == PSpace()) or (is_random(Y) and pspace(Y) == PSpace()):
from sympy.stats.symbolic_probability import Covariance
return Covariance(X, Y, condition)
return expectation(
(X - expectation(X, condition, **kwargs)) *
(Y - expectation(Y, condition, **kwargs)),
condition, **kwargs)
def correlation(X, Y, condition=None, **kwargs):
r"""
Correlation of two random expressions, also known as correlation
coefficient or Pearson's correlation
The normalized expectation that the two variables will rise
and fall together
.. math::
        correlation(X,Y) = E((X-E(X))(Y-E(Y))) / (\sigma_x \sigma_y)
Examples
========
>>> from sympy.stats import Exponential, correlation
>>> from sympy import Symbol
>>> rate = Symbol('lambda', positive=True, real=True, finite=True)
>>> X = Exponential('X', rate)
>>> Y = Exponential('Y', rate)
>>> correlation(X, X)
1
>>> correlation(X, Y)
0
>>> correlation(X, Y + rate*X)
1/sqrt(1 + lambda**(-2))
"""
return covariance(X, Y, condition, **kwargs)/(std(X, condition, **kwargs)
* std(Y, condition, **kwargs))
def cmoment(X, n, condition=None, *, evaluate=True, **kwargs):
"""
Return the nth central moment of a random expression about its mean.
.. math::
cmoment(X, n) = E((X - E(X))^{n})
Examples
========
>>> from sympy.stats import Die, cmoment, variance
>>> X = Die('X', 6)
>>> cmoment(X, 3)
0
>>> cmoment(X, 2)
35/12
>>> cmoment(X, 2) == variance(X)
True
"""
from sympy.stats.symbolic_probability import CentralMoment
if evaluate:
return CentralMoment(X, n, condition).doit()
return CentralMoment(X, n, condition).rewrite(Integral)
def smoment(X, n, condition=None, **kwargs):
r"""
Return the nth Standardized moment of a random expression.
.. math::
smoment(X, n) = E(((X - \mu)/\sigma_X)^{n})
Examples
========
>>> from sympy.stats import skewness, Exponential, smoment
>>> from sympy import Symbol
>>> rate = Symbol('lambda', positive=True, real=True, finite=True)
>>> Y = Exponential('Y', rate)
>>> smoment(Y, 4)
9
>>> smoment(Y, 4) == smoment(3*Y, 4)
True
>>> smoment(Y, 3) == skewness(Y)
True
"""
sigma = std(X, condition, **kwargs)
return (1/sigma)**n*cmoment(X, n, condition, **kwargs)
def skewness(X, condition=None, **kwargs):
r"""
Measure of the asymmetry of the probability distribution.
Positive skew indicates that most of the values lie to the right of
the mean.
.. math::
skewness(X) = E(((X - E(X))/\sigma_X)^{3})
Parameters
==========
condition : Expr containing RandomSymbols
A conditional expression. skewness(X, X>0) is skewness of X given X > 0
Examples
========
>>> from sympy.stats import skewness, Exponential, Normal
>>> from sympy import Symbol
>>> X = Normal('X', 0, 1)
>>> skewness(X)
0
>>> skewness(X, X > 0) # find skewness given X > 0
(-sqrt(2)/sqrt(pi) + 4*sqrt(2)/pi**(3/2))/(1 - 2/pi)**(3/2)
>>> rate = Symbol('lambda', positive=True, real=True, finite=True)
>>> Y = Exponential('Y', rate)
>>> skewness(Y)
2
"""
return smoment(X, 3, condition=condition, **kwargs)
def kurtosis(X, condition=None, **kwargs):
r"""
Characterizes the tails/outliers of a probability distribution.
Kurtosis of any univariate normal distribution is 3. Kurtosis less than
3 means that the distribution produces fewer and less extreme outliers
than the normal distribution.
.. math::
kurtosis(X) = E(((X - E(X))/\sigma_X)^{4})
Parameters
==========
condition : Expr containing RandomSymbols
A conditional expression. kurtosis(X, X>0) is kurtosis of X given X > 0
Examples
========
>>> from sympy.stats import kurtosis, Exponential, Normal
>>> from sympy import Symbol
>>> X = Normal('X', 0, 1)
>>> kurtosis(X)
3
>>> kurtosis(X, X > 0) # find kurtosis given X > 0
(-4/pi - 12/pi**2 + 3)/(1 - 2/pi)**2
>>> rate = Symbol('lamda', positive=True, real=True, finite=True)
>>> Y = Exponential('Y', rate)
>>> kurtosis(Y)
9
References
==========
.. [1] https://en.wikipedia.org/wiki/Kurtosis
.. [2] http://mathworld.wolfram.com/Kurtosis.html
"""
return smoment(X, 4, condition=condition, **kwargs)
def factorial_moment(X, n, condition=None, **kwargs):
"""
The factorial moment is a mathematical quantity defined as the expectation
or average of the falling factorial of a random variable.
.. math::
        \text{factorial-moment}(X, n) = E(X(X - 1)(X - 2) \cdots (X - n + 1))
Parameters
==========
n: A natural number, n-th factorial moment.
condition : Expr containing RandomSymbols
A conditional expression.
Examples
========
>>> from sympy.stats import factorial_moment, Poisson, Binomial
>>> from sympy import Symbol, S
>>> lamda = Symbol('lamda')
>>> X = Poisson('X', lamda)
>>> factorial_moment(X, 2)
lamda**2
>>> Y = Binomial('Y', 2, S.Half)
>>> factorial_moment(Y, 2)
1/2
>>> factorial_moment(Y, 2, Y > 1) # find factorial moment for Y > 1
2
References
==========
.. [1] https://en.wikipedia.org/wiki/Factorial_moment
.. [2] http://mathworld.wolfram.com/FactorialMoment.html
"""
return expectation(FallingFactorial(X, n), condition=condition, **kwargs)
def median(X, evaluate=True, **kwargs):
r"""
    Calculates the median of the probability distribution.
    Mathematically, the median of a probability distribution is the set of all
    values of `m` for which the following condition is satisfied
.. math::
        P(X \leq m) \geq \frac{1}{2} \text{ and } P(X \geq m) \geq \frac{1}{2}
Parameters
==========
X: The random expression whose median is to be calculated.
Returns
=======
The FiniteSet or an Interval which contains the median of the
random expression.
Examples
========
>>> from sympy.stats import Normal, Die, median
>>> N = Normal('N', 3, 1)
>>> median(N)
FiniteSet(3)
>>> D = Die('D')
>>> median(D)
FiniteSet(3, 4)
References
==========
.. [1] https://en.wikipedia.org/wiki/Median#Probability_distributions
"""
from sympy.stats.crv import ContinuousPSpace
from sympy.stats.drv import DiscretePSpace
from sympy.stats.frv import FinitePSpace
if isinstance(pspace(X), FinitePSpace):
cdf = pspace(X).compute_cdf(X)
result = []
for key, value in cdf.items():
            if value >= Rational(1, 2) and (1 - value) + \
pspace(X).probability(Eq(X, key)) >= Rational(1, 2):
result.append(key)
return FiniteSet(*result)
if isinstance(pspace(X), ContinuousPSpace) or isinstance(pspace(X), DiscretePSpace):
cdf = pspace(X).compute_cdf(X)
x = Dummy('x')
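        # solve CDF(x) = 1/2 for x over the support of X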
result = solveset(piecewise_fold(cdf(x) - Rational(1, 2)), x, pspace(X).set)
return result
    raise NotImplementedError("The median of %s is not implemented." % str(pspace(X)))
def coskewness(X, Y, Z, condition=None, **kwargs):
r"""
Calculates the co-skewness of three random variables.
    Mathematically, coskewness is defined as
.. math::
coskewness(X,Y,Z)=\frac{E[(X-E[X]) * (Y-E[Y]) * (Z-E[Z])]} {\sigma_{X}\sigma_{Y}\sigma_{Z}}
Parameters
==========
X : RandomSymbol
Random Variable used to calculate coskewness
Y : RandomSymbol
Random Variable used to calculate coskewness
Z : RandomSymbol
Random Variable used to calculate coskewness
condition : Expr containing RandomSymbols
A conditional expression
Examples
========
>>> from sympy.stats import coskewness, Exponential, skewness
>>> from sympy import symbols
>>> p = symbols('p', positive=True)
>>> X = Exponential('X', p)
>>> Y = Exponential('Y', 2*p)
>>> coskewness(X, Y, Y)
0
>>> coskewness(X, Y + X, Y + 2*X)
16*sqrt(85)/85
>>> coskewness(X + 2*Y, Y + X, Y + 2*X, X > 3)
9*sqrt(170)/85
>>> coskewness(Y, Y, Y) == skewness(Y)
True
>>> coskewness(X, Y + p*X, Y + 2*p*X)
4/(sqrt(1 + 1/(4*p**2))*sqrt(4 + 1/(4*p**2)))
Returns
=======
coskewness : The coskewness of the three random variables
References
==========
.. [1] https://en.wikipedia.org/wiki/Coskewness
"""
num = expectation((X - expectation(X, condition, **kwargs)) \
* (Y - expectation(Y, condition, **kwargs)) \
* (Z - expectation(Z, condition, **kwargs)), condition, **kwargs)
den = std(X, condition, **kwargs) * std(Y, condition, **kwargs) \
* std(Z, condition, **kwargs)
return num/den
P = probability
E = expectation
H = entropy
|
0b11e6a3403228ec07bd8d08ede68446f5662cc9868519ae065f4ea404df35ad | """
Main Random Variables Module
Defines abstract random variable type.
Contains interfaces for the probability space object (PSpace) as well as standard
operators, P, E, sample, density, where, quantile
See Also
========
sympy.stats.crv
sympy.stats.frv
sympy.stats.rv_interface
"""
from functools import singledispatch
from typing import Tuple as tTuple
from sympy import (Basic, S, Expr, Symbol, Tuple, And, Add, Eq, lambdify, Or,
Equality, Lambda, sympify, Dummy, Ne, KroneckerDelta,
DiracDelta, Mul, Indexed, MatrixSymbol, Function)
from sympy.core.relational import Relational
from sympy.core.sympify import _sympify
from sympy.sets.sets import FiniteSet, ProductSet, Intersection
from sympy.solvers.solveset import solveset
from sympy.external import import_module
from sympy.utilities.misc import filldedent
import warnings
x = Symbol('x')
@singledispatch
def is_random(x):
return False
@is_random.register(Basic)
def _(x):
atoms = x.free_symbols
return any([is_random(i) for i in atoms])
class RandomDomain(Basic):
"""
Represents a set of variables and the values which they can take
See Also
========
sympy.stats.crv.ContinuousDomain
sympy.stats.frv.FiniteDomain
"""
is_ProductDomain = False
is_Finite = False
is_Continuous = False
is_Discrete = False
def __new__(cls, symbols, *args):
symbols = FiniteSet(*symbols)
return Basic.__new__(cls, symbols, *args)
@property
def symbols(self):
return self.args[0]
@property
def set(self):
return self.args[1]
def __contains__(self, other):
raise NotImplementedError()
def compute_expectation(self, expr):
raise NotImplementedError()
class SingleDomain(RandomDomain):
"""
A single variable and its domain
See Also
========
sympy.stats.crv.SingleContinuousDomain
sympy.stats.frv.SingleFiniteDomain
"""
def __new__(cls, symbol, set):
assert symbol.is_Symbol
return Basic.__new__(cls, symbol, set)
@property
def symbol(self):
return self.args[0]
@property
def symbols(self):
return FiniteSet(self.symbol)
def __contains__(self, other):
if len(other) != 1:
return False
sym, val = tuple(other)[0]
return self.symbol == sym and val in self.set
class MatrixDomain(RandomDomain):
"""
A Random Matrix variable and its domain
"""
def __new__(cls, symbol, set):
symbol, set = _symbol_converter(symbol), _sympify(set)
return Basic.__new__(cls, symbol, set)
@property
def symbol(self):
return self.args[0]
@property
def symbols(self):
return FiniteSet(self.symbol)
class ConditionalDomain(RandomDomain):
"""
A RandomDomain with an attached condition
See Also
========
sympy.stats.crv.ConditionalContinuousDomain
sympy.stats.frv.ConditionalFiniteDomain
"""
def __new__(cls, fulldomain, condition):
condition = condition.xreplace({rs: rs.symbol
for rs in random_symbols(condition)})
return Basic.__new__(cls, fulldomain, condition)
@property
def symbols(self):
return self.fulldomain.symbols
@property
def fulldomain(self):
return self.args[0]
@property
def condition(self):
return self.args[1]
@property
def set(self):
raise NotImplementedError("Set of Conditional Domain not Implemented")
def as_boolean(self):
return And(self.fulldomain.as_boolean(), self.condition)
class PSpace(Basic):
"""
A Probability Space
    Probability Spaces encode processes that take on different values
    probabilistically. These underlie Random Symbols which occur in SymPy
expressions and contain the mechanics to evaluate statistical statements.
See Also
========
sympy.stats.crv.ContinuousPSpace
sympy.stats.frv.FinitePSpace
"""
is_Finite = None # type: bool
is_Continuous = None # type: bool
is_Discrete = None # type: bool
is_real = None # type: bool
@property
def domain(self):
return self.args[0]
@property
def density(self):
return self.args[1]
@property
def values(self):
return frozenset(RandomSymbol(sym, self) for sym in self.symbols)
@property
def symbols(self):
return self.domain.symbols
def where(self, condition):
raise NotImplementedError()
def compute_density(self, expr):
raise NotImplementedError()
def sample(self):
raise NotImplementedError()
def probability(self, condition):
raise NotImplementedError()
def compute_expectation(self, expr):
raise NotImplementedError()
class SinglePSpace(PSpace):
"""
Represents the probabilities of a set of random events that can be
attributed to a single variable/symbol.
"""
def __new__(cls, s, distribution):
s = _symbol_converter(s)
return Basic.__new__(cls, s, distribution)
@property
def value(self):
return RandomSymbol(self.symbol, self)
@property
def symbol(self):
return self.args[0]
@property
def distribution(self):
return self.args[1]
@property
def pdf(self):
return self.distribution.pdf(self.symbol)
class RandomSymbol(Expr):
"""
Random Symbols represent ProbabilitySpaces in SymPy Expressions
In principle they can take on any value that their symbol can take on
within the associated PSpace with probability determined by the PSpace
Density.
Random Symbols contain pspace and symbol properties.
The pspace property points to the represented Probability Space
The symbol is a standard SymPy Symbol that is used in that probability space
for example in defining a density.
You can form normal SymPy expressions using RandomSymbols and operate on
those expressions with the Functions
E - Expectation of a random expression
P - Probability of a condition
density - Probability Density of an expression
given - A new random expression (with new random symbols) given a condition
An object of the RandomSymbol type should almost never be created by the
user. They tend to be created instead by the PSpace class's value method.
Traditionally a user doesn't even do this but instead calls one of the
convenience functions Normal, Exponential, Coin, Die, FiniteRV, etc....
"""
def __new__(cls, symbol, pspace=None):
from sympy.stats.joint_rv import JointRandomSymbol
if pspace is None:
# Allow single arg, representing pspace == PSpace()
pspace = PSpace()
symbol = _symbol_converter(symbol)
if not isinstance(pspace, PSpace):
raise TypeError("pspace variable should be of type PSpace")
if cls == JointRandomSymbol and isinstance(pspace, SinglePSpace):
cls = RandomSymbol
return Basic.__new__(cls, symbol, pspace)
is_finite = True
is_symbol = True
is_Atom = True
_diff_wrt = True
pspace = property(lambda self: self.args[1])
symbol = property(lambda self: self.args[0])
name = property(lambda self: self.symbol.name)
def _eval_is_positive(self):
return self.symbol.is_positive
def _eval_is_integer(self):
return self.symbol.is_integer
def _eval_is_real(self):
return self.symbol.is_real or self.pspace.is_real
@property
def is_commutative(self):
return self.symbol.is_commutative
@property
def free_symbols(self):
return {self}
class RandomIndexedSymbol(RandomSymbol):
def __new__(cls, idx_obj, pspace=None):
if pspace is None:
# Allow single arg, representing pspace == PSpace()
pspace = PSpace()
if not isinstance(idx_obj, (Indexed, Function)):
            raise TypeError("A Function or Indexed object is expected, not %s" % (idx_obj))
return Basic.__new__(cls, idx_obj, pspace)
symbol = property(lambda self: self.args[0])
name = property(lambda self: str(self.args[0]))
@property
def key(self):
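        # the time index of the random symbol, e.g. t in X(t) or X[t]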
if isinstance(self.symbol, Indexed):
return self.symbol.args[1]
elif isinstance(self.symbol, Function):
return self.symbol.args[0]
@property
def free_symbols(self):
if self.key.free_symbols:
free_syms = self.key.free_symbols
free_syms.add(self)
return free_syms
return {self}
class RandomMatrixSymbol(RandomSymbol, MatrixSymbol): # type: ignore
def __new__(cls, symbol, n, m, pspace=None):
n, m = _sympify(n), _sympify(m)
symbol = _symbol_converter(symbol)
if pspace is None:
# Allow single arg, representing pspace == PSpace()
pspace = PSpace()
return Basic.__new__(cls, symbol, n, m, pspace)
symbol = property(lambda self: self.args[0])
pspace = property(lambda self: self.args[3])
class ProductPSpace(PSpace):
"""
Abstract class for representing probability spaces with multiple random
variables.
See Also
========
sympy.stats.rv.IndependentProductPSpace
sympy.stats.joint_rv.JointPSpace
"""
pass
class IndependentProductPSpace(ProductPSpace):
"""
A probability space resulting from the merger of two independent probability
spaces.
    Often created using the function ``pspace``
"""
def __new__(cls, *spaces):
rs_space_dict = {}
for space in spaces:
for value in space.values:
rs_space_dict[value] = space
symbols = FiniteSet(*[val.symbol for val in rs_space_dict.keys()])
# Overlapping symbols
from sympy.stats.joint_rv import MarginalDistribution
from sympy.stats.compound_rv import CompoundDistribution
if len(symbols) < sum(len(space.symbols) for space in spaces if not
isinstance(space.distribution, (
CompoundDistribution, MarginalDistribution))):
raise ValueError("Overlapping Random Variables")
if all(space.is_Finite for space in spaces):
from sympy.stats.frv import ProductFinitePSpace
cls = ProductFinitePSpace
obj = Basic.__new__(cls, *FiniteSet(*spaces))
return obj
@property
def pdf(self):
p = Mul(*[space.pdf for space in self.spaces])
return p.subs({rv: rv.symbol for rv in self.values})
@property
def rs_space_dict(self):
d = {}
for space in self.spaces:
for value in space.values:
d[value] = space
return d
@property
def symbols(self):
return FiniteSet(*[val.symbol for val in self.rs_space_dict.keys()])
@property
def spaces(self):
return FiniteSet(*self.args)
@property
def values(self):
return sumsets(space.values for space in self.spaces)
def compute_expectation(self, expr, rvs=None, evaluate=False, **kwargs):
rvs = rvs or self.values
rvs = frozenset(rvs)
for space in self.spaces:
expr = space.compute_expectation(expr, rvs & space.values, evaluate=False, **kwargs)
if evaluate and hasattr(expr, 'doit'):
return expr.doit(**kwargs)
return expr
@property
def domain(self):
return ProductDomain(*[space.domain for space in self.spaces])
@property
def density(self):
raise NotImplementedError("Density not available for ProductSpaces")
def sample(self, size=(), library='scipy'):
return {k: v for space in self.spaces
for k, v in space.sample(size=size, library=library).items()}
def probability(self, condition, **kwargs):
cond_inv = False
if isinstance(condition, Ne):
condition = Eq(condition.args[0], condition.args[1])
cond_inv = True
elif isinstance(condition, And): # they are independent
return Mul(*[self.probability(arg) for arg in condition.args])
elif isinstance(condition, Or): # they are independent
return Add(*[self.probability(arg) for arg in condition.args])
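        # reduce the relational condition to a comparison of (lhs - rhs) with 0
        # and work with the density of that scalar expression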
expr = condition.lhs - condition.rhs
rvs = random_symbols(expr)
dens = self.compute_density(expr)
if any([pspace(rv).is_Continuous for rv in rvs]):
from sympy.stats.crv import SingleContinuousPSpace
from sympy.stats.crv_types import ContinuousDistributionHandmade
if expr in self.values:
# Marginalize all other random symbols out of the density
randomsymbols = tuple(set(self.values) - frozenset([expr]))
symbols = tuple(rs.symbol for rs in randomsymbols)
pdf = self.domain.integrate(self.pdf, symbols, **kwargs)
return Lambda(expr.symbol, pdf)
dens = ContinuousDistributionHandmade(dens)
z = Dummy('z', real=True)
space = SingleContinuousPSpace(z, dens)
result = space.probability(condition.__class__(space.value, 0))
else:
from sympy.stats.drv import SingleDiscretePSpace
from sympy.stats.drv_types import DiscreteDistributionHandmade
dens = DiscreteDistributionHandmade(dens)
z = Dummy('z', integer=True)
space = SingleDiscretePSpace(z, dens)
result = space.probability(condition.__class__(space.value, 0))
return result if not cond_inv else S.One - result
def compute_density(self, expr, **kwargs):
rvs = random_symbols(expr)
if any(pspace(rv).is_Continuous for rv in rvs):
z = Dummy('z', real=True)
expr = self.compute_expectation(DiracDelta(expr - z),
**kwargs)
else:
z = Dummy('z', integer=True)
expr = self.compute_expectation(KroneckerDelta(expr, z),
**kwargs)
return Lambda(z, expr)
def compute_cdf(self, expr, **kwargs):
raise ValueError("CDF not well defined on multivariate expressions")
def conditional_space(self, condition, normalize=True, **kwargs):
rvs = random_symbols(condition)
condition = condition.xreplace({rv: rv.symbol for rv in self.values})
if any([pspace(rv).is_Continuous for rv in rvs]):
from sympy.stats.crv import (ConditionalContinuousDomain,
ContinuousPSpace)
space = ContinuousPSpace
domain = ConditionalContinuousDomain(self.domain, condition)
elif any([pspace(rv).is_Discrete for rv in rvs]):
from sympy.stats.drv import (ConditionalDiscreteDomain,
DiscretePSpace)
space = DiscretePSpace
domain = ConditionalDiscreteDomain(self.domain, condition)
elif all([pspace(rv).is_Finite for rv in rvs]):
from sympy.stats.frv import FinitePSpace
return FinitePSpace.conditional_space(self, condition)
if normalize:
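            # renormalize the joint pdf by the total weight of the conditioned
            # domain so that the conditional density integrates/sums to one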
replacement = {rv: Dummy(str(rv)) for rv in self.symbols}
norm = domain.compute_expectation(self.pdf, **kwargs)
pdf = self.pdf / norm.xreplace(replacement)
# XXX: Converting symbols from set to tuple. The order matters to
# Lambda though so we shouldn't be starting with a set here...
density = Lambda(tuple(domain.symbols), pdf)
return space(domain, density)
class ProductDomain(RandomDomain):
"""
A domain resulting from the merger of two independent domains
See Also
========
sympy.stats.crv.ProductContinuousDomain
sympy.stats.frv.ProductFiniteDomain
"""
is_ProductDomain = True
def __new__(cls, *domains):
# Flatten any product of products
domains2 = []
for domain in domains:
if not domain.is_ProductDomain:
domains2.append(domain)
else:
domains2.extend(domain.domains)
domains2 = FiniteSet(*domains2)
if all(domain.is_Finite for domain in domains2):
from sympy.stats.frv import ProductFiniteDomain
cls = ProductFiniteDomain
if all(domain.is_Continuous for domain in domains2):
from sympy.stats.crv import ProductContinuousDomain
cls = ProductContinuousDomain
if all(domain.is_Discrete for domain in domains2):
from sympy.stats.drv import ProductDiscreteDomain
cls = ProductDiscreteDomain
return Basic.__new__(cls, *domains2)
@property
def sym_domain_dict(self):
return {symbol: domain for domain in self.domains
for symbol in domain.symbols}
@property
def symbols(self):
return FiniteSet(*[sym for domain in self.domains
for sym in domain.symbols])
@property
def domains(self):
return self.args
@property
def set(self):
return ProductSet(*(domain.set for domain in self.domains))
def __contains__(self, other):
# Split event into each subdomain
for domain in self.domains:
# Collect the parts of this event which associate to this domain
elem = frozenset([item for item in other
if sympify(domain.symbols.contains(item[0]))
is S.true])
# Test this sub-event
if elem not in domain:
return False
# All subevents passed
return True
def as_boolean(self):
return And(*[domain.as_boolean() for domain in self.domains])
def random_symbols(expr):
"""
Returns all RandomSymbols within a SymPy Expression.
"""
atoms = getattr(expr, 'atoms', None)
if atoms is not None:
comp = lambda rv: rv.symbol.name
l = list(atoms(RandomSymbol))
return sorted(l, key=comp)
else:
return []
def pspace(expr):
"""
Returns the underlying Probability Space of a random expression.
For internal use.
Examples
========
>>> from sympy.stats import pspace, Normal
>>> X = Normal('X', 0, 1)
>>> pspace(2*X + 1) == X.pspace
True
"""
expr = sympify(expr)
if isinstance(expr, RandomSymbol) and expr.pspace is not None:
return expr.pspace
if expr.has(RandomMatrixSymbol):
rm = list(expr.atoms(RandomMatrixSymbol))[0]
return rm.pspace
rvs = random_symbols(expr)
if not rvs:
raise ValueError("Expression containing Random Variable expected, not %s" % (expr))
# If only one space present
if all(rv.pspace == rvs[0].pspace for rv in rvs):
return rvs[0].pspace
from sympy.stats.compound_rv import CompoundPSpace
for rv in rvs:
if isinstance(rv.pspace, CompoundPSpace):
return rv.pspace
# Otherwise make a product space
return IndependentProductPSpace(*[rv.pspace for rv in rvs])
def sumsets(sets):
"""
Union of sets
"""
return frozenset().union(*sets)
def rs_swap(a, b):
"""
Build a dictionary to swap RandomSymbols based on their underlying symbol.
i.e.
if ``X = ('x', pspace1)``
and ``Y = ('x', pspace2)``
then ``X`` and ``Y`` match and the key, value pair
``{X:Y}`` will appear in the result
Inputs: collections a and b of random variables which share common symbols
Output: dict mapping RVs in a to RVs in b
"""
d = {}
for rsa in a:
d[rsa] = [rsb for rsb in b if rsa.symbol == rsb.symbol][0]
return d
def given(expr, condition=None, **kwargs):
r""" Conditional Random Expression
From a random expression and a condition on that expression creates a new
probability space from the condition and returns the same expression on that
conditional probability space.
Examples
========
>>> from sympy.stats import given, density, Die
>>> X = Die('X', 6)
>>> Y = given(X, X > 3)
>>> density(Y).dict
{4: 1/3, 5: 1/3, 6: 1/3}
Following convention, if the condition is a random symbol then that symbol
is considered fixed.
>>> from sympy.stats import Normal
>>> from sympy import pprint
>>> from sympy.abc import z
>>> X = Normal('X', 0, 1)
>>> Y = Normal('Y', 0, 1)
>>> pprint(density(X + Y, Y)(z), use_unicode=False)
2
-(-Y + z)
-----------
___ 2
\/ 2 *e
------------------
____
2*\/ pi
"""
if not is_random(condition) or pspace_independent(expr, condition):
return expr
if isinstance(condition, RandomSymbol):
condition = Eq(condition, condition.symbol)
condsymbols = random_symbols(condition)
if (isinstance(condition, Equality) and len(condsymbols) == 1 and
not isinstance(pspace(expr).domain, ConditionalDomain)):
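        # an equality condition on a single random symbol pins it to the
        # solutions of the equation; substitute those values into expr directly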
rv = tuple(condsymbols)[0]
results = solveset(condition, rv)
if isinstance(results, Intersection) and S.Reals in results.args:
results = list(results.args[1])
sums = 0
for res in results:
temp = expr.subs(rv, res)
if temp == True:
return True
if temp != False:
# XXX: This seems nonsensical but preserves existing behaviour
# after the change that Relational is no longer a subclass of
# Expr. Here expr is sometimes Relational and sometimes Expr
# but we are trying to add them with +=. This needs to be
# fixed somehow.
if sums == 0 and isinstance(expr, Relational):
sums = expr.subs(rv, res)
else:
sums += expr.subs(rv, res)
if sums == 0:
return False
return sums
# Get full probability space of both the expression and the condition
fullspace = pspace(Tuple(expr, condition))
# Build new space given the condition
space = fullspace.conditional_space(condition, **kwargs)
# Dictionary to swap out RandomSymbols in expr with new RandomSymbols
# That point to the new conditional space
swapdict = rs_swap(fullspace.values, space.values)
# Swap random variables in the expression
expr = expr.xreplace(swapdict)
return expr
def expectation(expr, condition=None, numsamples=None, evaluate=True, **kwargs):
"""
Returns the expected value of a random expression
Parameters
==========
expr : Expr containing RandomSymbols
The expression of which you want to compute the expectation value
condition : Expr containing RandomSymbols
A conditional expression. E(X, X>0) is expectation of X given X > 0
numsamples : int
Enables sampling and approximates the expectation with this many samples
evalf : Bool (defaults to True)
If sampling return a number rather than a complex expression
evaluate : Bool (defaults to True)
If False, return an unevaluated ``Expectation`` object instead of computing the result
Examples
========
>>> from sympy.stats import E, Die
>>> X = Die('X', 6)
>>> E(X)
7/2
>>> E(2*X + 1)
8
>>> E(X, X > 3) # Expectation of X given that it is above 3
5
"""
if not is_random(expr): # expr isn't random?
return expr
kwargs['numsamples'] = numsamples
from sympy.stats.symbolic_probability import Expectation
if evaluate:
return Expectation(expr, condition).doit(**kwargs)
### TODO: Remove the user warnings in the future releases
message = ("Since version 1.7, using `evaluate=False` returns `Expectation` "
"object. If you want unevaluated Integral/Sum use "
"`E(expr, condition, evaluate=False).rewrite(Integral)`")
warnings.warn(filldedent(message))
return Expectation(expr, condition)
def probability(condition, given_condition=None, numsamples=None,
evaluate=True, **kwargs):
"""
Probability that a condition is true, optionally given a second condition
Parameters
==========
condition : Combination of Relationals containing RandomSymbols
The condition of which you want to compute the probability
given_condition : Combination of Relationals containing RandomSymbols
A conditional expression. P(X > 1, X > 0) is the probability of X > 1
given X > 0
numsamples : int
Enables sampling and approximates the probability with this many samples
evaluate : Bool (defaults to True)
If False, return an unevaluated ``Probability`` object instead of computing the result
Examples
========
>>> from sympy.stats import P, Die
>>> from sympy import Eq
>>> X, Y = Die('X', 6), Die('Y', 6)
>>> P(X > 3)
1/2
>>> P(Eq(X, 5), X > 2) # Probability that X == 5 given that X > 2
1/4
>>> P(X > Y)
5/12
"""
kwargs['numsamples'] = numsamples
from sympy.stats.symbolic_probability import Probability
if evaluate:
return Probability(condition, given_condition).doit(**kwargs)
### TODO: Remove the user warnings in the future releases
message = ("Since version 1.7, using `evaluate=False` returns `Probability` "
"object. If you want unevaluated Integral/Sum use "
"`P(condition, given_condition, evaluate=False).rewrite(Integral)`")
warnings.warn(filldedent(message))
return Probability(condition, given_condition)
class Density(Basic):
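"""
Internal helper representing an unevaluated density computation; the public
entry point is the ``density`` function defined below.
A rough usage sketch (output shown for illustration only):
>>> from sympy.stats import Die
>>> from sympy.stats.rv import Density
>>> X = Die('X', 6)
>>> Density(X).doit() # doctest: +SKIP
DieDistribution(6)
"""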
expr = property(lambda self: self.args[0])
@property
def condition(self):
if len(self.args) > 1:
return self.args[1]
else:
return None
def doit(self, evaluate=True, **kwargs):
from sympy.stats.random_matrix import RandomMatrixPSpace
from sympy.stats.joint_rv import JointPSpace
from sympy.stats.matrix_distributions import MatrixPSpace
from sympy.stats.compound_rv import CompoundPSpace
from sympy.stats.frv import SingleFiniteDistribution
expr, condition = self.expr, self.condition
if isinstance(expr, SingleFiniteDistribution):
return expr.dict
if condition is not None:
# Recompute on new conditional expr
expr = given(expr, condition, **kwargs)
if not random_symbols(expr):
return Lambda(x, DiracDelta(x - expr))
if isinstance(expr, RandomSymbol):
if isinstance(expr.pspace, (SinglePSpace, JointPSpace, MatrixPSpace)) and \
hasattr(expr.pspace, 'distribution'):
return expr.pspace.distribution
elif isinstance(expr.pspace, RandomMatrixPSpace):
return expr.pspace.model
if isinstance(pspace(expr), CompoundPSpace):
kwargs['compound_evaluate'] = evaluate
result = pspace(expr).compute_density(expr, **kwargs)
if evaluate and hasattr(result, 'doit'):
return result.doit()
else:
return result
def density(expr, condition=None, evaluate=True, numsamples=None, **kwargs):
"""
Probability density of a random expression, optionally given a second
condition.
This density will take on different forms for different types of
probability spaces. Discrete variables produce Dicts. Continuous
variables produce Lambdas.
Parameters
==========
expr : Expr containing RandomSymbols
The expression of which you want to compute the density value
condition : Relational containing RandomSymbols
A conditional expression. density(X > 1, X > 0) is density of X > 1
given X > 0
numsamples : int
Enables sampling and approximates the density with this many samples
Examples
========
>>> from sympy.stats import density, Die, Normal
>>> from sympy import Symbol
>>> x = Symbol('x')
>>> D = Die('D', 6)
>>> X = Normal(x, 0, 1)
>>> density(D).dict
{1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6}
>>> density(2*D).dict
{2: 1/6, 4: 1/6, 6: 1/6, 8: 1/6, 10: 1/6, 12: 1/6}
>>> density(X)(x)
sqrt(2)*exp(-x**2/2)/(2*sqrt(pi))
"""
if numsamples:
return sampling_density(expr, condition, numsamples=numsamples,
**kwargs)
return Density(expr, condition).doit(evaluate=evaluate, **kwargs)
def cdf(expr, condition=None, evaluate=True, **kwargs):
"""
Cumulative Distribution Function of a random expression, optionally given
a second condition.
This density will take on different forms for different types of
probability spaces.
Discrete variables produce Dicts.
Continuous variables produce Lambdas.
Examples
========
>>> from sympy.stats import density, Die, Normal, cdf
>>> D = Die('D', 6)
>>> X = Normal('X', 0, 1)
>>> density(D).dict
{1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6}
>>> cdf(D)
{1: 1/6, 2: 1/3, 3: 1/2, 4: 2/3, 5: 5/6, 6: 1}
>>> cdf(3*D, D > 2)
{9: 1/4, 12: 1/2, 15: 3/4, 18: 1}
>>> cdf(X)
Lambda(_z, erf(sqrt(2)*_z/2)/2 + 1/2)
"""
if condition is not None: # If there is a condition
# Recompute on new conditional expr
return cdf(given(expr, condition, **kwargs), **kwargs)
# Otherwise pass work off to the ProbabilitySpace
result = pspace(expr).compute_cdf(expr, **kwargs)
if evaluate and hasattr(result, 'doit'):
return result.doit()
else:
return result
def characteristic_function(expr, condition=None, evaluate=True, **kwargs):
"""
Characteristic function of a random expression, optionally given a second condition
Returns a Lambda
Examples
========
>>> from sympy.stats import Normal, DiscreteUniform, Poisson, characteristic_function
>>> X = Normal('X', 0, 1)
>>> characteristic_function(X)
Lambda(_t, exp(-_t**2/2))
>>> Y = DiscreteUniform('Y', [1, 2, 7])
>>> characteristic_function(Y)
Lambda(_t, exp(7*_t*I)/3 + exp(2*_t*I)/3 + exp(_t*I)/3)
>>> Z = Poisson('Z', 2)
>>> characteristic_function(Z)
Lambda(_t, exp(2*exp(_t*I) - 2))
"""
if condition is not None:
return characteristic_function(given(expr, condition, **kwargs), **kwargs)
result = pspace(expr).compute_characteristic_function(expr, **kwargs)
if evaluate and hasattr(result, 'doit'):
return result.doit()
else:
return result
def moment_generating_function(expr, condition=None, evaluate=True, **kwargs):
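"""
Moment generating function of a random expression, optionally given a
second condition.
Returns a Lambda.
Examples
========
A short sketch of typical usage (the printed form may vary slightly
between versions):
>>> from sympy.stats import Normal, moment_generating_function
>>> X = Normal('X', 0, 1)
>>> moment_generating_function(X) # doctest: +SKIP
Lambda(_t, exp(_t**2/2))
"""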
if condition is not None:
return moment_generating_function(given(expr, condition, **kwargs), **kwargs)
result = pspace(expr).compute_moment_generating_function(expr, **kwargs)
if evaluate and hasattr(result, 'doit'):
return result.doit()
else:
return result
def where(condition, given_condition=None, **kwargs):
"""
Returns the domain where a condition is True.
Examples
========
>>> from sympy.stats import where, Die, Normal
>>> from sympy import And
>>> D1, D2 = Die('a', 6), Die('b', 6)
>>> a, b = D1.symbol, D2.symbol
>>> X = Normal('x', 0, 1)
>>> where(X**2<1)
Domain: (-1 < x) & (x < 1)
>>> where(X**2<1).set
Interval.open(-1, 1)
>>> where(And(D1<=D2 , D2<3))
Domain: (Eq(a, 1) & Eq(b, 1)) | (Eq(a, 1) & Eq(b, 2)) | (Eq(a, 2) & Eq(b, 2))
"""
if given_condition is not None: # If there is a condition
# Recompute on new conditional expr
return where(given(condition, given_condition, **kwargs), **kwargs)
# Otherwise pass work off to the ProbabilitySpace
return pspace(condition).where(condition, **kwargs)
def sample(expr, condition=None, size=(), library='scipy', numsamples=1,
**kwargs):
"""
A realization of the random expression
Parameters
==========
expr : Expression of random variables
Expression from which sample is extracted
condition : Expr containing RandomSymbols
A conditional expression
size : int, tuple
Represents size of each sample in numsamples
library : str
- 'scipy' : Sample using scipy
- 'numpy' : Sample using numpy
- 'pymc3' : Sample using PyMC3
Choose any of the available options to sample from as string,
by default is 'scipy'
numsamples : int
Number of samples, each with size as ``size``
Examples
========
>>> from sympy.stats import Die, sample, Normal, Geometric
>>> X, Y, Z = Die('X', 6), Die('Y', 6), Die('Z', 6) # Finite Random Variable
>>> die_roll = sample(X + Y + Z) # doctest: +SKIP
>>> next(die_roll) # doctest: +SKIP
6
>>> N = Normal('N', 3, 4) # Continuous Random Variable
>>> samp = next(sample(N)) # doctest: +SKIP
>>> samp in N.pspace.domain.set # doctest: +SKIP
True
>>> samp = next(sample(N, N>0)) # doctest: +SKIP
>>> samp > 0 # doctest: +SKIP
True
>>> samp_list = next(sample(N, size=4)) # doctest: +SKIP
>>> [sam in N.pspace.domain.set for sam in samp_list] # doctest: +SKIP
[True, True, True, True]
>>> G = Geometric('G', 0.5) # Discrete Random Variable
>>> samp_list = next(sample(G, size=3)) # doctest: +SKIP
>>> samp_list # doctest: +SKIP
array([10, 4, 1])
>>> [sam in G.pspace.domain.set for sam in samp_list] # doctest: +SKIP
[True, True, True]
>>> MN = Normal("MN", [3, 4], [[2, 1], [1, 2]]) # Joint Random Variable
>>> samp_list = next(sample(MN, size=4)) # doctest: +SKIP
>>> samp_list # doctest: +SKIP
array([[4.22564264, 3.23364418],
[3.41002011, 4.60090908],
[3.76151866, 4.77617143],
[4.71440865, 2.65714157]])
>>> [tuple(sam) in MN.pspace.domain.set for sam in samp_list] # doctest: +SKIP
[True, True, True, True]
Returns
=======
sample: iterator object
iterator object containing the sample/samples of given expr
"""
### TODO: Remove the user warnings in the future releases
message = ("The return type of sample has been changed to return an "
"iterator object since version 1.7. For more information see "
"https://github.com/sympy/sympy/issues/19061")
warnings.warn(filldedent(message))
return sample_iter(expr, condition, size=size, library=library,
numsamples=numsamples)
def quantile(expr, evaluate=True, **kwargs):
r"""
Return the :math:`p^{th}` order quantile of a probability distribution.
Quantile is defined as the smallest value at which the cumulative
distribution function of the random variable is greater than or equal to
the given probability.
.. math::
    Q(p) = \inf\{x \in (-\infty, \infty) \mid p \le F(x)\}
Examples
========
>>> from sympy.stats import quantile, Die, Exponential
>>> from sympy import Symbol, pprint
>>> p = Symbol("p")
>>> l = Symbol("lambda", positive=True)
>>> X = Exponential("x", l)
>>> quantile(X)(p)
-log(1 - p)/lambda
>>> D = Die("d", 6)
>>> pprint(quantile(D)(p), use_unicode=False)
/nan for Or(p > 1, p < 0)
|
| 1 for p <= 1/6
|
| 2 for p <= 1/3
|
< 3 for p <= 1/2
|
| 4 for p <= 2/3
|
| 5 for p <= 5/6
|
\ 6 for p <= 1
"""
result = pspace(expr).compute_quantile(expr, **kwargs)
if evaluate and hasattr(result, 'doit'):
return result.doit()
else:
return result
def sample_iter(expr, condition=None, size=(), library='scipy',
numsamples=S.Infinity, **kwargs):
"""
Returns an iterator of realizations from the expression given a condition
Parameters
==========
expr: Expr
Random expression to be realized
condition: Expr, optional
A conditional expression
size : int, tuple
Represents size of each sample in numsamples
numsamples: integer, optional
Length of the iterator (defaults to infinity)
Examples
========
>>> from sympy.stats import Normal, sample_iter
>>> X = Normal('X', 0, 1)
>>> expr = X*X + 3
>>> iterator = sample_iter(expr, numsamples=3) # doctest: +SKIP
>>> list(iterator) # doctest: +SKIP
[12, 4, 7]
Returns
=======
sample_iter: iterator object
iterator object containing the sample/samples of given expr
See Also
========
sample
sampling_P
sampling_E
"""
from sympy.stats.joint_rv import JointRandomSymbol
if not import_module(library):
raise ValueError("Failed to import %s" % library)
if condition is not None:
ps = pspace(Tuple(expr, condition))
else:
ps = pspace(expr)
rvs = list(ps.values)
if isinstance(expr, JointRandomSymbol):
expr = expr.subs({expr: RandomSymbol(expr.symbol, expr.pspace)})
else:
sub = {}
for arg in expr.args:
if isinstance(arg, JointRandomSymbol):
sub[arg] = RandomSymbol(arg.symbol, arg.pspace)
expr = expr.subs(sub)
if library == 'pymc3':
# Currently unable to lambdify in pymc3
# TODO : Remove 'pymc3' when lambdify accepts 'pymc3' as module
fn = lambdify(rvs, expr, **kwargs)
else:
fn = lambdify(rvs, expr, modules=library, **kwargs)
if condition is not None:
given_fn = lambdify(rvs, condition, **kwargs)
def return_generator():
count = 0
while count < numsamples:
d = ps.sample(size=size, library=library) # a dictionary that maps RVs to values
args = [d[rv] for rv in rvs]
if condition is not None: # Check that these values satisfy the condition
gd = given_fn(*args)
if gd != True and gd != False:
raise ValueError(
"Conditions must not contain free symbols")
if not gd: # If the values don't satisfy then try again
continue
yield fn(*args)
count += 1
return return_generator()
def sample_iter_lambdify(expr, condition=None, size=(), numsamples=S.Infinity,
**kwargs):
return sample_iter(expr, condition=condition, size=size, numsamples=numsamples,
**kwargs)
def sample_iter_subs(expr, condition=None, size=(), numsamples=S.Infinity,
**kwargs):
return sample_iter(expr, condition=condition, size=size, numsamples=numsamples,
**kwargs)
def sampling_P(condition, given_condition=None, library='scipy', numsamples=1,
evalf=True, **kwargs):
"""
Sampling version of P
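Examples
========
A rough usage sketch; the returned value is a sample-based estimate and
varies from run to run:
>>> from sympy.stats import Die, sampling_P
>>> X = Die('X', 6)
>>> sampling_P(X > 3, numsamples=100) # doctest: +SKIP
0.510000000000000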
See Also
========
P
sampling_E
sampling_density
"""
count_true = 0
count_false = 0
samples = sample_iter(condition, given_condition, library=library,
numsamples=numsamples, **kwargs)
for sample in samples:
if sample:
count_true += 1
else:
count_false += 1
result = S(count_true) / numsamples
if evalf:
return result.evalf()
else:
return result
def sampling_E(expr, given_condition=None, library='scipy', numsamples=1,
evalf=True, **kwargs):
"""
Sampling version of E
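Examples
========
A rough usage sketch; the result is a Monte Carlo estimate of E(X) and
varies from run to run:
>>> from sympy.stats import Die, sampling_E
>>> X = Die('X', 6)
>>> sampling_E(X, numsamples=100) # doctest: +SKIP
3.45000000000000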
See Also
========
P
sampling_P
sampling_density
"""
samples = list(sample_iter(expr, given_condition, library=library,
numsamples=numsamples, **kwargs))
result = Add(*[samp for samp in samples]) / numsamples
if evalf:
return result.evalf()
else:
return result
def sampling_density(expr, given_condition=None, library='scipy',
numsamples=1, **kwargs):
"""
Sampling version of density
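Examples
========
A rough usage sketch; the dict maps sampled values to their observed
counts and varies from run to run:
>>> from sympy.stats import Die, sampling_density
>>> X = Die('X', 6)
>>> sampling_density(X, numsamples=10) # doctest: +SKIP
{1: 2, 3: 1, 4: 4, 5: 1, 6: 2}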
See Also
========
density
sampling_P
sampling_E
"""
results = {}
for result in sample_iter(expr, given_condition, library=library,
numsamples=numsamples, **kwargs):
results[result] = results.get(result, 0) + 1
return results
def dependent(a, b):
"""
Dependence of two random expressions
Two expressions are independent if knowledge of one does not change
computations on the other.
Examples
========
>>> from sympy.stats import Normal, dependent, given
>>> from sympy import Tuple, Eq
>>> X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
>>> dependent(X, Y)
False
>>> dependent(2*X + Y, -Y)
True
>>> X, Y = given(Tuple(X, Y), Eq(X + Y, 3))
>>> dependent(X, Y)
True
See Also
========
independent
"""
if pspace_independent(a, b):
return False
z = Symbol('z', real=True)
# Dependent if density is unchanged when one is given information about
# the other
return (density(a, Eq(b, z)) != density(a) or
density(b, Eq(a, z)) != density(b))
def independent(a, b):
"""
Independence of two random expressions
Two expressions are independent if knowledge of one does not change
computations on the other.
Examples
========
>>> from sympy.stats import Normal, independent, given
>>> from sympy import Tuple, Eq
>>> X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
>>> independent(X, Y)
True
>>> independent(2*X + Y, -Y)
False
>>> X, Y = given(Tuple(X, Y), Eq(X + Y, 3))
>>> independent(X, Y)
False
See Also
========
dependent
"""
return not dependent(a, b)
def pspace_independent(a, b):
"""
Tests for independence between a and b by checking if their PSpaces have
overlapping symbols. This is a sufficient but not necessary condition for
independence and is intended to be used internally.
Notes
=====
pspace_independent(a, b) implies independent(a, b)
independent(a, b) does not imply pspace_independent(a, b)
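Examples
========
A small sketch of the check (this is an internal helper):
>>> from sympy.stats import Normal
>>> from sympy.stats.rv import pspace_independent
>>> X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
>>> pspace_independent(X, Y)
True
>>> pspace_independent(X + Y, Y)
False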
"""
a_symbols = set(pspace(a).symbols)
b_symbols = set(pspace(b).symbols)
if len(set(random_symbols(a)).intersection(random_symbols(b))) != 0:
return False
if len(a_symbols.intersection(b_symbols)) == 0:
return True
return None
def rv_subs(expr, symbols=None):
"""
Given a random expression, replace all random variables with their symbols.
If symbols keyword is given restrict the swap to only the symbols listed.
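Examples
========
A small sketch (the random variable is replaced by its underlying symbol):
>>> from sympy.stats import Normal
>>> from sympy.stats.rv import rv_subs
>>> X = Normal('x', 0, 1)
>>> rv_subs(2*X + 1) # doctest: +SKIP
2*x + 1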
"""
if symbols is None:
symbols = random_symbols(expr)
if not symbols:
return expr
swapdict = {rv: rv.symbol for rv in symbols}
return expr.subs(swapdict)
class NamedArgsMixin:
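"""
Mixin that exposes the positional ``args`` of a ``Basic`` subclass as named
attributes, using the names listed in ``_argnames``.
A rough sketch of the idea (``Point`` is a hypothetical subclass used only
for illustration, not part of sympy):
>>> from sympy import Basic, Integer
>>> from sympy.stats.rv import NamedArgsMixin
>>> class Point(Basic, NamedArgsMixin): # doctest: +SKIP
...     _argnames = ('x', 'y')
>>> Point(Integer(1), Integer(2)).x # doctest: +SKIP
1
"""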
_argnames = () # type: tTuple[str, ...]
def __getattr__(self, attr):
try:
return self.args[self._argnames.index(attr)]
except ValueError:
raise AttributeError("'%s' object has no attribute '%s'" % (
type(self).__name__, attr))
def _value_check(condition, message):
"""
Raise a ValueError with message if condition is False, else
return True if all conditions were True, else False.
Examples
========
>>> from sympy.stats.rv import _value_check
>>> from sympy.abc import a, b, c
>>> from sympy import And, Dummy
>>> _value_check(2 < 3, '')
True
Here, the condition is not False, but it doesn't evaluate to True
so False is returned (but no error is raised). So checking if the
return value is True or False will tell you if all conditions were
evaluated.
>>> _value_check(a < b, '')
False
In this case the condition is False so an error is raised:
>>> r = Dummy(real=True)
>>> _value_check(r < r - 1, 'condition is not true')
Traceback (most recent call last):
...
ValueError: condition is not true
Several conditions can be checked at once by passing them as an iterable
(an error is raised as soon as any of them is found to be False):
>>> _value_check((a < 0, b < 0, c < 0), '')
False
The iterable can be a generator, too:
>>> _value_check((i < 0 for i in (a, b, c)), '')
False
The following are equivalent to the above but do not pass
an iterable:
>>> all(_value_check(i < 0, '') for i in (a, b, c))
False
>>> _value_check(And(a < 0, b < 0, c < 0), '')
False
"""
from sympy.core.compatibility import iterable
from sympy.core.logic import fuzzy_and
if not iterable(condition):
condition = [condition]
truth = fuzzy_and(condition)
if truth == False:
raise ValueError(message)
return truth == True
def _symbol_converter(sym):
"""
Casts the parameter to Symbol if it is 'str'
otherwise no operation is performed on it.
Parameters
==========
sym
The parameter to be converted.
Returns
=======
Symbol
the parameter converted to Symbol.
Raises
======
TypeError
If the parameter is neither a string nor a Symbol.
Examples
========
>>> from sympy import Symbol
>>> from sympy.stats.rv import _symbol_converter
>>> s = _symbol_converter('s')
>>> isinstance(s, Symbol)
True
>>> _symbol_converter(1)
Traceback (most recent call last):
...
TypeError: 1 is neither a Symbol nor a string
>>> r = Symbol('r')
>>> isinstance(r, Symbol)
True
"""
if isinstance(sym, str):
sym = Symbol(sym)
if not isinstance(sym, Symbol):
raise TypeError("%s is neither a Symbol nor a string"%(sym))
return sym
def sample_stochastic_process(process):
"""
This function is used to sample from a stochastic process.
Parameters
==========
process: StochasticProcess
Process used to extract the samples. It must be an instance of
StochasticProcess
Examples
========
>>> from sympy.stats import sample_stochastic_process, DiscreteMarkovChain
>>> from sympy import Matrix
>>> T = Matrix([[0.5, 0.2, 0.3],[0.2, 0.5, 0.3],[0.2, 0.3, 0.5]])
>>> Y = DiscreteMarkovChain("Y", [0, 1, 2], T)
>>> next(sample_stochastic_process(Y)) in Y.state_space # doctest: +SKIP
True
>>> next(sample_stochastic_process(Y)) # doctest: +SKIP
0
>>> next(sample_stochastic_process(Y)) # doctest: +SKIP
2
Returns
=======
sample: iterator object
iterator object containing the sample of given process
"""
from sympy.stats.stochastic_process_types import StochasticProcess
if not isinstance(process, StochasticProcess):
raise ValueError("Process must be an instance of Stochastic Process")
return process.sample()
"""
Joint Random Variables Module
See Also
========
sympy.stats.rv
sympy.stats.frv
sympy.stats.crv
sympy.stats.drv
"""
from sympy import (Basic, Lambda, sympify, Indexed, Symbol, ProductSet, S,
Dummy)
from sympy.concrete.products import Product
from sympy.concrete.summations import Sum, summation
from sympy.core.compatibility import iterable
from sympy.core.containers import Tuple
from sympy.integrals.integrals import Integral, integrate
from sympy.matrices import ImmutableMatrix, matrix2numpy, list2numpy
from sympy.stats.crv import SingleContinuousDistribution, SingleContinuousPSpace
from sympy.stats.drv import SingleDiscreteDistribution, SingleDiscretePSpace
from sympy.stats.rv import (ProductPSpace, NamedArgsMixin,
ProductDomain, RandomSymbol, random_symbols, SingleDomain, _symbol_converter)
from sympy.utilities.misc import filldedent
from sympy.external import import_module
# __all__ = ['marginal_distribution']
class JointPSpace(ProductPSpace):
"""
Represents a joint probability space. Represented using symbols for
each component and a distribution.
"""
def __new__(cls, sym, dist):
if isinstance(dist, SingleContinuousDistribution):
return SingleContinuousPSpace(sym, dist)
if isinstance(dist, SingleDiscreteDistribution):
return SingleDiscretePSpace(sym, dist)
sym = _symbol_converter(sym)
return Basic.__new__(cls, sym, dist)
@property
def set(self):
return self.domain.set
@property
def symbol(self):
return self.args[0]
@property
def distribution(self):
return self.args[1]
@property
def value(self):
return JointRandomSymbol(self.symbol, self)
@property
def component_count(self):
_set = self.distribution.set
if isinstance(_set, ProductSet):
return S(len(_set.args))
elif isinstance(_set, Product):
return _set.limits[0][-1]
return S.One
@property
def pdf(self):
sym = [Indexed(self.symbol, i) for i in range(self.component_count)]
return self.distribution(*sym)
@property
def domain(self):
rvs = random_symbols(self.distribution)
if not rvs:
return SingleDomain(self.symbol, self.distribution.set)
return ProductDomain(*[rv.pspace.domain for rv in rvs])
def component_domain(self, index):
return self.set.args[index]
def marginal_distribution(self, *indices):
count = self.component_count
if count.atoms(Symbol):
raise ValueError("Marginal distributions cannot be computed "
"for symbolic dimensions. It is a work under progress.")
orig = [Indexed(self.symbol, i) for i in range(count)]
all_syms = [Symbol(str(i)) for i in orig]
replace_dict = dict(zip(all_syms, orig))
sym = tuple(Symbol(str(Indexed(self.symbol, i))) for i in indices)
limits = list([i,] for i in all_syms if i not in sym)
index = 0
for i in range(count):
if i not in indices:
limits[index].append(self.distribution.set.args[i])
limits[index] = tuple(limits[index])
index += 1
if self.distribution.is_Continuous:
f = Lambda(sym, integrate(self.distribution(*all_syms), *limits))
elif self.distribution.is_Discrete:
f = Lambda(sym, summation(self.distribution(*all_syms), *limits))
return f.xreplace(replace_dict)
def compute_expectation(self, expr, rvs=None, evaluate=False, **kwargs):
syms = tuple(self.value[i] for i in range(self.component_count))
rvs = rvs or syms
if not any([i in rvs for i in syms]):
return expr
expr = expr*self.pdf
for rv in rvs:
if isinstance(rv, Indexed):
expr = expr.xreplace({rv: Indexed(str(rv.base), rv.args[1])})
elif isinstance(rv, RandomSymbol):
expr = expr.xreplace({rv: rv.symbol})
if self.value in random_symbols(expr):
raise NotImplementedError(filldedent('''
Expectations of expression with unindexed joint random symbols
cannot be calculated yet.'''))
limits = tuple((Indexed(str(rv.base),rv.args[1]),
self.distribution.set.args[rv.args[1]]) for rv in syms)
return Integral(expr, *limits)
def where(self, condition):
raise NotImplementedError()
def compute_density(self, expr):
raise NotImplementedError()
def sample(self, size=(), library='scipy'):
"""
Internal sample method
Returns dictionary mapping RandomSymbol to realization value.
"""
return {RandomSymbol(self.symbol, self): self.distribution.sample(size,
library=library)}
def probability(self, condition):
raise NotImplementedError()
class SampleJointScipy:
"""Returns the sample from scipy of the given distribution"""
def __new__(cls, dist, size):
return cls._sample_scipy(dist, size)
@classmethod
def _sample_scipy(cls, dist, size):
"""Sample from SciPy."""
from scipy import stats as scipy_stats
scipy_rv_map = {
'MultivariateNormalDistribution': lambda dist, size: scipy_stats.multivariate_normal.rvs(
mean=matrix2numpy(dist.mu).flatten(),
cov=matrix2numpy(dist.sigma), size=size),
'MultivariateBetaDistribution': lambda dist, size: scipy_stats.dirichlet.rvs(
alpha=list2numpy(dist.alpha, float).flatten(), size=size),
'MultinomialDistribution': lambda dist, size: scipy_stats.multinomial.rvs(
n=int(dist.n), p=list2numpy(dist.p, float).flatten(), size=size)
}
dist_list = scipy_rv_map.keys()
if dist.__class__.__name__ not in dist_list:
return None
return scipy_rv_map[dist.__class__.__name__](dist, size)
class SampleJointNumpy:
"""Returns the sample from numpy of the given distribution"""
def __new__(cls, dist, size):
return cls._sample_numpy(dist, size)
@classmethod
def _sample_numpy(cls, dist, size):
"""Sample from NumPy."""
import numpy
numpy_rv_map = {
'MultivariateNormalDistribution': lambda dist, size: numpy.random.multivariate_normal(
mean=matrix2numpy(dist.mu, float).flatten(),
cov=matrix2numpy(dist.sigma, float), size=size),
'MultivariateBetaDistribution': lambda dist, size: numpy.random.dirichlet(
alpha=list2numpy(dist.alpha, float).flatten(), size=size),
'MultinomialDistribution': lambda dist, size: numpy.random.multinomial(
n=int(dist.n), pvals=list2numpy(dist.p, float).flatten(), size=size)
}
dist_list = numpy_rv_map.keys()
if dist.__class__.__name__ not in dist_list:
return None
return numpy_rv_map[dist.__class__.__name__](dist, size)
class SampleJointPymc:
"""Returns the sample from pymc3 of the given distribution"""
def __new__(cls, dist, size):
return cls._sample_pymc3(dist, size)
@classmethod
def _sample_pymc3(cls, dist, size):
"""Sample from PyMC3."""
import pymc3
pymc3_rv_map = {
'MultivariateNormalDistribution': lambda dist:
pymc3.MvNormal('X', mu=matrix2numpy(dist.mu, float).flatten(),
cov=matrix2numpy(dist.sigma, float), shape=(1, dist.mu.shape[0])),
'MultivariateBetaDistribution': lambda dist:
pymc3.Dirichlet('X', a=list2numpy(dist.alpha, float).flatten()),
'MultinomialDistribution': lambda dist:
pymc3.Multinomial('X', n=int(dist.n),
p=list2numpy(dist.p, float).flatten(), shape=(1, len(dist.p)))
}
dist_list = pymc3_rv_map.keys()
if dist.__class__.__name__ not in dist_list:
return None
with pymc3.Model():
pymc3_rv_map[dist.__class__.__name__](dist)
return pymc3.sample(size, chains=1, progressbar=False)[:]['X']
_get_sample_class_jrv = {
'scipy': SampleJointScipy,
'pymc3': SampleJointPymc,
'numpy': SampleJointNumpy
}
class JointDistribution(Basic, NamedArgsMixin):
"""
Represents the distribution shared by the random variables of a joint
probability space.
Contains methods for PDF, CDF, sampling, marginal densities, etc.
"""
_argnames = ('pdf', )
def __new__(cls, *args):
args = list(map(sympify, args))
for i in range(len(args)):
if isinstance(args[i], list):
args[i] = ImmutableMatrix(args[i])
return Basic.__new__(cls, *args)
@property
def domain(self):
return ProductDomain(self.symbols)
@property
def pdf(self):
return self.density.args[1]
def cdf(self, other):
if not isinstance(other, dict):
raise ValueError("%s should be of type dict, got %s"%(other, type(other)))
rvs = list(other.keys())
_set = self.domain.set.sets
expr = self.pdf(tuple(i.args[0] for i in self.symbols))
for i in range(len(other)):
if rvs[i].is_Continuous:
density = Integral(expr, (rvs[i], _set[i].inf,
other[rvs[i]]))
elif rvs[i].is_Discrete:
density = Sum(expr, (rvs[i], _set[i].inf,
other[rvs[i]]))
return density
def sample(self, size=(), library='scipy'):
""" A random realization from the distribution """
libraries = ['scipy', 'numpy', 'pymc3']
if library not in libraries:
raise NotImplementedError("Sampling from %s is not supported yet."
% str(library))
if not import_module(library):
raise ValueError("Failed to import %s" % library)
samps = _get_sample_class_jrv[library](self, size)
if samps is not None:
return samps
raise NotImplementedError(
"Sampling for %s is not currently implemented from %s"
% (self.__class__.__name__, library)
)
def __call__(self, *args):
return self.pdf(*args)
class JointRandomSymbol(RandomSymbol):
"""
Representation of random symbols with joint probability distributions
to allow indexing.
"""
def __getitem__(self, key):
if isinstance(self.pspace, JointPSpace):
if (self.pspace.component_count <= key) == True:
raise ValueError("Index keys for %s can only up to %s." %
(self.name, self.pspace.component_count - 1))
return Indexed(self, key)
class MarginalDistribution(Basic):
"""
Represents the marginal distribution of a joint probability space.
Initialised using a probability distribution and random variables (or
their indexed components) which should be a part of the resultant
distribution.
"""
def __new__(cls, dist, *rvs):
if len(rvs) == 1 and iterable(rvs[0]):
rvs = tuple(rvs[0])
if not all(isinstance(rv, (Indexed, RandomSymbol)) for rv in rvs):
raise ValueError(filldedent('''Marginal distribution can be
initialised only in terms of random variables or indexed random
variables'''))
rvs = Tuple.fromiter(rv for rv in rvs)
if not isinstance(dist, JointDistribution) and len(random_symbols(dist)) == 0:
return dist
return Basic.__new__(cls, dist, rvs)
def check(self):
pass
@property
def set(self):
rvs = [i for i in self.args[1] if isinstance(i, RandomSymbol)]
return ProductSet(*[rv.pspace.set for rv in rvs])
@property
def symbols(self):
rvs = self.args[1]
return {rv.pspace.symbol for rv in rvs}
def pdf(self, *x):
expr, rvs = self.args[0], self.args[1]
marginalise_out = [i for i in random_symbols(expr) if i not in rvs]
if isinstance(expr, JointDistribution):
count = len(expr.domain.args)
x = Dummy('x', real=True, finite=True)
syms = tuple(Indexed(x, i) for i in range(count))
expr = expr.pdf(syms)
else:
syms = tuple(rv.pspace.symbol if isinstance(rv, RandomSymbol) else rv.args[0] for rv in rvs)
return Lambda(syms, self.compute_pdf(expr, marginalise_out))(*x)
def compute_pdf(self, expr, rvs):
for rv in rvs:
lpdf = 1
if isinstance(rv, RandomSymbol):
lpdf = rv.pspace.pdf
expr = self.marginalise_out(expr*lpdf, rv)
return expr
def marginalise_out(self, expr, rv):
from sympy.concrete.summations import Sum
if isinstance(rv, RandomSymbol):
dom = rv.pspace.set
elif isinstance(rv, Indexed):
dom = rv.base.component_domain(
rv.pspace.component_domain(rv.args[1]))
expr = expr.xreplace({rv: rv.pspace.symbol})
if rv.pspace.is_Continuous:
#TODO: Modify to support integration
#for all kinds of sets.
expr = Integral(expr, (rv.pspace.symbol, dom))
elif rv.pspace.is_Discrete:
#incorporate this into `Sum`/`summation`
if dom in (S.Integers, S.Naturals, S.Naturals0):
dom = (dom.inf, dom.sup)
expr = Sum(expr, (rv.pspace.symbol, dom))
return expr
def __call__(self, *args):
return self.pdf(*args)
from sympy import (Basic, sympify, symbols, Dummy, Lambda, summation,
Piecewise, S, cacheit, Sum, exp, I, Ne, Eq, poly,
series, factorial, And, lambdify)
from sympy.polys.polyerrors import PolynomialError
from sympy.stats.crv import reduce_rational_inequalities_wrap
from sympy.stats.rv import (NamedArgsMixin, SinglePSpace, SingleDomain,
random_symbols, PSpace, ConditionalDomain, RandomDomain,
ProductDomain)
from sympy.stats.symbolic_probability import Probability
from sympy.sets.fancysets import Range, FiniteSet
from sympy.sets.sets import Union
from sympy.sets.contains import Contains
from sympy.utilities import filldedent
from sympy.core.sympify import _sympify
from sympy.external import import_module
class DiscreteDistribution(Basic):
def __call__(self, *args):
return self.pdf(*args)
class SampleDiscreteScipy:
"""Returns the sample from scipy of the given distribution"""
def __new__(cls, dist, size):
return cls._sample_scipy(dist, size)
@classmethod
def _sample_scipy(cls, dist, size):
"""Sample from SciPy."""
from scipy import stats as scipy_stats
scipy_rv_map = {
'GeometricDistribution': lambda dist, size: scipy_stats.geom.rvs(p=float(dist.p),
size=size),
'LogarithmicDistribution': lambda dist, size: scipy_stats.logser.rvs(p=float(dist.p),
size=size),
'NegativeBinomialDistribution': lambda dist, size: scipy_stats.nbinom.rvs(n=float(dist.r),
p=float(dist.p), size=size),
'PoissonDistribution': lambda dist, size: scipy_stats.poisson.rvs(mu=float(dist.lamda),
size=size),
'SkellamDistribution': lambda dist, size: scipy_stats.skellam.rvs(mu1=float(dist.mu1),
mu2=float(dist.mu2), size=size),
'YuleSimonDistribution': lambda dist, size: scipy_stats.yulesimon.rvs(alpha=float(dist.rho),
size=size),
'ZetaDistribution': lambda dist, size: scipy_stats.zipf.rvs(a=float(dist.s),
size=size)
}
dist_list = scipy_rv_map.keys()
if dist.__class__.__name__ == 'DiscreteDistributionHandmade':
from scipy.stats import rv_discrete
z = Dummy('z')
handmade_pmf = lambdify(z, dist.pdf(z), ['numpy', 'scipy'])
class scipy_pmf(rv_discrete):
def _pmf(self, x):
return handmade_pmf(x)
scipy_rv = scipy_pmf(a=float(dist.set._inf), b=float(dist.set._sup),
name='scipy_pmf')
return scipy_rv.rvs(size=size)
if dist.__class__.__name__ not in dist_list:
return None
return scipy_rv_map[dist.__class__.__name__](dist, size)
class SampleDiscreteNumpy:
"""Returns the sample from numpy of the given distribution"""
def __new__(cls, dist, size):
return cls._sample_numpy(dist, size)
@classmethod
def _sample_numpy(cls, dist, size):
"""Sample from NumPy."""
import numpy
numpy_rv_map = {
'GeometricDistribution': lambda dist, size: numpy.random.geometric(p=float(dist.p),
size=size),
'PoissonDistribution': lambda dist, size: numpy.random.poisson(lam=float(dist.lamda),
size=size),
'ZetaDistribution': lambda dist, size: numpy.random.zipf(a=float(dist.s),
size=size)
}
dist_list = numpy_rv_map.keys()
if dist.__class__.__name__ not in dist_list:
return None
return numpy_rv_map[dist.__class__.__name__](dist, size)
class SampleDiscretePymc:
"""Returns the sample from pymc3 of the given distribution"""
def __new__(cls, dist, size):
return cls._sample_pymc3(dist, size)
@classmethod
def _sample_pymc3(cls, dist, size):
"""Sample from PyMC3."""
import pymc3
pymc3_rv_map = {
'GeometricDistribution': lambda dist: pymc3.Geometric('X', p=float(dist.p)),
'PoissonDistribution': lambda dist: pymc3.Poisson('X', mu=float(dist.lamda)),
'NegativeBinomialDistribution': lambda dist: pymc3.NegativeBinomial('X',
mu=float((dist.p*dist.r)/(1-dist.p)), alpha=float(dist.r))
}
dist_list = pymc3_rv_map.keys()
if dist.__class__.__name__ not in dist_list:
return None
with pymc3.Model():
pymc3_rv_map[dist.__class__.__name__](dist)
return pymc3.sample(size, chains=1, progressbar=False)[:]['X']
_get_sample_class_drv = {
'scipy': SampleDiscreteScipy,
'pymc3': SampleDiscretePymc,
'numpy': SampleDiscreteNumpy
}
class SingleDiscreteDistribution(DiscreteDistribution, NamedArgsMixin):
""" Discrete distribution of a single variable
Serves as superclass for PoissonDistribution etc....
Provides methods for pdf, cdf, and sampling
See Also:
sympy.stats.drv_types.*
"""
set = S.Integers
def __new__(cls, *args):
args = list(map(sympify, args))
return Basic.__new__(cls, *args)
@staticmethod
def check(*args):
pass
def sample(self, size=(), library='scipy'):
""" A random realization from the distribution"""
libraries = ['scipy', 'numpy', 'pymc3']
if library not in libraries:
raise NotImplementedError("Sampling from %s is not supported yet."
% str(library))
if not import_module(library):
raise ValueError("Failed to import %s" % library)
samps = _get_sample_class_drv[library](self, size)
if samps is not None:
return samps
raise NotImplementedError(
"Sampling for %s is not currently implemented from %s"
% (self.__class__.__name__, library)
)
@cacheit
def compute_cdf(self, **kwargs):
""" Compute the CDF from the PDF
Returns a Lambda
"""
x, z = symbols('x, z', integer=True, cls=Dummy)
left_bound = self.set.inf
# CDF is integral of PDF from left bound to z
pdf = self.pdf(x)
cdf = summation(pdf, (x, left_bound, z), **kwargs)
# Ensure that the CDF is zero to the left of left_bound
cdf = Piecewise((cdf, z >= left_bound), (0, True))
return Lambda(z, cdf)
def _cdf(self, x):
return None
def cdf(self, x, **kwargs):
""" Cumulative density function """
if not kwargs:
cdf = self._cdf(x)
if cdf is not None:
return cdf
return self.compute_cdf(**kwargs)(x)
@cacheit
def compute_characteristic_function(self, **kwargs):
""" Compute the characteristic function from the PDF
Returns a Lambda
"""
x, t = symbols('x, t', real=True, cls=Dummy)
pdf = self.pdf(x)
cf = summation(exp(I*t*x)*pdf, (x, self.set.inf, self.set.sup))
return Lambda(t, cf)
def _characteristic_function(self, t):
return None
def characteristic_function(self, t, **kwargs):
""" Characteristic function """
if not kwargs:
cf = self._characteristic_function(t)
if cf is not None:
return cf
return self.compute_characteristic_function(**kwargs)(t)
@cacheit
def compute_moment_generating_function(self, **kwargs):
t = Dummy('t', real=True)
x = Dummy('x', integer=True)
pdf = self.pdf(x)
mgf = summation(exp(t*x)*pdf, (x, self.set.inf, self.set.sup))
return Lambda(t, mgf)
def _moment_generating_function(self, t):
return None
def moment_generating_function(self, t, **kwargs):
if not kwargs:
mgf = self._moment_generating_function(t)
if mgf is not None:
return mgf
return self.compute_moment_generating_function(**kwargs)(t)
@cacheit
def compute_quantile(self, **kwargs):
""" Compute the Quantile from the PDF
Returns a Lambda
"""
x = Dummy('x', integer=True)
p = Dummy('p', real=True)
left_bound = self.set.inf
pdf = self.pdf(x)
cdf = summation(pdf, (x, left_bound, x), **kwargs)
set = ((x, p <= cdf), )
return Lambda(p, Piecewise(*set))
def _quantile(self, x):
return None
def quantile(self, x, **kwargs):
""" Cumulative density function """
if not kwargs:
quantile = self._quantile(x)
if quantile is not None:
return quantile
return self.compute_quantile(**kwargs)(x)
def expectation(self, expr, var, evaluate=True, **kwargs):
""" Expectation of expression over distribution """
# TODO: support discrete sets with non integer stepsizes
if evaluate:
try:
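# If expr is polynomial in var, read the moments off the moment
# generating function: E[var**k] equals k! times the t**k coefficient
# of the MGF's Taylor expansion at t = 0, which the loop below computes.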
p = poly(expr, var)
t = Dummy('t', real=True)
mgf = self.moment_generating_function(t)
deg = p.degree()
taylor = poly(series(mgf, t, 0, deg + 1).removeO(), t)
result = 0
for k in range(deg+1):
result += p.coeff_monomial(var ** k) * taylor.coeff_monomial(t ** k) * factorial(k)
return result
except PolynomialError:
return summation(expr * self.pdf(var),
(var, self.set.inf, self.set.sup), **kwargs)
else:
return Sum(expr * self.pdf(var),
(var, self.set.inf, self.set.sup), **kwargs)
def __call__(self, *args):
return self.pdf(*args)
class DiscreteDomain(RandomDomain):
"""
A domain with discrete support with step size one.
Represented using symbols and Range.
"""
is_Discrete = True
class SingleDiscreteDomain(DiscreteDomain, SingleDomain):
def as_boolean(self):
return Contains(self.symbol, self.set)
class ConditionalDiscreteDomain(DiscreteDomain, ConditionalDomain):
"""
Domain with discrete support of step size one, that is restricted by
some condition.
"""
@property
def set(self):
rv = self.symbols
if len(self.symbols) > 1:
raise NotImplementedError(filldedent('''
Multivariate conditional domains are not yet implemented.'''))
rv = list(rv)[0]
return reduce_rational_inequalities_wrap(self.condition,
rv).intersect(self.fulldomain.set)
class DiscretePSpace(PSpace):
is_real = True
is_Discrete = True
@property
def pdf(self):
return self.density(*self.symbols)
def where(self, condition):
rvs = random_symbols(condition)
assert all(r.symbol in self.symbols for r in rvs)
if len(rvs) > 1:
raise NotImplementedError(filldedent('''Multivariate discrete
random variables are not yet supported.'''))
conditional_domain = reduce_rational_inequalities_wrap(condition,
rvs[0])
conditional_domain = conditional_domain.intersect(self.domain.set)
return SingleDiscreteDomain(rvs[0].symbol, conditional_domain)
def probability(self, condition):
complement = isinstance(condition, Ne)
if complement:
condition = Eq(condition.args[0], condition.args[1])
try:
_domain = self.where(condition).set
if condition == False or _domain is S.EmptySet:
return S.Zero
if condition == True or _domain == self.domain.set:
return S.One
prob = self.eval_prob(_domain)
except NotImplementedError:
from sympy.stats.rv import density
expr = condition.lhs - condition.rhs
dens = density(expr)
if not isinstance(dens, DiscreteDistribution):
from sympy.stats.drv_types import DiscreteDistributionHandmade
dens = DiscreteDistributionHandmade(dens)
z = Dummy('z', real=True)
space = SingleDiscretePSpace(z, dens)
prob = space.probability(condition.__class__(space.value, 0))
if prob is None:
prob = Probability(condition)
return prob if not complement else S.One - prob
def eval_prob(self, _domain):
sym = list(self.symbols)[0]
if isinstance(_domain, Range):
n = symbols('n', integer=True)
inf, sup, step = (r for r in _domain.args)
summand = ((self.pdf).replace(
sym, n*step))
rv = summation(summand,
(n, inf/step, (sup)/step - 1)).doit()
return rv
elif isinstance(_domain, FiniteSet):
pdf = Lambda(sym, self.pdf)
rv = sum(pdf(x) for x in _domain)
return rv
elif isinstance(_domain, Union):
rv = sum(self.eval_prob(x) for x in _domain.args)
return rv
def conditional_space(self, condition):
# XXX: Converting from set to tuple. The order matters to Lambda
# though so we should be starting with a set...
density = Lambda(tuple(self.symbols), self.pdf/self.probability(condition))
condition = condition.xreplace({rv: rv.symbol for rv in self.values})
domain = ConditionalDiscreteDomain(self.domain, condition)
return DiscretePSpace(domain, density)
class ProductDiscreteDomain(ProductDomain, DiscreteDomain):
def as_boolean(self):
return And(*[domain.as_boolean for domain in self.domains])
class SingleDiscretePSpace(DiscretePSpace, SinglePSpace):
""" Discrete probability space over a single univariate variable """
is_real = True
@property
def set(self):
return self.distribution.set
@property
def domain(self):
return SingleDiscreteDomain(self.symbol, self.set)
def sample(self, size=(), library='scipy'):
"""
Internal sample method
Returns dictionary mapping RandomSymbol to realization value.
"""
return {self.value: self.distribution.sample(size, library=library)}
def compute_expectation(self, expr, rvs=None, evaluate=True, **kwargs):
rvs = rvs or (self.value,)
if self.value not in rvs:
return expr
expr = _sympify(expr)
expr = expr.xreplace({rv: rv.symbol for rv in rvs})
x = self.value.symbol
try:
return self.distribution.expectation(expr, x, evaluate=evaluate,
**kwargs)
except NotImplementedError:
return Sum(expr * self.pdf, (x, self.set.inf, self.set.sup),
**kwargs)
def compute_cdf(self, expr, **kwargs):
if expr == self.value:
x = Dummy("x", real=True)
return Lambda(x, self.distribution.cdf(x, **kwargs))
else:
raise NotImplementedError()
def compute_density(self, expr, **kwargs):
if expr == self.value:
return self.distribution
raise NotImplementedError()
def compute_characteristic_function(self, expr, **kwargs):
if expr == self.value:
t = Dummy("t", real=True)
return Lambda(t, self.distribution.characteristic_function(t, **kwargs))
else:
raise NotImplementedError()
def compute_moment_generating_function(self, expr, **kwargs):
if expr == self.value:
t = Dummy("t", real=True)
return Lambda(t, self.distribution.moment_generating_function(t, **kwargs))
else:
raise NotImplementedError()
def compute_quantile(self, expr, **kwargs):
if expr == self.value:
p = Dummy("p", real=True)
return Lambda(p, self.distribution.quantile(p, **kwargs))
else:
raise NotImplementedError()
"""
Continuous Random Variables Module
See Also
========
sympy.stats.crv_types
sympy.stats.rv
sympy.stats.frv
"""
from sympy import (Interval, Intersection, symbols, sympify, Dummy, nan,
Integral, And, Or, Piecewise, cacheit, integrate, oo, Lambda,
Basic, S, exp, I, FiniteSet, Ne, Eq, Union, poly, series, factorial,
lambdify)
from sympy.core.function import PoleError
from sympy.functions.special.delta_functions import DiracDelta
from sympy.polys.polyerrors import PolynomialError
from sympy.solvers.solveset import solveset
from sympy.solvers.inequalities import reduce_rational_inequalities
from sympy.core.sympify import _sympify
from sympy.external import import_module
from sympy.stats.rv import (RandomDomain, SingleDomain, ConditionalDomain, is_random,
ProductDomain, PSpace, SinglePSpace, random_symbols, NamedArgsMixin)
class ContinuousDomain(RandomDomain):
"""
A domain with continuous support
Represented using symbols and Intervals.
"""
is_Continuous = True
def as_boolean(self):
raise NotImplementedError("Not Implemented for generic Domains")
class SingleContinuousDomain(ContinuousDomain, SingleDomain):
"""
A univariate domain with continuous support
Represented using a single symbol and interval.
"""
def compute_expectation(self, expr, variables=None, **kwargs):
if variables is None:
variables = self.symbols
if not variables:
return expr
if frozenset(variables) != frozenset(self.symbols):
raise ValueError("Values should be equal")
# assumes only intervals
return Integral(expr, (self.symbol, self.set), **kwargs)
def as_boolean(self):
return self.set.as_relational(self.symbol)
class ProductContinuousDomain(ProductDomain, ContinuousDomain):
"""
A collection of independent domains with continuous support
"""
def compute_expectation(self, expr, variables=None, **kwargs):
if variables is None:
variables = self.symbols
for domain in self.domains:
domain_vars = frozenset(variables) & frozenset(domain.symbols)
if domain_vars:
expr = domain.compute_expectation(expr, domain_vars, **kwargs)
return expr
def as_boolean(self):
return And(*[domain.as_boolean() for domain in self.domains])
class ConditionalContinuousDomain(ContinuousDomain, ConditionalDomain):
"""
A domain with continuous support that has been further restricted by a
condition such as x > 3
"""
def compute_expectation(self, expr, variables=None, **kwargs):
if variables is None:
variables = self.symbols
if not variables:
return expr
# Extract the full integral
fullintgrl = self.fulldomain.compute_expectation(expr, variables)
# separate into integrand and limits
integrand, limits = fullintgrl.function, list(fullintgrl.limits)
conditions = [self.condition]
while conditions:
cond = conditions.pop()
if cond.is_Boolean:
if isinstance(cond, And):
conditions.extend(cond.args)
elif isinstance(cond, Or):
raise NotImplementedError("Or not implemented here")
elif cond.is_Relational:
if cond.is_Equality:
# Add the appropriate Delta to the integrand
integrand *= DiracDelta(cond.lhs - cond.rhs)
else:
symbols = cond.free_symbols & set(self.symbols)
if len(symbols) != 1: # Can't handle x > y
raise NotImplementedError(
"Multivariate Inequalities not yet implemented")
# Can handle x > 0
symbol = symbols.pop()
# Find the limit with x, such as (x, -oo, oo)
for i, limit in enumerate(limits):
if limit[0] == symbol:
# Make condition into an Interval like [0, oo]
cintvl = reduce_rational_inequalities_wrap(
cond, symbol)
# Make limit into an Interval like [-oo, oo]
lintvl = Interval(limit[1], limit[2])
# Intersect them to get [0, oo]
intvl = cintvl.intersect(lintvl)
# Put back into limits list
limits[i] = (symbol, intvl.left, intvl.right)
else:
raise TypeError(
"Condition %s is not a relational or Boolean" % cond)
return Integral(integrand, *limits, **kwargs)
def as_boolean(self):
return And(self.fulldomain.as_boolean(), self.condition)
@property
def set(self):
if len(self.symbols) == 1:
return (self.fulldomain.set & reduce_rational_inequalities_wrap(
self.condition, tuple(self.symbols)[0]))
else:
raise NotImplementedError(
"Set of Conditional Domain not Implemented")
class ContinuousDistribution(Basic):
def __call__(self, *args):
return self.pdf(*args)
class SampleContinuousScipy:
"""Returns the sample from scipy of the given distribution"""
def __new__(cls, dist, size):
return cls._sample_scipy(dist, size)
@classmethod
def _sample_scipy(cls, dist, size):
"""Sample from SciPy."""
# scipy does not need a distribution map since it can sample from custom
# distributions directly via rv_continuous
from scipy.stats import rv_continuous
z = Dummy('z')
handmade_pdf = lambdify(z, dist.pdf(z), ['numpy', 'scipy'])
class scipy_pdf(rv_continuous):
def _pdf(self, x):
return handmade_pdf(x)
scipy_rv = scipy_pdf(a=float(dist.set._inf),
b=float(dist.set._sup), name='scipy_pdf')
return scipy_rv.rvs(size=size)
class SampleContinuousNumpy:
"""Returns the sample from numpy of the given distribution"""
def __new__(cls, dist, size):
return cls._sample_numpy(dist, size)
@classmethod
def _sample_numpy(cls, dist, size):
"""Sample from NumPy."""
import numpy
numpy_rv_map = {
'BetaDistribution': lambda dist, size: numpy.random.beta(a=float(dist.alpha),
b=float(dist.beta), size=size),
'ChiSquaredDistribution': lambda dist, size: numpy.random.chisquare(
df=float(dist.k), size=size),
'ExponentialDistribution': lambda dist, size: numpy.random.exponential(
1/float(dist.rate), size=size),
'GammaDistribution': lambda dist, size: numpy.random.gamma(float(dist.k),
float(dist.theta), size=size),
'LogNormalDistribution': lambda dist, size: numpy.random.lognormal(
float(dist.mean), float(dist.std), size=size),
'NormalDistribution': lambda dist, size: numpy.random.normal(
float(dist.mean), float(dist.std), size=size),
'ParetoDistribution': lambda dist, size: (numpy.random.pareto(
a=float(dist.alpha), size=size) + 1) * float(dist.xm),
'UniformDistribution': lambda dist, size: numpy.random.uniform(
low=float(dist.left), high=float(dist.right), size=size)
}
dist_list = numpy_rv_map.keys()
if dist.__class__.__name__ not in dist_list:
return None
return numpy_rv_map[dist.__class__.__name__](dist, size)
class SampleContinuousPymc:
"""Returns the sample from pymc3 of the given distribution"""
def __new__(cls, dist, size):
return cls._sample_pymc3(dist, size)
@classmethod
def _sample_pymc3(cls, dist, size):
"""Sample from PyMC3."""
import pymc3
pymc3_rv_map = {
'BetaDistribution': lambda dist:
pymc3.Beta('X', alpha=float(dist.alpha), beta=float(dist.beta)),
'CauchyDistribution': lambda dist:
pymc3.Cauchy('X', alpha=float(dist.x0), beta=float(dist.gamma)),
'ChiSquaredDistribution': lambda dist:
pymc3.ChiSquared('X', nu=float(dist.k)),
'ExponentialDistribution': lambda dist:
pymc3.Exponential('X', lam=float(dist.rate)),
'GammaDistribution': lambda dist:
pymc3.Gamma('X', alpha=float(dist.k), beta=1/float(dist.theta)),
'LogNormalDistribution': lambda dist:
pymc3.Lognormal('X', mu=float(dist.mean), sigma=float(dist.std)),
'NormalDistribution': lambda dist:
pymc3.Normal('X', float(dist.mean), float(dist.std)),
'GaussianInverseDistribution': lambda dist:
pymc3.Wald('X', mu=float(dist.mean), lam=float(dist.shape)),
'ParetoDistribution': lambda dist:
pymc3.Pareto('X', alpha=float(dist.alpha), m=float(dist.xm)),
'UniformDistribution': lambda dist:
pymc3.Uniform('X', lower=float(dist.left), upper=float(dist.right))
}
dist_list = pymc3_rv_map.keys()
if dist.__class__.__name__ not in dist_list:
return None
with pymc3.Model():
pymc3_rv_map[dist.__class__.__name__](dist)
return pymc3.sample(size, chains=1, progressbar=False)[:]['X']
_get_sample_class_crv = {
'scipy': SampleContinuousScipy,
'pymc3': SampleContinuousPymc,
'numpy': SampleContinuousNumpy
}
class SingleContinuousDistribution(ContinuousDistribution, NamedArgsMixin):
""" Continuous distribution of a single variable
Serves as superclass for Normal/Exponential/UniformDistribution etc....
Represented by parameters for each of the specific classes. E.g
NormalDistribution is represented by a mean and standard deviation.
Provides methods for pdf, cdf, and sampling
See Also
========
sympy.stats.crv_types.*
"""
set = Interval(-oo, oo)
def __new__(cls, *args):
args = list(map(sympify, args))
return Basic.__new__(cls, *args)
@staticmethod
def check(*args):
pass
def sample(self, size=(), library='scipy'):
""" A random realization from the distribution """
libraries = ['scipy', 'numpy', 'pymc3']
if library not in libraries:
raise NotImplementedError("Sampling from %s is not supported yet."
% str(library))
if not import_module(library):
raise ValueError("Failed to import %s" % library)
samps = _get_sample_class_crv[library](self, size)
if samps is not None:
return samps
raise NotImplementedError(
"Sampling for %s is not currently implemented from %s"
% (self.__class__.__name__, library)
)
@cacheit
def compute_cdf(self, **kwargs):
""" Compute the CDF from the PDF
Returns a Lambda
"""
x, z = symbols('x, z', real=True, cls=Dummy)
left_bound = self.set.start
# CDF is integral of PDF from left bound to z
pdf = self.pdf(x)
cdf = integrate(pdf.doit(), (x, left_bound, z), **kwargs)
# Ensure that the CDF is zero to the left of left_bound
cdf = Piecewise((cdf, z >= left_bound), (0, True))
return Lambda(z, cdf)
def _cdf(self, x):
return None
def cdf(self, x, **kwargs):
""" Cumulative density function """
if len(kwargs) == 0:
cdf = self._cdf(x)
if cdf is not None:
return cdf
return self.compute_cdf(**kwargs)(x)
@cacheit
def compute_characteristic_function(self, **kwargs):
""" Compute the characteristic function from the PDF
Returns a Lambda
"""
x, t = symbols('x, t', real=True, cls=Dummy)
pdf = self.pdf(x)
cf = integrate(exp(I*t*x)*pdf, (x, self.set))
return Lambda(t, cf)
def _characteristic_function(self, t):
return None
def characteristic_function(self, t, **kwargs):
""" Characteristic function """
if len(kwargs) == 0:
cf = self._characteristic_function(t)
if cf is not None:
return cf
return self.compute_characteristic_function(**kwargs)(t)
@cacheit
def compute_moment_generating_function(self, **kwargs):
""" Compute the moment generating function from the PDF
Returns a Lambda
"""
x, t = symbols('x, t', real=True, cls=Dummy)
pdf = self.pdf(x)
mgf = integrate(exp(t * x) * pdf, (x, self.set))
return Lambda(t, mgf)
def _moment_generating_function(self, t):
return None
def moment_generating_function(self, t, **kwargs):
""" Moment generating function """
if not kwargs:
mgf = self._moment_generating_function(t)
if mgf is not None:
return mgf
return self.compute_moment_generating_function(**kwargs)(t)
def expectation(self, expr, var, evaluate=True, **kwargs):
""" Expectation of expression over distribution """
if evaluate:
try:
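# If expr is polynomial in var, try the moment generating function:
# E[var**k] is k! times the t**k coefficient of the MGF's Taylor
# expansion at t = 0; fall back to direct integration otherwise.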
p = poly(expr, var)
t = Dummy('t', real=True)
mgf = self._moment_generating_function(t)
if mgf is None:
return integrate(expr * self.pdf(var), (var, self.set), **kwargs)
deg = p.degree()
taylor = poly(series(mgf, t, 0, deg + 1).removeO(), t)
result = 0
for k in range(deg+1):
result += p.coeff_monomial(var ** k) * taylor.coeff_monomial(t ** k) * factorial(k)
return result
except PolynomialError:
return integrate(expr * self.pdf(var), (var, self.set), **kwargs)
else:
return Integral(expr * self.pdf(var), (var, self.set), **kwargs)
@cacheit
def compute_quantile(self, **kwargs):
""" Compute the Quantile from the PDF
Returns a Lambda
"""
x, p = symbols('x, p', real=True, cls=Dummy)
left_bound = self.set.start
pdf = self.pdf(x)
cdf = integrate(pdf, (x, left_bound, x), **kwargs)
quantile = solveset(cdf - p, x, self.set)
return Lambda(p, Piecewise((quantile, (p >= 0) & (p <= 1) ), (nan, True)))
def _quantile(self, x):
return None
def quantile(self, x, **kwargs):
""" Cumulative density function """
if len(kwargs) == 0:
quantile = self._quantile(x)
if quantile is not None:
return quantile
return self.compute_quantile(**kwargs)(x)
class ContinuousPSpace(PSpace):
""" Continuous Probability Space
Represents the likelihood of an event space defined over a continuum.
Represented with a ContinuousDomain and a PDF (Lambda-Like)
"""
is_Continuous = True
is_real = True
@property
def pdf(self):
return self.density(*self.domain.symbols)
def compute_expectation(self, expr, rvs=None, evaluate=False, **kwargs):
if rvs is None:
rvs = self.values
else:
rvs = frozenset(rvs)
expr = expr.xreplace({rv: rv.symbol for rv in rvs})
domain_symbols = frozenset(rv.symbol for rv in rvs)
return self.domain.compute_expectation(self.pdf * expr,
domain_symbols, **kwargs)
def compute_density(self, expr, **kwargs):
# Common case Density(X) where X in self.values
if expr in self.values:
# Marginalize all other random symbols out of the density
randomsymbols = tuple(set(self.values) - frozenset([expr]))
symbols = tuple(rs.symbol for rs in randomsymbols)
pdf = self.domain.compute_expectation(self.pdf, symbols, **kwargs)
return Lambda(expr.symbol, pdf)
z = Dummy('z', real=True)
return Lambda(z, self.compute_expectation(DiracDelta(expr - z), **kwargs))
@cacheit
def compute_cdf(self, expr, **kwargs):
if not self.domain.set.is_Interval:
raise ValueError(
"CDF not well defined on multivariate expressions")
d = self.compute_density(expr, **kwargs)
x, z = symbols('x, z', real=True, cls=Dummy)
left_bound = self.domain.set.start
# CDF is integral of PDF from left bound to z
cdf = integrate(d(x), (x, left_bound, z), **kwargs)
# Ensure that the CDF is zero to the left of left_bound
cdf = Piecewise((cdf, z >= left_bound), (0, True))
return Lambda(z, cdf)
@cacheit
def compute_characteristic_function(self, expr, **kwargs):
if not self.domain.set.is_Interval:
raise NotImplementedError("Characteristic function of multivariate expressions not implemented")
d = self.compute_density(expr, **kwargs)
x, t = symbols('x, t', real=True, cls=Dummy)
cf = integrate(exp(I*t*x)*d(x), (x, -oo, oo), **kwargs)
return Lambda(t, cf)
@cacheit
def compute_moment_generating_function(self, expr, **kwargs):
if not self.domain.set.is_Interval:
raise NotImplementedError("Moment generating function of multivariate expressions not implemented")
d = self.compute_density(expr, **kwargs)
x, t = symbols('x, t', real=True, cls=Dummy)
mgf = integrate(exp(t * x) * d(x), (x, -oo, oo), **kwargs)
return Lambda(t, mgf)
@cacheit
def compute_quantile(self, expr, **kwargs):
if not self.domain.set.is_Interval:
raise ValueError(
"Quantile not well defined on multivariate expressions")
d = self.compute_cdf(expr, **kwargs)
x = Dummy('x', real=True)
p = Dummy('p', positive=True)
quantile = solveset(d(x) - p, x, self.set)
return Lambda(p, quantile)
def probability(self, condition, **kwargs):
z = Dummy('z', real=True)
cond_inv = False
if isinstance(condition, Ne):
condition = Eq(condition.args[0], condition.args[1])
cond_inv = True
# Univariate case can be handled by where
try:
domain = self.where(condition)
rv = [rv for rv in self.values if rv.symbol == domain.symbol][0]
# Integrate out all other random variables
pdf = self.compute_density(rv, **kwargs)
# return S.Zero if `domain` is empty set
if domain.set is S.EmptySet or isinstance(domain.set, FiniteSet):
return S.Zero if not cond_inv else S.One
if isinstance(domain.set, Union):
return sum(
Integral(pdf(z), (z, subset), **kwargs) for subset in
domain.set.args if isinstance(subset, Interval))
# Integrate out the last variable over the special domain
return Integral(pdf(z), (z, domain.set), **kwargs)
# Other cases can be turned into univariate case
# by computing a density handled by density computation
except NotImplementedError:
from sympy.stats.rv import density
expr = condition.lhs - condition.rhs
if not is_random(expr):
dens = self.density
comp = condition.rhs
else:
dens = density(expr, **kwargs)
comp = 0
if not isinstance(dens, ContinuousDistribution):
from sympy.stats.crv_types import ContinuousDistributionHandmade
dens = ContinuousDistributionHandmade(dens, set=self.domain.set)
# Turn problem into univariate case
space = SingleContinuousPSpace(z, dens)
result = space.probability(condition.__class__(space.value, comp))
return result if not cond_inv else S.One - result
def where(self, condition):
rvs = frozenset(random_symbols(condition))
if not (len(rvs) == 1 and rvs.issubset(self.values)):
raise NotImplementedError(
"Multiple continuous random variables not supported")
rv = tuple(rvs)[0]
interval = reduce_rational_inequalities_wrap(condition, rv)
interval = interval.intersect(self.domain.set)
return SingleContinuousDomain(rv.symbol, interval)
def conditional_space(self, condition, normalize=True, **kwargs):
condition = condition.xreplace({rv: rv.symbol for rv in self.values})
domain = ConditionalContinuousDomain(self.domain, condition)
if normalize:
# create a clone of the variable to
# make sure that variables in nested integrals are different
# from the variables outside the integral
# this makes sure that they are evaluated separately
# and in the correct order
replacement = {rv: Dummy(str(rv)) for rv in self.symbols}
norm = domain.compute_expectation(self.pdf, **kwargs)
pdf = self.pdf / norm.xreplace(replacement)
# XXX: Converting set to tuple. The order matters to Lambda though
# so we shouldn't be starting with a set here...
density = Lambda(tuple(domain.symbols), pdf)
return ContinuousPSpace(domain, density)
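# Illustrative sketch (assumption: the public sympy.stats helpers Normal, given and
# density behave as documented): conditioning renormalises the density by the
# probability of the condition, which is what conditional_space() above does for a
# ContinuousPSpace.
def _example_conditional_density():
    from sympy import Symbol
    from sympy.stats import Normal, density, given
    X = Normal('X', 0, 1)
    Y = given(X, X > 0)        # restrict to the positive half-line
    z = Symbol('z', positive=True)
    return density(Y)(z)       # the standard normal pdf rescaled by 1/P(X > 0) = 2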
class SingleContinuousPSpace(ContinuousPSpace, SinglePSpace):
"""
A continuous probability space over a single univariate variable
These consist of a Symbol and a SingleContinuousDistribution
This class is normally accessed through the various random variable
functions, Normal, Exponential, Uniform, etc....
"""
@property
def set(self):
return self.distribution.set
@property
def domain(self):
return SingleContinuousDomain(sympify(self.symbol), self.set)
def sample(self, size=(), library='scipy'):
"""
Internal sample method
Returns dictionary mapping RandomSymbol to realization value.
"""
return {self.value: self.distribution.sample(size, library=library)}
def compute_expectation(self, expr, rvs=None, evaluate=False, **kwargs):
rvs = rvs or (self.value,)
if self.value not in rvs:
return expr
expr = _sympify(expr)
expr = expr.xreplace({rv: rv.symbol for rv in rvs})
x = self.value.symbol
try:
return self.distribution.expectation(expr, x, evaluate=evaluate, **kwargs)
except PoleError:
return Integral(expr * self.pdf, (x, self.set), **kwargs)
def compute_cdf(self, expr, **kwargs):
if expr == self.value:
z = Dummy("z", real=True)
return Lambda(z, self.distribution.cdf(z, **kwargs))
else:
return ContinuousPSpace.compute_cdf(self, expr, **kwargs)
def compute_characteristic_function(self, expr, **kwargs):
if expr == self.value:
t = Dummy("t", real=True)
return Lambda(t, self.distribution.characteristic_function(t, **kwargs))
else:
return ContinuousPSpace.compute_characteristic_function(self, expr, **kwargs)
def compute_moment_generating_function(self, expr, **kwargs):
if expr == self.value:
t = Dummy("t", real=True)
return Lambda(t, self.distribution.moment_generating_function(t, **kwargs))
else:
return ContinuousPSpace.compute_moment_generating_function(self, expr, **kwargs)
def compute_density(self, expr, **kwargs):
# https://en.wikipedia.org/wiki/Random_variable#Functions_of_random_variables
if expr == self.value:
return self.density
y = Dummy('y', real=True)
gs = solveset(expr - y, self.value, S.Reals)
if isinstance(gs, Intersection) and S.Reals in gs.args:
gs = list(gs.args[1])
if not gs:
raise ValueError("Can not solve %s for %s"%(expr, self.value))
fx = self.compute_density(self.value)
fy = sum(fx(g) * abs(g.diff(y)) for g in gs)
return Lambda(y, fy)
def compute_quantile(self, expr, **kwargs):
if expr == self.value:
p = Dummy("p", real=True)
return Lambda(p, self.distribution.quantile(p, **kwargs))
else:
return ContinuousPSpace.compute_quantile(self, expr, **kwargs)
def _reduce_inequalities(conditions, var, **kwargs):
try:
return reduce_rational_inequalities(conditions, var, **kwargs)
except PolynomialError:
raise ValueError("Reduction of condition failed %s\n" % conditions[0])
def reduce_rational_inequalities_wrap(condition, var):
if condition.is_Relational:
return _reduce_inequalities([[condition]], var, relational=False)
if isinstance(condition, Or):
return Union(*[_reduce_inequalities([[arg]], var, relational=False)
for arg in condition.args])
if isinstance(condition, And):
intervals = [_reduce_inequalities([[arg]], var, relational=False)
for arg in condition.args]
I = intervals[0]
for i in intervals:
I = I.intersect(i)
return I
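# Illustrative sketch (hypothetical helper, not part of the original module): the
# wrapper above maps relational conditions on a single variable to sets, so that
# `where` can intersect them with the distribution's support.
def _example_condition_to_set():
    from sympy import Symbol
    from sympy.solvers.inequalities import reduce_rational_inequalities
    x = Symbol('x', real=True)
    # a strict inequality reduces to an open interval
    return reduce_rational_inequalities([[x > 1]], x, relational=False)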
|
83472ee206ac646412cae109c880a39296e5fb4293fea8195be98dfdbbb14919 | from sympy import Basic, Sum, Dummy, Lambda, Integral
from sympy.stats.rv import (NamedArgsMixin, random_symbols, _symbol_converter,
PSpace, RandomSymbol, is_random)
from sympy.stats.crv import ContinuousDistribution, SingleContinuousPSpace
from sympy.stats.drv import DiscreteDistribution, SingleDiscretePSpace
from sympy.stats.frv import SingleFiniteDistribution, SingleFinitePSpace
from sympy.stats.crv_types import ContinuousDistributionHandmade
from sympy.stats.drv_types import DiscreteDistributionHandmade
from sympy.stats.frv_types import FiniteDistributionHandmade
class CompoundPSpace(PSpace):
"""
A temporary Probability Space for the Compound Distribution. After
Marginalization, this returns the corresponding Probability Space of the
parent distribution.
"""
def __new__(cls, s, distribution):
s = _symbol_converter(s)
if isinstance(distribution, ContinuousDistribution):
return SingleContinuousPSpace(s, distribution)
if isinstance(distribution, DiscreteDistribution):
return SingleDiscretePSpace(s, distribution)
if isinstance(distribution, SingleFiniteDistribution):
return SingleFinitePSpace(s, distribution)
if not isinstance(distribution, CompoundDistribution):
raise ValueError("%s should be an isinstance of "
"CompoundDistribution"%(distribution))
return Basic.__new__(cls, s, distribution)
@property
def value(self):
return RandomSymbol(self.symbol, self)
@property
def symbol(self):
return self.args[0]
@property
def is_Continuous(self):
return self.distribution.is_Continuous
@property
def is_Finite(self):
return self.distribution.is_Finite
@property
def is_Discrete(self):
return self.distribution.is_Discrete
@property
def distribution(self):
return self.args[1]
@property
def pdf(self):
return self.distribution.pdf(self.symbol)
@property
def set(self):
return self.distribution.set
@property
def domain(self):
return self._get_newpspace().domain
def _get_newpspace(self, evaluate=False):
x = Dummy('x')
parent_dist = self.distribution.args[0]
func = Lambda(x, self.distribution.pdf(x, evaluate))
new_pspace = self._transform_pspace(self.symbol, parent_dist, func)
if new_pspace is not None:
return new_pspace
message = ("Compound Distribution for %s is not implemeted yet" % str(parent_dist))
raise NotImplementedError(message)
def _transform_pspace(self, sym, dist, pdf):
"""
This function returns the new pspace of the distribution using handmade
Distributions and their corresponding pspace.
"""
pdf = Lambda(sym, pdf(sym))
_set = dist.set
if isinstance(dist, ContinuousDistribution):
return SingleContinuousPSpace(sym, ContinuousDistributionHandmade(pdf, _set))
elif isinstance(dist, DiscreteDistribution):
return SingleDiscretePSpace(sym, DiscreteDistributionHandmade(pdf, _set))
elif isinstance(dist, SingleFiniteDistribution):
dens = {k: pdf(k) for k in _set}
return SingleFinitePSpace(sym, FiniteDistributionHandmade(dens))
def compute_density(self, expr, *, compound_evaluate=True, **kwargs):
new_pspace = self._get_newpspace(compound_evaluate)
expr = expr.subs({self.value: new_pspace.value})
return new_pspace.compute_density(expr, **kwargs)
def compute_cdf(self, expr, *, compound_evaluate=True, **kwargs):
new_pspace = self._get_newpspace(compound_evaluate)
expr = expr.subs({self.value: new_pspace.value})
return new_pspace.compute_cdf(expr, **kwargs)
def compute_expectation(self, expr, rvs=None, evaluate=False, **kwargs):
new_pspace = self._get_newpspace(evaluate)
expr = expr.subs({self.value: new_pspace.value})
if rvs:
rvs = rvs.subs({self.value: new_pspace.value})
if isinstance(new_pspace, SingleFinitePSpace):
return new_pspace.compute_expectation(expr, rvs, **kwargs)
return new_pspace.compute_expectation(expr, rvs, evaluate, **kwargs)
def probability(self, condition, *, compound_evaluate=True, **kwargs):
new_pspace = self._get_newpspace(compound_evaluate)
condition = condition.subs({self.value: new_pspace.value})
return new_pspace.probability(condition)
def conditional_space(self, condition, *, compound_evaluate=True, **kwargs):
new_pspace = self._get_newpspace(compound_evaluate)
condition = condition.subs({self.value: new_pspace.value})
return new_pspace.conditional_space(condition)
class CompoundDistribution(Basic, NamedArgsMixin):
"""
Class for Compound Distributions.
Parameters
==========
dist : Distribution
Distribution must contain a random parameter
Examples
========
>>> from sympy.stats.compound_rv import CompoundDistribution
>>> from sympy.stats.crv_types import NormalDistribution
>>> from sympy.stats import Normal
>>> from sympy.abc import x
>>> X = Normal('X', 2, 4)
>>> N = NormalDistribution(X, 4)
>>> C = CompoundDistribution(N)
>>> C.set
Interval(-oo, oo)
>>> C.pdf(x, evaluate=True).simplify()
exp(-x**2/64 + x/16 - 1/16)/(8*sqrt(pi))
References
==========
.. [1] https://en.wikipedia.org/wiki/Compound_probability_distribution
"""
def __new__(cls, dist):
if not isinstance(dist, (ContinuousDistribution,
SingleFiniteDistribution, DiscreteDistribution)):
message = "Compound Distribution for %s is not implemeted yet" % str(dist)
raise NotImplementedError(message)
if not cls._compound_check(dist):
return dist
return Basic.__new__(cls, dist)
@property
def set(self):
return self.args[0].set
@property
def is_Continuous(self):
return isinstance(self.args[0], ContinuousDistribution)
@property
def is_Finite(self):
return isinstance(self.args[0], SingleFiniteDistribution)
@property
def is_Discrete(self):
return isinstance(self.args[0], DiscreteDistribution)
def pdf(self, x, evaluate=False):
dist = self.args[0]
randoms = [rv for rv in dist.args if is_random(rv)]
if isinstance(dist, SingleFiniteDistribution):
y = Dummy('y', integer=True, negative=False)
expr = dist.pmf(y)
else:
y = Dummy('y')
expr = dist.pdf(y)
for rv in randoms:
expr = self._marginalise(expr, rv, evaluate)
return Lambda(y, expr)(x)
def _marginalise(self, expr, rv, evaluate):
if isinstance(rv.pspace.distribution, SingleFiniteDistribution):
rv_dens = rv.pspace.distribution.pmf(rv)
else:
rv_dens = rv.pspace.distribution.pdf(rv)
rv_dom = rv.pspace.domain.set
if rv.pspace.is_Discrete or rv.pspace.is_Finite:
expr = Sum(expr*rv_dens, (rv, rv_dom._inf,
rv_dom._sup))
else:
expr = Integral(expr*rv_dens, (rv, rv_dom._inf,
rv_dom._sup))
if evaluate:
return expr.doit()
return expr
@classmethod
def _compound_check(cls, dist):
"""
Checks if the given distribution contains random parameters.
"""
randoms = []
for arg in dist.args:
randoms.extend(random_symbols(arg))
if len(randoms) == 0:
return False
return True
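# Illustrative sketch (assumption: the public sympy.stats constructors accept a
# random parameter, as in the CompoundDistribution docstring above): a normal
# variable whose mean is itself normal is routed through CompoundPSpace and
# marginalised back to a plain density.
def _example_compound_marginalisation():
    from sympy.abc import x
    from sympy.stats import Normal, density
    mu = Normal('mu', 2, 4)            # random mean
    X = Normal('X', mu, 4)             # compound normal
    return density(X)(x).simplify()    # an ordinary normal density in x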
|
8c20011283a91f32ae0eedbc864de362e825d8582d0fe6667c951b9bcb9ef67c | import itertools
from sympy import (Expr, Add, Mul, S, Integral, Eq, Sum, Symbol,
expand as _expand, Not)
from sympy.core.compatibility import default_sort_key
from sympy.core.parameters import global_parameters
from sympy.core.sympify import _sympify
from sympy.core.relational import Relational
from sympy.logic.boolalg import Boolean
from sympy.stats import variance, covariance
from sympy.stats.rv import (RandomSymbol, pspace, dependent,
given, sampling_E, RandomIndexedSymbol, is_random,
PSpace, sampling_P, random_symbols)
__all__ = ['Probability', 'Expectation', 'Variance', 'Covariance']
@is_random.register(Expr)
def _(x):
atoms = x.free_symbols
if len(atoms) == 1 and next(iter(atoms)) == x:
return False
return any([is_random(i) for i in atoms])
@is_random.register(RandomSymbol) # type: ignore
def _(x):
return True
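# Illustrative sketch (hypothetical helper, not part of the original module): the
# dispatching above makes is_random work on composite expressions, not just bare
# RandomSymbols.
def _example_is_random():
    from sympy.abc import a
    from sympy.stats import Normal
    X = Normal('X', 0, 1)
    return is_random(a), is_random(a*X + 1)    # (False, True)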
class Probability(Expr):
"""
Symbolic expression for the probability.
Examples
========
>>> from sympy.stats import Probability, Normal
>>> from sympy import Integral
>>> X = Normal("X", 0, 1)
>>> prob = Probability(X > 1)
>>> prob
Probability(X > 1)
Integral representation:
>>> prob.rewrite(Integral)
Integral(sqrt(2)*exp(-_z**2/2)/(2*sqrt(pi)), (_z, 1, oo))
Evaluation of the integral:
>>> prob.evaluate_integral()
sqrt(2)*(-sqrt(2)*sqrt(pi)*erf(sqrt(2)/2) + sqrt(2)*sqrt(pi))/(4*sqrt(pi))
"""
def __new__(cls, prob, condition=None, **kwargs):
prob = _sympify(prob)
if condition is None:
obj = Expr.__new__(cls, prob)
else:
condition = _sympify(condition)
obj = Expr.__new__(cls, prob, condition)
obj._condition = condition
return obj
def doit(self, **hints):
condition = self.args[0]
given_condition = self._condition
numsamples = hints.get('numsamples', False)
for_rewrite = not hints.get('for_rewrite', False)
if isinstance(condition, Not):
return S.One - self.func(condition.args[0], given_condition,
evaluate=for_rewrite).doit(**hints)
if condition.has(RandomIndexedSymbol):
return pspace(condition).probability(condition, given_condition,
evaluate=for_rewrite)
if isinstance(given_condition, RandomSymbol):
condrv = random_symbols(condition)
if len(condrv) == 1 and condrv[0] == given_condition:
from sympy.stats.frv_types import BernoulliDistribution
return BernoulliDistribution(self.func(condition).doit(**hints), 0, 1)
if any([dependent(rv, given_condition) for rv in condrv]):
return Probability(condition, given_condition)
else:
return Probability(condition).doit()
if given_condition is not None and \
not isinstance(given_condition, (Relational, Boolean)):
raise ValueError("%s is not a relational or combination of relationals"
% (given_condition))
if given_condition == False or condition is S.false:
return S.Zero
if not isinstance(condition, (Relational, Boolean)):
raise ValueError("%s is not a relational or combination of relationals"
% (condition))
if condition is S.true:
return S.One
if numsamples:
return sampling_P(condition, given_condition, numsamples=numsamples)
if given_condition is not None: # If there is a condition
# Recompute on new conditional expr
return Probability(given(condition, given_condition)).doit()
# Otherwise pass work off to the ProbabilitySpace
if pspace(condition) == PSpace():
return Probability(condition, given_condition)
result = pspace(condition).probability(condition)
if hasattr(result, 'doit') and for_rewrite:
return result.doit()
else:
return result
def _eval_rewrite_as_Integral(self, arg, condition=None, **kwargs):
return self.func(arg, condition=condition).doit(for_rewrite=True)
_eval_rewrite_as_Sum = _eval_rewrite_as_Integral
def evaluate_integral(self):
return self.rewrite(Integral).doit()
class Expectation(Expr):
"""
Symbolic expression for the expectation.
Examples
========
>>> from sympy.stats import Expectation, Normal, Probability, Poisson
>>> from sympy import symbols, Integral, Sum
>>> mu = symbols("mu")
>>> sigma = symbols("sigma", positive=True)
>>> X = Normal("X", mu, sigma)
>>> Expectation(X)
Expectation(X)
>>> Expectation(X).evaluate_integral().simplify()
mu
To get the integral expression of the expectation:
>>> Expectation(X).rewrite(Integral)
Integral(sqrt(2)*X*exp(-(X - mu)**2/(2*sigma**2))/(2*sqrt(pi)*sigma), (X, -oo, oo))
The same integral expression, in more abstract terms:
>>> Expectation(X).rewrite(Probability)
Integral(x*Probability(Eq(X, x)), (x, -oo, oo))
To get the Summation expression of the expectation for discrete random variables:
>>> lamda = symbols('lamda', positive=True)
>>> Z = Poisson('Z', lamda)
>>> Expectation(Z).rewrite(Sum)
Sum(Z*lamda**Z*exp(-lamda)/factorial(Z), (Z, 0, oo))
This class is aware of some properties of the expectation:
>>> from sympy.abc import a
>>> Expectation(a*X)
Expectation(a*X)
>>> Y = Normal("Y", 1, 2)
>>> Expectation(X + Y)
Expectation(X + Y)
To expand the ``Expectation`` into its expression, use ``expand()``:
>>> Expectation(X + Y).expand()
Expectation(X) + Expectation(Y)
>>> Expectation(a*X + Y).expand()
a*Expectation(X) + Expectation(Y)
>>> Expectation(a*X + Y)
Expectation(a*X + Y)
>>> Expectation((X + Y)*(X - Y)).expand()
Expectation(X**2) - Expectation(Y**2)
To evaluate the ``Expectation``, use ``doit()``:
>>> Expectation(X + Y).doit()
mu + 1
>>> Expectation(X + Expectation(Y + Expectation(2*X))).doit()
3*mu + 1
To prevent evaluating nested ``Expectation``, use ``doit(deep=False)``
>>> Expectation(X + Expectation(Y)).doit(deep=False)
mu + Expectation(Expectation(Y))
>>> Expectation(X + Expectation(Y + Expectation(2*X))).doit(deep=False)
mu + Expectation(Expectation(Y + Expectation(2*X)))
"""
def __new__(cls, expr, condition=None, **kwargs):
expr = _sympify(expr)
if expr.is_Matrix:
from sympy.stats.symbolic_multivariate_probability import ExpectationMatrix
return ExpectationMatrix(expr, condition)
if condition is None:
if not is_random(expr):
return expr
obj = Expr.__new__(cls, expr)
else:
condition = _sympify(condition)
obj = Expr.__new__(cls, expr, condition)
obj._condition = condition
return obj
def expand(self, **hints):
expr = self.args[0]
condition = self._condition
if not is_random(expr):
return expr
if isinstance(expr, Add):
return Add.fromiter(Expectation(a, condition=condition).expand()
for a in expr.args)
expand_expr = _expand(expr)
if isinstance(expand_expr, Add):
return Add.fromiter(Expectation(a, condition=condition).expand()
for a in expand_expr.args)
elif isinstance(expr, Mul):
rv = []
nonrv = []
for a in expr.args:
if is_random(a):
rv.append(a)
else:
nonrv.append(a)
return Mul.fromiter(nonrv)*Expectation(Mul.fromiter(rv), condition=condition)
return self
def doit(self, **hints):
deep = hints.get('deep', True)
condition = self._condition
expr = self.args[0]
numsamples = hints.get('numsamples', False)
for_rewrite = not hints.get('for_rewrite', False)
if deep:
expr = expr.doit(**hints)
if not is_random(expr) or isinstance(expr, Expectation): # expr isn't random?
return expr
if numsamples: # Computing by monte carlo sampling?
evalf = hints.get('evalf', True)
return sampling_E(expr, condition, numsamples=numsamples, evalf=evalf)
if expr.has(RandomIndexedSymbol):
return pspace(expr).compute_expectation(expr, condition)
# Create new expr and recompute E
if condition is not None: # If there is a condition
return self.func(given(expr, condition)).doit(**hints)
# A few known statements for efficiency
if expr.is_Add: # We know that E is Linear
return Add(*[self.func(arg, condition).doit(**hints)
if not isinstance(arg, Expectation) else self.func(arg, condition)
for arg in expr.args])
if expr.is_Mul:
if expr.atoms(Expectation):
return expr
if pspace(expr) == PSpace():
return self.func(expr)
# Otherwise case is simple, pass work off to the ProbabilitySpace
result = pspace(expr).compute_expectation(expr, evaluate=for_rewrite)
if hasattr(result, 'doit') and for_rewrite:
return result.doit(**hints)
else:
return result
def _eval_rewrite_as_Probability(self, arg, condition=None, **kwargs):
rvs = arg.atoms(RandomSymbol)
if len(rvs) > 1:
raise NotImplementedError()
if len(rvs) == 0:
return arg
rv = rvs.pop()
if rv.pspace is None:
raise ValueError("Probability space not known")
symbol = rv.symbol
if symbol.name[0].isupper():
symbol = Symbol(symbol.name.lower())
else:
symbol = Symbol(symbol.name + "_1")
if rv.pspace.is_Continuous:
return Integral(arg.replace(rv, symbol)*Probability(Eq(rv, symbol), condition), (symbol, rv.pspace.domain.set.inf, rv.pspace.domain.set.sup))
else:
if rv.pspace.is_Finite:
raise NotImplementedError
else:
return Sum(arg.replace(rv, symbol)*Probability(Eq(rv, symbol), condition), (symbol, rv.pspace.domain.set.inf, rv.pspace.domain.set.sup))
def _eval_rewrite_as_Integral(self, arg, condition=None, **kwargs):
return self.func(arg, condition=condition).doit(deep=False, for_rewrite=True)
_eval_rewrite_as_Sum = _eval_rewrite_as_Integral # For discrete this will be Sum
def evaluate_integral(self):
return self.rewrite(Integral).doit()
evaluate_sum = evaluate_integral
class Variance(Expr):
"""
Symbolic expression for the variance.
Examples
========
>>> from sympy import symbols, Integral
>>> from sympy.stats import Normal, Expectation, Variance, Probability
>>> mu = symbols("mu", positive=True)
>>> sigma = symbols("sigma", positive=True)
>>> X = Normal("X", mu, sigma)
>>> Variance(X)
Variance(X)
>>> Variance(X).evaluate_integral()
sigma**2
Integral representation of the underlying calculations:
>>> Variance(X).rewrite(Integral)
Integral(sqrt(2)*(X - Integral(sqrt(2)*X*exp(-(X - mu)**2/(2*sigma**2))/(2*sqrt(pi)*sigma), (X, -oo, oo)))**2*exp(-(X - mu)**2/(2*sigma**2))/(2*sqrt(pi)*sigma), (X, -oo, oo))
Integral representation, without expanding the PDF:
>>> Variance(X).rewrite(Probability)
-Integral(x*Probability(Eq(X, x)), (x, -oo, oo))**2 + Integral(x**2*Probability(Eq(X, x)), (x, -oo, oo))
Rewrite the variance in terms of the expectation
>>> Variance(X).rewrite(Expectation)
-Expectation(X)**2 + Expectation(X**2)
Some transformations based on the properties of the variance may happen:
>>> from sympy.abc import a
>>> Y = Normal("Y", 0, 1)
>>> Variance(a*X)
Variance(a*X)
To expand the variance in its expression, use ``expand()``:
>>> Variance(a*X).expand()
a**2*Variance(X)
>>> Variance(X + Y)
Variance(X + Y)
>>> Variance(X + Y).expand()
2*Covariance(X, Y) + Variance(X) + Variance(Y)
"""
def __new__(cls, arg, condition=None, **kwargs):
arg = _sympify(arg)
if arg.is_Matrix:
from sympy.stats.symbolic_multivariate_probability import VarianceMatrix
return VarianceMatrix(arg, condition)
if condition is None:
obj = Expr.__new__(cls, arg)
else:
condition = _sympify(condition)
obj = Expr.__new__(cls, arg, condition)
obj._condition = condition
return obj
def expand(self, **hints):
arg = self.args[0]
condition = self._condition
if not is_random(arg):
return S.Zero
if isinstance(arg, RandomSymbol):
return self
elif isinstance(arg, Add):
rv = []
for a in arg.args:
if is_random(a):
rv.append(a)
variances = Add(*map(lambda xv: Variance(xv, condition).expand(), rv))
map_to_covar = lambda x: 2*Covariance(*x, condition=condition).expand()
covariances = Add(*map(map_to_covar, itertools.combinations(rv, 2)))
return variances + covariances
elif isinstance(arg, Mul):
nonrv = []
rv = []
for a in arg.args:
if is_random(a):
rv.append(a)
else:
nonrv.append(a**2)
if len(rv) == 0:
return S.Zero
return Mul.fromiter(nonrv)*Variance(Mul.fromiter(rv), condition)
# this expression contains a RandomSymbol somehow:
return self
def _eval_rewrite_as_Expectation(self, arg, condition=None, **kwargs):
e1 = Expectation(arg**2, condition)
e2 = Expectation(arg, condition)**2
return e1 - e2
def _eval_rewrite_as_Probability(self, arg, condition=None, **kwargs):
return self.rewrite(Expectation).rewrite(Probability)
def _eval_rewrite_as_Integral(self, arg, condition=None, **kwargs):
return variance(self.args[0], self._condition, evaluate=False)
_eval_rewrite_as_Sum = _eval_rewrite_as_Integral
def evaluate_integral(self):
return self.rewrite(Integral).doit()
class Covariance(Expr):
"""
Symbolic expression for the covariance.
Examples
========
>>> from sympy.stats import Covariance
>>> from sympy.stats import Normal
>>> X = Normal("X", 3, 2)
>>> Y = Normal("Y", 0, 1)
>>> Z = Normal("Z", 0, 1)
>>> W = Normal("W", 0, 1)
>>> cexpr = Covariance(X, Y)
>>> cexpr
Covariance(X, Y)
Evaluate the covariance, `X` and `Y` are independent,
therefore zero is the result:
>>> cexpr.evaluate_integral()
0
Rewrite the covariance expression in terms of expectations:
>>> from sympy.stats import Expectation
>>> cexpr.rewrite(Expectation)
Expectation(X*Y) - Expectation(X)*Expectation(Y)
In order to expand the argument, use ``expand()``:
>>> from sympy.abc import a, b, c, d
>>> Covariance(a*X + b*Y, c*Z + d*W)
Covariance(a*X + b*Y, c*Z + d*W)
>>> Covariance(a*X + b*Y, c*Z + d*W).expand()
a*c*Covariance(X, Z) + a*d*Covariance(W, X) + b*c*Covariance(Y, Z) + b*d*Covariance(W, Y)
This class is aware of some properties of the covariance:
>>> Covariance(X, X).expand()
Variance(X)
>>> Covariance(a*X, b*Y).expand()
a*b*Covariance(X, Y)
"""
def __new__(cls, arg1, arg2, condition=None, **kwargs):
arg1 = _sympify(arg1)
arg2 = _sympify(arg2)
if arg1.is_Matrix or arg2.is_Matrix:
from sympy.stats.symbolic_multivariate_probability import CrossCovarianceMatrix
return CrossCovarianceMatrix(arg1, arg2, condition)
if kwargs.pop('evaluate', global_parameters.evaluate):
arg1, arg2 = sorted([arg1, arg2], key=default_sort_key)
if condition is None:
obj = Expr.__new__(cls, arg1, arg2)
else:
condition = _sympify(condition)
obj = Expr.__new__(cls, arg1, arg2, condition)
obj._condition = condition
return obj
def expand(self, **hints):
arg1 = self.args[0]
arg2 = self.args[1]
condition = self._condition
if arg1 == arg2:
return Variance(arg1, condition).expand()
if not is_random(arg1):
return S.Zero
if not is_random(arg2):
return S.Zero
arg1, arg2 = sorted([arg1, arg2], key=default_sort_key)
if isinstance(arg1, RandomSymbol) and isinstance(arg2, RandomSymbol):
return Covariance(arg1, arg2, condition)
coeff_rv_list1 = self._expand_single_argument(arg1.expand())
coeff_rv_list2 = self._expand_single_argument(arg2.expand())
addends = [a*b*Covariance(*sorted([r1, r2], key=default_sort_key), condition=condition)
for (a, r1) in coeff_rv_list1 for (b, r2) in coeff_rv_list2]
return Add.fromiter(addends)
@classmethod
def _expand_single_argument(cls, expr):
# return (coefficient, random_symbol) pairs:
if isinstance(expr, RandomSymbol):
return [(S.One, expr)]
elif isinstance(expr, Add):
outval = []
for a in expr.args:
if isinstance(a, Mul):
outval.append(cls._get_mul_nonrv_rv_tuple(a))
elif is_random(a):
outval.append((S.One, a))
return outval
elif isinstance(expr, Mul):
return [cls._get_mul_nonrv_rv_tuple(expr)]
elif is_random(expr):
return [(S.One, expr)]
@classmethod
def _get_mul_nonrv_rv_tuple(cls, m):
rv = []
nonrv = []
for a in m.args:
if is_random(a):
rv.append(a)
else:
nonrv.append(a)
return (Mul.fromiter(nonrv), Mul.fromiter(rv))
def _eval_rewrite_as_Expectation(self, arg1, arg2, condition=None, **kwargs):
e1 = Expectation(arg1*arg2, condition)
e2 = Expectation(arg1, condition)*Expectation(arg2, condition)
return e1 - e2
def _eval_rewrite_as_Probability(self, arg1, arg2, condition=None, **kwargs):
return self.rewrite(Expectation).rewrite(Probability)
def _eval_rewrite_as_Integral(self, arg1, arg2, condition=None, **kwargs):
return covariance(self.args[0], self.args[1], self._condition, evaluate=False)
_eval_rewrite_as_Sum = _eval_rewrite_as_Integral
def evaluate_integral(self):
return self.rewrite(Integral).doit()
class Moment(Expr):
"""
Symbolic class for Moment
Examples
========
>>> from sympy import Symbol, Integral
>>> from sympy.stats import Normal, Expectation, Probability, Moment
>>> mu = Symbol('mu', real=True)
>>> sigma = Symbol('sigma', real=True, positive=True)
>>> X = Normal('X', mu, sigma)
>>> M = Moment(X, 3, 1)
To evaluate the result of Moment use `doit`:
>>> M.doit()
mu**3 - 3*mu**2 + 3*mu*sigma**2 + 3*mu - 3*sigma**2 - 1
Rewrite the Moment expression in terms of Expectation:
>>> M.rewrite(Expectation)
Expectation((X - 1)**3)
Rewrite the Moment expression in terms of Probability:
>>> M.rewrite(Probability)
Integral((x - 1)**3*Probability(Eq(X, x)), (x, -oo, oo))
Rewrite the Moment expression in terms of Integral:
>>> M.rewrite(Integral)
Integral(sqrt(2)*(X - 1)**3*exp(-(X - mu)**2/(2*sigma**2))/(2*sqrt(pi)*sigma), (X, -oo, oo))
"""
def __new__(cls, X, n, c=0, condition=None, **kwargs):
X = _sympify(X)
n = _sympify(n)
c = _sympify(c)
if condition is not None:
condition = _sympify(condition)
return Expr.__new__(cls, X, n, c, condition)
def doit(self, **hints):
if not is_random(self.args[0]):
return self.args[0]
return self.rewrite(Expectation).doit(**hints)
def _eval_rewrite_as_Expectation(self, X, n, c=0, condition=None, **kwargs):
return Expectation((X - c)**n, condition)
def _eval_rewrite_as_Probability(self, X, n, c=0, condition=None, **kwargs):
return self.rewrite(Expectation).rewrite(Probability)
def _eval_rewrite_as_Integral(self, X, n, c=0, condition=None, **kwargs):
return self.rewrite(Expectation).rewrite(Integral)
class CentralMoment(Expr):
"""
Symbolic class Central Moment
Examples
========
>>> from sympy import Symbol, Integral
>>> from sympy.stats import Normal, Expectation, Probability, CentralMoment
>>> mu = Symbol('mu', real=True)
>>> sigma = Symbol('sigma', real=True, positive=True)
>>> X = Normal('X', mu, sigma)
>>> CM = CentralMoment(X, 4)
To evaluate the result of CentralMoment use `doit`:
>>> CM.doit().simplify()
3*sigma**4
Rewrite the CentralMoment expression in terms of Expectation:
>>> CM.rewrite(Expectation)
Expectation((X - Expectation(X))**4)
Rewrite the CentralMoment expression in terms of Probability:
>>> CM.rewrite(Probability)
Integral((x - Integral(x*Probability(True), (x, -oo, oo)))**4*Probability(Eq(X, x)), (x, -oo, oo))
Rewrite the CentralMoment expression in terms of Integral:
>>> CM.rewrite(Integral)
Integral(sqrt(2)*(X - Integral(sqrt(2)*X*exp(-(X - mu)**2/(2*sigma**2))/(2*sqrt(pi)*sigma), (X, -oo, oo)))**4*exp(-(X - mu)**2/(2*sigma**2))/(2*sqrt(pi)*sigma), (X, -oo, oo))
"""
def __new__(cls, X, n, condition=None, **kwargs):
X = _sympify(X)
n = _sympify(n)
if condition is not None:
condition = _sympify(condition)
return Expr.__new__(cls, X, n, condition)
def doit(self, **hints):
if not is_random(self.args[0]):
return self.args[0]
return self.rewrite(Expectation).doit(**hints)
def _eval_rewrite_as_Expectation(self, X, n, condition=None, **kwargs):
mu = Expectation(X, condition, **kwargs)
return Moment(X, n, mu, condition, **kwargs).rewrite(Expectation)
def _eval_rewrite_as_Probability(self, X, n, condition=None, **kwargs):
return self.rewrite(Expectation).rewrite(Probability)
def _eval_rewrite_as_Integral(self, X, n, condition=None, **kwargs):
return self.rewrite(Expectation).rewrite(Integral)
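# Illustrative sketch (hypothetical helper, not part of the original module): the
# second central moment agrees with the variance, which gives a quick consistency
# check of the Moment/CentralMoment/Variance classes defined above.
def _example_second_central_moment():
    from sympy import Symbol, simplify
    from sympy.stats import Normal
    mu = Symbol('mu', real=True)
    sigma = Symbol('sigma', positive=True)
    X = Normal('X', mu, sigma)
    # both terms are expected to reduce to sigma**2, so the difference is 0
    return simplify(CentralMoment(X, 2).doit() - Variance(X).rewrite(Integral).doit())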
|
c9e18e2774e69ca828c9df209d651ec4aff2aaabb0eafbc599719921fd59f224 | from sympy import (Basic, exp, pi, Lambda, Trace, S, MatrixSymbol, Integral,
gamma, Product, Dummy, Sum, Abs, IndexedBase, I)
from sympy.core.sympify import _sympify
from sympy.stats.rv import _symbol_converter, Density, RandomMatrixSymbol, is_random
from sympy.stats.joint_rv_types import JointDistributionHandmade
from sympy.stats.random_matrix import RandomMatrixPSpace
from sympy.tensor.array import ArrayComprehension
__all__ = [
'CircularEnsemble',
'CircularUnitaryEnsemble',
'CircularOrthogonalEnsemble',
'CircularSymplecticEnsemble',
'GaussianEnsemble',
'GaussianUnitaryEnsemble',
'GaussianOrthogonalEnsemble',
'GaussianSymplecticEnsemble',
'joint_eigen_distribution',
'JointEigenDistribution',
'level_spacing_distribution'
]
@is_random.register(RandomMatrixSymbol)
def _(x):
return True
class RandomMatrixEnsembleModel(Basic):
"""
Base class for random matrix ensembles.
It acts as an umbrella and contains
the methods common to all the ensembles
defined in sympy.stats.random_matrix_models.
"""
def __new__(cls, sym, dim=None):
sym, dim = _symbol_converter(sym), _sympify(dim)
if dim.is_integer == False:
raise ValueError("Dimension of the random matrices must be "
"integers, received %s instead."%(dim))
return Basic.__new__(cls, sym, dim)
symbol = property(lambda self: self.args[0])
dimension = property(lambda self: self.args[1])
def density(self, expr):
return Density(expr)
def __call__(self, expr):
return self.density(expr)
class GaussianEnsembleModel(RandomMatrixEnsembleModel):
"""
Abstract class for Gaussian ensembles.
Contains the properties common to all the
Gaussian ensembles.
References
==========
.. [1] https://en.wikipedia.org/wiki/Random_matrix#Gaussian_ensembles
.. [2] https://arxiv.org/pdf/1712.07903.pdf
"""
def _compute_normalization_constant(self, beta, n):
"""
Helper function for computing normalization
constant for joint probability density of eigen
values of Gaussian ensembles.
References
==========
.. [1] https://en.wikipedia.org/wiki/Selberg_integral#Mehta's_integral
"""
n = S(n)
prod_term = lambda j: gamma(1 + beta*S(j)/2)/gamma(S.One + beta/S(2))
j = Dummy('j', integer=True, positive=True)
term1 = Product(prod_term(j), (j, 1, n)).doit()
term2 = (2/(beta*n))**(beta*n*(n - 1)/4 + n/2)
term3 = (2*pi)**(n/2)
return term1 * term2 * term3
def _compute_joint_eigen_distribution(self, beta):
"""
Helper function for computing the joint
probability distribution of eigen values
of the random matrix.
"""
n = self.dimension
Zbn = self._compute_normalization_constant(beta, n)
l = IndexedBase('l')
i = Dummy('i', integer=True, positive=True)
j = Dummy('j', integer=True, positive=True)
k = Dummy('k', integer=True, positive=True)
term1 = exp((-S(n)/2) * Sum(l[k]**2, (k, 1, n)).doit())
sub_term = Lambda(i, Product(Abs(l[j] - l[i])**beta, (j, i + 1, n)))
term2 = Product(sub_term(i).doit(), (i, 1, n - 1)).doit()
syms = ArrayComprehension(l[k], (k, 1, n)).doit()
return Lambda(tuple(syms), (term1 * term2)/Zbn)
class GaussianUnitaryEnsembleModel(GaussianEnsembleModel):
@property
def normalization_constant(self):
n = self.dimension
return 2**(S(n)/2) * pi**(S(n**2)/2)
def density(self, expr):
n, ZGUE = self.dimension, self.normalization_constant
h_pspace = RandomMatrixPSpace('P', model=self)
H = RandomMatrixSymbol('H', n, n, pspace=h_pspace)
return Lambda(H, exp(-S(n)/2 * Trace(H**2))/ZGUE)(expr)
def joint_eigen_distribution(self):
return self._compute_joint_eigen_distribution(S(2))
def level_spacing_distribution(self):
s = Dummy('s')
f = (32/pi**2)*(s**2)*exp((-4/pi)*s**2)
return Lambda(s, f)
class GaussianOrthogonalEnsembleModel(GaussianEnsembleModel):
@property
def normalization_constant(self):
n = self.dimension
_H = MatrixSymbol('_H', n, n)
return Integral(exp(-S(n)/4 * Trace(_H**2)))
def density(self, expr):
n, ZGOE = self.dimension, self.normalization_constant
h_pspace = RandomMatrixPSpace('P', model=self)
H = RandomMatrixSymbol('H', n, n, pspace=h_pspace)
return Lambda(H, exp(-S(n)/4 * Trace(H**2))/ZGOE)(expr)
def joint_eigen_distribution(self):
return self._compute_joint_eigen_distribution(S.One)
def level_spacing_distribution(self):
s = Dummy('s')
f = (pi/2)*s*exp((-pi/4)*s**2)
return Lambda(s, f)
class GaussianSymplecticEnsembleModel(GaussianEnsembleModel):
@property
def normalization_constant(self):
n = self.dimension
_H = MatrixSymbol('_H', n, n)
return Integral(exp(-S(n) * Trace(_H**2)))
def density(self, expr):
n, ZGSE = self.dimension, self.normalization_constant
h_pspace = RandomMatrixPSpace('P', model=self)
H = RandomMatrixSymbol('H', n, n, pspace=h_pspace)
return Lambda(H, exp(-S(n) * Trace(H**2))/ZGSE)(expr)
def joint_eigen_distribution(self):
return self._compute_joint_eigen_distribution(S(4))
def level_spacing_distribution(self):
s = Dummy('s')
f = ((S(2)**18)/((S(3)**6)*(pi**3)))*(s**4)*exp((-64/(9*pi))*s**2)
return Lambda(s, f)
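# Illustrative sketch (hypothetical helper, not part of the original module): the
# level spacing densities above are Wigner-surmise-type densities normalised on
# (0, oo); for the GUE spacing density this can be checked directly with integrate.
def _example_gue_spacing_normalisation():
    from sympy import Symbol, exp, integrate, oo, pi
    s = Symbol('s', positive=True)
    f = (32/pi**2)*(s**2)*exp((-4/pi)*s**2)    # same density as level_spacing_distribution
    return integrate(f, (s, 0, oo))            # expected to evaluate to 1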
def GaussianEnsemble(sym, dim):
sym, dim = _symbol_converter(sym), _sympify(dim)
model = GaussianEnsembleModel(sym, dim)
rmp = RandomMatrixPSpace(sym, model=model)
return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
def GaussianUnitaryEnsemble(sym, dim):
"""
Represents Gaussian Unitary Ensembles.
Examples
========
>>> from sympy.stats import GaussianUnitaryEnsemble as GUE, density
>>> from sympy import MatrixSymbol
>>> G = GUE('U', 2)
>>> X = MatrixSymbol('X', 2, 2)
>>> density(G)(X)
exp(-Trace(X**2))/(2*pi**2)
"""
sym, dim = _symbol_converter(sym), _sympify(dim)
model = GaussianUnitaryEnsembleModel(sym, dim)
rmp = RandomMatrixPSpace(sym, model=model)
return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
def GaussianOrthogonalEnsemble(sym, dim):
"""
Represents Gaussian Orthogonal Ensembles.
Examples
========
>>> from sympy.stats import GaussianOrthogonalEnsemble as GOE, density
>>> from sympy import MatrixSymbol
>>> G = GOE('U', 2)
>>> X = MatrixSymbol('X', 2, 2)
>>> density(G)(X)
exp(-Trace(X**2)/2)/Integral(exp(-Trace(_H**2)/2), _H)
"""
sym, dim = _symbol_converter(sym), _sympify(dim)
model = GaussianOrthogonalEnsembleModel(sym, dim)
rmp = RandomMatrixPSpace(sym, model=model)
return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
def GaussianSymplecticEnsemble(sym, dim):
"""
Represents Gaussian Symplectic Ensembles.
Examples
========
>>> from sympy.stats import GaussianSymplecticEnsemble as GSE, density
>>> from sympy import MatrixSymbol
>>> G = GSE('U', 2)
>>> X = MatrixSymbol('X', 2, 2)
>>> density(G)(X)
exp(-2*Trace(X**2))/Integral(exp(-2*Trace(_H**2)), _H)
"""
sym, dim = _symbol_converter(sym), _sympify(dim)
model = GaussianSymplecticEnsembleModel(sym, dim)
rmp = RandomMatrixPSpace(sym, model=model)
return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
class CircularEnsembleModel(RandomMatrixEnsembleModel):
"""
Abstract class for Circular ensembles.
Contains the properties and methods
common to all the circular ensembles.
References
==========
.. [1] https://en.wikipedia.org/wiki/Circular_ensemble
"""
def density(self, expr):
# TODO: Add support for Lie groups (as extensions of sympy.diffgeom)
# and define measures on them
raise NotImplementedError("Support for Haar measure hasn't been "
"implemented yet, therefore the density of "
"%s cannot be computed."%(self))
def _compute_joint_eigen_distribution(self, beta):
"""
Helper function to compute the joint distribution of phases
of the complex eigen values of matrices belonging to any
circular ensembles.
"""
n = self.dimension
Zbn = ((2*pi)**n)*(gamma(beta*n/2 + 1)/S(gamma(beta/2 + 1))**n)
t = IndexedBase('t')
i, j, k = (Dummy('i', integer=True), Dummy('j', integer=True),
Dummy('k', integer=True))
syms = ArrayComprehension(t[i], (i, 1, n)).doit()
f = Product(Product(Abs(exp(I*t[k]) - exp(I*t[j]))**beta, (j, k + 1, n)).doit(),
(k, 1, n - 1)).doit()
return Lambda(tuple(syms), f/Zbn)
class CircularUnitaryEnsembleModel(CircularEnsembleModel):
def joint_eigen_distribution(self):
return self._compute_joint_eigen_distribution(S(2))
class CircularOrthogonalEnsembleModel(CircularEnsembleModel):
def joint_eigen_distribution(self):
return self._compute_joint_eigen_distribution(S.One)
class CircularSymplecticEnsembleModel(CircularEnsembleModel):
def joint_eigen_distribution(self):
return self._compute_joint_eigen_distribution(S(4))
def CircularEnsemble(sym, dim):
sym, dim = _symbol_converter(sym), _sympify(dim)
model = CircularEnsembleModel(sym, dim)
rmp = RandomMatrixPSpace(sym, model=model)
return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
def CircularUnitaryEnsemble(sym, dim):
"""
Represents Circular Unitary Ensembles.
Examples
========
>>> from sympy.stats import CircularUnitaryEnsemble as CUE
>>> from sympy.stats import joint_eigen_distribution
>>> C = CUE('U', 1)
>>> joint_eigen_distribution(C)
Lambda(t[1], Product(Abs(exp(I*t[_j]) - exp(I*t[_k]))**2, (_j, _k + 1, 1), (_k, 1, 0))/(2*pi))
Note
====
As can be seen in the example above, the density of CircularUnitaryEnsemble
is not evaluated because the exact definition is based on the Haar measure
of the unitary group, which is not unique.
"""
sym, dim = _symbol_converter(sym), _sympify(dim)
model = CircularUnitaryEnsembleModel(sym, dim)
rmp = RandomMatrixPSpace(sym, model=model)
return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
def CircularOrthogonalEnsemble(sym, dim):
"""
Represents Circular Orthogonal Ensembles.
Examples
========
>>> from sympy.stats import CircularOrthogonalEnsemble as COE
>>> from sympy.stats import joint_eigen_distribution
>>> C = COE('O', 1)
>>> joint_eigen_distribution(C)
Lambda(t[1], Product(Abs(exp(I*t[_j]) - exp(I*t[_k])), (_j, _k + 1, 1), (_k, 1, 0))/(2*pi))
Note
====
As can be seen in the example above, the density of CircularOrthogonalEnsemble
is not evaluated because the exact definition is based on the Haar measure
of the unitary group, which is not unique.
"""
sym, dim = _symbol_converter(sym), _sympify(dim)
model = CircularOrthogonalEnsembleModel(sym, dim)
rmp = RandomMatrixPSpace(sym, model=model)
return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
def CircularSymplecticEnsemble(sym, dim):
"""
Represents Circular Symplectic Ensembles.
Examples
========
>>> from sympy.stats import CircularSymplecticEnsemble as CSE
>>> from sympy.stats import joint_eigen_distribution
>>> C = CSE('S', 1)
>>> joint_eigen_distribution(C)
Lambda(t[1], Product(Abs(exp(I*t[_j]) - exp(I*t[_k]))**4, (_j, _k + 1, 1), (_k, 1, 0))/(2*pi))
Note
====
As can be seen in the example above, the density of CircularSymplecticEnsemble
is not evaluated because the exact definition is based on the Haar measure
of the unitary group, which is not unique.
"""
sym, dim = _symbol_converter(sym), _sympify(dim)
model = CircularSymplecticEnsembleModel(sym, dim)
rmp = RandomMatrixPSpace(sym, model=model)
return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
def joint_eigen_distribution(mat):
"""
For obtaining joint probability distribution
of eigen values of random matrix.
Parameters
==========
mat: RandomMatrixSymbol
The matrix symbol whose eigen values are to be considered.
Returns
=======
Lambda
Examples
========
>>> from sympy.stats import GaussianUnitaryEnsemble as GUE
>>> from sympy.stats import joint_eigen_distribution
>>> U = GUE('U', 2)
>>> joint_eigen_distribution(U)
Lambda((l[1], l[2]), exp(-l[1]**2 - l[2]**2)*Product(Abs(l[_i] - l[_j])**2, (_j, _i + 1, 2), (_i, 1, 1))/pi)
"""
if not isinstance(mat, RandomMatrixSymbol):
raise ValueError("%s is not of type, RandomMatrixSymbol."%(mat))
return mat.pspace.model.joint_eigen_distribution()
def JointEigenDistribution(mat):
"""
Creates joint distribution of eigen values of matrices with random
expressions.
Parameters
==========
mat: Matrix
The matrix under consideration
Returns
=======
JointDistributionHandmade
Examples
========
>>> from sympy.stats import Normal, JointEigenDistribution
>>> from sympy import Matrix
>>> A = [[Normal('A00', 0, 1), Normal('A01', 0, 1)],
... [Normal('A10', 0, 1), Normal('A11', 0, 1)]]
>>> JointEigenDistribution(Matrix(A))
JointDistributionHandmade(-sqrt(A00**2 - 2*A00*A11 + 4*A01*A10 + A11**2)/2
+ A00/2 + A11/2, sqrt(A00**2 - 2*A00*A11 + 4*A01*A10 + A11**2)/2 + A00/2 + A11/2)
"""
eigenvals = mat.eigenvals(multiple=True)
if any(not is_random(eigenval) for eigenval in set(eigenvals)):
raise ValueError("Eigen values don't have any random expression, "
"joint distribution cannot be generated.")
return JointDistributionHandmade(*eigenvals)
def level_spacing_distribution(mat):
"""
For obtaining distribution of level spacings.
Parameters
==========
mat: RandomMatrixSymbol
The random matrix symbol whose eigen values are
to be considered for finding the level spacings.
Returns
=======
Lambda
Examples
========
>>> from sympy.stats import GaussianUnitaryEnsemble as GUE
>>> from sympy.stats import level_spacing_distribution
>>> U = GUE('U', 2)
>>> level_spacing_distribution(U)
Lambda(_s, 32*_s**2*exp(-4*_s**2/pi)/pi**2)
References
==========
.. [1] https://en.wikipedia.org/wiki/Random_matrix#Distribution_of_level_spacings
"""
return mat.pspace.model.level_spacing_distribution()
|
06c176ccf7c83f0d5dfc72b2be45b9b18c5cb1de057e1229f9cd948b4792175a | """
Finite Discrete Random Variables Module
See Also
========
sympy.stats.frv_types
sympy.stats.rv
sympy.stats.crv
"""
from itertools import product
from sympy import (Basic, Symbol, cacheit, sympify, Mul,
And, Or, Piecewise, Eq, Lambda, exp, I, Dummy, nan,
Sum, Intersection, S)
from sympy.core.containers import Dict
from sympy.core.logic import Logic
from sympy.core.relational import Relational
from sympy.core.sympify import _sympify
from sympy.sets.sets import FiniteSet
from sympy.stats.rv import (RandomDomain, ProductDomain, ConditionalDomain,
PSpace, IndependentProductPSpace, SinglePSpace, random_symbols,
sumsets, rv_subs, NamedArgsMixin, Density)
from sympy.external import import_module
class FiniteDensity(dict):
"""
A domain with Finite Density.
"""
def __call__(self, item):
"""
Return the probability associated with ``item``.
If ``item`` is one of the possible values of this density, return its
probability. Otherwise, return 0.
"""
item = sympify(item)
if item in self:
return self[item]
else:
return 0
@property
def dict(self):
"""
Return item as dictionary.
"""
return dict(self)
class FiniteDomain(RandomDomain):
"""
A domain with discrete finite support
Represented using a FiniteSet.
"""
is_Finite = True
@property
def symbols(self):
return FiniteSet(sym for sym, val in self.elements)
@property
def elements(self):
return self.args[0]
@property
def dict(self):
return FiniteSet(*[Dict(dict(el)) for el in self.elements])
def __contains__(self, other):
return other in self.elements
def __iter__(self):
return self.elements.__iter__()
def as_boolean(self):
return Or(*[And(*[Eq(sym, val) for sym, val in item]) for item in self])
class SingleFiniteDomain(FiniteDomain):
"""
A FiniteDomain over a single symbol/set
Example: The possibilities of a *single* die roll.
"""
def __new__(cls, symbol, set):
if not isinstance(set, FiniteSet) and \
not isinstance(set, Intersection):
set = FiniteSet(*set)
return Basic.__new__(cls, symbol, set)
@property
def symbol(self):
return self.args[0]
@property
def symbols(self):
return FiniteSet(self.symbol)
@property
def set(self):
return self.args[1]
@property
def elements(self):
return FiniteSet(*[frozenset(((self.symbol, elem), )) for elem in self.set])
def __iter__(self):
return (frozenset(((self.symbol, elem),)) for elem in self.set)
def __contains__(self, other):
sym, val = tuple(other)[0]
return sym == self.symbol and val in self.set
class ProductFiniteDomain(ProductDomain, FiniteDomain):
"""
A Finite domain consisting of several other FiniteDomains
Example: The possibilities of the rolls of three independent dice
"""
def __iter__(self):
proditer = product(*self.domains)
return (sumsets(items) for items in proditer)
@property
def elements(self):
return FiniteSet(*self)
class ConditionalFiniteDomain(ConditionalDomain, ProductFiniteDomain):
"""
A FiniteDomain that has been restricted by a condition
Example: The possibilities of a die roll under the condition that the
roll is even.
"""
def __new__(cls, domain, condition):
"""
Create a new instance of ConditionalFiniteDomain class
"""
if condition is True:
return domain
cond = rv_subs(condition)
return Basic.__new__(cls, domain, cond)
def _test(self, elem):
"""
Test whether ``elem`` satisfies the condition. If the substituted
condition evaluates to a boolean, return it. If it is an equality,
return whether its left-hand side equals its right-hand side.
Otherwise, raise a ValueError exception.
"""
val = self.condition.xreplace(dict(elem))
if val in [True, False]:
return val
elif val.is_Equality:
return val.lhs == val.rhs
raise ValueError("Undecidable if %s" % str(val))
def __contains__(self, other):
return other in self.fulldomain and self._test(other)
def __iter__(self):
return (elem for elem in self.fulldomain if self._test(elem))
@property
def set(self):
if isinstance(self.fulldomain, SingleFiniteDomain):
return FiniteSet(*[elem for elem in self.fulldomain.set
if frozenset(((self.fulldomain.symbol, elem),)) in self])
else:
raise NotImplementedError(
"Not implemented on multi-dimensional conditional domain")
def as_boolean(self):
return FiniteDomain.as_boolean(self)
class SingleFiniteDistribution(Basic, NamedArgsMixin):
def __new__(cls, *args):
args = list(map(sympify, args))
return Basic.__new__(cls, *args)
@staticmethod
def check(*args):
pass
@property # type: ignore
@cacheit
def dict(self):
if self.is_symbolic:
return Density(self)
return {k: self.pmf(k) for k in self.set}
def pmf(self, *args): # to be overridden by specific distribution
raise NotImplementedError()
@property
def set(self): # to be overridden by specific distribution
raise NotImplementedError()
values = property(lambda self: self.dict.values)
items = property(lambda self: self.dict.items)
is_symbolic = property(lambda self: False)
__iter__ = property(lambda self: self.dict.__iter__)
__getitem__ = property(lambda self: self.dict.__getitem__)
def __call__(self, *args):
return self.pmf(*args)
def __contains__(self, other):
return other in self.set
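# Illustrative sketch (assumption: the public FiniteRV constructor wraps a
# value -> probability mapping in a SingleFiniteDistribution): the dict/pmf
# accessors above then behave like plain dictionary lookups.
def _example_handmade_finite_distribution():
    from sympy import Rational
    from sympy.stats import FiniteRV, P, density
    X = FiniteRV('X', {1: Rational(1, 2), 2: Rational(1, 4), 3: Rational(1, 4)})
    return dict(density(X)), P(X > 1)    # ({1: 1/2, 2: 1/4, 3: 1/4}, 1/2)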
#=============================================
#========= Probability Space ===============
#=============================================
class FinitePSpace(PSpace):
"""
A Finite Probability Space
Represents the probabilities of a finite number of events.
"""
is_Finite = True
def __new__(cls, domain, density):
density = {sympify(key): sympify(val)
for key, val in density.items()}
public_density = Dict(density)
obj = PSpace.__new__(cls, domain, public_density)
obj._density = density
return obj
def prob_of(self, elem):
elem = sympify(elem)
density = self._density
if isinstance(list(density.keys())[0], FiniteSet):
return density.get(elem, S.Zero)
return density.get(tuple(elem)[0][1], S.Zero)
def where(self, condition):
assert all(r.symbol in self.symbols for r in random_symbols(condition))
return ConditionalFiniteDomain(self.domain, condition)
def compute_density(self, expr):
expr = rv_subs(expr, self.values)
d = FiniteDensity()
for elem in self.domain:
val = expr.xreplace(dict(elem))
prob = self.prob_of(elem)
d[val] = d.get(val, S.Zero) + prob
return d
@cacheit
def compute_cdf(self, expr):
d = self.compute_density(expr)
cum_prob = S.Zero
cdf = []
for key in sorted(d):
prob = d[key]
cum_prob += prob
cdf.append((key, cum_prob))
return dict(cdf)
@cacheit
def sorted_cdf(self, expr, python_float=False):
cdf = self.compute_cdf(expr)
items = list(cdf.items())
sorted_items = sorted(items, key=lambda val_cumprob: val_cumprob[1])
if python_float:
sorted_items = [(v, float(cum_prob))
for v, cum_prob in sorted_items]
return sorted_items
@cacheit
def compute_characteristic_function(self, expr):
d = self.compute_density(expr)
t = Dummy('t', real=True)
return Lambda(t, sum(exp(I*k*t)*v for k,v in d.items()))
@cacheit
def compute_moment_generating_function(self, expr):
d = self.compute_density(expr)
t = Dummy('t', real=True)
return Lambda(t, sum(exp(k*t)*v for k,v in d.items()))
def compute_expectation(self, expr, rvs=None, **kwargs):
rvs = rvs or self.values
expr = rv_subs(expr, rvs)
probs = [self.prob_of(elem) for elem in self.domain]
if isinstance(expr, (Logic, Relational)):
parse_domain = [tuple(elem)[0][1] for elem in self.domain]
bools = [expr.xreplace(dict(elem)) for elem in self.domain]
else:
parse_domain = [expr.xreplace(dict(elem)) for elem in self.domain]
bools = [True for elem in self.domain]
return sum([Piecewise((prob * elem, blv), (S.Zero, True))
for prob, elem, blv in zip(probs, parse_domain, bools)])
def compute_quantile(self, expr):
cdf = self.compute_cdf(expr)
p = Dummy('p', real=True)
set = ((nan, (p < 0) | (p > 1)),)
for key, value in cdf.items():
set = set + ((key, p <= value), )
return Lambda(p, Piecewise(*set))
def probability(self, condition):
cond_symbols = frozenset(rs.symbol for rs in random_symbols(condition))
cond = rv_subs(condition)
if not cond_symbols.issubset(self.symbols):
raise ValueError("Cannot compare foreign random symbols, %s"
%(str(cond_symbols - self.symbols)))
if isinstance(condition, Relational) and \
(not cond.free_symbols.issubset(self.domain.free_symbols)):
rv = condition.lhs if isinstance(condition.rhs, Symbol) else condition.rhs
return sum(Piecewise(
(self.prob_of(elem), condition.subs(rv, list(elem)[0][1])),
(S.Zero, True)) for elem in self.domain)
return sympify(sum(self.prob_of(elem) for elem in self.where(condition)))
def conditional_space(self, condition):
domain = self.where(condition)
prob = self.probability(condition)
density = {key: val / prob
for key, val in self._density.items() if domain._test(key)}
return FinitePSpace(domain, density)
def sample(self, size=(), library='scipy'):
"""
Internal sample method
Returns dictionary mapping RandomSymbol to realization value.
"""
libraries = ['scipy', 'numpy', 'pymc3']
if library not in libraries:
raise NotImplementedError("Sampling from %s is not supported yet."
% str(library))
if not import_module(library):
raise ValueError("Failed to import %s" % library)
samps = _get_sample_class_frv[library](self.distribution, size)
if samps is not None:
return {self.value: samps}
raise NotImplementedError(
"Sampling for %s is not currently implemented from %s"
% (self.__class__.__name__, library)
)
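# Illustrative sketch (assumption: the public Die and P helpers): probability()
# and conditional_space() above are what ultimately answer conditional queries
# such as this one.
def _example_conditional_die():
    from sympy import Eq
    from sympy.stats import Die, P
    D = Die('D', 6)
    return P(Eq(D, 6), D > 4)    # 1/2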
class SampleFiniteScipy:
"""Returns the sample from scipy of the given distribution"""
def __new__(cls, dist, size):
return cls._sample_scipy(dist, size)
@classmethod
def _sample_scipy(cls, dist, size):
"""Sample from SciPy."""
# scipy can handle with custom distributions
from scipy.stats import rv_discrete
density_ = dist.dict
x, y = [], []
for k, v in density_.items():
x.append(int(k))
y.append(float(v))
scipy_rv = rv_discrete(name='scipy_rv', values=(x, y))
return scipy_rv.rvs(size=size)
class SampleFiniteNumpy:
"""Returns the sample from numpy of the given distribution"""
def __new__(cls, dist, size):
return cls._sample_numpy(dist, size)
@classmethod
def _sample_numpy(cls, dist, size):
"""Sample from NumPy."""
import numpy
numpy_rv_map = {
'BinomialDistribution': lambda dist, size: numpy.random.binomial(n=int(dist.n),
p=float(dist.p), size=size)
}
dist_list = numpy_rv_map.keys()
if dist.__class__.__name__ not in dist_list:
return None
return numpy_rv_map[dist.__class__.__name__](dist, size)
class SampleFinitePymc:
"""Returns the sample from pymc3 of the given distribution"""
def __new__(cls, dist, size):
return cls._sample_pymc3(dist, size)
@classmethod
def _sample_pymc3(cls, dist, size):
"""Sample from PyMC3."""
import pymc3
pymc3_rv_map = {
'BernoulliDistribution': lambda dist: pymc3.Bernoulli('X', p=float(dist.p)),
'BinomialDistribution': lambda dist: pymc3.Binomial('X', n=int(dist.n),
p=float(dist.p))
}
dist_list = pymc3_rv_map.keys()
if dist.__class__.__name__ not in dist_list:
return None
with pymc3.Model():
pymc3_rv_map[dist.__class__.__name__](dist)
return pymc3.sample(size, chains=1, progressbar=False)[:]['X']
_get_sample_class_frv = {
'scipy': SampleFiniteScipy,
'pymc3': SampleFinitePymc,
'numpy': SampleFiniteNumpy
}
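# Illustrative sketch (assumptions: SciPy is installed and the internal
# pspace.sample() signature defined above is used directly): sampling a finite
# random variable dispatches through _get_sample_class_frv to SampleFiniteScipy.
def _example_sample_die(n=5):
    from sympy.stats import Die
    D = Die('D', 6)
    # returns {D: array([...])} with n realisations of the die roll
    return D.pspace.sample(size=(n,), library='scipy')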
class SingleFinitePSpace(SinglePSpace, FinitePSpace):
"""
A single finite probability space
Represents the probabilities of a set of random events that can be
attributed to a single variable/symbol.
This class is implemented by many of the standard FiniteRV types such as
Die, Bernoulli, Coin, etc.
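Examples
========
A sketch of how such a space arises through the public
constructors (``Die`` and ``density`` are the usual ``sympy.stats``
entry points; the exact internal class is an implementation detail):
>>> from sympy.stats import Die, density
>>> D = Die('D', 6)
>>> density(D).dict
{1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6}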
"""
@property
def domain(self):
return SingleFiniteDomain(self.symbol, self.distribution.set)
@property
def _is_symbolic(self):
"""
Helper property to check whether the distribution
of the random variable has a symbolic
dimension.
"""
return self.distribution.is_symbolic
@property
def distribution(self):
return self.args[1]
def pmf(self, expr):
return self.distribution.pmf(expr)
@property # type: ignore
@cacheit
def _density(self):
return {FiniteSet((self.symbol, val)): prob
for val, prob in self.distribution.dict.items()}
@cacheit
def compute_characteristic_function(self, expr):
if self._is_symbolic:
d = self.compute_density(expr)
t = Dummy('t', real=True)
ki = Dummy('ki')
return Lambda(t, Sum(d(ki)*exp(I*ki*t), (ki, self.args[1].low, self.args[1].high)))
expr = rv_subs(expr, self.values)
return FinitePSpace(self.domain, self.distribution).compute_characteristic_function(expr)
@cacheit
def compute_moment_generating_function(self, expr):
if self._is_symbolic:
d = self.compute_density(expr)
t = Dummy('t', real=True)
ki = Dummy('ki')
return Lambda(t, Sum(d(ki)*exp(ki*t), (ki, self.args[1].low, self.args[1].high)))
expr = rv_subs(expr, self.values)
return FinitePSpace(self.domain, self.distribution).compute_moment_generating_function(expr)
def compute_quantile(self, expr):
if self._is_symbolic:
raise NotImplementedError("Computing quantile for random variables "
"with symbolic dimension because the bounds of searching the required "
"value is undetermined.")
expr = rv_subs(expr, self.values)
return FinitePSpace(self.domain, self.distribution).compute_quantile(expr)
def compute_density(self, expr):
if self._is_symbolic:
rv = list(random_symbols(expr))[0]
k = Dummy('k', integer=True)
cond = True if not isinstance(expr, (Relational, Logic)) \
else expr.subs(rv, k)
return Lambda(k,
Piecewise((self.pmf(k), And(k >= self.args[1].low,
k <= self.args[1].high, cond)), (S.Zero, True)))
expr = rv_subs(expr, self.values)
return FinitePSpace(self.domain, self.distribution).compute_density(expr)
def compute_cdf(self, expr):
if self._is_symbolic:
d = self.compute_density(expr)
k = Dummy('k')
ki = Dummy('ki')
return Lambda(k, Sum(d(ki), (ki, self.args[1].low, k)))
expr = rv_subs(expr, self.values)
return FinitePSpace(self.domain, self.distribution).compute_cdf(expr)
def compute_expectation(self, expr, rvs=None, **kwargs):
if self._is_symbolic:
rv = random_symbols(expr)[0]
k = Dummy('k', integer=True)
expr = expr.subs(rv, k)
cond = True if not isinstance(expr, (Relational, Logic)) \
else expr
func = self.pmf(k) * k if cond != True else self.pmf(k) * expr
return Sum(Piecewise((func, cond), (S.Zero, True)),
(k, self.distribution.low, self.distribution.high)).doit()
expr = _sympify(expr)
expr = rv_subs(expr, rvs)
return FinitePSpace(self.domain, self.distribution).compute_expectation(expr, rvs, **kwargs)
def probability(self, condition):
if self._is_symbolic:
#TODO: Implement the mechanism for handling queries for symbolic sized distributions.
raise NotImplementedError("Currently, probability queries are not "
"supported for random variables with symbolic sized distributions.")
condition = rv_subs(condition)
return FinitePSpace(self.domain, self.distribution).probability(condition)
def conditional_space(self, condition):
"""
This method defers the computation to the ``probability`` method
because building a conditional space for random variables with
symbolic dimensions is currently not possible.
"""
if self._is_symbolic:
# fall through: ``self.probability`` below raises NotImplementedError
# for distributions with symbolic dimension
pass
domain = self.where(condition)
prob = self.probability(condition)
density = {key: val / prob
for key, val in self._density.items() if domain._test(key)}
return FinitePSpace(domain, density)
class ProductFinitePSpace(IndependentProductPSpace, FinitePSpace):
"""
A collection of several independent finite probability spaces
"""
@property
def domain(self):
return ProductFiniteDomain(*[space.domain for space in self.spaces])
@property # type: ignore
@cacheit
def _density(self):
proditer = product(*[iter(space._density.items())
for space in self.spaces])
d = {}
for items in proditer:
elems, probs = list(zip(*items))
elem = sumsets(elems)
prob = Mul(*probs)
d[elem] = d.get(elem, S.Zero) + prob
return Dict(d)
@property # type: ignore
@cacheit
def density(self):
return Dict(self._density)
def probability(self, condition):
return FinitePSpace.probability(self, condition)
def compute_density(self, expr):
return FinitePSpace.compute_density(self, expr)
|
7e6a197e0deb9c9f325e29a7ee46107dcff7a3760a051d0aec207cdba04111fa | from sympy import Basic
from sympy.stats.joint_rv import ProductPSpace
from sympy.stats.rv import ProductDomain, _symbol_converter
class StochasticPSpace(ProductPSpace):
"""
Represents probability space of stochastic processes
and their random variables. Contains mechanics to do
computations for queries of stochastic processes.
Initialized by a symbol, the specific process, and an optional
distribution when the random indexed symbols of the process
follow a specific distribution; for example, in a Bernoulli
process each random indexed symbol follows a Bernoulli
distribution. For processes with memory, this parameter should
not be passed.
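Examples
========
A rough usage sketch (assuming ``BernoulliProcess`` from
``sympy.stats``; the space is normally created behind the scenes
and queries are delegated to the process, so the output shown
here is indicative only):
>>> from sympy import Rational, Eq
>>> from sympy.stats import BernoulliProcess, P
>>> B = BernoulliProcess('B', p=Rational(1, 4))
>>> P(Eq(B[1], 1)) # doctest: +SKIP
1/4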
"""
def __new__(cls, sym, process, distribution=None):
sym = _symbol_converter(sym)
from sympy.stats.stochastic_process_types import StochasticProcess
if not isinstance(process, StochasticProcess):
raise TypeError("`process` must be an instance of StochasticProcess.")
return Basic.__new__(cls, sym, process, distribution)
@property
def process(self):
"""
The associated stochastic process.
"""
return self.args[1]
@property
def domain(self):
return ProductDomain(self.process.index_set,
self.process.state_space)
@property
def symbol(self):
return self.args[0]
@property
def distribution(self):
return self.args[2]
def probability(self, condition, given_condition=None, evaluate=True, **kwargs):
"""
Transfers the task of handling queries to the specific stochastic
process because every process has its own logic for handling such
queries.
"""
return self.process.probability(condition, given_condition, evaluate, **kwargs)
def compute_expectation(self, expr, condition=None, evaluate=True, **kwargs):
"""
Transfers the task of handling queries to the specific stochastic
process because every process has its own logic for handling such
queries.
"""
return self.process.expectation(expr, condition, evaluate, **kwargs)
|
f06411db8a8e5e375294eaaa3908318a126056525e5a0881a1245a7e56f61d8a | from sympy.combinatorics import Permutation
from sympy.combinatorics.util import _distribute_gens_by_base
rmul = Permutation.rmul
def _cmp_perm_lists(first, second):
"""
Compare two lists of permutations as sets.
Explanation
===========
This is used for testing purposes. Since the array form of a
permutation is currently a list, Permutation is not hashable
and cannot be put into a set.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.testutil import _cmp_perm_lists
>>> a = Permutation([0, 2, 3, 4, 1])
>>> b = Permutation([1, 2, 0, 4, 3])
>>> c = Permutation([3, 4, 0, 1, 2])
>>> ls1 = [a, b, c]
>>> ls2 = [b, c, a]
>>> _cmp_perm_lists(ls1, ls2)
True
"""
return {tuple(a) for a in first} == \
{tuple(a) for a in second}
def _naive_list_centralizer(self, other, af=False):
"""
Return a list of elements for the centralizer of a subgroup/set/element.
Explanation
===========
This is a brute force implementation that goes over all elements of the
group and checks for membership in the centralizer. It is used to
test ``.centralizer()`` from ``sympy.combinatorics.perm_groups``.
Examples
========
>>> from sympy.combinatorics.testutil import _naive_list_centralizer
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(4)
>>> _naive_list_centralizer(D, D)
[Permutation([0, 1, 2, 3]), Permutation([2, 3, 0, 1])]
See Also
========
sympy.combinatorics.perm_groups.centralizer
"""
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.combinatorics.permutations import _af_commutes_with
if hasattr(other, 'generators'):
elements = list(self.generate_dimino(af=True))
gens = [x._array_form for x in other.generators]
commutes_with_gens = lambda x: all(_af_commutes_with(x, gen) for gen in gens)
centralizer_list = []
if not af:
for element in elements:
if commutes_with_gens(element):
centralizer_list.append(Permutation._af_new(element))
else:
for element in elements:
if commutes_with_gens(element):
centralizer_list.append(element)
return centralizer_list
elif hasattr(other, '__getitem__'):
return _naive_list_centralizer(self, PermutationGroup(other), af)
elif hasattr(other, 'array_form'):
return _naive_list_centralizer(self, PermutationGroup([other]), af)
def _verify_bsgs(group, base, gens):
"""
Verify the correctness of a base and strong generating set.
Explanation
===========
This is a naive implementation using the definition of a base and a strong
generating set relative to it. There are other procedures for
verifying a base and strong generating set, but this one will
serve for more robust testing.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> A = AlternatingGroup(4)
>>> A.schreier_sims()
>>> _verify_bsgs(A, A.base, A.strong_gens)
True
See Also
========
sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims
"""
from sympy.combinatorics.perm_groups import PermutationGroup
strong_gens_distr = _distribute_gens_by_base(base, gens)
current_stabilizer = group
for i in range(len(base)):
candidate = PermutationGroup(strong_gens_distr[i])
if current_stabilizer.order() != candidate.order():
return False
current_stabilizer = current_stabilizer.stabilizer(base[i])
if current_stabilizer.order() != 1:
return False
return True
def _verify_centralizer(group, arg, centr=None):
"""
Verify the centralizer of a group/set/element inside another group.
This is used for testing ``.centralizer()`` from
``sympy.combinatorics.perm_groups``
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup)
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.testutil import _verify_centralizer
>>> S = SymmetricGroup(5)
>>> A = AlternatingGroup(5)
>>> centr = PermutationGroup([Permutation([0, 1, 2, 3, 4])])
>>> _verify_centralizer(S, A, centr)
True
See Also
========
_naive_list_centralizer,
sympy.combinatorics.perm_groups.PermutationGroup.centralizer,
_cmp_perm_lists
"""
if centr is None:
centr = group.centralizer(arg)
centr_list = list(centr.generate_dimino(af=True))
centr_list_naive = _naive_list_centralizer(group, arg, af=True)
return _cmp_perm_lists(centr_list, centr_list_naive)
def _verify_normal_closure(group, arg, closure=None):
"""
Verify the normal closure of a subgroup/subset/element in a group.
This is used to test
sympy.combinatorics.perm_groups.PermutationGroup.normal_closure
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup)
>>> from sympy.combinatorics.testutil import _verify_normal_closure
>>> S = SymmetricGroup(3)
>>> A = AlternatingGroup(3)
>>> _verify_normal_closure(S, A, closure=A)
True
See Also
========
sympy.combinatorics.perm_groups.PermutationGroup.normal_closure
"""
from sympy.combinatorics.perm_groups import PermutationGroup
if closure is None:
closure = group.normal_closure(arg)
conjugates = set()
if hasattr(arg, 'generators'):
subgr_gens = arg.generators
elif hasattr(arg, '__getitem__'):
subgr_gens = arg
elif hasattr(arg, 'array_form'):
subgr_gens = [arg]
for el in group.generate_dimino():
for gen in subgr_gens:
conjugates.add(gen ^ el)
naive_closure = PermutationGroup(list(conjugates))
return closure.is_subgroup(naive_closure)
def canonicalize_naive(g, dummies, sym, *v):
"""
Canonicalize tensor formed by tensors of the different types.
Explanation
===========
sym_i symmetry under exchange of two component tensors of type `i`
None no symmetry
0 commuting
1 anticommuting
Parameters
==========
g : Permutation representing the tensor.
dummies : List of dummy indices.
sym : Symmetry of the metric.
v : A list of (base_i, gens_i, n_i, sym_i) for tensors of type `i`.
base_i, gens_i BSGS for tensors of this type
n_i number of tensors of type `i`
Returns
=======
Returns 0 if the tensor is zero, else returns the array form of
the permutation representing the canonical form of the tensor.
Examples
========
>>> from sympy.combinatorics.testutil import canonicalize_naive
>>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs
>>> from sympy.combinatorics import Permutation
>>> g = Permutation([1, 3, 2, 0, 4, 5])
>>> base2, gens2 = get_symmetric_group_sgs(2)
>>> canonicalize_naive(g, [2, 3], 0, (base2, gens2, 2, 0))
[0, 2, 1, 3, 4, 5]
"""
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.combinatorics.tensor_can import gens_products, dummy_sgs
from sympy.combinatorics.permutations import Permutation, _af_rmul
v1 = []
for i in range(len(v)):
base_i, gens_i, n_i, sym_i = v[i]
v1.append((base_i, gens_i, [[]]*n_i, sym_i))
size, sbase, sgens = gens_products(*v1)
dgens = dummy_sgs(dummies, sym, size-2)
if isinstance(sym, int):
num_types = 1
dummies = [dummies]
sym = [sym]
else:
num_types = len(sym)
dgens = []
for i in range(num_types):
dgens.extend(dummy_sgs(dummies[i], sym[i], size - 2))
S = PermutationGroup(sgens)
D = PermutationGroup([Permutation(x) for x in dgens])
dlist = list(D.generate(af=True))
g = g.array_form
st = set()
for s in S.generate(af=True):
h = _af_rmul(g, s)
for d in dlist:
q = tuple(_af_rmul(d, h))
st.add(q)
a = list(st)
a.sort()
prev = (0,)*size
for h in a:
if h[:-2] == prev[:-2]:
if h[-1] != prev[-1]:
return 0
prev = h
return list(a[0])
def graph_certificate(gr):
"""
Return a certificate for the graph
Parameters
==========
gr : adjacency list
Explanation
===========
The graph is assumed to be unoriented and without
external lines.
Associate to each vertex of the graph a symmetric tensor with
number of indices equal to the degree of the vertex; indices
are contracted when they correspond to the same line of the graph.
The canonical form of the tensor gives a certificate for the graph.
This is not an efficient algorithm to get the certificate of a graph.
Examples
========
>>> from sympy.combinatorics.testutil import graph_certificate
>>> gr1 = {0:[1, 2, 3, 5], 1:[0, 2, 4], 2:[0, 1, 3, 4], 3:[0, 2, 4], 4:[1, 2, 3, 5], 5:[0, 4]}
>>> gr2 = {0:[1, 5], 1:[0, 2, 3, 4], 2:[1, 3, 5], 3:[1, 2, 4, 5], 4:[1, 3, 5], 5:[0, 2, 3, 4]}
>>> c1 = graph_certificate(gr1)
>>> c2 = graph_certificate(gr2)
>>> c1
[0, 2, 4, 6, 1, 8, 10, 12, 3, 14, 16, 18, 5, 9, 15, 7, 11, 17, 13, 19, 20, 21]
>>> c1 == c2
True
"""
from sympy.combinatorics.permutations import _af_invert
from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, canonicalize
items = list(gr.items())
items.sort(key=lambda x: len(x[1]), reverse=True)
pvert = [x[0] for x in items]
pvert = _af_invert(pvert)
# the indices of the tensor are twice the number of lines of the graph
num_indices = 0
for v, neigh in items:
num_indices += len(neigh)
# associate to each vertex its indices; for each line
# between two vertices assign the
# even index to the vertex which comes first in items,
# the odd index to the other vertex
vertices = [[] for i in items]
i = 0
for v, neigh in items:
for v2 in neigh:
if pvert[v] < pvert[v2]:
vertices[pvert[v]].append(i)
vertices[pvert[v2]].append(i+1)
i += 2
g = []
for v in vertices:
g.extend(v)
assert len(g) == num_indices
g += [num_indices, num_indices + 1]
size = num_indices + 2
assert sorted(g) == list(range(size))
g = Permutation(g)
vlen = [0]*(len(vertices[0])+1)
for neigh in vertices:
vlen[len(neigh)] += 1
v = []
for i in range(len(vlen)):
n = vlen[i]
if n:
base, gens = get_symmetric_group_sgs(i)
v.append((base, gens, n, 0))
v.reverse()
dummies = list(range(num_indices))
can = canonicalize(g, dummies, 0, *v)
return can
|
3d1a58d440d65b2b096de9751356033803b160020f51a13da8e5b54795a73acb | from random import randrange, choice
from math import log
from sympy.ntheory import primefactors
from sympy import multiplicity, factorint, Symbol
from sympy.combinatorics import Permutation
from sympy.combinatorics.permutations import (_af_commutes_with, _af_invert,
_af_rmul, _af_rmuln, _af_pow, Cycle)
from sympy.combinatorics.util import (_check_cycles_alt_sym,
_distribute_gens_by_base, _orbits_transversals_from_bsgs,
_handle_precomputed_bsgs, _base_ordering, _strong_gens_from_distr,
_strip, _strip_af)
from sympy.core import Basic
from sympy.functions.combinatorial.factorials import factorial
from sympy.ntheory import sieve
from sympy.utilities.iterables import has_variety, is_sequence, uniq
from sympy.testing.randtest import _randrange
from itertools import islice
from sympy.core.sympify import _sympify
rmul = Permutation.rmul_with_af
_af_new = Permutation._af_new
class PermutationGroup(Basic):
"""The class defining a Permutation group.
Explanation
===========
PermutationGroup([p1, p2, ..., pn]) returns the permutation group
generated by the list of permutations. This group can be supplied
to Polyhedron if one desires to decorate the elements to which the
indices of the permutation refer.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.polyhedron import Polyhedron
>>> from sympy.combinatorics.perm_groups import PermutationGroup
The permutations corresponding to motion of the front, right and
bottom face of a 2x2 Rubik's cube are defined:
>>> F = Permutation(2, 19, 21, 8)(3, 17, 20, 10)(4, 6, 7, 5)
>>> R = Permutation(1, 5, 21, 14)(3, 7, 23, 12)(8, 10, 11, 9)
>>> D = Permutation(6, 18, 14, 10)(7, 19, 15, 11)(20, 22, 23, 21)
These are passed as permutations to PermutationGroup:
>>> G = PermutationGroup(F, R, D)
>>> G.order()
3674160
The group can be supplied to a Polyhedron in order to track the
objects being moved. An example involving the 2x2 Rubik's cube is
given there, but here is a simple demonstration:
>>> a = Permutation(2, 1)
>>> b = Permutation(1, 0)
>>> G = PermutationGroup(a, b)
>>> P = Polyhedron(list('ABC'), pgroup=G)
>>> P.corners
(A, B, C)
>>> P.rotate(0) # apply permutation 0
>>> P.corners
(A, C, B)
>>> P.reset()
>>> P.corners
(A, B, C)
Or one can make a permutation as a product of selected permutations
and apply them to an iterable directly:
>>> P10 = G.make_perm([0, 1])
>>> P10('ABC')
['C', 'A', 'B']
See Also
========
sympy.combinatorics.polyhedron.Polyhedron,
sympy.combinatorics.permutations.Permutation
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.
"Handbook of Computational Group Theory"
.. [2] Seress, A.
"Permutation Group Algorithms"
.. [3] https://en.wikipedia.org/wiki/Schreier_vector
.. [4] https://en.wikipedia.org/wiki/Nielsen_transformation#Product_replacement_algorithm
.. [5] Frank Celler, Charles R.Leedham-Green, Scott H.Murray,
Alice C.Niemeyer, and E.A.O'Brien. "Generating Random
Elements of a Finite Group"
.. [6] https://en.wikipedia.org/wiki/Block_%28permutation_group_theory%29
.. [7] http://www.algorithmist.com/index.php/Union_Find
.. [8] https://en.wikipedia.org/wiki/Multiply_transitive_group#Multiply_transitive_groups
.. [9] https://en.wikipedia.org/wiki/Center_%28group_theory%29
.. [10] https://en.wikipedia.org/wiki/Centralizer_and_normalizer
.. [11] http://groupprops.subwiki.org/wiki/Derived_subgroup
.. [12] https://en.wikipedia.org/wiki/Nilpotent_group
.. [13] http://www.math.colostate.edu/~hulpke/CGT/cgtnotes.pdf
.. [14] https://www.gap-system.org/Manuals/doc/ref/manual.pdf
"""
is_group = True
def __new__(cls, *args, dups=True, **kwargs):
"""The default constructor. Accepts Cycle and Permutation forms.
Removes duplicates unless ``dups`` keyword is ``False``.
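For instance, duplicate generators are removed by default (a
small sketch):
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation(0, 1)
>>> G = PermutationGroup(a, a)
>>> len(G.generators)
1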
"""
if not args:
args = [Permutation()]
else:
args = list(args[0] if is_sequence(args[0]) else args)
if not args:
args = [Permutation()]
if any(isinstance(a, Cycle) for a in args):
args = [Permutation(a) for a in args]
if has_variety(a.size for a in args):
degree = kwargs.pop('degree', None)
if degree is None:
degree = max(a.size for a in args)
for i in range(len(args)):
if args[i].size != degree:
args[i] = Permutation(args[i], size=degree)
if dups:
args = list(uniq([_af_new(list(a)) for a in args]))
if len(args) > 1:
args = [g for g in args if not g.is_identity]
obj = Basic.__new__(cls, *args, **kwargs)
obj._generators = args
obj._order = None
obj._center = []
obj._is_abelian = None
obj._is_transitive = None
obj._is_sym = None
obj._is_alt = None
obj._is_primitive = None
obj._is_nilpotent = None
obj._is_solvable = None
obj._is_trivial = None
obj._transitivity_degree = None
obj._max_div = None
obj._is_perfect = None
obj._is_cyclic = None
obj._r = len(obj._generators)
obj._degree = obj._generators[0].size
# these attributes are assigned after running schreier_sims
obj._base = []
obj._strong_gens = []
obj._strong_gens_slp = []
obj._basic_orbits = []
obj._transversals = []
obj._transversal_slp = []
# these attributes are assigned after running _random_pr_init
obj._random_gens = []
# finite presentation of the group as an instance of `FpGroup`
obj._fp_presentation = None
return obj
def __getitem__(self, i):
return self._generators[i]
def __contains__(self, i):
"""Return ``True`` if *i* is contained in PermutationGroup.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> p = Permutation(1, 2, 3)
>>> Permutation(3) in PermutationGroup(p)
True
"""
if not isinstance(i, Permutation):
raise TypeError("A PermutationGroup contains only Permutations as "
"elements, not elements of type %s" % type(i))
return self.contains(i)
def __len__(self):
return len(self._generators)
def __eq__(self, other):
"""Return ``True`` if PermutationGroup generated by elements in the
group are same i.e they represent the same PermutationGroup.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> p = Permutation(0, 1, 2, 3, 4, 5)
>>> G = PermutationGroup([p, p**2])
>>> H = PermutationGroup([p**2, p])
>>> G.generators == H.generators
False
>>> G == H
True
"""
if not isinstance(other, PermutationGroup):
return False
set_self_gens = set(self.generators)
set_other_gens = set(other.generators)
# before reaching the general case there are also certain
# optimisation and obvious cases requiring less or no actual
# computation.
if set_self_gens == set_other_gens:
return True
# in the most general case it will check that each generator of
# one group belongs to the other PermutationGroup and vice-versa
for gen1 in set_self_gens:
if not other.contains(gen1):
return False
for gen2 in set_other_gens:
if not self.contains(gen2):
return False
return True
def __hash__(self):
return super().__hash__()
def __mul__(self, other):
"""
Return the direct product of two permutation groups as a permutation
group.
Explanation
===========
This implementation realizes the direct product by shifting the index
set for the generators of the second group: so if we have ``G`` acting
on ``n1`` points and ``H`` acting on ``n2`` points, ``G*H`` acts on
``n1 + n2`` points.
Examples
========
>>> from sympy.combinatorics.named_groups import CyclicGroup
>>> G = CyclicGroup(5)
>>> H = G*G
>>> H
PermutationGroup([
(9)(0 1 2 3 4),
(5 6 7 8 9)])
>>> H.order()
25
"""
if isinstance(other, Permutation):
return Coset(other, self, dir='+')
gens1 = [perm._array_form for perm in self.generators]
gens2 = [perm._array_form for perm in other.generators]
n1 = self._degree
n2 = other._degree
start = list(range(n1))
end = list(range(n1, n1 + n2))
for i in range(len(gens2)):
gens2[i] = [x + n1 for x in gens2[i]]
gens2 = [start + gen for gen in gens2]
gens1 = [gen + end for gen in gens1]
together = gens1 + gens2
gens = [_af_new(x) for x in together]
return PermutationGroup(gens)
def _random_pr_init(self, r, n, _random_prec_n=None):
r"""Initialize random generators for the product replacement algorithm.
Explanation
===========
The implementation uses a modification of the original product
replacement algorithm due to Leedham-Green, as described in [1],
pp. 69-71; also, see [2], pp. 27-29 for a detailed theoretical
analysis of the original product replacement algorithm, and [4].
The product replacement algorithm is used for producing random,
uniformly distributed elements of a group `G` with a set of generators
`S`. For the initialization ``_random_pr_init``, a list ``R`` of
`\max\{r, |S|\}` group generators is created as the attribute
``G._random_gens``, repeating elements of `S` if necessary, and the
identity element of `G` is appended to ``R`` - we shall refer to this
last element as the accumulator. Then the function ``random_pr()``
is called ``n`` times, randomizing the list ``R`` while preserving
the generation of `G` by ``R``. The function ``random_pr()`` itself
takes two random elements ``g, h`` among all elements of ``R`` but
the accumulator and replaces ``g`` with a randomly chosen element
from `\{gh, g(~h), hg, (~h)g\}`. Then the accumulator is multiplied
by whatever ``g`` was replaced by. The new value of the accumulator is
then returned by ``random_pr()``.
The elements returned will eventually (for ``n`` large enough) become
uniformly distributed across `G` ([5]). For practical purposes however,
the values ``n = 50, r = 11`` are suggested in [1].
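Examples
========
A rough sketch (this initializer is normally invoked lazily by
``random_pr``; the element produced below is random, but it
always lies in the group):
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(5)
>>> S._random_pr_init(r=11, n=50)
>>> S.random_pr() in S
True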
Notes
=====
THIS FUNCTION HAS SIDE EFFECTS: it changes the attribute
self._random_gens
See Also
========
random_pr
"""
deg = self.degree
random_gens = [x._array_form for x in self.generators]
k = len(random_gens)
if k < r:
for i in range(k, r):
random_gens.append(random_gens[i - k])
acc = list(range(deg))
random_gens.append(acc)
self._random_gens = random_gens
# handle randomized input for testing purposes
if _random_prec_n is None:
for i in range(n):
self.random_pr()
else:
for i in range(n):
self.random_pr(_random_prec=_random_prec_n[i])
def _union_find_merge(self, first, second, ranks, parents, not_rep):
"""Merges two classes in a union-find data structure.
Explanation
===========
Used in the implementation of Atkinson's algorithm as suggested in [1],
pp. 83-87. The class merging process uses union by rank as an
optimization. ([7])
Notes
=====
THIS FUNCTION HAS SIDE EFFECTS: the list of class representatives,
``parents``, the list of class sizes, ``ranks``, and the list of
elements that are not representatives, ``not_rep``, are changed due to
class merging.
See Also
========
minimal_block, _union_find_rep
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
.. [7] http://www.algorithmist.com/index.php/Union_Find
"""
rep_first = self._union_find_rep(first, parents)
rep_second = self._union_find_rep(second, parents)
if rep_first != rep_second:
# union by rank
if ranks[rep_first] >= ranks[rep_second]:
new_1, new_2 = rep_first, rep_second
else:
new_1, new_2 = rep_second, rep_first
total_rank = ranks[new_1] + ranks[new_2]
if total_rank > self.max_div:
return -1
parents[new_2] = new_1
ranks[new_1] = total_rank
not_rep.append(new_2)
return 1
return 0
def _union_find_rep(self, num, parents):
"""Find representative of a class in a union-find data structure.
Explanation
===========
Used in the implementation of Atkinson's algorithm as suggested in [1],
pp. 83-87. After the representative of the class to which ``num``
belongs is found, path compression is performed as an optimization
([7]).
Notes
=====
THIS FUNCTION HAS SIDE EFFECTS: the list of class representatives,
``parents``, is altered due to path compression.
See Also
========
minimal_block, _union_find_merge
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
.. [7] http://www.algorithmist.com/index.php/Union_Find
"""
rep, parent = num, parents[num]
while parent != rep:
rep = parent
parent = parents[rep]
# path compression
temp, parent = num, parents[num]
while parent != rep:
parents[temp] = rep
temp = parent
parent = parents[temp]
return rep
@property
def base(self):
"""Return a base from the Schreier-Sims algorithm.
Explanation
===========
For a permutation group `G`, a base is a sequence of points
`B = (b_1, b_2, ..., b_k)` such that no element of `G` apart
from the identity fixes all the points in `B`. The concepts of
a base and strong generating set and their applications are
discussed in depth in [1], pp. 87-89 and [2], pp. 55-57.
An alternative way to think of `B` is that it gives the
indices of the stabilizer cosets that contain more than the
identity permutation.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> G = PermutationGroup([Permutation(0, 1, 3)(2, 4)])
>>> G.base
[0, 2]
See Also
========
strong_gens, basic_transversals, basic_orbits, basic_stabilizers
"""
if self._base == []:
self.schreier_sims()
return self._base
def baseswap(self, base, strong_gens, pos, randomized=False,
transversals=None, basic_orbits=None, strong_gens_distr=None):
r"""Swap two consecutive base points in base and strong generating set.
Explanation
===========
If a base for a group `G` is given by `(b_1, b_2, ..., b_k)`, this
function returns a base `(b_1, b_2, ..., b_{i+1}, b_i, ..., b_k)`,
where `i` is given by ``pos``, and a strong generating set relative
to that base. The original base and strong generating set are not
modified.
The randomized version (default) is of Las Vegas type.
Parameters
==========
base, strong_gens
The base and strong generating set.
pos
The position at which swapping is performed.
randomized
A switch between randomized and deterministic version.
transversals
The transversals for the basic orbits, if known.
basic_orbits
The basic orbits, if known.
strong_gens_distr
The strong generators distributed by basic stabilizers, if known.
Returns
=======
(base, strong_gens)
``base`` is the new base, and ``strong_gens`` is a generating set
relative to it.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> S = SymmetricGroup(4)
>>> S.schreier_sims()
>>> S.base
[0, 1, 2]
>>> base, gens = S.baseswap(S.base, S.strong_gens, 1, randomized=False)
>>> base, gens
([0, 2, 1],
[(0 1 2 3), (3)(0 1), (1 3 2),
(2 3), (1 3)])
check that base, gens is a BSGS
>>> S1 = PermutationGroup(gens)
>>> _verify_bsgs(S1, base, gens)
True
See Also
========
schreier_sims
Notes
=====
The deterministic version of the algorithm is discussed in
[1], pp. 102-103; the randomized version is discussed in [1], p.103, and
[2], p.98. It is of Las Vegas type.
Notice that [1] contains a mistake in the pseudocode and
discussion of BASESWAP: on line 3 of the pseudocode,
`|\beta_{i+1}^{\left\langle T\right\rangle}|` should be replaced by
`|\beta_{i}^{\left\langle T\right\rangle}|`, and the same for the
discussion of the algorithm.
"""
# construct the basic orbits, generators for the stabilizer chain
# and transversal elements from whatever was provided
transversals, basic_orbits, strong_gens_distr = \
_handle_precomputed_bsgs(base, strong_gens, transversals,
basic_orbits, strong_gens_distr)
base_len = len(base)
degree = self.degree
# size of orbit of base[pos] under the stabilizer we seek to insert
# in the stabilizer chain at position pos + 1
size = len(basic_orbits[pos])*len(basic_orbits[pos + 1]) \
//len(_orbit(degree, strong_gens_distr[pos], base[pos + 1]))
# initialize the wanted stabilizer by a subgroup
if pos + 2 > base_len - 1:
T = []
else:
T = strong_gens_distr[pos + 2][:]
# randomized version
if randomized is True:
stab_pos = PermutationGroup(strong_gens_distr[pos])
schreier_vector = stab_pos.schreier_vector(base[pos + 1])
# add random elements of the stabilizer until they generate it
while len(_orbit(degree, T, base[pos])) != size:
new = stab_pos.random_stab(base[pos + 1],
schreier_vector=schreier_vector)
T.append(new)
# deterministic version
else:
Gamma = set(basic_orbits[pos])
Gamma.remove(base[pos])
if base[pos + 1] in Gamma:
Gamma.remove(base[pos + 1])
# add elements of the stabilizer until they generate it by
# ruling out member of the basic orbit of base[pos] along the way
while len(_orbit(degree, T, base[pos])) != size:
gamma = next(iter(Gamma))
x = transversals[pos][gamma]
temp = x._array_form.index(base[pos + 1]) # (~x)(base[pos + 1])
if temp not in basic_orbits[pos + 1]:
Gamma = Gamma - _orbit(degree, T, gamma)
else:
y = transversals[pos + 1][temp]
el = rmul(x, y)
if el(base[pos]) not in _orbit(degree, T, base[pos]):
T.append(el)
Gamma = Gamma - _orbit(degree, T, base[pos])
# build the new base and strong generating set
strong_gens_new_distr = strong_gens_distr[:]
strong_gens_new_distr[pos + 1] = T
base_new = base[:]
base_new[pos], base_new[pos + 1] = base_new[pos + 1], base_new[pos]
strong_gens_new = _strong_gens_from_distr(strong_gens_new_distr)
for gen in T:
if gen not in strong_gens_new:
strong_gens_new.append(gen)
return base_new, strong_gens_new
@property
def basic_orbits(self):
"""
Return the basic orbits relative to a base and strong generating set.
Explanation
===========
If `(b_1, b_2, ..., b_k)` is a base for a group `G`, and
`G^{(i)} = G_{b_1, b_2, ..., b_{i-1}}` is the ``i``-th basic stabilizer
(so that `G^{(1)} = G`), the ``i``-th basic orbit relative to this base
is the orbit of `b_i` under `G^{(i)}`. See [1], pp. 87-89 for more
information.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(4)
>>> S.basic_orbits
[[0, 1, 2, 3], [1, 2, 3], [2, 3]]
See Also
========
base, strong_gens, basic_transversals, basic_stabilizers
"""
if self._basic_orbits == []:
self.schreier_sims()
return self._basic_orbits
@property
def basic_stabilizers(self):
"""
Return a chain of stabilizers relative to a base and strong generating
set.
Explanation
===========
The ``i``-th basic stabilizer `G^{(i)}` relative to a base
`(b_1, b_2, ..., b_k)` is `G_{b_1, b_2, ..., b_{i-1}}`. For more
information, see [1], pp. 87-89.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> A = AlternatingGroup(4)
>>> A.schreier_sims()
>>> A.base
[0, 1]
>>> for g in A.basic_stabilizers:
... print(g)
...
PermutationGroup([
(3)(0 1 2),
(1 2 3)])
PermutationGroup([
(1 2 3)])
See Also
========
base, strong_gens, basic_orbits, basic_transversals
"""
if self._transversals == []:
self.schreier_sims()
strong_gens = self._strong_gens
base = self._base
if not base: # e.g. if self is trivial
return []
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_stabilizers = []
for gens in strong_gens_distr:
basic_stabilizers.append(PermutationGroup(gens))
return basic_stabilizers
@property
def basic_transversals(self):
"""
Return basic transversals relative to a base and strong generating set.
Explanation
===========
The basic transversals are transversals of the basic orbits. They
are provided as a list of dictionaries, each dictionary having
keys - the elements of one of the basic orbits, and values - the
corresponding transversal elements. See [1], pp. 87-89 for more
information.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> A = AlternatingGroup(4)
>>> A.basic_transversals
[{0: (3), 1: (3)(0 1 2), 2: (3)(0 2 1), 3: (0 3 1)}, {1: (3), 2: (1 2 3), 3: (1 3 2)}]
See Also
========
strong_gens, base, basic_orbits, basic_stabilizers
"""
if self._transversals == []:
self.schreier_sims()
return self._transversals
def composition_series(self):
r"""
Return the composition series for a group as a list
of permutation groups.
Explanation
===========
The composition series for a group `G` is defined as a
subnormal series `G = H_0 > H_1 > H_2 > \ldots`. A composition
series is a subnormal series such that each factor group
`H_i / H_{i+1}` is simple.
A subnormal series is a composition series only if it is of
maximum length.
The algorithm works as follows:
Starting with the derived series the idea is to fill
the gap between `G = der[i]` and `H = der[i+1]` for each
`i` independently. Since all subgroups of the abelian group
`G/H` are normal, the first step is to take the generators
`g` of `G` and add them to the generators of `H` one by one.
The factor groups formed are not simple in general. Each
group is obtained from the previous one by adding one
generator `g`, if the previous group is denoted by `H`
then the next group `K` is generated by `g` and `H`.
The factor group `K/H` is cyclic and its order is
`K.order()//H.order()`. The series is then extended between
`K` and `H` by groups generated by powers of `g` and `H`.
The series formed is then prepended to the already existing
series.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.named_groups import CyclicGroup
>>> S = SymmetricGroup(12)
>>> G = S.sylow_subgroup(2)
>>> C = G.composition_series()
>>> [H.order() for H in C]
[1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
>>> G = S.sylow_subgroup(3)
>>> C = G.composition_series()
>>> [H.order() for H in C]
[243, 81, 27, 9, 3, 1]
>>> G = CyclicGroup(12)
>>> C = G.composition_series()
>>> [H.order() for H in C]
[12, 6, 3, 1]
"""
der = self.derived_series()
if not (all(g.is_identity for g in der[-1].generators)):
raise NotImplementedError('Group should be solvable')
series = []
for i in range(len(der)-1):
H = der[i+1]
up_seg = []
for g in der[i].generators:
K = PermutationGroup([g] + H.generators)
order = K.order() // H.order()
down_seg = []
for p, e in factorint(order).items():
for j in range(e):
down_seg.append(PermutationGroup([g] + H.generators))
g = g**p
up_seg = down_seg + up_seg
H = K
up_seg[0] = der[i]
series.extend(up_seg)
series.append(der[-1])
return series
def coset_transversal(self, H):
"""Return a transversal of the right cosets of self by its subgroup H
using the second method described in [1], Subsection 4.6.7
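Examples
========
A small sanity check (the particular transversal elements may
vary, so only the number of cosets is tested here):
>>> from sympy.combinatorics.named_groups import SymmetricGroup, AlternatingGroup
>>> S = SymmetricGroup(4)
>>> A = AlternatingGroup(4)
>>> T = S.coset_transversal(A)
>>> len(T) == S.order()//A.order()
True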
"""
if not H.is_subgroup(self):
raise ValueError("The argument must be a subgroup")
if H.order() == 1:
return self._elements
self._schreier_sims(base=H.base) # make G.base an extension of H.base
base = self.base
base_ordering = _base_ordering(base, self.degree)
identity = Permutation(self.degree - 1)
transversals = self.basic_transversals[:]
# transversals is a list of dictionaries. Get rid of the keys
# so that it is a list of lists and sort each list in
# the increasing order of base[l]^x
for l, t in enumerate(transversals):
transversals[l] = sorted(t.values(),
key = lambda x: base_ordering[base[l]^x])
orbits = H.basic_orbits
h_stabs = H.basic_stabilizers
g_stabs = self.basic_stabilizers
indices = [x.order()//y.order() for x, y in zip(g_stabs, h_stabs)]
# T^(l) should be a right transversal of H^(l) in G^(l) for
# 1<=l<=len(base). While H^(l) is the trivial group, T^(l)
# contains all the elements of G^(l) so we might just as well
# start with l = len(h_stabs)-1
if len(g_stabs) > len(h_stabs):
T = g_stabs[len(h_stabs)]._elements
else:
T = [identity]
l = len(h_stabs)-1
t_len = len(T)
while l > -1:
T_next = []
for u in transversals[l]:
if u == identity:
continue
b = base_ordering[base[l]^u]
for t in T:
p = t*u
if all([base_ordering[h^p] >= b for h in orbits[l]]):
T_next.append(p)
if t_len + len(T_next) == indices[l]:
break
if t_len + len(T_next) == indices[l]:
break
T += T_next
t_len += len(T_next)
l -= 1
T.remove(identity)
T = [identity] + T
return T
def _coset_representative(self, g, H):
"""Return the representative of Hg from the transversal that
would be computed by ``self.coset_transversal(H)``.
"""
if H.order() == 1:
return g
# The base of self must be an extension of H.base.
if not(self.base[:len(H.base)] == H.base):
self._schreier_sims(base=H.base)
orbits = H.basic_orbits[:]
h_transversals = [list(_.values()) for _ in H.basic_transversals]
transversals = [list(_.values()) for _ in self.basic_transversals]
base = self.base
base_ordering = _base_ordering(base, self.degree)
def step(l, x):
gamma = sorted(orbits[l], key = lambda y: base_ordering[y^x])[0]
i = [base[l]^h for h in h_transversals[l]].index(gamma)
x = h_transversals[l][i]*x
if l < len(orbits)-1:
for u in transversals[l]:
if base[l]^u == base[l]^x:
break
x = step(l+1, x*u**-1)*u
return x
return step(0, g)
def coset_table(self, H):
"""Return the standardised (right) coset table of self in H as
a list of lists.
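Examples
========
A small sketch; row ``i`` lists, for coset ``i``, the coset reached
by right multiplication with each generator and its inverse:
>>> from sympy.combinatorics.named_groups import SymmetricGroup, AlternatingGroup
>>> S = SymmetricGroup(3)
>>> A = AlternatingGroup(3)
>>> table = S.coset_table(A)
>>> len(table) == S.order()//A.order()
True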
"""
# Maybe this should be made to return an instance of CosetTable
# from fp_groups.py but the class would need to be changed first
# to be compatible with PermutationGroups
from itertools import chain, product
if not H.is_subgroup(self):
raise ValueError("The argument must be a subgroup")
T = self.coset_transversal(H)
n = len(T)
A = list(chain.from_iterable((gen, gen**-1)
for gen in self.generators))
table = []
for i in range(n):
row = [self._coset_representative(T[i]*x, H) for x in A]
row = [T.index(r) for r in row]
table.append(row)
# standardize (this is the same as the algorithm used in coset_table)
# If CosetTable is made compatible with PermutationGroups, this
# should be replaced by table.standardize()
A = range(len(A))
gamma = 1
for alpha, a in product(range(n), A):
beta = table[alpha][a]
if beta >= gamma:
if beta > gamma:
for x in A:
z = table[gamma][x]
table[gamma][x] = table[beta][x]
table[beta][x] = z
for i in range(n):
if table[i][x] == beta:
table[i][x] = gamma
elif table[i][x] == gamma:
table[i][x] = beta
gamma += 1
if gamma >= n-1:
return table
def center(self):
r"""
Return the center of a permutation group.
Explanation
===========
The center for a group `G` is defined as
`Z(G) = \{z\in G | \forall g\in G, zg = gz \}`,
the set of elements of `G` that commute with all elements of `G`.
It is equal to the centralizer of `G` inside `G`, and is naturally a
subgroup of `G` ([9]).
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(4)
>>> G = D.center()
>>> G.order()
2
See Also
========
centralizer
Notes
=====
This is a naive implementation that is a straightforward application
of ``.centralizer()``
"""
return self.centralizer(self)
def centralizer(self, other):
r"""
Return the centralizer of a group/set/element.
Explanation
===========
The centralizer of a set of permutations ``S`` inside
a group ``G`` is the set of elements of ``G`` that commute with all
elements of ``S``::
`C_G(S) = \{ g \in G | gs = sg \forall s \in S\}` ([10])
Usually, ``S`` is a subset of ``G``, but if ``G`` is a proper subgroup of
the full symmetric group, we allow for ``S`` to have elements outside
``G``.
It is naturally a subgroup of ``G``; the centralizer of a permutation
group is equal to the centralizer of any set of generators for that
group, since any element commuting with the generators commutes with
any product of the generators.
Parameters
==========
other
a permutation group/list of permutations/single permutation
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... CyclicGroup)
>>> S = SymmetricGroup(6)
>>> C = CyclicGroup(6)
>>> H = S.centralizer(C)
>>> H.is_subgroup(C)
True
See Also
========
subgroup_search
Notes
=====
The implementation is an application of ``.subgroup_search()`` with
tests using a specific base for the group ``G``.
"""
if hasattr(other, 'generators'):
if other.is_trivial or self.is_trivial:
return self
degree = self.degree
identity = _af_new(list(range(degree)))
orbits = other.orbits()
num_orbits = len(orbits)
orbits.sort(key=lambda x: -len(x))
long_base = []
orbit_reps = [None]*num_orbits
orbit_reps_indices = [None]*num_orbits
orbit_descr = [None]*degree
for i in range(num_orbits):
orbit = list(orbits[i])
orbit_reps[i] = orbit[0]
orbit_reps_indices[i] = len(long_base)
for point in orbit:
orbit_descr[point] = i
long_base = long_base + orbit
base, strong_gens = self.schreier_sims_incremental(base=long_base)
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
i = 0
for i in range(len(base)):
if strong_gens_distr[i] == [identity]:
break
base = base[:i]
base_len = i
for j in range(num_orbits):
if base[base_len - 1] in orbits[j]:
break
rel_orbits = orbits[: j + 1]
num_rel_orbits = len(rel_orbits)
transversals = [None]*num_rel_orbits
for j in range(num_rel_orbits):
rep = orbit_reps[j]
transversals[j] = dict(
other.orbit_transversal(rep, pairs=True))
trivial_test = lambda x: True
tests = [None]*base_len
for l in range(base_len):
if base[l] in orbit_reps:
tests[l] = trivial_test
else:
def test(computed_words, l=l):
g = computed_words[l]
rep_orb_index = orbit_descr[base[l]]
rep = orbit_reps[rep_orb_index]
im = g._array_form[base[l]]
im_rep = g._array_form[rep]
tr_el = transversals[rep_orb_index][base[l]]
# using the definition of transversal,
# base[l]^g = rep^(tr_el*g);
# if g belongs to the centralizer, then
# base[l]^g = (rep^g)^tr_el
return im == tr_el._array_form[im_rep]
tests[l] = test
def prop(g):
return [rmul(g, gen) for gen in other.generators] == \
[rmul(gen, g) for gen in other.generators]
return self.subgroup_search(prop, base=base,
strong_gens=strong_gens, tests=tests)
elif hasattr(other, '__getitem__'):
gens = list(other)
return self.centralizer(PermutationGroup(gens))
elif hasattr(other, 'array_form'):
return self.centralizer(PermutationGroup([other]))
def commutator(self, G, H):
"""
Return the commutator of two subgroups.
Explanation
===========
For a permutation group ``K`` and subgroups ``G``, ``H``, the
commutator of ``G`` and ``H`` is defined as the group generated
by all the commutators `[g, h] = hgh^{-1}g^{-1}` for ``g`` in ``G`` and
``h`` in ``H``. It is naturally a subgroup of ``K`` ([1], p.27).
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup)
>>> S = SymmetricGroup(5)
>>> A = AlternatingGroup(5)
>>> G = S.commutator(S, A)
>>> G.is_subgroup(A)
True
See Also
========
derived_subgroup
Notes
=====
The commutator of two subgroups `H, G` is equal to the normal closure
of the commutators of all the generators, i.e. `hgh^{-1}g^{-1}` for `h`
a generator of `H` and `g` a generator of `G` ([1], p.28)
"""
ggens = G.generators
hgens = H.generators
commutators = []
for ggen in ggens:
for hgen in hgens:
commutator = rmul(hgen, ggen, ~hgen, ~ggen)
if commutator not in commutators:
commutators.append(commutator)
res = self.normal_closure(commutators)
return res
def coset_factor(self, g, factor_index=False):
"""Return ``G``'s (self's) coset factorization of ``g``
Explanation
===========
If ``g`` is an element of ``G`` then it can be written as the product
of permutations drawn from the Schreier-Sims coset decomposition,
The permutations returned in ``f`` are those for which
the product gives ``g``: ``g = f[n-1]*...*f[1]*f[0]`` where ``n = len(B)``
and ``B = G.base``. ``f[i]`` is one of the permutations in the
transversal of the ``i``-th basic orbit, ``self.basic_transversals[i]``.
If factor_index==True,
returns a tuple ``[b[0],..,b[n]]``, where ``b[i]``
belongs to ``self._basic_orbits[i]``
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation(0, 1, 3, 7, 6, 4)(2, 5)
>>> b = Permutation(0, 1, 3, 2)(4, 5, 7, 6)
>>> G = PermutationGroup([a, b])
Define g:
>>> g = Permutation(7)(1, 2, 4)(3, 6, 5)
Confirm that it is an element of G:
>>> G.contains(g)
True
Thus, it can be written as a product of factors (up to
3) drawn from the basic transversals. See below that factors from
two of the transversals and the identity permutation have been used:
>>> f = G.coset_factor(g)
>>> f[2]*f[1]*f[0] == g
True
>>> f1 = G.coset_factor(g, True); f1
[0, 4, 4]
>>> tr = G.basic_transversals
>>> f[0] == tr[0][f1[0]]
True
If g is not an element of G then [] is returned:
>>> c = Permutation(5, 6, 7)
>>> G.coset_factor(c)
[]
See Also
========
sympy.combinatorics.util._strip
"""
if isinstance(g, (Cycle, Permutation)):
g = g.list()
if len(g) != self._degree:
# this could either adjust the size or return [] immediately
# but we don't choose between the two and just signal a possible
# error
raise ValueError('g should be the same size as permutations of G')
I = list(range(self._degree))
basic_orbits = self.basic_orbits
transversals = self._transversals
factors = []
base = self.base
h = g
for i in range(len(base)):
beta = h[base[i]]
if beta == base[i]:
factors.append(beta)
continue
if beta not in basic_orbits[i]:
return []
u = transversals[i][beta]._array_form
h = _af_rmul(_af_invert(u), h)
factors.append(beta)
if h != I:
return []
if factor_index:
return factors
tr = self.basic_transversals
factors = [tr[i][factors[i]] for i in range(len(base))]
return factors
def generator_product(self, g, original=False):
'''
Return a list of strong generators `[s1, ..., sn]`
such that `g = sn*...*s1`. If `original=True`, make the list
contain only the original group generators.
'''
product = []
if g.is_identity:
return []
if g in self.strong_gens:
if not original or g in self.generators:
return [g]
else:
slp = self._strong_gens_slp[g]
for s in slp:
product.extend(self.generator_product(s, original=True))
return product
elif g**-1 in self.strong_gens:
g = g**-1
if not original or g in self.generators:
return [g**-1]
else:
slp = self._strong_gens_slp[g]
for s in slp:
product.extend(self.generator_product(s, original=True))
l = len(product)
product = [product[l-i-1]**-1 for i in range(l)]
return product
f = self.coset_factor(g, True)
for i, j in enumerate(f):
slp = self._transversal_slp[i][j]
for s in slp:
if not original:
product.append(self.strong_gens[s])
else:
s = self.strong_gens[s]
product.extend(self.generator_product(s, original=True))
return product
def coset_rank(self, g):
"""rank using Schreier-Sims representation.
Explanation
===========
The coset rank of ``g`` is the ordering number in which
it appears in the lexicographic listing according to the
coset decomposition
The ordering is the same as in G.generate(method='coset').
If ``g`` does not belong to the group it returns None.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation(0, 1, 3, 7, 6, 4)(2, 5)
>>> b = Permutation(0, 1, 3, 2)(4, 5, 7, 6)
>>> G = PermutationGroup([a, b])
>>> c = Permutation(7)(2, 4)(3, 5)
>>> G.coset_rank(c)
16
>>> G.coset_unrank(16)
(7)(2 4)(3 5)
See Also
========
coset_factor
"""
factors = self.coset_factor(g, True)
if not factors:
return None
rank = 0
b = 1
transversals = self._transversals
base = self._base
basic_orbits = self._basic_orbits
for i in range(len(base)):
k = factors[i]
j = basic_orbits[i].index(k)
rank += b*j
b = b*len(transversals[i])
return rank
def coset_unrank(self, rank, af=False):
"""unrank using Schreier-Sims representation
coset_unrank is the inverse operation of coset_rank
if 0 <= rank < order; otherwise it returns None.
"""
if rank < 0 or rank >= self.order():
return None
base = self.base
transversals = self.basic_transversals
basic_orbits = self.basic_orbits
m = len(base)
v = [0]*m
for i in range(m):
rank, c = divmod(rank, len(transversals[i]))
v[i] = basic_orbits[i][c]
a = [transversals[i][v[i]]._array_form for i in range(m)]
h = _af_rmuln(*a)
if af:
return h
else:
return _af_new(h)
@property
def degree(self):
"""Returns the size of the permutations in the group.
Explanation
===========
The number of generators used to define the group is given by
``len(group)``; the number of permutations in the group is given
by ``group.order()``.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([1, 0, 2])
>>> G = PermutationGroup([a])
>>> G.degree
3
>>> len(G)
1
>>> G.order()
2
>>> list(G.generate())
[(2), (2)(0 1)]
See Also
========
order
"""
return self._degree
@property
def identity(self):
'''
Return the identity element of the permutation group.
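Examples
========
For instance, for the symmetric group on three points the identity
prints in cyclic form as ``(2)``:
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> SymmetricGroup(3).identity
(2)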
'''
return _af_new(list(range(self.degree)))
@property
def elements(self):
"""Returns all the elements of the permutation group as a set
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> p = PermutationGroup(Permutation(1, 3), Permutation(1, 2))
>>> p.elements
{(1 2 3), (1 3 2), (1 3), (2 3), (3), (3)(1 2)}
"""
return set(self._elements)
@property
def _elements(self):
"""Returns all the elements of the permutation group as a list
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> p = PermutationGroup(Permutation(1, 3), Permutation(1, 2))
>>> p._elements
[(3), (3)(1 2), (1 3), (2 3), (1 2 3), (1 3 2)]
"""
return list(islice(self.generate(), None))
def derived_series(self):
r"""Return the derived series for the group.
Explanation
===========
The derived series for a group `G` is defined as
`G = G_0 > G_1 > G_2 > \ldots` where `G_i = [G_{i-1}, G_{i-1}]`,
i.e. `G_i` is the derived subgroup of `G_{i-1}`, for
`i\in\mathbb{N}`. When we have `G_k = G_{k-1}` for some
`k\in\mathbb{N}`, the series terminates.
Returns
=======
A list of permutation groups containing the members of the derived
series in the order `G = G_0, G_1, G_2, \ldots`.
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup, DihedralGroup)
>>> A = AlternatingGroup(5)
>>> len(A.derived_series())
1
>>> S = SymmetricGroup(4)
>>> len(S.derived_series())
4
>>> S.derived_series()[1].is_subgroup(AlternatingGroup(4))
True
>>> S.derived_series()[2].is_subgroup(DihedralGroup(2))
True
See Also
========
derived_subgroup
"""
res = [self]
current = self
next = self.derived_subgroup()
while not current.is_subgroup(next):
res.append(next)
current = next
next = next.derived_subgroup()
return res
def derived_subgroup(self):
r"""Compute the derived subgroup.
Explanation
===========
The derived subgroup, or commutator subgroup is the subgroup generated
by all commutators `[g, h] = hgh^{-1}g^{-1}` for `g, h\in G` ; it is
equal to the normal closure of the set of commutators of the generators
([1], p.28, [11]).
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([1, 0, 2, 4, 3])
>>> b = Permutation([0, 1, 3, 2, 4])
>>> G = PermutationGroup([a, b])
>>> C = G.derived_subgroup()
>>> list(C.generate(af=True))
[[0, 1, 2, 3, 4], [0, 1, 3, 4, 2], [0, 1, 4, 2, 3]]
See Also
========
derived_series
"""
r = self._r
gens = [p._array_form for p in self.generators]
set_commutators = set()
degree = self._degree
rng = list(range(degree))
for i in range(r):
for j in range(r):
p1 = gens[i]
p2 = gens[j]
c = list(range(degree))
for k in rng:
c[p2[p1[k]]] = p1[p2[k]]
ct = tuple(c)
if not ct in set_commutators:
set_commutators.add(ct)
cms = [_af_new(p) for p in set_commutators]
G2 = self.normal_closure(cms)
return G2
def generate(self, method="coset", af=False):
"""Return iterator to generate the elements of the group.
Explanation
===========
Iteration is done with one of these methods::
method='coset' using the Schreier-Sims coset representation
method='dimino' using the Dimino method
If ``af = True`` it yields the array form of the permutations
Examples
========
>>> from sympy.combinatorics import PermutationGroup
>>> from sympy.combinatorics.polyhedron import tetrahedron
The permutation group given in the tetrahedron object is also
a true group:
>>> G = tetrahedron.pgroup
>>> G.is_group
True
Also the group generated by the permutations in the tetrahedron
pgroup -- even the first two -- is a proper group:
>>> H = PermutationGroup(G[0], G[1])
>>> J = PermutationGroup(list(H.generate())); J
PermutationGroup([
(0 1)(2 3),
(1 2 3),
(1 3 2),
(0 3 1),
(0 2 3),
(0 3)(1 2),
(0 1 3),
(3)(0 2 1),
(0 3 2),
(3)(0 1 2),
(0 2)(1 3)])
>>> _.is_group
True
"""
if method == "coset":
return self.generate_schreier_sims(af)
elif method == "dimino":
return self.generate_dimino(af)
else:
raise NotImplementedError('No generation defined for %s' % method)
def generate_dimino(self, af=False):
"""Yield group elements using Dimino's algorithm.
If ``af == True`` it yields the array form of the permutations.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([0, 2, 3, 1])
>>> g = PermutationGroup([a, b])
>>> list(g.generate_dimino(af=True))
[[0, 1, 2, 3], [0, 2, 1, 3], [0, 2, 3, 1],
[0, 1, 3, 2], [0, 3, 2, 1], [0, 3, 1, 2]]
References
==========
.. [1] The Implementation of Various Algorithms for Permutation Groups in
the Computer Algebra System: AXIOM, N.J. Doye, M.Sc. Thesis
"""
idn = list(range(self.degree))
order = 0
element_list = [idn]
set_element_list = {tuple(idn)}
if af:
yield idn
else:
yield _af_new(idn)
gens = [p._array_form for p in self.generators]
for i in range(len(gens)):
# D elements of the subgroup G_i generated by gens[:i]
D = element_list[:]
N = [idn]
while N:
A = N
N = []
for a in A:
for g in gens[:i + 1]:
ag = _af_rmul(a, g)
if tuple(ag) not in set_element_list:
# produce G_i*g
for d in D:
order += 1
ap = _af_rmul(d, ag)
if af:
yield ap
else:
p = _af_new(ap)
yield p
element_list.append(ap)
set_element_list.add(tuple(ap))
N.append(ap)
self._order = len(element_list)
def generate_schreier_sims(self, af=False):
"""Yield group elements using the Schreier-Sims representation
in coset_rank order
If ``af = True`` it yields the array form of the permutations
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([0, 2, 3, 1])
>>> g = PermutationGroup([a, b])
>>> list(g.generate_schreier_sims(af=True))
[[0, 1, 2, 3], [0, 2, 1, 3], [0, 3, 2, 1],
[0, 1, 3, 2], [0, 2, 3, 1], [0, 3, 1, 2]]
"""
n = self._degree
u = self.basic_transversals
basic_orbits = self._basic_orbits
if len(u) == 0:
for x in self.generators:
if af:
yield x._array_form
else:
yield x
return
if len(u) == 1:
for i in basic_orbits[0]:
if af:
yield u[0][i]._array_form
else:
yield u[0][i]
return
u = list(reversed(u))
basic_orbits = basic_orbits[::-1]
# stg stack of group elements
stg = [list(range(n))]
posmax = [len(x) for x in u]
n1 = len(posmax) - 1
pos = [0]*n1
h = 0
while 1:
# backtrack when finished iterating over coset
if pos[h] >= posmax[h]:
if h == 0:
return
pos[h] = 0
h -= 1
stg.pop()
continue
p = _af_rmul(u[h][basic_orbits[h][pos[h]]]._array_form, stg[-1])
pos[h] += 1
stg.append(p)
h += 1
if h == n1:
if af:
for i in basic_orbits[-1]:
p = _af_rmul(u[-1][i]._array_form, stg[-1])
yield p
else:
for i in basic_orbits[-1]:
p = _af_rmul(u[-1][i]._array_form, stg[-1])
p1 = _af_new(p)
yield p1
stg.pop()
h -= 1
@property
def generators(self):
"""Returns the generators of the group.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.generators
[(1 2), (2)(0 1)]
"""
return self._generators
def contains(self, g, strict=True):
"""Test if permutation ``g`` belong to self, ``G``.
Explanation
===========
If ``g`` is an element of ``G`` it can be written as a product
of factors drawn from the cosets of ``G``'s stabilizers. To see
if ``g`` is one of the actual generators defining the group use
``G.has(g)``.
If ``strict`` is not ``True``, ``g`` will be resized, if necessary,
to match the size of permutations in ``self``.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation(1, 2)
>>> b = Permutation(2, 3, 1)
>>> G = PermutationGroup(a, b, degree=5)
>>> G.contains(G[0]) # trivial check
True
>>> elem = Permutation([[2, 3]], size=5)
>>> G.contains(elem)
True
>>> G.contains(Permutation(4)(0, 1, 2, 3))
False
If strict is False, a permutation will be resized, if
necessary:
>>> H = PermutationGroup(Permutation(5))
>>> H.contains(Permutation(3))
False
>>> H.contains(Permutation(3), strict=False)
True
To test if a given permutation is present in the group:
>>> elem in G.generators
False
>>> G.has(elem)
False
See Also
========
coset_factor, sympy.core.basic.Basic.has, __contains__
"""
if not isinstance(g, Permutation):
return False
if g.size != self.degree:
if strict:
return False
g = Permutation(g, size=self.degree)
if g in self.generators:
return True
return bool(self.coset_factor(g.array_form, True))
@property
def is_perfect(self):
"""Return ``True`` if the group is perfect.
A group is perfect if it equals its derived subgroup.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation(1,2,3)(4,5)
>>> b = Permutation(1,2,3,4,5)
>>> G = PermutationGroup([a, b])
>>> G.is_perfect
False
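The alternating group ``A_5``, being a nonabelian simple group, is
perfect:
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> AlternatingGroup(5).is_perfect
True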
"""
if self._is_perfect is None:
self._is_perfect = self == self.derived_subgroup()
return self._is_perfect
@property
def is_abelian(self):
"""Test if the group is Abelian.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.is_abelian
False
>>> a = Permutation([0, 2, 1])
>>> G = PermutationGroup([a])
>>> G.is_abelian
True
"""
if self._is_abelian is not None:
return self._is_abelian
self._is_abelian = True
gens = [p._array_form for p in self.generators]
for x in gens:
for y in gens:
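# check that every pair of distinct generators commutes; each unordered
# pair is visited once by comparing the array forms lexicographically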
if y <= x:
continue
if not _af_commutes_with(x, y):
self._is_abelian = False
return False
return True
def abelian_invariants(self):
"""
Returns the abelian invariants for the given group.
Let ``G`` be a nontrivial finite abelian group. Then G is isomorphic to
the direct product of finitely many nontrivial cyclic groups of
prime-power order.
Explanation
===========
The prime-powers that occur as the orders of the factors are uniquely
determined by G. More precisely, the primes that occur in the orders of the
factors in any such decomposition of ``G`` are exactly the primes that divide
``|G|`` and for any such prime ``p``, if the orders of the factors that are
p-groups in one such decomposition of ``G`` are ``p^{t_1} >= p^{t_2} >= ... >= p^{t_r}``,
then the orders of the factors that are p-groups in any such decomposition of ``G``
are ``p^{t_1} >= p^{t_2} >= ... >= p^{t_r}``.
The uniquely determined integers ``p^{t_1} >= p^{t_2} >= ... >= p^{t_r}``, taken
for all primes that divide ``|G|`` are called the invariants of the nontrivial
group ``G`` as suggested in ([14], p. 542).
Notes
=====
We adopt the convention that the invariants of a trivial group are [].
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.abelian_invariants()
[2]
>>> from sympy.combinatorics.named_groups import CyclicGroup
>>> G = CyclicGroup(7)
>>> G.abelian_invariants()
[7]
"""
if self.is_trivial:
return []
gns = self.generators
inv = []
G = self
H = G.derived_subgroup()
Hgens = H.generators
for p in primefactors(G.order()):
ranks = []
while True:
pows = []
for g in gns:
elm = g**p
if not H.contains(elm):
pows.append(elm)
K = PermutationGroup(Hgens + pows) if pows else H
r = G.order()//K.order()
G = K
gns = pows
if r == 1:
break
ranks.append(multiplicity(p, r))
if ranks:
pows = [1]*ranks[0]
for i in ranks:
for j in range(0, i):
pows[j] = pows[j]*p
inv.extend(pows)
inv.sort()
return inv
def is_elementary(self, p):
"""Return ``True`` if the group is elementary abelian. An elementary
abelian group is a finite abelian group, where every nontrivial
element has order `p`, where `p` is a prime.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1])
>>> G = PermutationGroup([a])
>>> G.is_elementary(2)
True
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([3, 1, 2, 0])
>>> G = PermutationGroup([a, b])
>>> G.is_elementary(2)
True
>>> G.is_elementary(3)
False
"""
return self.is_abelian and all(g.order() == p for g in self.generators)
def _eval_is_alt_sym_naive(self, only_sym=False, only_alt=False):
"""A naive test using the group order."""
if only_sym and only_alt:
raise ValueError(
    "only_sym and only_alt cannot both be set to True")
n = self.degree
sym_order = 1
for i in range(2, n+1):
sym_order *= i
order = self.order()
if order == sym_order:
self._is_sym = True
self._is_alt = False
if only_alt:
return False
return True
elif 2*order == sym_order:
self._is_sym = False
self._is_alt = True
if only_sym:
return False
return True
return False
def _eval_is_alt_sym_monte_carlo(self, eps=0.05, perms=None):
"""A test using monte-carlo algorithm.
Parameters
==========
eps : float, optional
The criterion for the incorrect ``False`` return.
perms : list[Permutation], optional
If explicitly given, the test is run over the given candidate
permutations.
If ``None``, it randomly computes ``N_eps`` and chooses
``N_eps`` sample of the permutation from the group.
See Also
========
_check_cycles_alt_sym
"""
if perms is None:
n = self.degree
if n < 17:
c_n = 0.34
else:
c_n = 0.57
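# d_n is (approximately) a lower bound on the proportion of elements of
# the symmetric/alternating group that contain a cycle of prime length
# p with n/2 < p < n - 2; sampling N_eps elements keeps the chance of
# missing every such witness at roughly (1 - d_n)**N_eps <= eps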
d_n = (c_n*log(2))/log(n)
N_eps = int(-log(eps)/d_n)
perms = (self.random_pr() for i in range(N_eps))
return self._eval_is_alt_sym_monte_carlo(perms=perms)
for perm in perms:
if _check_cycles_alt_sym(perm):
return True
return False
def is_alt_sym(self, eps=0.05, _random_prec=None):
r"""Monte Carlo test for the symmetric/alternating group for degrees
>= 8.
Explanation
===========
More specifically, it is a one-sided Monte Carlo test with the
answer True (i.e., G is symmetric/alternating) guaranteed to be
correct, and the answer False being incorrect with probability at
most eps.
For degree < 8, the order of the group is checked so the test
is deterministic.
Notes
=====
The algorithm itself uses some nontrivial results from group theory and
number theory:
1) If a transitive group ``G`` of degree ``n`` contains an element
with a cycle of length ``n/2 < p < n-2`` for ``p`` a prime, ``G`` is the
symmetric or alternating group ([1], pp. 81-82)
2) The proportion of elements in the symmetric/alternating group having
the property described in 1) is approximately `\log(2)/\log(n)`
([1], p.82; [2], pp. 226-227).
The helper function ``_check_cycles_alt_sym`` is used to
go over the cycles in a permutation and look for ones satisfying 1).
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(10)
>>> D.is_alt_sym()
False
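For degree < 8 the order of the group is checked, so the answer is
deterministic:
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> SymmetricGroup(7).is_alt_sym()
True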
See Also
========
_check_cycles_alt_sym
"""
if _random_prec is not None:
N_eps = _random_prec['N_eps']
perms = (_random_prec[i] for i in range(N_eps))
return self._eval_is_alt_sym_monte_carlo(perms=perms)
if self._is_sym or self._is_alt:
return True
if self._is_sym is False and self._is_alt is False:
return False
n = self.degree
if n < 8:
return self._eval_is_alt_sym_naive()
elif self.is_transitive():
return self._eval_is_alt_sym_monte_carlo(eps=eps)
self._is_sym, self._is_alt = False, False
return False
@property
def is_nilpotent(self):
"""Test if the group is nilpotent.
Explanation
===========
A group `G` is nilpotent if it has a central series of finite length.
Alternatively, `G` is nilpotent if its lower central series terminates
with the trivial group. Every nilpotent group is also solvable
([1], p.29, [12]).
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... CyclicGroup)
>>> C = CyclicGroup(6)
>>> C.is_nilpotent
True
>>> S = SymmetricGroup(5)
>>> S.is_nilpotent
False
See Also
========
lower_central_series, is_solvable
"""
if self._is_nilpotent is None:
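# the group is nilpotent iff the last term of its lower central
# series is the trivial group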
lcs = self.lower_central_series()
terminator = lcs[len(lcs) - 1]
gens = terminator.generators
degree = self.degree
identity = _af_new(list(range(degree)))
if all(g == identity for g in gens):
self._is_solvable = True
self._is_nilpotent = True
return True
else:
self._is_nilpotent = False
return False
else:
return self._is_nilpotent
def is_normal(self, gr, strict=True):
"""Test if ``G=self`` is a normal subgroup of ``gr``.
Explanation
===========
G is normal in gr if
for each g2 in G, g1 in gr, ``g = g1*g2*g1**-1`` belongs to G
It is sufficient to check this for each g1 in gr.generators and
g2 in G.generators.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([1, 2, 0])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G1 = PermutationGroup([a, Permutation([2, 0, 1])])
>>> G1.is_normal(G)
True
"""
if not self.is_subgroup(gr, strict=strict):
return False
d_self = self.degree
d_gr = gr.degree
if self.is_trivial and (d_self == d_gr or not strict):
return True
if self._is_abelian:
return True
new_self = self.copy()
if not strict and d_self != d_gr:
if d_self < d_gr:
new_self = PermGroup(new_self.generators + [Permutation(d_gr - 1)])
else:
gr = PermGroup(gr.generators + [Permutation(d_self - 1)])
gens2 = [p._array_form for p in new_self.generators]
gens1 = [p._array_form for p in gr.generators]
for g1 in gens1:
for g2 in gens2:
p = _af_rmuln(g1, g2, _af_invert(g1))
if not new_self.coset_factor(p, True):
return False
return True
def is_primitive(self, randomized=True):
r"""Test if a group is primitive.
Explanation
===========
A permutation group ``G`` acting on a set ``S`` is called primitive if
``S`` contains no nontrivial block under the action of ``G``
(a block is nontrivial if its cardinality is more than ``1``).
Notes
=====
The algorithm is described in [1], p.83, and uses the function
minimal_block to search for blocks of the form `\{0, k\}` for ``k``
ranging over representatives for the orbits of `G_0`, the stabilizer of
``0``. This algorithm has complexity `O(n^2)` where ``n`` is the degree
of the group, and will perform badly if `G_0` is small.
There are two implementations offered: one finds `G_0`
deterministically using the function ``stabilizer``, and the other
(default) produces random elements of `G_0` using ``random_stab``,
hoping that they generate a subgroup of `G_0` with not too many more
orbits than `G_0` (this is suggested in [1], p.83). Behavior is changed
by the ``randomized`` flag.
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(10)
>>> D.is_primitive()
False
See Also
========
minimal_block, random_stab
"""
if self._is_primitive is not None:
return self._is_primitive
if self.is_transitive() is False:
return False
if randomized:
random_stab_gens = []
v = self.schreier_vector(0)
for i in range(len(self)):
random_stab_gens.append(self.random_stab(0, v))
stab = PermutationGroup(random_stab_gens)
else:
stab = self.stabilizer(0)
orbits = stab.orbits()
for orb in orbits:
x = orb.pop()
if x != 0 and any(e != 0 for e in self.minimal_block([0, x])):
self._is_primitive = False
return False
self._is_primitive = True
return True
def minimal_blocks(self, randomized=True):
'''
For a transitive group, return the list of all minimal
block systems. If a group is intransitive, return `False`.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> DihedralGroup(6).minimal_blocks()
[[0, 1, 0, 1, 0, 1], [0, 1, 2, 0, 1, 2]]
>>> G = PermutationGroup(Permutation(1,2,5))
>>> G.minimal_blocks()
False
See Also
========
minimal_block, is_transitive, is_primitive
'''
def _number_blocks(blocks):
# number the blocks of a block system
# in order and return the number of
# blocks and the tuple with the
# reordering
n = len(blocks)
appeared = {}
m = 0
b = [None]*n
for i in range(n):
if blocks[i] not in appeared:
appeared[blocks[i]] = m
b[i] = m
m += 1
else:
b[i] = appeared[blocks[i]]
return tuple(b), m
if not self.is_transitive():
return False
blocks = []
num_blocks = []
rep_blocks = []
if randomized:
random_stab_gens = []
v = self.schreier_vector(0)
for i in range(len(self)):
random_stab_gens.append(self.random_stab(0, v))
stab = PermutationGroup(random_stab_gens)
else:
stab = self.stabilizer(0)
orbits = stab.orbits()
for orb in orbits:
x = orb.pop()
if x != 0:
block = self.minimal_block([0, x])
num_block, m = _number_blocks(block)
# a representative block (containing 0)
rep = {j for j in range(self.degree) if num_block[j] == 0}
# check if the system is minimal with
# respect to the already discovered ones
minimal = True
blocks_remove_mask = [False] * len(blocks)
for i, r in enumerate(rep_blocks):
if len(r) > len(rep) and rep.issubset(r):
# i-th block system is not minimal
blocks_remove_mask[i] = True
elif len(r) < len(rep) and r.issubset(rep):
# the system being checked is not minimal
minimal = False
break
# remove non-minimal representative blocks
blocks = [b for i, b in enumerate(blocks) if not blocks_remove_mask[i]]
num_blocks = [n for i, n in enumerate(num_blocks) if not blocks_remove_mask[i]]
rep_blocks = [r for i, r in enumerate(rep_blocks) if not blocks_remove_mask[i]]
if minimal and num_block not in num_blocks:
blocks.append(block)
num_blocks.append(num_block)
rep_blocks.append(rep)
return blocks
@property
def is_solvable(self):
"""Test if the group is solvable.
``G`` is solvable if its derived series terminates with the trivial
group ([1], p.29).
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(3)
>>> S.is_solvable
True
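The symmetric group ``S_5``, on the other hand, is not solvable:
>>> SymmetricGroup(5).is_solvable
False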
See Also
========
is_nilpotent, derived_series
"""
if self._is_solvable is None:
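# every group of odd order is solvable (Feit-Thompson), so only
# groups of even order need the derived series computed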
if self.order() % 2 != 0:
return True
ds = self.derived_series()
terminator = ds[len(ds) - 1]
gens = terminator.generators
degree = self.degree
identity = _af_new(list(range(degree)))
if all(g == identity for g in gens):
self._is_solvable = True
return True
else:
self._is_solvable = False
return False
else:
return self._is_solvable
def is_subgroup(self, G, strict=True):
"""Return ``True`` if all elements of ``self`` belong to ``G``.
If ``strict`` is ``False`` then if ``self``'s degree is smaller
than ``G``'s, the elements will be resized to have the same degree.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... CyclicGroup)
Testing is strict by default: the degree of each group must be the
same:
>>> p = Permutation(0, 1, 2, 3, 4, 5)
>>> G1 = PermutationGroup([Permutation(0, 1, 2), Permutation(0, 1)])
>>> G2 = PermutationGroup([Permutation(0, 2), Permutation(0, 1, 2)])
>>> G3 = PermutationGroup([p, p**2])
>>> assert G1.order() == G2.order() == G3.order() == 6
>>> G1.is_subgroup(G2)
True
>>> G1.is_subgroup(G3)
False
>>> G3.is_subgroup(PermutationGroup(G3[1]))
False
>>> G3.is_subgroup(PermutationGroup(G3[0]))
True
To ignore the size, set ``strict`` to ``False``:
>>> S3 = SymmetricGroup(3)
>>> S5 = SymmetricGroup(5)
>>> S3.is_subgroup(S5, strict=False)
True
>>> C7 = CyclicGroup(7)
>>> G = S5*C7
>>> S5.is_subgroup(G, False)
True
>>> C7.is_subgroup(G, 0)
False
"""
if isinstance(G, SymmetricPermutationGroup):
if self.degree != G.degree:
return False
return True
if not isinstance(G, PermutationGroup):
return False
if self == G or self.generators[0]==Permutation():
return True
if G.order() % self.order() != 0:
return False
if self.degree == G.degree or \
(self.degree < G.degree and not strict):
gens = self.generators
else:
return False
return all(G.contains(g, strict=strict) for g in gens)
@property
def is_polycyclic(self):
"""Return ``True`` if a group is polycyclic. A group is polycyclic if
it has a subnormal series with cyclic factors. For finite groups,
this is the same as if the group is solvable.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([2, 0, 1, 3])
>>> G = PermutationGroup([a, b])
>>> G.is_polycyclic
True
"""
return self.is_solvable
def is_transitive(self, strict=True):
"""Test if the group is transitive.
Explanation
===========
A group is transitive if it has a single orbit.
If ``strict`` is ``False`` the group is transitive if it has
a single orbit of length different from 1.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([2, 0, 1, 3])
>>> G1 = PermutationGroup([a, b])
>>> G1.is_transitive()
False
>>> G1.is_transitive(strict=False)
True
>>> c = Permutation([2, 3, 0, 1])
>>> G2 = PermutationGroup([a, c])
>>> G2.is_transitive()
True
>>> d = Permutation([1, 0, 2, 3])
>>> e = Permutation([0, 1, 3, 2])
>>> G3 = PermutationGroup([d, e])
>>> G3.is_transitive() or G3.is_transitive(strict=False)
False
"""
if self._is_transitive: # strict or not, if True then True
return self._is_transitive
if strict:
if self._is_transitive is not None: # we only store strict=True
return self._is_transitive
ans = len(self.orbit(0)) == self.degree
self._is_transitive = ans
return ans
got_orb = False
for x in self.orbits():
if len(x) > 1:
if got_orb:
return False
got_orb = True
return got_orb
@property
def is_trivial(self):
"""Test if the group is the trivial group.
This is true if the group contains only the identity permutation.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> G = PermutationGroup([Permutation([0, 1, 2])])
>>> G.is_trivial
True
"""
if self._is_trivial is None:
self._is_trivial = len(self) == 1 and self[0].is_Identity
return self._is_trivial
def lower_central_series(self):
r"""Return the lower central series for the group.
The lower central series for a group `G` is the series
`G = G_0 > G_1 > G_2 > \ldots` where
`G_k = [G, G_{k-1}]`, i.e. every term after the first is equal to the
commutator of `G` and the previous term `G_{k-1}` ([1], p.29).
Returns
=======
A list of permutation groups in the order `G = G_0, G_1, G_2, \ldots`
Examples
========
>>> from sympy.combinatorics.named_groups import (AlternatingGroup,
... DihedralGroup)
>>> A = AlternatingGroup(4)
>>> len(A.lower_central_series())
2
>>> A.lower_central_series()[1].is_subgroup(DihedralGroup(2))
True
See Also
========
commutator, derived_series
"""
res = [self]
current = self
next = self.commutator(self, current)
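# keep forming commutators [G, G_{k-1}] until the series stabilizes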
while not current.is_subgroup(next):
res.append(next)
current = next
next = self.commutator(self, current)
return res
@property
def max_div(self):
"""Maximum proper divisor of the degree of a permutation group.
Explanation
===========
Obviously, this is the degree divided by its minimal proper divisor
(larger than ``1``, if one exists). As it is guaranteed to be prime,
the ``sieve`` from ``sympy.ntheory`` is used.
This function is also used as an optimization tool for the functions
``minimal_block`` and ``_union_find_merge``.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> G = PermutationGroup([Permutation([0, 2, 1, 3])])
>>> G.max_div
2
See Also
========
minimal_block, _union_find_merge
"""
if self._max_div is not None:
return self._max_div
n = self.degree
if n == 1:
return 1
for x in sieve:
if n % x == 0:
d = n//x
self._max_div = d
return d
def minimal_block(self, points):
r"""For a transitive group, finds the block system generated by
``points``.
Explanation
===========
If a group ``G`` acts on a set ``S``, a nonempty subset ``B`` of ``S``
is called a block under the action of ``G`` if for all ``g`` in ``G``
we have ``gB = B`` (``g`` fixes ``B``) or ``gB`` and ``B`` have no
common points (``g`` moves ``B`` entirely). ([1], p.23; [6]).
The distinct translates ``gB`` of a block ``B`` for ``g`` in ``G``
partition the set ``S`` and this set of translates is known as a block
system. Moreover, we obviously have that all blocks in the partition
have the same size, hence the block size divides ``|S|`` ([1], p.23).
A ``G``-congruence is an equivalence relation ``~`` on the set ``S``
such that ``a ~ b`` implies ``g(a) ~ g(b)`` for all ``g`` in ``G``.
For a transitive group, the equivalence classes of a ``G``-congruence
and the blocks of a block system are the same thing ([1], p.23).
The algorithm below checks the group for transitivity, and then finds
the ``G``-congruence generated by the pairs ``(p_0, p_1), (p_0, p_2),
..., (p_0,p_{k-1})`` which is the same as finding the maximal block
system (i.e., the one with minimum block size) such that
``p_0, ..., p_{k-1}`` are in the same block ([1], p.83).
It is an implementation of Atkinson's algorithm, as suggested in [1],
and manipulates an equivalence relation on the set ``S`` using a
union-find data structure. The running time is just above
`O(|points||S|)`. ([1], pp. 83-87; [7]).
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(10)
>>> D.minimal_block([0, 5])
[0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
>>> D.minimal_block([0, 1])
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
See Also
========
_union_find_rep, _union_find_merge, is_transitive, is_primitive
"""
if not self.is_transitive():
return False
n = self.degree
gens = self.generators
# initialize the list of equivalence class representatives
parents = list(range(n))
ranks = [1]*n
not_rep = []
k = len(points)
# the block size must divide the degree of the group
if k > self.max_div:
return [0]*n
for i in range(k - 1):
parents[points[i + 1]] = points[0]
not_rep.append(points[i + 1])
ranks[points[0]] = k
i = 0
len_not_rep = k - 1
while i < len_not_rep:
gamma = not_rep[i]
i += 1
for gen in gens:
# find has side effects: performs path compression on the list
# of representatives
delta = self._union_find_rep(gamma, parents)
# union has side effects: performs union by rank on the list
# of representatives
temp = self._union_find_merge(gen(gamma), gen(delta), ranks,
parents, not_rep)
if temp == -1:
return [0]*n
len_not_rep += temp
for i in range(n):
# force path compression to get the final state of the equivalence
# relation
self._union_find_rep(i, parents)
# rewrite result so that block representatives are minimal
new_reps = {}
return [new_reps.setdefault(r, i) for i, r in enumerate(parents)]
def conjugacy_class(self, x):
r"""Return the conjugacy class of an element in the group.
Explanation
===========
The conjugacy class of an element ``g`` in a group ``G`` is the set of
elements ``x`` in ``G`` that are conjugate with ``g``, i.e. for which
``g = xax^{-1}``
for some ``a`` in ``G``.
Note that conjugacy is an equivalence relation, and therefore that
conjugacy classes are partitions of ``G``. For a list of all the
conjugacy classes of the group, use the conjugacy_classes() method.
In a permutation group, each conjugacy class corresponds to a particular
cycle structure: for example, in ``S_3``, the conjugacy classes are:
* the identity class, ``{()}``
* all transpositions, ``{(1 2), (1 3), (2 3)}``
* all 3-cycles, ``{(1 2 3), (1 3 2)}``
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S3 = SymmetricGroup(3)
>>> S3.conjugacy_class(Permutation(0, 1, 2))
{(0 1 2), (0 2 1)}
Notes
=====
This procedure computes the conjugacy class directly by finding the
orbit of the element under conjugation in G. This algorithm is only
feasible for permutation groups of relatively small order, but is like
the orbit() function itself in that respect.
"""
# Ref: "Computing the conjugacy classes of finite groups"; Butler, G.
# Groups '93 Galway/St Andrews; edited by Campbell, C. M.
new_class = {x}
last_iteration = new_class
while len(last_iteration) > 0:
this_iteration = set()
for y in last_iteration:
for s in self.generators:
conjugated = s * y * (~s)
if conjugated not in new_class:
this_iteration.add(conjugated)
new_class.update(last_iteration)
last_iteration = this_iteration
return new_class
def conjugacy_classes(self):
r"""Return the conjugacy classes of the group.
Explanation
===========
As described in the documentation for the .conjugacy_class() function,
conjugacy is an equivalence relation on a group G which partitions the
set of elements. This method returns a list of all these conjugacy
classes of G.
Examples
========
>>> from sympy.combinatorics import SymmetricGroup
>>> SymmetricGroup(3).conjugacy_classes()
[{(2)}, {(0 1 2), (0 2 1)}, {(0 2), (1 2), (2)(0 1)}]
"""
identity = _af_new(list(range(self.degree)))
known_elements = {identity}
classes = [known_elements.copy()]
for x in self.generate():
if x not in known_elements:
new_class = self.conjugacy_class(x)
classes.append(new_class)
known_elements.update(new_class)
return classes
def normal_closure(self, other, k=10):
r"""Return the normal closure of a subgroup/set of permutations.
Explanation
===========
If ``S`` is a subset of a group ``G``, the normal closure of ``S`` in ``G``
is defined as the intersection of all normal subgroups of ``G`` that
contain ``S`` ([1], p.14). Alternatively, it is the group generated by
the conjugates ``x^{-1}yx`` for ``x`` a generator of ``G`` and ``y`` a
generator of the subgroup ``\left\langle S\right\rangle`` generated by
``S`` (for some chosen generating set for ``\left\langle S\right\rangle``)
([1], p.73).
Parameters
==========
other
a subgroup/list of permutations/single permutation
k
an implementation-specific parameter that determines the number
of conjugates that are adjoined to ``other`` at once
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... CyclicGroup, AlternatingGroup)
>>> S = SymmetricGroup(5)
>>> C = CyclicGroup(5)
>>> G = S.normal_closure(C)
>>> G.order()
60
>>> G.is_subgroup(AlternatingGroup(5))
True
See Also
========
commutator, derived_subgroup, random_pr
Notes
=====
The algorithm is described in [1], pp. 73-74; it makes use of the
generation of random elements for permutation groups by the product
replacement algorithm.
"""
if hasattr(other, 'generators'):
degree = self.degree
identity = _af_new(list(range(degree)))
if all(g == identity for g in other.generators):
return other
Z = PermutationGroup(other.generators[:])
base, strong_gens = Z.schreier_sims_incremental()
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_orbits, basic_transversals = \
_orbits_transversals_from_bsgs(base, strong_gens_distr)
self._random_pr_init(r=10, n=20)
_loop = True
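# adjoin random conjugates to Z until every conjugate of a generator
# of Z by a generator of self sifts through Z's stabilizer chain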
while _loop:
Z._random_pr_init(r=10, n=10)
for i in range(k):
g = self.random_pr()
h = Z.random_pr()
conj = h^g
res = _strip(conj, base, basic_orbits, basic_transversals)
if res[0] != identity or res[1] != len(base) + 1:
gens = Z.generators
gens.append(conj)
Z = PermutationGroup(gens)
strong_gens.append(conj)
temp_base, temp_strong_gens = \
Z.schreier_sims_incremental(base, strong_gens)
base, strong_gens = temp_base, temp_strong_gens
strong_gens_distr = \
_distribute_gens_by_base(base, strong_gens)
basic_orbits, basic_transversals = \
_orbits_transversals_from_bsgs(base,
strong_gens_distr)
_loop = False
for g in self.generators:
for h in Z.generators:
conj = h^g
res = _strip(conj, base, basic_orbits,
basic_transversals)
if res[0] != identity or res[1] != len(base) + 1:
_loop = True
break
if _loop:
break
return Z
elif hasattr(other, '__getitem__'):
return self.normal_closure(PermutationGroup(other))
elif hasattr(other, 'array_form'):
return self.normal_closure(PermutationGroup([other]))
def orbit(self, alpha, action='tuples'):
r"""Compute the orbit of alpha `\{g(\alpha) | g \in G\}` as a set.
Explanation
===========
The time complexity of the algorithm used here is `O(|Orb|*r)` where
`|Orb|` is the size of the orbit and ``r`` is the number of generators of
the group. For a more detailed analysis, see [1], p.78, [2], pp. 19-21.
Here alpha can be a single point, or a list of points.
If alpha is a single point, the ordinary orbit is computed.
if alpha is a list of points, there are three available options:
'union' - computes the union of the orbits of the points in the list
'tuples' - computes the orbit of the list interpreted as an ordered
tuple under the group action ( i.e., g((1,2,3)) = (g(1), g(2), g(3)) )
'sets' - computes the orbit of the list interpreted as a set
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([1, 2, 0, 4, 5, 6, 3])
>>> G = PermutationGroup([a])
>>> G.orbit(0)
{0, 1, 2}
>>> G.orbit([0, 4], 'union')
{0, 1, 2, 3, 4, 5, 6}
See Also
========
orbit_transversal
"""
return _orbit(self.degree, self.generators, alpha, action)
def orbit_rep(self, alpha, beta, schreier_vector=None):
"""Return a group element which sends ``alpha`` to ``beta``.
Explanation
===========
If ``beta`` is not in the orbit of ``alpha``, the function returns
``False``. This implementation makes use of the schreier vector.
For a proof of correctness, see [1], p.80
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> G = AlternatingGroup(5)
>>> G.orbit_rep(0, 4)
(0 4 1 2 3)
See Also
========
schreier_vector
"""
if schreier_vector is None:
schreier_vector = self.schreier_vector(alpha)
if schreier_vector[beta] is None:
return False
k = schreier_vector[beta]
gens = [x._array_form for x in self.generators]
a = []
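# walk back along the schreier vector from beta towards alpha,
# collecting the generators used; their product maps alpha to beta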
while k != -1:
a.append(gens[k])
beta = gens[k].index(beta) # beta = (~gens[k])(beta)
k = schreier_vector[beta]
if a:
return _af_new(_af_rmuln(*a))
else:
return _af_new(list(range(self._degree)))
def orbit_transversal(self, alpha, pairs=False):
r"""Computes a transversal for the orbit of ``alpha`` as a set.
Explanation
===========
For a permutation group `G`, a transversal for the orbit
`Orb = \{g(\alpha) | g \in G\}` is a set
`\{g_\beta | g_\beta(\alpha) = \beta\}` for `\beta \in Orb`.
Note that there may be more than one possible transversal.
If ``pairs`` is set to ``True``, it returns the list of pairs
`(\beta, g_\beta)`. For a proof of correctness, see [1], p.79
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> G = DihedralGroup(6)
>>> G.orbit_transversal(0)
[(5), (0 1 2 3 4 5), (0 5)(1 4)(2 3), (0 2 4)(1 3 5), (5)(0 4)(1 3), (0 3)(1 4)(2 5)]
See Also
========
orbit
"""
return _orbit_transversal(self._degree, self.generators, alpha, pairs)
def orbits(self, rep=False):
"""Return the orbits of ``self``, ordered according to lowest element
in each orbit.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation(1, 5)(2, 3)(4, 0, 6)
>>> b = Permutation(1, 5)(3, 4)(2, 6, 0)
>>> G = PermutationGroup([a, b])
>>> G.orbits()
[{0, 2, 3, 4, 6}, {1, 5}]
"""
return _orbits(self._degree, self._generators)
def order(self):
"""Return the order of the group: the number of permutations that
can be generated from elements of the group.
Note that the order is not the same as ``len(group)``, which returns
the number of generators (see the example below); the number of points
on which the permutations act is given by ``group.degree``.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([1, 0, 2])
>>> G = PermutationGroup([a])
>>> G.degree
3
>>> len(G)
1
>>> G.order()
2
>>> list(G.generate())
[(2), (2)(0 1)]
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.order()
6
See Also
========
degree
"""
if self._order is not None:
return self._order
if self._is_sym:
n = self._degree
self._order = factorial(n)
return self._order
if self._is_alt:
n = self._degree
self._order = factorial(n)/2
return self._order
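# by the orbit-stabilizer theorem the group order is the product of
# the sizes of the basic transversals (the basic orbit lengths)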
basic_transversals = self.basic_transversals
m = 1
for x in basic_transversals:
m *= len(x)
self._order = m
return m
def index(self, H):
"""
Returns the index of a permutation group.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation(1,2,3)
>>> b = Permutation(3)
>>> G = PermutationGroup([a])
>>> H = PermutationGroup([b])
>>> G.index(H)
3
"""
if H.is_subgroup(self):
return self.order()//H.order()
@property
def is_symmetric(self):
"""Return ``True`` if the group is symmetric.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> g = SymmetricGroup(5)
>>> g.is_symmetric
True
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> g = PermutationGroup(
... Permutation(0, 1, 2, 3, 4),
... Permutation(2, 3))
>>> g.is_symmetric
True
Notes
=====
This uses a naive test involving the computation of the full
group order.
If you need a quicker taxonomy for large groups, you can use
:meth:`PermutationGroup.is_alt_sym`.
However, :meth:`PermutationGroup.is_alt_sym` may not be accurate
and is not able to distinguish between an alternating group and
a symmetric group.
See Also
========
is_alt_sym
"""
_is_sym = self._is_sym
if _is_sym is not None:
return _is_sym
n = self.degree
if n >= 8:
if self.is_transitive():
_is_alt_sym = self._eval_is_alt_sym_monte_carlo()
if _is_alt_sym:
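# the group is known to be alternating or symmetric; it is the full
# symmetric group iff some generator is an odd permutation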
if any(g.is_odd for g in self.generators):
self._is_sym, self._is_alt = True, False
return True
self._is_sym, self._is_alt = False, True
return False
return self._eval_is_alt_sym_naive(only_sym=True)
self._is_sym, self._is_alt = False, False
return False
return self._eval_is_alt_sym_naive(only_sym=True)
@property
def is_alternating(self):
"""Return ``True`` if the group is alternating.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> g = AlternatingGroup(5)
>>> g.is_alternating
True
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> g = PermutationGroup(
... Permutation(0, 1, 2, 3, 4),
... Permutation(2, 3, 4))
>>> g.is_alternating
True
Notes
=====
This uses a naive test involving the computation of the full
group order.
If you need a quicker taxonomy for large groups, you can use
:meth:`PermutationGroup.is_alt_sym`.
However, :meth:`PermutationGroup.is_alt_sym` may not be accurate
and is not able to distinguish between an alternating group and
a symmetric group.
See Also
========
is_alt_sym
"""
_is_alt = self._is_alt
if _is_alt is not None:
return _is_alt
n = self.degree
if n >= 8:
if self.is_transitive():
_is_alt_sym = self._eval_is_alt_sym_monte_carlo()
if _is_alt_sym:
if all(g.is_even for g in self.generators):
self._is_sym, self._is_alt = False, True
return True
self._is_sym, self._is_alt = True, False
return False
return self._eval_is_alt_sym_naive(only_alt=True)
self._is_sym, self._is_alt = False, False
return False
return self._eval_is_alt_sym_naive(only_alt=True)
@classmethod
def _distinct_primes_lemma(cls, primes):
"""Subroutine to test if there is only one cyclic group for the
order."""
primes = sorted(primes)
l = len(primes)
for i in range(l):
for j in range(i+1, l):
if primes[j] % primes[i] == 1:
return None
return True
@property
def is_cyclic(self):
r"""
Return ``True`` if the group is Cyclic.
Examples
========
>>> from sympy.combinatorics.named_groups import AbelianGroup
>>> G = AbelianGroup(3, 4)
>>> G.is_cyclic
True
>>> G = AbelianGroup(4, 4)
>>> G.is_cyclic
False
Notes
=====
If the order $n$ of a group can be factored into the distinct
primes $p_1, p_2, \ldots, p_s$ and if
.. math::
\forall i, j \in \{1, 2, \ldots, s \}:
p_i \not \equiv 1 \pmod {p_j}
holds true, there is only one group of order $n$, and it
is a cyclic group. [1]_ This is a generalization of the lemma
that the groups of order $15, 35, \ldots$ are cyclic.
And also, these additional lemmas can be used to test if a
group is cyclic if the order of the group is already found.
- If the group is abelian and the order of the group is
square-free, the group is cyclic.
- If the order of the group is less than $6$ and is not $4$, the
group is cyclic.
- If the order of the group is prime, the group is cyclic.
References
==========
.. [1] 1978: John S. Rose: A Course on Group Theory,
Introduction to Finite Group Theory: 1.4
"""
if self._is_cyclic is not None:
return self._is_cyclic
if len(self.generators) == 1:
self._is_cyclic = True
self._is_abelian = True
return True
if self._is_abelian is False:
self._is_cyclic = False
return False
order = self.order()
if order < 6:
self._is_abelian = True
if order != 4:
self._is_cyclic = True
return True
factors = factorint(order)
if all(v == 1 for v in factors.values()):
if self._is_abelian:
self._is_cyclic = True
return True
primes = list(factors.keys())
if PermutationGroup._distinct_primes_lemma(primes) is True:
self._is_cyclic = True
self._is_abelian = True
return True
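# for each prime p dividing the order, check that the subgroup
# generated by the p-th powers of the generators has index exactly p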
for p in factors:
pgens = []
for g in self.generators:
pgens.append(g**p)
if self.index(self.subgroup(pgens)) != p:
self._is_cyclic = False
return False
self._is_cyclic = True
self._is_abelian = True
return True
def pointwise_stabilizer(self, points, incremental=True):
r"""Return the pointwise stabilizer for a set of points.
Explanation
===========
For a permutation group `G` and a set of points
`\{p_1, p_2,\ldots, p_k\}`, the pointwise stabilizer of
`p_1, p_2, \ldots, p_k` is defined as
`G_{p_1,\ldots, p_k} =
\{g\in G | g(p_i) = p_i \forall i\in\{1, 2,\ldots,k\}\}` ([1],p20).
It is a subgroup of `G`.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(7)
>>> Stab = S.pointwise_stabilizer([2, 3, 5])
>>> Stab.is_subgroup(S.stabilizer(2).stabilizer(3).stabilizer(5))
True
See Also
========
stabilizer, schreier_sims_incremental
Notes
=====
When ``incremental == True``,
rather than the obvious implementation using successive calls to
``.stabilizer()``, this uses the incremental Schreier-Sims algorithm
to obtain a base whose starting segment is the given points.
"""
if incremental:
base, strong_gens = self.schreier_sims_incremental(base=points)
stab_gens = []
degree = self.degree
for gen in strong_gens:
if [gen(point) for point in points] == points:
stab_gens.append(gen)
if not stab_gens:
stab_gens = _af_new(list(range(degree)))
return PermutationGroup(stab_gens)
else:
gens = self._generators
degree = self.degree
for x in points:
gens = _stabilizer(degree, gens, x)
return PermutationGroup(gens)
def make_perm(self, n, seed=None):
"""
Multiply ``n`` randomly selected permutations from
pgroup together, starting with the identity
permutation. If ``n`` is a list of integers, those
integers will be used to select the permutations and they
will be applied in L to R order: make_perm((A, B, C)) will
give CBA(I) where I is the identity permutation.
``seed`` is used to set the seed for the random selection
of permutations from pgroup. If this is a list of integers,
the corresponding permutations from pgroup will be selected
in the order given. This is mainly used for testing purposes.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a, b = [Permutation([1, 0, 3, 2]), Permutation([1, 3, 0, 2])]
>>> G = PermutationGroup([a, b])
>>> G.make_perm(1, [0])
(0 1)(2 3)
>>> G.make_perm(3, [0, 1, 0])
(0 2 3 1)
>>> G.make_perm([0, 1, 0])
(0 2 3 1)
See Also
========
random
"""
if is_sequence(n):
if seed is not None:
raise ValueError('If n is a sequence, seed should be None')
n, seed = len(n), n
else:
try:
n = int(n)
except TypeError:
raise ValueError('n must be an integer or a sequence.')
randrange = _randrange(seed)
# start with the identity permutation
result = Permutation(list(range(self.degree)))
m = len(self)
for i in range(n):
p = self[randrange(m)]
result = rmul(result, p)
return result
def random(self, af=False):
"""Return a random group element
"""
rank = randrange(self.order())
return self.coset_unrank(rank, af)
def random_pr(self, gen_count=11, iterations=50, _random_prec=None):
"""Return a random group element using product replacement.
Explanation
===========
For the details of the product replacement algorithm, see
``_random_pr_init``. In ``random_pr`` the actual 'product replacement'
is performed. Notice that if the attribute ``_random_gens``
is empty, it needs to be initialized by ``_random_pr_init``.
See Also
========
_random_pr_init
"""
if self._random_gens == []:
self._random_pr_init(gen_count, iterations)
random_gens = self._random_gens
r = len(random_gens) - 1
# handle randomized input for testing purposes
if _random_prec is None:
s = randrange(r)
t = randrange(r - 1)
if t == s:
t = r - 1
x = choice([1, 2])
e = choice([-1, 1])
else:
s = _random_prec['s']
t = _random_prec['t']
if t == s:
t = r - 1
x = _random_prec['x']
e = _random_prec['e']
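# product replacement step: multiply slot s by slot t raised to e
# (on one side or the other), then fold slot s into the accumulator
# slot r, which is returned as the random element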
if x == 1:
random_gens[s] = _af_rmul(random_gens[s], _af_pow(random_gens[t], e))
random_gens[r] = _af_rmul(random_gens[r], random_gens[s])
else:
random_gens[s] = _af_rmul(_af_pow(random_gens[t], e), random_gens[s])
random_gens[r] = _af_rmul(random_gens[s], random_gens[r])
return _af_new(random_gens[r])
def random_stab(self, alpha, schreier_vector=None, _random_prec=None):
"""Random element from the stabilizer of ``alpha``.
The schreier vector for ``alpha`` is an optional argument used
for speeding up repeated calls. The algorithm is described in [1], p.81
See Also
========
random_pr, orbit_rep
"""
if schreier_vector is None:
schreier_vector = self.schreier_vector(alpha)
if _random_prec is None:
rand = self.random_pr()
else:
rand = _random_prec['rand']
beta = rand(alpha)
h = self.orbit_rep(alpha, beta, schreier_vector)
return rmul(~h, rand)
def schreier_sims(self):
"""Schreier-Sims algorithm.
Explanation
===========
It computes the generators of the chain of stabilizers
`G > G_{b_1} > .. > G_{b_1,..,b_r} > 1`
in which `G_{b_1,..,b_i}` stabilizes `b_1,..,b_i`,
and the corresponding ``s`` cosets.
An element of the group can be written as the product
`h_1*..*h_s`.
We use the incremental Schreier-Sims algorithm.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.schreier_sims()
>>> G.basic_transversals
[{0: (2)(0 1), 1: (2), 2: (1 2)},
{0: (2), 2: (0 2)}]
"""
if self._transversals:
return
self._schreier_sims()
return
def _schreier_sims(self, base=None):
schreier = self.schreier_sims_incremental(base=base, slp_dict=True)
base, strong_gens = schreier[:2]
self._base = base
self._strong_gens = strong_gens
self._strong_gens_slp = schreier[2]
if not base:
self._transversals = []
self._basic_orbits = []
return
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_orbits, transversals, slps = _orbits_transversals_from_bsgs(base,\
strong_gens_distr, slp=True)
# rewrite the indices stored in slps in terms of strong_gens
for i, slp in enumerate(slps):
gens = strong_gens_distr[i]
for k in slp:
slp[k] = [strong_gens.index(gens[s]) for s in slp[k]]
self._transversals = transversals
self._basic_orbits = [sorted(x) for x in basic_orbits]
self._transversal_slp = slps
def schreier_sims_incremental(self, base=None, gens=None, slp_dict=False):
"""Extend a sequence of points and generating set to a base and strong
generating set.
Parameters
==========
base
The sequence of points to be extended to a base. Optional
parameter with default value ``[]``.
gens
The generating set to be extended to a strong generating set
relative to the base obtained. Optional parameter with default
value ``self.generators``.
slp_dict
If `True`, return a dictionary `{g: gens}` for each strong
generator `g` where `gens` is a list of strong generators
coming before `g` in `strong_gens`, such that the product
of the elements of `gens` is equal to `g`.
Returns
=======
(base, strong_gens)
``base`` is the base obtained, and ``strong_gens`` is the strong
generating set relative to it. The original parameters ``base``,
``gens`` remain unchanged.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> A = AlternatingGroup(7)
>>> base = [2, 3]
>>> seq = [2, 3]
>>> base, strong_gens = A.schreier_sims_incremental(base=seq)
>>> _verify_bsgs(A, base, strong_gens)
True
>>> base[:2]
[2, 3]
Notes
=====
This version of the Schreier-Sims algorithm runs in polynomial time.
There are certain assumptions in the implementation - if the trivial
group is provided, ``base`` and ``gens`` are returned immediately,
as any sequence of points is a base for the trivial group. If the
identity is present in the generators ``gens``, it is removed as
it is a redundant generator.
The implementation is described in [1], pp. 90-93.
See Also
========
schreier_sims, schreier_sims_random
"""
if base is None:
base = []
if gens is None:
gens = self.generators[:]
degree = self.degree
id_af = list(range(degree))
# handle the trivial group
if len(gens) == 1 and gens[0].is_Identity:
if slp_dict:
return base, gens, {gens[0]: [gens[0]]}
return base, gens
# prevent side effects
_base, _gens = base[:], gens[:]
# remove the identity as a generator
_gens = [x for x in _gens if not x.is_Identity]
# make sure no generator fixes all base points
for gen in _gens:
if all(x == gen._array_form[x] for x in _base):
for new in id_af:
if gen._array_form[new] != new:
break
else:
assert None # can this ever happen?
_base.append(new)
# distribute generators according to basic stabilizers
strong_gens_distr = _distribute_gens_by_base(_base, _gens)
strong_gens_slp = []
# initialize the basic stabilizers, basic orbits and basic transversals
orbs = {}
transversals = {}
slps = {}
base_len = len(_base)
for i in range(base_len):
transversals[i], slps[i] = _orbit_transversal(degree, strong_gens_distr[i],
_base[i], pairs=True, af=True, slp=True)
transversals[i] = dict(transversals[i])
orbs[i] = list(transversals[i].keys())
# main loop: amend the stabilizer chain until we have generators
# for all stabilizers
i = base_len - 1
while i >= 0:
# this flag is used to continue with the main loop from inside
# a nested loop
continue_i = False
# test the generators for being a strong generating set
db = {}
for beta, u_beta in list(transversals[i].items()):
for j, gen in enumerate(strong_gens_distr[i]):
gb = gen._array_form[beta]
u1 = transversals[i][gb]
g1 = _af_rmul(gen._array_form, u_beta)
slp = [(i, g) for g in slps[i][beta]]
slp = [(i, j)] + slp
if g1 != u1:
# test if the schreier generator is in the i+1-th
# would-be basic stabilizer
y = True
try:
u1_inv = db[gb]
except KeyError:
u1_inv = db[gb] = _af_invert(u1)
schreier_gen = _af_rmul(u1_inv, g1)
u1_inv_slp = slps[i][gb][:]
u1_inv_slp.reverse()
u1_inv_slp = [(i, (g,)) for g in u1_inv_slp]
slp = u1_inv_slp + slp
h, j, slp = _strip_af(schreier_gen, _base, orbs, transversals, i, slp=slp, slps=slps)
if j <= base_len:
# new strong generator h at level j
y = False
elif h:
# h fixes all base points
y = False
moved = 0
while h[moved] == moved:
moved += 1
_base.append(moved)
base_len += 1
strong_gens_distr.append([])
if y is False:
# if a new strong generator is found, update the
# data structures and start over
h = _af_new(h)
strong_gens_slp.append((h, slp))
for l in range(i + 1, j):
strong_gens_distr[l].append(h)
transversals[l], slps[l] =\
_orbit_transversal(degree, strong_gens_distr[l],
_base[l], pairs=True, af=True, slp=True)
transversals[l] = dict(transversals[l])
orbs[l] = list(transversals[l].keys())
i = j - 1
# continue main loop using the flag
continue_i = True
if continue_i is True:
break
if continue_i is True:
break
if continue_i is True:
continue
i -= 1
strong_gens = _gens[:]
if slp_dict:
# create the list of the strong generators strong_gens and
# rewrite the indices of strong_gens_slp in terms of the
# elements of strong_gens
for k, slp in strong_gens_slp:
strong_gens.append(k)
for i in range(len(slp)):
s = slp[i]
if isinstance(s[1], tuple):
slp[i] = strong_gens_distr[s[0]][s[1][0]]**-1
else:
slp[i] = strong_gens_distr[s[0]][s[1]]
strong_gens_slp = dict(strong_gens_slp)
# add the original generators
for g in _gens:
strong_gens_slp[g] = [g]
return (_base, strong_gens, strong_gens_slp)
strong_gens.extend([k for k, _ in strong_gens_slp])
return _base, strong_gens
def schreier_sims_random(self, base=None, gens=None, consec_succ=10,
_random_prec=None):
r"""Randomized Schreier-Sims algorithm.
Explanation
===========
The randomized Schreier-Sims algorithm takes the sequence ``base``
and the generating set ``gens``, and extends ``base`` to a base, and
``gens`` to a strong generating set relative to that base with
probability of a wrong answer at most `2^{-consec\_succ}`,
provided the random generators are sufficiently random.
Parameters
==========
base
The sequence to be extended to a base.
gens
The generating set to be extended to a strong generating set.
consec_succ
The parameter defining the probability of a wrong answer.
_random_prec
An internal parameter used for testing purposes.
Returns
=======
(base, strong_gens)
``base`` is the base and ``strong_gens`` is the strong generating
set relative to it.
Examples
========
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(5)
>>> base, strong_gens = S.schreier_sims_random(consec_succ=5)
>>> _verify_bsgs(S, base, strong_gens) #doctest: +SKIP
True
Notes
=====
The algorithm is described in detail in [1], pp. 97-98. It extends
the orbits ``orbs`` and the permutation groups ``stabs`` to
basic orbits and basic stabilizers for the base and strong generating
set produced in the end.
The idea of the extension process
is to "sift" random group elements through the stabilizer chain
and amend the stabilizers/orbits along the way when a sift
is not successful.
The helper function ``_strip`` is used to attempt
to decompose a random group element according to the current
state of the stabilizer chain and report whether the element was
fully decomposed (successful sift) or not (unsuccessful sift). In
the latter case, the level at which the sift failed is reported and
used to amend ``stabs``, ``base``, ``gens`` and ``orbs`` accordingly.
The halting condition is for ``consec_succ`` consecutive successful
sifts to pass. This makes sure that the current ``base`` and ``gens``
form a BSGS with probability at least `1 - 2^{-\text{consec\_succ}}`.
See Also
========
schreier_sims
"""
if base is None:
base = []
if gens is None:
gens = self.generators
base_len = len(base)
n = self.degree
# make sure no generator fixes all base points
for gen in gens:
if all(gen(x) == x for x in base):
new = 0
while gen._array_form[new] == new:
new += 1
base.append(new)
base_len += 1
# distribute generators according to basic stabilizers
strong_gens_distr = _distribute_gens_by_base(base, gens)
# initialize the basic stabilizers, basic transversals and basic orbits
transversals = {}
orbs = {}
for i in range(base_len):
transversals[i] = dict(_orbit_transversal(n, strong_gens_distr[i],
base[i], pairs=True))
orbs[i] = list(transversals[i].keys())
# initialize the number of consecutive elements sifted
c = 0
# start sifting random elements while the number of consecutive sifts
# is less than consec_succ
while c < consec_succ:
if _random_prec is None:
g = self.random_pr()
else:
g = _random_prec['g'].pop()
h, j = _strip(g, base, orbs, transversals)
y = True
# determine whether a new base point is needed
if j <= base_len:
y = False
elif not h.is_Identity:
y = False
moved = 0
while h(moved) == moved:
moved += 1
base.append(moved)
base_len += 1
strong_gens_distr.append([])
# if the element doesn't sift, amend the strong generators and
# associated stabilizers and orbits
if y is False:
for l in range(1, j):
strong_gens_distr[l].append(h)
transversals[l] = dict(_orbit_transversal(n,
strong_gens_distr[l], base[l], pairs=True))
orbs[l] = list(transversals[l].keys())
c = 0
else:
c += 1
# build the strong generating set
strong_gens = strong_gens_distr[0][:]
for gen in strong_gens_distr[1]:
if gen not in strong_gens:
strong_gens.append(gen)
return base, strong_gens
def schreier_vector(self, alpha):
"""Computes the schreier vector for ``alpha``.
Explanation
===========
The Schreier vector efficiently stores information
about the orbit of ``alpha``. It can later be used to quickly obtain
elements of the group that send ``alpha`` to a particular element
in the orbit. Notice that the Schreier vector depends on the order
in which the group generators are listed. For a definition, see [3].
Since list indices start from zero, we adopt the convention to use
"None" instead of 0 to signify that an element doesn't belong
to the orbit.
For the algorithm and its correctness, see [2], pp.78-80.
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> a = Permutation([2, 4, 6, 3, 1, 5, 0])
>>> b = Permutation([0, 1, 3, 5, 4, 6, 2])
>>> G = PermutationGroup([a, b])
>>> G.schreier_vector(0)
[-1, None, 0, 1, None, 1, 0]
See Also
========
orbit
"""
n = self.degree
v = [None]*n
v[alpha] = -1
orb = [alpha]
used = [False]*n
used[alpha] = True
gens = self.generators
r = len(gens)
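# breadth-first traversal of the orbit of alpha; v[beta] records the
# index of the generator that was applied to reach beta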
for b in orb:
for i in range(r):
temp = gens[i]._array_form[b]
if used[temp] is False:
orb.append(temp)
used[temp] = True
v[temp] = i
return v
def stabilizer(self, alpha):
r"""Return the stabilizer subgroup of ``alpha``.
Explanation
===========
The stabilizer of `\alpha` is the group `G_\alpha =
\{g \in G | g(\alpha) = \alpha\}`.
For a proof of correctness, see [1], p.79.
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> G = DihedralGroup(6)
>>> G.stabilizer(5)
PermutationGroup([
(5)(0 4)(1 3)])
See Also
========
orbit
"""
return PermGroup(_stabilizer(self._degree, self._generators, alpha))
@property
def strong_gens(self):
r"""Return a strong generating set from the Schreier-Sims algorithm.
Explanation
===========
A generating set `S = \{g_1, g_2, ..., g_t\}` for a permutation group
`G` is a strong generating set relative to the sequence of points
(referred to as a "base") `(b_1, b_2, ..., b_k)` if, for
`1 \leq i \leq k` we have that the intersection of the pointwise
stabilizer `G^{(i+1)} := G_{b_1, b_2, ..., b_i}` with `S` generates
the pointwise stabilizer `G^{(i+1)}`. The concepts of a base and
strong generating set and their applications are discussed in depth
in [1], pp. 87-89 and [2], pp. 55-57.
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(4)
>>> D.strong_gens
[(0 1 2 3), (0 3)(1 2), (1 3)]
>>> D.base
[0, 1]
See Also
========
base, basic_transversals, basic_orbits, basic_stabilizers
"""
if self._strong_gens == []:
self.schreier_sims()
return self._strong_gens
def subgroup(self, gens):
"""
Return the subgroup generated by `gens` which is a list of
elements of the group
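        Examples
        ========
        A small illustrative check (any list of elements of the group
        may be supplied):
        >>> from sympy.combinatorics.named_groups import SymmetricGroup
        >>> S = SymmetricGroup(3)
        >>> H = S.subgroup([S.generators[0]])
        >>> H.is_subgroup(S)
        True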
"""
        if not all(g in self for g in gens):
raise ValueError("The group doesn't contain the supplied generators")
G = PermutationGroup(gens)
return G
def subgroup_search(self, prop, base=None, strong_gens=None, tests=None,
init_subgroup=None):
"""Find the subgroup of all elements satisfying the property ``prop``.
Explanation
===========
This is done by a depth-first search with respect to base images that
uses several tests to prune the search tree.
Parameters
==========
prop
The property to be used. Has to be callable on group elements
and always return ``True`` or ``False``. It is assumed that
all group elements satisfying ``prop`` indeed form a subgroup.
base
A base for the supergroup.
strong_gens
A strong generating set for the supergroup.
tests
A list of callables of length equal to the length of ``base``.
These are used to rule out group elements by partial base images,
so that ``tests[l](g)`` returns False if the element ``g`` is known
            not to satisfy ``prop`` based on where ``g`` sends the first
            ``l + 1`` base points.
init_subgroup
if a subgroup of the sought group is
known in advance, it can be passed to the function as this
parameter.
Returns
=======
res
The subgroup of all elements satisfying ``prop``. The generating
set for this group is guaranteed to be a strong generating set
relative to the base ``base``.
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup)
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> S = SymmetricGroup(7)
>>> prop_even = lambda x: x.is_even
>>> base, strong_gens = S.schreier_sims_incremental()
>>> G = S.subgroup_search(prop_even, base=base, strong_gens=strong_gens)
>>> G.is_subgroup(AlternatingGroup(7))
True
>>> _verify_bsgs(G, base, G.generators)
True
Notes
=====
This function is extremely lengthy and complicated and will require
some careful attention. The implementation is described in
[1], pp. 114-117, and the comments for the code here follow the lines
of the pseudocode in the book for clarity.
The complexity is exponential in general, since the search process by
itself visits all members of the supergroup. However, there are a lot
of tests which are used to prune the search tree, and users can define
their own tests via the ``tests`` parameter, so in practice, and for
some computations, it's not terrible.
A crucial part in the procedure is the frequent base change performed
(this is line 11 in the pseudocode) in order to obtain a new basic
        stabilizer. The book mentions that this can be done by using
``.baseswap(...)``, however the current implementation uses a more
straightforward way to find the next basic stabilizer - calling the
function ``.stabilizer(...)`` on the previous basic stabilizer.
"""
# initialize BSGS and basic group properties
def get_reps(orbits):
# get the minimal element in the base ordering
            return [min(orbit, key=lambda x: base_ordering[x])
                    for orbit in orbits]
def update_nu(l):
temp_index = len(basic_orbits[l]) + 1 -\
len(res_basic_orbits_init_base[l])
# this corresponds to the element larger than all points
if temp_index >= len(sorted_orbits[l]):
nu[l] = base_ordering[degree]
else:
nu[l] = sorted_orbits[l][temp_index]
if base is None:
base, strong_gens = self.schreier_sims_incremental()
base_len = len(base)
degree = self.degree
identity = _af_new(list(range(degree)))
base_ordering = _base_ordering(base, degree)
# add an element larger than all points
base_ordering.append(degree)
# add an element smaller than all points
base_ordering.append(-1)
# compute BSGS-related structures
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_orbits, transversals = _orbits_transversals_from_bsgs(base,
strong_gens_distr)
# handle subgroup initialization and tests
if init_subgroup is None:
init_subgroup = PermutationGroup([identity])
if tests is None:
trivial_test = lambda x: True
tests = []
for i in range(base_len):
tests.append(trivial_test)
# line 1: more initializations.
res = init_subgroup
f = base_len - 1
l = base_len - 1
# line 2: set the base for K to the base for G
res_base = base[:]
# line 3: compute BSGS and related structures for K
res_base, res_strong_gens = res.schreier_sims_incremental(
base=res_base)
res_strong_gens_distr = _distribute_gens_by_base(res_base,
res_strong_gens)
res_generators = res.generators
res_basic_orbits_init_base = \
[_orbit(degree, res_strong_gens_distr[i], res_base[i])\
for i in range(base_len)]
# initialize orbit representatives
orbit_reps = [None]*base_len
# line 4: orbit representatives for f-th basic stabilizer of K
orbits = _orbits(degree, res_strong_gens_distr[f])
orbit_reps[f] = get_reps(orbits)
# line 5: remove the base point from the representatives to avoid
# getting the identity element as a generator for K
orbit_reps[f].remove(base[f])
# line 6: more initializations
c = [0]*base_len
u = [identity]*base_len
sorted_orbits = [None]*base_len
for i in range(base_len):
sorted_orbits[i] = basic_orbits[i][:]
sorted_orbits[i].sort(key=lambda point: base_ordering[point])
# line 7: initializations
mu = [None]*base_len
nu = [None]*base_len
# this corresponds to the element smaller than all points
mu[l] = degree + 1
update_nu(l)
# initialize computed words
computed_words = [identity]*base_len
# line 8: main loop
while True:
# apply all the tests
while l < base_len - 1 and \
computed_words[l](base[l]) in orbit_reps[l] and \
base_ordering[mu[l]] < \
base_ordering[computed_words[l](base[l])] < \
base_ordering[nu[l]] and \
tests[l](computed_words):
# line 11: change the (partial) base of K
new_point = computed_words[l](base[l])
res_base[l] = new_point
new_stab_gens = _stabilizer(degree, res_strong_gens_distr[l],
new_point)
res_strong_gens_distr[l + 1] = new_stab_gens
# line 12: calculate minimal orbit representatives for the
# l+1-th basic stabilizer
orbits = _orbits(degree, new_stab_gens)
orbit_reps[l + 1] = get_reps(orbits)
# line 13: amend sorted orbits
l += 1
temp_orbit = [computed_words[l - 1](point) for point
in basic_orbits[l]]
temp_orbit.sort(key=lambda point: base_ordering[point])
sorted_orbits[l] = temp_orbit
                # lines 14 and 15: update variables used in minimality tests
new_mu = degree + 1
for i in range(l):
if base[l] in res_basic_orbits_init_base[i]:
candidate = computed_words[i](base[i])
if base_ordering[candidate] > base_ordering[new_mu]:
new_mu = candidate
mu[l] = new_mu
update_nu(l)
# line 16: determine the new transversal element
c[l] = 0
temp_point = sorted_orbits[l][c[l]]
gamma = computed_words[l - 1]._array_form.index(temp_point)
u[l] = transversals[l][gamma]
# update computed words
computed_words[l] = rmul(computed_words[l - 1], u[l])
# lines 17 & 18: apply the tests to the group element found
g = computed_words[l]
temp_point = g(base[l])
if l == base_len - 1 and \
base_ordering[mu[l]] < \
base_ordering[temp_point] < base_ordering[nu[l]] and \
temp_point in orbit_reps[l] and \
tests[l](computed_words) and \
prop(g):
# line 19: reset the base of K
res_generators.append(g)
res_base = base[:]
# line 20: recalculate basic orbits (and transversals)
res_strong_gens.append(g)
res_strong_gens_distr = _distribute_gens_by_base(res_base,
res_strong_gens)
res_basic_orbits_init_base = \
[_orbit(degree, res_strong_gens_distr[i], res_base[i]) \
for i in range(base_len)]
# line 21: recalculate orbit representatives
# line 22: reset the search depth
orbit_reps[f] = get_reps(orbits)
l = f
# line 23: go up the tree until in the first branch not fully
# searched
while l >= 0 and c[l] == len(basic_orbits[l]) - 1:
l = l - 1
# line 24: if the entire tree is traversed, return K
if l == -1:
return PermutationGroup(res_generators)
# lines 25-27: update orbit representatives
if l < f:
# line 26
f = l
c[l] = 0
# line 27
temp_orbits = _orbits(degree, res_strong_gens_distr[f])
orbit_reps[f] = get_reps(temp_orbits)
# line 28: update variables used for minimality testing
mu[l] = degree + 1
temp_index = len(basic_orbits[l]) + 1 - \
len(res_basic_orbits_init_base[l])
if temp_index >= len(sorted_orbits[l]):
nu[l] = base_ordering[degree]
else:
nu[l] = sorted_orbits[l][temp_index]
# line 29: set the next element from the current branch and update
# accordingly
c[l] += 1
if l == 0:
gamma = sorted_orbits[l][c[l]]
else:
gamma = computed_words[l - 1]._array_form.index(sorted_orbits[l][c[l]])
u[l] = transversals[l][gamma]
if l == 0:
computed_words[l] = u[l]
else:
computed_words[l] = rmul(computed_words[l - 1], u[l])
@property
def transitivity_degree(self):
r"""Compute the degree of transitivity of the group.
Explanation
===========
A permutation group `G` acting on `\Omega = \{0, 1, ..., n-1\}` is
``k``-fold transitive, if, for any k points
`(a_1, a_2, ..., a_k)\in\Omega` and any k points
`(b_1, b_2, ..., b_k)\in\Omega` there exists `g\in G` such that
`g(a_1)=b_1, g(a_2)=b_2, ..., g(a_k)=b_k`
The degree of transitivity of `G` is the maximum ``k`` such that
`G` is ``k``-fold transitive. ([8])
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> a = Permutation([1, 2, 0])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.transitivity_degree
3
See Also
========
is_transitive, orbit
"""
if self._transitivity_degree is None:
n = self.degree
G = self
# if G is k-transitive, a tuple (a_0,..,a_k)
# can be brought to (b_0,...,b_(k-1), b_k)
# where b_0,...,b_(k-1) are fixed points;
# consider the group G_k which stabilizes b_0,...,b_(k-1)
# if G_k is transitive on the subset excluding b_0,...,b_(k-1)
# then G is (k+1)-transitive
for i in range(n):
orb = G.orbit(i)
if len(orb) != n - i:
self._transitivity_degree = i
return i
G = G.stabilizer(i)
self._transitivity_degree = n
return n
else:
return self._transitivity_degree
def _p_elements_group(G, p):
'''
For an abelian p-group G return the subgroup consisting of
all elements of order p (and the identity)
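        Examples
        ========
        An illustrative check on an abelian 2-group (``AbelianGroup(4, 8)``
        is C4 x C8, whose elements of order dividing 2 form a subgroup of
        order 4):
        >>> from sympy.combinatorics.named_groups import AbelianGroup
        >>> G = AbelianGroup(4, 8)
        >>> G._p_elements_group(2).order()
        4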
'''
gens = G.generators[:]
gens = sorted(gens, key=lambda x: x.order(), reverse=True)
gens_p = [g**(g.order()/p) for g in gens]
gens_r = []
for i in range(len(gens)):
x = gens[i]
x_order = x.order()
# x_p has order p
x_p = x**(x_order/p)
if i > 0:
P = PermutationGroup(gens_p[:i])
else:
P = PermutationGroup(G.identity)
if x**(x_order/p) not in P:
gens_r.append(x**(x_order/p))
else:
# replace x by an element of order (x.order()/p)
# so that gens still generates G
g = P.generator_product(x_p, original=True)
for s in g:
x = x*s**-1
x_order = x_order/p
# insert x to gens so that the sorting is preserved
del gens[i]
del gens_p[i]
j = i - 1
while j < len(gens) and gens[j].order() >= x_order:
j += 1
gens = gens[:j] + [x] + gens[j:]
gens_p = gens_p[:j] + [x] + gens_p[j:]
return PermutationGroup(gens_r)
def _sylow_alt_sym(self, p):
'''
Return a p-Sylow subgroup of a symmetric or an
alternating group.
Explanation
===========
The algorithm for this is hinted at in [1], Chapter 4,
Exercise 4.
For Sym(n) with n = p^i, the idea is as follows. Partition
the interval [0..n-1] into p equal parts, each of length p^(i-1):
[0..p^(i-1)-1], [p^(i-1)..2*p^(i-1)-1]...[(p-1)*p^(i-1)..p^i-1].
Find a p-Sylow subgroup of Sym(p^(i-1)) (treated as a subgroup
of ``self``) acting on each of the parts. Call the subgroups
P_1, P_2...P_p. The generators for the subgroups P_2...P_p
can be obtained from those of P_1 by applying a "shifting"
permutation to them, that is, a permutation mapping [0..p^(i-1)-1]
to the second part (the other parts are obtained by using the shift
        multiple times). This permutation together with the generators
        of P_1 generates a p-Sylow subgroup of ``self``.
For n not equal to a power of p, partition
[0..n-1] in accordance with how n would be written in base p.
E.g. for p=2 and n=11, 11 = 2^3 + 2^2 + 1 so the partition
is [[0..7], [8..9], {10}]. To generate a p-Sylow subgroup,
take the union of the generators for each of the parts.
For the above example, {(0 1), (0 2)(1 3), (0 4), (1 5)(2 7)}
from the first part, {(8 9)} from the second part and
nothing from the third. This gives 4 generators in total, and
the subgroup they generate is p-Sylow.
Alternating groups are treated the same except when p=2. In this
case, (0 1)(s s+1) should be added for an appropriate s (the start
of a part) for each part in the partitions.
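        Examples
        ========
        An illustrative check (the 2-Sylow subgroup of Sym(8) has order
        2**7 = 128):
        >>> from sympy.combinatorics.named_groups import SymmetricGroup
        >>> from sympy.combinatorics.perm_groups import PermutationGroup
        >>> gens = SymmetricGroup(8)._sylow_alt_sym(2)
        >>> PermutationGroup(gens).order()
        128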
See Also
========
sylow_subgroup, is_alt_sym
'''
n = self.degree
gens = []
identity = Permutation(n-1)
# the case of 2-sylow subgroups of alternating groups
# needs special treatment
alt = p == 2 and all(g.is_even for g in self.generators)
        # find the representation of n in base p
coeffs = []
m = n
while m > 0:
coeffs.append(m % p)
m = m // p
power = len(coeffs)-1
# for a symmetric group, gens[:i] is the generating
# set for a p-Sylow subgroup on [0..p**(i-1)-1]. For
# alternating groups, the same is given by gens[:2*(i-1)]
for i in range(1, power+1):
if i == 1 and alt:
# (0 1) shouldn't be added for alternating groups
continue
gen = Permutation([(j + p**(i-1)) % p**i for j in range(p**i)])
gens.append(identity*gen)
if alt:
gen = Permutation(0, 1)*gen*Permutation(0, 1)*gen
gens.append(gen)
# the first point in the current part (see the algorithm
# description in the docstring)
start = 0
while power > 0:
a = coeffs[power]
# make the permutation shifting the start of the first
# part ([0..p^i-1] for some i) to the current one
for s in range(a):
shift = Permutation()
if start > 0:
for i in range(p**power):
shift = shift(i, start + i)
if alt:
gen = Permutation(0, 1)*shift*Permutation(0, 1)*shift
gens.append(gen)
j = 2*(power - 1)
else:
j = power
for i, gen in enumerate(gens[:j]):
if alt and i % 2 == 1:
continue
# shift the generator to the start of the
# partition part
gen = shift*gen*shift
gens.append(gen)
start += p**power
power = power-1
return gens
def sylow_subgroup(self, p):
'''
Return a p-Sylow subgroup of the group.
The algorithm is described in [1], Chapter 4, Section 7
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> D = DihedralGroup(6)
>>> S = D.sylow_subgroup(2)
>>> S.order()
4
>>> G = SymmetricGroup(6)
>>> S = G.sylow_subgroup(5)
>>> S.order()
5
>>> G1 = AlternatingGroup(3)
>>> G2 = AlternatingGroup(5)
>>> G3 = AlternatingGroup(9)
>>> S1 = G1.sylow_subgroup(3)
>>> S2 = G2.sylow_subgroup(3)
>>> S3 = G3.sylow_subgroup(3)
>>> len1 = len(S1.lower_central_series())
>>> len2 = len(S2.lower_central_series())
>>> len3 = len(S3.lower_central_series())
>>> len1 == len2
True
>>> len1 < len3
True
'''
from sympy.combinatorics.homomorphisms import (
orbit_homomorphism, block_homomorphism)
from sympy.ntheory.primetest import isprime
if not isprime(p):
raise ValueError("p must be a prime")
def is_p_group(G):
# check if the order of G is a power of p
# and return the power
m = G.order()
n = 0
while m % p == 0:
m = m/p
n += 1
if m == 1:
return True, n
return False, n
def _sylow_reduce(mu, nu):
# reduction based on two homomorphisms
# mu and nu with trivially intersecting
# kernels
Q = mu.image().sylow_subgroup(p)
Q = mu.invert_subgroup(Q)
nu = nu.restrict_to(Q)
R = nu.image().sylow_subgroup(p)
return nu.invert_subgroup(R)
order = self.order()
if order % p != 0:
return PermutationGroup([self.identity])
p_group, n = is_p_group(self)
if p_group:
return self
if self.is_alt_sym():
return PermutationGroup(self._sylow_alt_sym(p))
# if there is a non-trivial orbit with size not divisible
# by p, the sylow subgroup is contained in its stabilizer
# (by orbit-stabilizer theorem)
orbits = self.orbits()
non_p_orbits = [o for o in orbits if len(o) % p != 0 and len(o) != 1]
if non_p_orbits:
G = self.stabilizer(list(non_p_orbits[0]).pop())
return G.sylow_subgroup(p)
if not self.is_transitive():
# apply _sylow_reduce to orbit actions
orbits = sorted(orbits, key = lambda x: len(x))
omega1 = orbits.pop()
omega2 = orbits[0].union(*orbits)
mu = orbit_homomorphism(self, omega1)
nu = orbit_homomorphism(self, omega2)
return _sylow_reduce(mu, nu)
blocks = self.minimal_blocks()
if len(blocks) > 1:
# apply _sylow_reduce to block system actions
mu = block_homomorphism(self, blocks[0])
nu = block_homomorphism(self, blocks[1])
return _sylow_reduce(mu, nu)
elif len(blocks) == 1:
block = list(blocks)[0]
if any(e != 0 for e in block):
# self is imprimitive
mu = block_homomorphism(self, block)
if not is_p_group(mu.image())[0]:
S = mu.image().sylow_subgroup(p)
return mu.invert_subgroup(S).sylow_subgroup(p)
# find an element of order p
g = self.random()
g_order = g.order()
while g_order % p != 0 or g_order == 0:
g = self.random()
g_order = g.order()
g = g**(g_order // p)
if order % p**2 != 0:
return PermutationGroup(g)
C = self.centralizer(g)
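        # C need not yet contain a full Sylow p-subgroup of self; enlarge it
        # by repeatedly passing to the centralizer of an order-p element h
        # taken from the centre of a Sylow p-subgroup of C, until the order
        # of C is divisible by p**n, and then recurse into C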
while C.order() % p**n != 0:
S = C.sylow_subgroup(p)
s_order = S.order()
Z = S.center()
P = Z._p_elements_group(p)
h = P.random()
C_h = self.centralizer(h)
while C_h.order() % p*s_order != 0:
h = P.random()
C_h = self.centralizer(h)
C = C_h
return C.sylow_subgroup(p)
def _block_verify(H, L, alpha):
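        '''
        Check whether the orbit of ``alpha`` under ``L`` is a block under
        the action of ``H`` on ``H.orbit(alpha)``.
        If it is, return ``(True, blocks)`` where ``blocks[i]`` is a
        representative of the block containing the ``i``-th point of the
        sorted orbit; otherwise return ``(False, s)`` where the permutation
        ``s`` witnesses that the candidate blocks are not preserved.
        This is a helper for ``strong_presentation``; see [1], Chapter 6.
        '''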
delta = sorted(list(H.orbit(alpha)))
H_gens = H.generators
# p[i] will be the number of the block
# delta[i] belongs to
p = [-1]*len(delta)
blocks = [-1]*len(delta)
B = [[]] # future list of blocks
u = [0]*len(delta) # u[i] in L s.t. alpha^u[i] = B[0][i]
t = L.orbit_transversal(alpha, pairs=True)
for a, beta in t:
B[0].append(a)
i_a = delta.index(a)
p[i_a] = 0
blocks[i_a] = alpha
u[i_a] = beta
rho = 0
m = 0 # number of blocks - 1
while rho <= m:
beta = B[rho][0]
for g in H_gens:
d = beta^g
i_d = delta.index(d)
sigma = p[i_d]
if sigma < 0:
# define a new block
m += 1
sigma = m
u[i_d] = u[delta.index(beta)]*g
p[i_d] = sigma
rep = d
blocks[i_d] = rep
newb = [rep]
for gamma in B[rho][1:]:
i_gamma = delta.index(gamma)
d = gamma^g
i_d = delta.index(d)
if p[i_d] < 0:
u[i_d] = u[i_gamma]*g
p[i_d] = sigma
blocks[i_d] = rep
newb.append(d)
else:
# B[rho] is not a block
s = u[i_gamma]*g*u[i_d]**(-1)
return False, s
B.append(newb)
else:
for h in B[rho][1:]:
if not h^g in B[sigma]:
# B[rho] is not a block
s = u[delta.index(beta)]*g*u[i_d]**(-1)
return False, s
rho += 1
return True, blocks
def _verify(H, K, phi, z, alpha):
'''
        Return a list of relators ``rels`` in generators ``gens_h`` that
        are mapped to ``H.generators`` by ``phi`` so that, given a finite
        presentation ``<gens_k | rels_k>`` of ``K`` on a subset of ``gens_h``,
        ``<gens_h | rels_k + rels>`` is a finite presentation of ``H``.
Explanation
===========
``H`` should be generated by the union of ``K.generators`` and ``z``
(a single generator), and ``H.stabilizer(alpha) == K``; ``phi`` is a
canonical injection from a free group into a permutation group
containing ``H``.
The algorithm is described in [1], Chapter 6.
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.homomorphisms import homomorphism
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup
        >>> H = PermutationGroup(Permutation(0, 2), Permutation(1, 5))
>>> K = PermutationGroup(Permutation(5)(0, 2))
>>> F = free_group("x_0 x_1")[0]
>>> gens = F.generators
>>> phi = homomorphism(F, H, F.generators, H.generators)
>>> rels_k = [gens[0]**2] # relators for presentation of K
        >>> z = Permutation(1, 5)
>>> check, rels_h = H._verify(K, phi, z, 1)
>>> check
True
>>> rels = rels_k + rels_h
>>> G = FpGroup(F, rels) # presentation of H
>>> G.order() == H.order()
True
See also
========
strong_presentation, presentation, stabilizer
'''
orbit = H.orbit(alpha)
beta = alpha^(z**-1)
K_beta = K.stabilizer(beta)
# orbit representatives of K_beta
gammas = [alpha, beta]
orbits = list({tuple(K_beta.orbit(o)) for o in orbit})
orbit_reps = [orb[0] for orb in orbits]
for rep in orbit_reps:
if rep not in gammas:
gammas.append(rep)
# orbit transversal of K
betas = [alpha, beta]
transversal = {alpha: phi.invert(H.identity), beta: phi.invert(z**-1)}
for s, g in K.orbit_transversal(beta, pairs=True):
if not s in transversal:
transversal[s] = transversal[beta]*phi.invert(g)
union = K.orbit(alpha).union(K.orbit(beta))
while (len(union) < len(orbit)):
for gamma in gammas:
if gamma in union:
r = gamma^z
if r not in union:
betas.append(r)
transversal[r] = transversal[gamma]*phi.invert(z)
for s, g in K.orbit_transversal(r, pairs=True):
if not s in transversal:
transversal[s] = transversal[r]*phi.invert(g)
union = union.union(K.orbit(r))
break
# compute relators
rels = []
for b in betas:
k_gens = K.stabilizer(b).generators
for y in k_gens:
new_rel = transversal[b]
gens = K.generator_product(y, original=True)
for g in gens[::-1]:
new_rel = new_rel*phi.invert(g)
new_rel = new_rel*transversal[b]**-1
perm = phi(new_rel)
try:
gens = K.generator_product(perm, original=True)
except ValueError:
return False, perm
for g in gens:
new_rel = new_rel*phi.invert(g)**-1
if new_rel not in rels:
rels.append(new_rel)
for gamma in gammas:
new_rel = transversal[gamma]*phi.invert(z)*transversal[gamma^z]**-1
perm = phi(new_rel)
try:
gens = K.generator_product(perm, original=True)
except ValueError:
return False, perm
for g in gens:
new_rel = new_rel*phi.invert(g)**-1
if new_rel not in rels:
rels.append(new_rel)
return True, rels
def strong_presentation(G):
'''
Return a strong finite presentation of `G`. The generators
of the returned group are in the same order as the strong
generators of `G`.
The algorithm is based on Sims' Verify algorithm described
in [1], Chapter 6.
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> P = DihedralGroup(4)
>>> G = P.strong_presentation()
>>> P.order() == G.order()
True
See Also
========
presentation, _verify
'''
from sympy.combinatorics.fp_groups import (FpGroup,
simplify_presentation)
from sympy.combinatorics.free_groups import free_group
from sympy.combinatorics.homomorphisms import (block_homomorphism,
homomorphism, GroupHomomorphism)
strong_gens = G.strong_gens[:]
stabs = G.basic_stabilizers[:]
base = G.base[:]
# injection from a free group on len(strong_gens)
# generators into G
gen_syms = [('x_%d'%i) for i in range(len(strong_gens))]
F = free_group(', '.join(gen_syms))[0]
phi = homomorphism(F, G, F.generators, strong_gens)
H = PermutationGroup(G.identity)
while stabs:
alpha = base.pop()
K = H
H = stabs.pop()
new_gens = [g for g in H.generators if g not in K]
if K.order() == 1:
z = new_gens.pop()
rels = [F.generators[-1]**z.order()]
intermediate_gens = [z]
K = PermutationGroup(intermediate_gens)
# add generators one at a time building up from K to H
while new_gens:
z = new_gens.pop()
intermediate_gens = [z] + intermediate_gens
K_s = PermutationGroup(intermediate_gens)
orbit = K_s.orbit(alpha)
orbit_k = K.orbit(alpha)
# split into cases based on the orbit of K_s
if orbit_k == orbit:
if z in K:
rel = phi.invert(z)
perm = z
else:
t = K.orbit_rep(alpha, alpha^z)
rel = phi.invert(z)*phi.invert(t)**-1
perm = z*t**-1
for g in K.generator_product(perm, original=True):
rel = rel*phi.invert(g)**-1
new_rels = [rel]
elif len(orbit_k) == 1:
# `success` is always true because `strong_gens`
# and `base` are already a verified BSGS. Later
# this could be changed to start with a randomly
# generated (potential) BSGS, and then new elements
# would have to be appended to it when `success`
# is false.
success, new_rels = K_s._verify(K, phi, z, alpha)
else:
# K.orbit(alpha) should be a block
# under the action of K_s on K_s.orbit(alpha)
check, block = K_s._block_verify(K, alpha)
if check:
# apply _verify to the action of K_s
# on the block system; for convenience,
# add the blocks as additional points
# that K_s should act on
t = block_homomorphism(K_s, block)
m = t.codomain.degree # number of blocks
d = K_s.degree
# conjugating with p will shift
# permutations in t.image() to
# higher numbers, e.g.
# p*(0 1)*p = (m m+1)
p = Permutation()
for i in range(m):
p *= Permutation(i, i+d)
t_img = t.images
# combine generators of K_s with their
# action on the block system
images = {g: g*p*t_img[g]*p for g in t_img}
for g in G.strong_gens[:-len(K_s.generators)]:
images[g] = g
K_s_act = PermutationGroup(list(images.values()))
f = GroupHomomorphism(G, K_s_act, images)
K_act = PermutationGroup([f(g) for g in K.generators])
success, new_rels = K_s_act._verify(K_act, f.compose(phi), f(z), d)
for n in new_rels:
if not n in rels:
rels.append(n)
K = K_s
group = FpGroup(F, rels)
return simplify_presentation(group)
def presentation(G, eliminate_gens=True):
'''
Return an `FpGroup` presentation of the group.
The algorithm is described in [1], Chapter 6.1.
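        Examples
        ========
        An illustrative check (mirroring the ``strong_presentation`` example;
        the presented group has the same order as the original):
        >>> from sympy.combinatorics.named_groups import DihedralGroup
        >>> D = DihedralGroup(4)
        >>> G = D.presentation()
        >>> G.order() == D.order()
        True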
'''
from sympy.combinatorics.fp_groups import (FpGroup,
simplify_presentation)
from sympy.combinatorics.coset_table import CosetTable
from sympy.combinatorics.free_groups import free_group
from sympy.combinatorics.homomorphisms import homomorphism
from itertools import product
if G._fp_presentation:
return G._fp_presentation
def _factor_group_by_rels(G, rels):
if isinstance(G, FpGroup):
rels.extend(G.relators)
return FpGroup(G.free_group, list(set(rels)))
return FpGroup(G, rels)
gens = G.generators
len_g = len(gens)
if len_g == 1:
order = gens[0].order()
# handle the trivial group
if order == 1:
return free_group([])[0]
F, x = free_group('x')
return FpGroup(F, [x**order])
if G.order() > 20:
half_gens = G.generators[0:(len_g+1)//2]
else:
half_gens = []
H = PermutationGroup(half_gens)
H_p = H.presentation()
len_h = len(H_p.generators)
C = G.coset_table(H)
n = len(C) # subgroup index
gen_syms = [('x_%d'%i) for i in range(len(gens))]
F = free_group(', '.join(gen_syms))[0]
# mapping generators of H_p to those of F
images = [F.generators[i] for i in range(len_h)]
R = homomorphism(H_p, F, H_p.generators, images, check=False)
# rewrite relators
rels = R(H_p.relators)
G_p = FpGroup(F, rels)
# injective homomorphism from G_p into G
T = homomorphism(G_p, G, G_p.generators, gens)
C_p = CosetTable(G_p, [])
C_p.table = [[None]*(2*len_g) for i in range(n)]
# initiate the coset transversal
transversal = [None]*n
transversal[0] = G_p.identity
# fill in the coset table as much as possible
for i in range(2*len_h):
C_p.table[0][i] = 0
gamma = 1
for alpha, x in product(range(0, n), range(2*len_g)):
beta = C[alpha][x]
if beta == gamma:
gen = G_p.generators[x//2]**((-1)**(x % 2))
transversal[beta] = transversal[alpha]*gen
C_p.table[alpha][x] = beta
C_p.table[beta][x + (-1)**(x % 2)] = alpha
gamma += 1
if gamma == n:
break
C_p.p = list(range(n))
beta = x = 0
while not C_p.is_complete():
# find the first undefined entry
while C_p.table[beta][x] == C[beta][x]:
x = (x + 1) % (2*len_g)
if x == 0:
beta = (beta + 1) % n
# define a new relator
gen = G_p.generators[x//2]**((-1)**(x % 2))
new_rel = transversal[beta]*gen*transversal[C[beta][x]]**-1
perm = T(new_rel)
next = G_p.identity
for s in H.generator_product(perm, original=True):
next = next*T.invert(s)**-1
new_rel = new_rel*next
# continue coset enumeration
G_p = _factor_group_by_rels(G_p, [new_rel])
C_p.scan_and_fill(0, new_rel)
C_p = G_p.coset_enumeration([], strategy="coset_table",
draft=C_p, max_cosets=n, incomplete=True)
G._fp_presentation = simplify_presentation(G_p)
return G._fp_presentation
def polycyclic_group(self):
"""
Return the PolycyclicGroup instance with below parameters:
Explanation
===========
* ``pc_sequence`` : Polycyclic sequence is formed by collecting all
the missing generators between the adjacent groups in the
          derived series of the given permutation group.
* ``pc_series`` : Polycyclic series is formed by adding all the missing
generators of ``der[i+1]`` in ``der[i]``, where ``der`` represents
the derived series.
* ``relative_order`` : A list, computed by the ratio of adjacent groups in
pc_series.
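        Examples
        ========
        For instance (the values below assume the ``relative_order``
        attribute of ``PolycyclicGroup``; the derived series of Sym(4) is
        Sym(4) > Alt(4) > V4 > 1):
        >>> from sympy.combinatorics.named_groups import SymmetricGroup
        >>> S = SymmetricGroup(4)
        >>> PcGroup = S.polycyclic_group()
        >>> PcGroup.relative_order
        [2, 3, 2, 2]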
"""
from sympy.combinatorics.pc_groups import PolycyclicGroup
if not self.is_polycyclic:
raise ValueError("The group must be solvable")
der = self.derived_series()
pc_series = []
pc_sequence = []
relative_order = []
pc_series.append(der[-1])
der.reverse()
for i in range(len(der)-1):
H = der[i]
for g in der[i+1].generators:
if g not in H:
H = PermutationGroup([g] + H.generators)
pc_series.insert(0, H)
pc_sequence.insert(0, g)
G1 = pc_series[0].order()
G2 = pc_series[1].order()
relative_order.insert(0, G1 // G2)
return PolycyclicGroup(pc_sequence, pc_series, relative_order, collector=None)
def _orbit(degree, generators, alpha, action='tuples'):
r"""Compute the orbit of alpha `\{g(\alpha) | g \in G\}` as a set.
Explanation
===========
The time complexity of the algorithm used here is `O(|Orb|*r)` where
`|Orb|` is the size of the orbit and ``r`` is the number of generators of
the group. For a more detailed analysis, see [1], p.78, [2], pp. 19-21.
Here alpha can be a single point, or a list of points.
If alpha is a single point, the ordinary orbit is computed.
if alpha is a list of points, there are three available options:
'union' - computes the union of the orbits of the points in the list
'tuples' - computes the orbit of the list interpreted as an ordered
tuple under the group action ( i.e., g((1, 2, 3)) = (g(1), g(2), g(3)) )
    'sets' - computes the orbit of the list interpreted as a set
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup, _orbit
>>> a = Permutation([1, 2, 0, 4, 5, 6, 3])
>>> G = PermutationGroup([a])
>>> _orbit(G.degree, G.generators, 0)
{0, 1, 2}
>>> _orbit(G.degree, G.generators, [0, 4], 'union')
{0, 1, 2, 3, 4, 5, 6}
See Also
========
orbit, orbit_transversal
"""
if not hasattr(alpha, '__getitem__'):
alpha = [alpha]
gens = [x._array_form for x in generators]
if len(alpha) == 1 or action == 'union':
orb = alpha
used = [False]*degree
for el in alpha:
used[el] = True
for b in orb:
for gen in gens:
temp = gen[b]
if used[temp] == False:
orb.append(temp)
used[temp] = True
return set(orb)
elif action == 'tuples':
alpha = tuple(alpha)
orb = [alpha]
used = {alpha}
for b in orb:
for gen in gens:
temp = tuple([gen[x] for x in b])
if temp not in used:
orb.append(temp)
used.add(temp)
return set(orb)
elif action == 'sets':
alpha = frozenset(alpha)
orb = [alpha]
used = {alpha}
for b in orb:
for gen in gens:
temp = frozenset([gen[x] for x in b])
if temp not in used:
orb.append(temp)
used.add(temp)
return {tuple(x) for x in orb}
def _orbits(degree, generators):
"""Compute the orbits of G.
    It returns a list of sets, one set for each orbit.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import _orbits
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> _orbits(a.size, [a, b])
[{0, 1, 2}]
"""
orbs = []
sorted_I = list(range(degree))
I = set(sorted_I)
while I:
i = sorted_I[0]
orb = _orbit(degree, generators, i)
orbs.append(orb)
# remove all indices that are in this orbit
I -= orb
sorted_I = [i for i in sorted_I if i not in orb]
return orbs
def _orbit_transversal(degree, generators, alpha, pairs, af=False, slp=False):
r"""Computes a transversal for the orbit of ``alpha`` as a set.
Explanation
===========
    generators : generators of the group ``G``
For a permutation group ``G``, a transversal for the orbit
`Orb = \{g(\alpha) | g \in G\}` is a set
`\{g_\beta | g_\beta(\alpha) = \beta\}` for `\beta \in Orb`.
Note that there may be more than one possible transversal.
If ``pairs`` is set to ``True``, it returns the list of pairs
`(\beta, g_\beta)`. For a proof of correctness, see [1], p.79
if ``af`` is ``True``, the transversal elements are given in
array form.
If `slp` is `True`, a dictionary `{beta: slp_beta}` is returned
for `\beta \in Orb` where `slp_beta` is a list of indices of the
generators in `generators` s.t. if `slp_beta = [i_1 ... i_n]`
`g_\beta = generators[i_n]*...*generators[i_1]`.
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> from sympy.combinatorics.perm_groups import _orbit_transversal
>>> G = DihedralGroup(6)
>>> _orbit_transversal(G.degree, G.generators, 0, False)
[(5), (0 1 2 3 4 5), (0 5)(1 4)(2 3), (0 2 4)(1 3 5), (5)(0 4)(1 3), (0 3)(1 4)(2 5)]
"""
tr = [(alpha, list(range(degree)))]
slp_dict = {alpha: []}
used = [False]*degree
used[alpha] = True
gens = [x._array_form for x in generators]
for x, px in tr:
px_slp = slp_dict[x]
for gen in gens:
temp = gen[x]
if used[temp] == False:
slp_dict[temp] = [gens.index(gen)] + px_slp
tr.append((temp, _af_rmul(gen, px)))
used[temp] = True
if pairs:
if not af:
tr = [(x, _af_new(y)) for x, y in tr]
if not slp:
return tr
return tr, slp_dict
if af:
tr = [y for _, y in tr]
if not slp:
return tr
return tr, slp_dict
tr = [_af_new(y) for _, y in tr]
if not slp:
return tr
return tr, slp_dict
def _stabilizer(degree, generators, alpha):
r"""Return the stabilizer subgroup of ``alpha``.
Explanation
===========
The stabilizer of `\alpha` is the group `G_\alpha =
\{g \in G | g(\alpha) = \alpha\}`.
For a proof of correctness, see [1], p.79.
degree : degree of G
generators : generators of G
Examples
========
>>> from sympy.combinatorics.perm_groups import _stabilizer
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> G = DihedralGroup(6)
>>> _stabilizer(G.degree, G.generators, 5)
[(5)(0 4)(1 3), (5)]
See Also
========
orbit
"""
orb = [alpha]
table = {alpha: list(range(degree))}
table_inv = {alpha: list(range(degree))}
used = [False]*degree
used[alpha] = True
gens = [x._array_form for x in generators]
stab_gens = []
for b in orb:
for gen in gens:
temp = gen[b]
if used[temp] is False:
gen_temp = _af_rmul(gen, table[b])
orb.append(temp)
table[temp] = gen_temp
table_inv[temp] = _af_invert(gen_temp)
used[temp] = True
else:
schreier_gen = _af_rmuln(table_inv[temp], gen, table[b])
if schreier_gen not in stab_gens:
stab_gens.append(schreier_gen)
return [_af_new(x) for x in stab_gens]
PermGroup = PermutationGroup
class SymmetricPermutationGroup(Basic):
"""
The class defining the lazy form of SymmetricGroup.
deg : int
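    Examples
    ========
    A quick look at the basic attributes:
    >>> from sympy.combinatorics import SymmetricPermutationGroup
    >>> G = SymmetricPermutationGroup(4)
    >>> G.degree
    4
    >>> G.order()
    24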
"""
def __new__(cls, deg):
deg = _sympify(deg)
obj = Basic.__new__(cls, deg)
obj._deg = deg
obj._order = None
return obj
def __contains__(self, i):
"""Return ``True`` if *i* is contained in SymmetricPermutationGroup.
Examples
========
>>> from sympy.combinatorics import Permutation, SymmetricPermutationGroup
>>> G = SymmetricPermutationGroup(4)
>>> Permutation(1, 2, 3) in G
True
"""
if not isinstance(i, Permutation):
raise TypeError("A SymmetricPermutationGroup contains only Permutations as "
"elements, not elements of type %s" % type(i))
return i.size == self.degree
def order(self):
"""
Return the order of the SymmetricPermutationGroup.
Examples
========
>>> from sympy.combinatorics import SymmetricPermutationGroup
>>> G = SymmetricPermutationGroup(4)
>>> G.order()
24
"""
if self._order is not None:
return self._order
n = self._deg
self._order = factorial(n)
return self._order
@property
def degree(self):
"""
Return the degree of the SymmetricPermutationGroup.
Examples
========
>>> from sympy.combinatorics import SymmetricPermutationGroup
>>> G = SymmetricPermutationGroup(4)
>>> G.degree
4
"""
return self._deg
@property
def identity(self):
'''
Return the identity element of the SymmetricPermutationGroup.
Examples
========
>>> from sympy.combinatorics import SymmetricPermutationGroup
>>> G = SymmetricPermutationGroup(4)
>>> G.identity()
(3)
'''
return _af_new(list(range(self._deg)))
class Coset(Basic):
"""A left coset of a permutation group with respect to an element.
Parameters
==========
g : Permutation
H : PermutationGroup
dir : "+" or "-", If not specified by default it will be "+"
here ``dir`` specified the type of coset "+" represent the
right coset and "-" represent the left coset.
G : PermutationGroup, optional
The group which contains *H* as its subgroup and *g* as its
element.
If not specified, it would automatically become a symmetric
group ``SymmetricPermutationGroup(g.size)`` and
``SymmetricPermutationGroup(H.degree)`` if ``g.size`` and ``H.degree``
are matching.``SymmetricPermutationGroup`` is a lazy form of SymmetricGroup
used for representation purpose.
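    Examples
    ========
    By default a right coset ``Hg`` is constructed:
    >>> from sympy.combinatorics import Permutation, PermutationGroup, Coset
    >>> a = Permutation(1, 2)
    >>> b = Permutation(0, 1)
    >>> G = PermutationGroup([a, b])
    >>> Coset(a, G).is_right_coset
    True
    >>> Coset(a, G, dir="-").is_left_coset
    True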
"""
def __new__(cls, g, H, G=None, dir="+"):
g = _sympify(g)
if not isinstance(g, Permutation):
raise NotImplementedError
H = _sympify(H)
if not isinstance(H, PermutationGroup):
raise NotImplementedError
if G is not None:
G = _sympify(G)
if not isinstance(G, PermutationGroup) and not isinstance(G, SymmetricPermutationGroup):
raise NotImplementedError
if not H.is_subgroup(G):
raise ValueError("{} must be a subgroup of {}.".format(H, G))
if g not in G:
raise ValueError("{} must be an element of {}.".format(g, G))
else:
g_size = g.size
h_degree = H.degree
if g_size != h_degree:
raise ValueError(
"The size of the permutation {} and the degree of "
"the permutation group {} should be matching "
.format(g, H))
G = SymmetricPermutationGroup(g.size)
if isinstance(dir, str):
dir = Symbol(dir)
elif not isinstance(dir, Symbol):
raise TypeError("dir must be of type basestring or "
"Symbol, not %s" % type(dir))
if str(dir) not in ('+', '-'):
raise ValueError("dir must be one of '+' or '-' not %s" % dir)
obj = Basic.__new__(cls, g, H, G, dir)
obj._dir = dir
return obj
@property
def is_left_coset(self):
"""
Check if the coset is left coset that is ``gH``.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup, Coset
>>> a = Permutation(1, 2)
>>> b = Permutation(0, 1)
>>> G = PermutationGroup([a, b])
>>> cst = Coset(a, G, dir="-")
>>> cst.is_left_coset
True
"""
return str(self._dir) == '-'
@property
def is_right_coset(self):
"""
Check if the coset is right coset that is ``Hg``.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup, Coset
>>> a = Permutation(1, 2)
>>> b = Permutation(0, 1)
>>> G = PermutationGroup([a, b])
>>> cst = Coset(a, G, dir="+")
>>> cst.is_right_coset
True
"""
return str(self._dir) == '+'
def as_list(self):
"""
Return all the elements of coset in the form of list.
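        Examples
        ========
        A small illustrative check (here ``H`` is the whole group, so the
        coset has ``G.order()`` elements):
        >>> from sympy.combinatorics import Permutation, PermutationGroup, Coset
        >>> a = Permutation(1, 2)
        >>> G = PermutationGroup([a, Permutation(0, 1)])
        >>> len(Coset(a, G).as_list()) == G.order()
        True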
"""
g = self.args[0]
H = self.args[1]
cst = []
if str(self._dir) == '+':
for h in H.elements:
cst.append(h*g)
else:
for h in H.elements:
cst.append(g*h)
return cst
|
63ed4e6adc5ac1d4a921424f2c62fef8853e61b9caab3fc64142548895f0d48d | import random
from collections import defaultdict
from sympy.core.parameters import global_parameters
from sympy.core.basic import Atom
from sympy.core.expr import Expr
from sympy.core.compatibility import \
is_sequence, reduce, as_int, Iterable
from sympy.core.numbers import Integer
from sympy.core.sympify import _sympify
from sympy.matrices import zeros
from sympy.polys.polytools import lcm
from sympy.utilities.iterables import (flatten, has_variety, minlex,
has_dups, runs)
from mpmath.libmp.libintmath import ifac
from sympy.multipledispatch import dispatch
def _af_rmul(a, b):
"""
Return the product b*a; input and output are array forms. The ith value
is a[b[i]].
Examples
========
>>> from sympy.combinatorics.permutations import _af_rmul, Permutation
>>> a, b = [1, 0, 2], [0, 2, 1]
>>> _af_rmul(a, b)
[1, 2, 0]
>>> [a[b[i]] for i in range(3)]
[1, 2, 0]
This handles the operands in reverse order compared to the ``*`` operator:
>>> a = Permutation(a)
>>> b = Permutation(b)
>>> list(a*b)
[2, 0, 1]
>>> [b(a(i)) for i in range(3)]
[2, 0, 1]
See Also
========
rmul, _af_rmuln
"""
return [a[i] for i in b]
def _af_rmuln(*abc):
"""
Given [a, b, c, ...] return the product of ...*c*b*a using array forms.
The ith value is a[b[c[i]]].
Examples
========
>>> from sympy.combinatorics.permutations import _af_rmul, Permutation
>>> a, b = [1, 0, 2], [0, 2, 1]
>>> _af_rmul(a, b)
[1, 2, 0]
>>> [a[b[i]] for i in range(3)]
[1, 2, 0]
This handles the operands in reverse order compared to the ``*`` operator:
>>> a = Permutation(a); b = Permutation(b)
>>> list(a*b)
[2, 0, 1]
>>> [b(a(i)) for i in range(3)]
[2, 0, 1]
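    A direct call with three array forms (the i-th value of the result
    is ``a[b[c[i]]]``):
    >>> from sympy.combinatorics.permutations import _af_rmuln
    >>> _af_rmuln([1, 0, 2], [0, 2, 1], [1, 2, 0])
    [2, 0, 1]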
See Also
========
rmul, _af_rmul
"""
a = abc
m = len(a)
if m == 3:
p0, p1, p2 = a
return [p0[p1[i]] for i in p2]
if m == 4:
p0, p1, p2, p3 = a
return [p0[p1[p2[i]]] for i in p3]
if m == 5:
p0, p1, p2, p3, p4 = a
return [p0[p1[p2[p3[i]]]] for i in p4]
if m == 6:
p0, p1, p2, p3, p4, p5 = a
return [p0[p1[p2[p3[p4[i]]]]] for i in p5]
if m == 7:
p0, p1, p2, p3, p4, p5, p6 = a
return [p0[p1[p2[p3[p4[p5[i]]]]]] for i in p6]
if m == 8:
p0, p1, p2, p3, p4, p5, p6, p7 = a
return [p0[p1[p2[p3[p4[p5[p6[i]]]]]]] for i in p7]
if m == 1:
return a[0][:]
if m == 2:
a, b = a
return [a[i] for i in b]
if m == 0:
raise ValueError("String must not be empty")
p0 = _af_rmuln(*a[:m//2])
p1 = _af_rmuln(*a[m//2:])
return [p0[i] for i in p1]
def _af_parity(pi):
"""
Computes the parity of a permutation in array form.
Explanation
===========
The parity of a permutation reflects the parity of the
number of inversions in the permutation, i.e., the
number of pairs of x and y such that x > y but p[x] < p[y].
Examples
========
>>> from sympy.combinatorics.permutations import _af_parity
>>> _af_parity([0, 1, 2, 3])
0
>>> _af_parity([3, 2, 0, 1])
1
See Also
========
Permutation
"""
n = len(pi)
a = [0] * n
c = 0
for j in range(n):
if a[j] == 0:
c += 1
a[j] = 1
i = j
while pi[i] != j:
i = pi[i]
a[i] = 1
return (n - c) % 2
def _af_invert(a):
"""
Finds the inverse, ~A, of a permutation, A, given in array form.
Examples
========
>>> from sympy.combinatorics.permutations import _af_invert, _af_rmul
>>> A = [1, 2, 0, 3]
>>> _af_invert(A)
[2, 0, 1, 3]
>>> _af_rmul(_, A)
[0, 1, 2, 3]
See Also
========
Permutation, __invert__
"""
inv_form = [0] * len(a)
for i, ai in enumerate(a):
inv_form[ai] = i
return inv_form
def _af_pow(a, n):
"""
Routine for finding powers of a permutation.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation, _af_pow
>>> p = Permutation([2, 0, 3, 1])
>>> p.order()
4
>>> _af_pow(p._array_form, 4)
[0, 1, 2, 3]
"""
if n == 0:
return list(range(len(a)))
if n < 0:
return _af_pow(_af_invert(a), -n)
if n == 1:
return a[:]
elif n == 2:
b = [a[i] for i in a]
elif n == 3:
b = [a[a[i]] for i in a]
elif n == 4:
b = [a[a[a[i]]] for i in a]
else:
# use binary multiplication
b = list(range(len(a)))
while 1:
if n & 1:
b = [b[i] for i in a]
n -= 1
if not n:
break
if n % 4 == 0:
a = [a[a[a[i]]] for i in a]
n = n // 4
elif n % 2 == 0:
a = [a[i] for i in a]
n = n // 2
return b
def _af_commutes_with(a, b):
"""
Checks if the two permutations with array forms
given by ``a`` and ``b`` commute.
Examples
========
>>> from sympy.combinatorics.permutations import _af_commutes_with
>>> _af_commutes_with([1, 2, 0], [0, 2, 1])
False
See Also
========
Permutation, commutes_with
"""
return not any(a[b[i]] != b[a[i]] for i in range(len(a) - 1))
class Cycle(dict):
"""
Wrapper around dict which provides the functionality of a disjoint cycle.
Explanation
===========
A cycle shows the rule to use to move subsets of elements to obtain
a permutation. The Cycle class is more flexible than Permutation in
that 1) all elements need not be present in order to investigate how
multiple cycles act in sequence and 2) it can contain singletons:
>>> from sympy.combinatorics.permutations import Perm, Cycle
A Cycle will automatically parse a cycle given as a tuple on the rhs:
>>> Cycle(1, 2)(2, 3)
(1 3 2)
The identity cycle, Cycle(), can be used to start a product:
>>> Cycle()(1, 2)(2, 3)
(1 3 2)
The array form of a Cycle can be obtained by calling the list
method (or passing it to the list function) and all elements from
0 will be shown:
>>> a = Cycle(1, 2)
>>> a.list()
[0, 2, 1]
>>> list(a)
[0, 2, 1]
If a larger (or smaller) range is desired use the list method and
provide the desired size -- but the Cycle cannot be truncated to
a size smaller than the largest element that is out of place:
>>> b = Cycle(2, 4)(1, 2)(3, 1, 4)(1, 3)
>>> b.list()
[0, 2, 1, 3, 4]
>>> b.list(b.size + 1)
[0, 2, 1, 3, 4, 5]
>>> b.list(-1)
[0, 2, 1]
Singletons are not shown when printing with one exception: the largest
element is always shown -- as a singleton if necessary:
>>> Cycle(1, 4, 10)(4, 5)
(1 5 4 10)
>>> Cycle(1, 2)(4)(5)(10)
(1 2)(10)
The array form can be used to instantiate a Permutation so other
properties of the permutation can be investigated:
>>> Perm(Cycle(1, 2)(3, 4).list()).transpositions()
[(1, 2), (3, 4)]
Notes
=====
The underlying structure of the Cycle is a dictionary and although
the __iter__ method has been redefined to give the array form of the
    cycle, the underlying dictionary items are still available with
    such methods as ``items()``:
>>> list(Cycle(1, 2).items())
[(1, 2), (2, 1)]
See Also
========
Permutation
"""
def __missing__(self, arg):
"""Enter arg into dictionary and return arg."""
return as_int(arg)
def __iter__(self):
yield from self.list()
def __call__(self, *other):
"""Return product of cycles processed from R to L.
Examples
========
>>> from sympy.combinatorics.permutations import Cycle as C
>>> C(1, 2)(2, 3)
(1 3 2)
An instance of a Cycle will automatically parse list-like
objects and Permutations that are on the right. It is more
flexible than the Permutation in that all elements need not
be present:
>>> a = C(1, 2)
>>> a(2, 3)
(1 3 2)
>>> a(2, 3)(4, 5)
(1 3 2)(4 5)
"""
rv = Cycle(*other)
for k, v in zip(list(self.keys()), [rv[self[k]] for k in self.keys()]):
rv[k] = v
return rv
def list(self, size=None):
"""Return the cycles as an explicit list starting from 0 up
to the greater of the largest value in the cycles and size.
Truncation of trailing unmoved items will occur when size
is less than the maximum element in the cycle; if this is
desired, setting ``size=-1`` will guarantee such trimming.
Examples
========
>>> from sympy.combinatorics.permutations import Cycle
>>> p = Cycle(2, 3)(4, 5)
>>> p.list()
[0, 1, 3, 2, 5, 4]
>>> p.list(10)
[0, 1, 3, 2, 5, 4, 6, 7, 8, 9]
Passing a length too small will trim trailing, unchanged elements
in the permutation:
>>> Cycle(2, 4)(1, 2, 4).list(-1)
[0, 2, 1]
"""
if not self and size is None:
raise ValueError('must give size for empty Cycle')
if size is not None:
big = max([i for i in self.keys() if self[i] != i] + [0])
size = max(size, big + 1)
else:
size = self.size
return [self[i] for i in range(size)]
def __repr__(self):
"""We want it to print as a Cycle, not as a dict.
Examples
========
>>> from sympy.combinatorics import Cycle
>>> Cycle(1, 2)
(1 2)
>>> print(_)
(1 2)
>>> list(Cycle(1, 2).items())
[(1, 2), (2, 1)]
"""
if not self:
return 'Cycle()'
cycles = Permutation(self).cyclic_form
s = ''.join(str(tuple(c)) for c in cycles)
big = self.size - 1
if not any(i == big for c in cycles for i in c):
s += '(%s)' % big
return 'Cycle%s' % s
def __str__(self):
"""We want it to be printed in a Cycle notation with no
comma in-between.
Examples
========
>>> from sympy.combinatorics import Cycle
>>> Cycle(1, 2)
(1 2)
>>> Cycle(1, 2, 4)(5, 6)
(1 2 4)(5 6)
"""
if not self:
return '()'
cycles = Permutation(self).cyclic_form
s = ''.join(str(tuple(c)) for c in cycles)
big = self.size - 1
if not any(i == big for c in cycles for i in c):
s += '(%s)' % big
s = s.replace(',', '')
return s
def __init__(self, *args):
"""Load up a Cycle instance with the values for the cycle.
Examples
========
>>> from sympy.combinatorics.permutations import Cycle
>>> Cycle(1, 2, 6)
(1 2 6)
"""
if not args:
return
if len(args) == 1:
if isinstance(args[0], Permutation):
for c in args[0].cyclic_form:
self.update(self(*c))
return
elif isinstance(args[0], Cycle):
for k, v in args[0].items():
self[k] = v
return
args = [as_int(a) for a in args]
if any(i < 0 for i in args):
raise ValueError('negative integers are not allowed in a cycle.')
if has_dups(args):
raise ValueError('All elements must be unique in a cycle.')
for i in range(-len(args), 0):
self[args[i]] = args[i + 1]
@property
def size(self):
if not self:
return 0
return max(self.keys()) + 1
def copy(self):
return Cycle(self)
class Permutation(Atom):
"""
A permutation, alternatively known as an 'arrangement number' or 'ordering'
is an arrangement of the elements of an ordered list into a one-to-one
mapping with itself. The permutation of a given arrangement is given by
indicating the positions of the elements after re-arrangement [2]_. For
example, if one started with elements [x, y, a, b] (in that order) and
they were reordered as [x, y, b, a] then the permutation would be
[0, 1, 3, 2]. Notice that (in SymPy) the first element is always referred
to as 0 and the permutation uses the indices of the elements in the
original ordering, not the elements (a, b, etc...) themselves.
>>> from sympy.combinatorics import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
Permutations Notation
=====================
Permutations are commonly represented in disjoint cycle or array forms.
Array Notation and 2-line Form
------------------------------------
In the 2-line form, the elements and their final positions are shown
as a matrix with 2 rows:
[0 1 2 ... n-1]
[p(0) p(1) p(2) ... p(n-1)]
Since the first line is always range(n), where n is the size of p,
it is sufficient to represent the permutation by the second line,
referred to as the "array form" of the permutation. This is entered
in brackets as the argument to the Permutation class:
>>> p = Permutation([0, 2, 1]); p
Permutation([0, 2, 1])
Given i in range(p.size), the permutation maps i to i^p
>>> [i^p for i in range(p.size)]
[0, 2, 1]
The composite of two permutations p*q means first apply p, then q, so
i^(p*q) = (i^p)^q which is i^p^q according to Python precedence rules:
>>> q = Permutation([2, 1, 0])
>>> [i^p^q for i in range(3)]
[2, 0, 1]
>>> [i^(p*q) for i in range(3)]
[2, 0, 1]
    One can also use the notation p(i) = i^p, but then the composition
rule is (p*q)(i) = q(p(i)), not p(q(i)):
>>> [(p*q)(i) for i in range(p.size)]
[2, 0, 1]
>>> [q(p(i)) for i in range(p.size)]
[2, 0, 1]
>>> [p(q(i)) for i in range(p.size)]
[1, 2, 0]
Disjoint Cycle Notation
-----------------------
In disjoint cycle notation, only the elements that have shifted are
indicated. In the above case, the 2 and 1 switched places. This can
be entered in two ways:
>>> Permutation(1, 2) == Permutation([[1, 2]]) == p
True
Only the relative ordering of elements in a cycle matter:
>>> Permutation(1,2,3) == Permutation(2,3,1) == Permutation(3,1,2)
True
The disjoint cycle notation is convenient when representing
permutations that have several cycles in them:
>>> Permutation(1, 2)(3, 5) == Permutation([[1, 2], [3, 5]])
True
It also provides some economy in entry when computing products of
permutations that are written in disjoint cycle notation:
>>> Permutation(1, 2)(1, 3)(2, 3)
Permutation([0, 3, 2, 1])
>>> _ == Permutation([[1, 2]])*Permutation([[1, 3]])*Permutation([[2, 3]])
True
Caution: when the cycles have common elements
between them then the order in which the
permutations are applied matters. The
convention is that the permutations are
applied from *right to left*. In the following, the
transposition of elements 2 and 3 is followed
by the transposition of elements 1 and 2:
>>> Permutation(1, 2)(2, 3) == Permutation([(1, 2), (2, 3)])
True
>>> Permutation(1, 2)(2, 3).list()
[0, 3, 1, 2]
If the first and second elements had been
swapped first, followed by the swapping of the second
and third, the result would have been [0, 2, 3, 1].
If, for some reason, you want to apply the cycles
in the order they are entered, you can simply reverse
the order of cycles:
>>> Permutation([(1, 2), (2, 3)][::-1]).list()
[0, 2, 3, 1]
Entering a singleton in a permutation is a way to indicate the size of the
permutation. The ``size`` keyword can also be used.
Array-form entry:
>>> Permutation([[1, 2], [9]])
Permutation([0, 2, 1], size=10)
>>> Permutation([[1, 2]], size=10)
Permutation([0, 2, 1], size=10)
Cyclic-form entry:
>>> Permutation(1, 2, size=10)
Permutation([0, 2, 1], size=10)
>>> Permutation(9)(1, 2)
Permutation([0, 2, 1], size=10)
Caution: no singleton containing an element larger than the largest
in any previous cycle can be entered. This is an important difference
in how Permutation and Cycle handle the __call__ syntax. A singleton
argument at the start of a Permutation performs instantiation of the
Permutation and is permitted:
>>> Permutation(5)
Permutation([], size=6)
A singleton entered after instantiation is a call to the permutation
-- a function call -- and if the argument is out of range it will
trigger an error. For this reason, it is better to start the cycle
with the singleton:
The following fails because there is no element 3:
>>> Permutation(1, 2)(3)
Traceback (most recent call last):
...
IndexError: list index out of range
This is ok: only the call to an out of range singleton is prohibited;
otherwise the permutation autosizes:
>>> Permutation(3)(1, 2)
Permutation([0, 2, 1, 3])
>>> Permutation(1, 2)(3, 4) == Permutation(3, 4)(1, 2)
True
Equality testing
----------------
The array forms must be the same in order for permutations to be equal:
>>> Permutation([1, 0, 2, 3]) == Permutation([1, 0])
False
Identity Permutation
--------------------
The identity permutation is a permutation in which no element is out of
place. It can be entered in a variety of ways. All the following create
an identity permutation of size 4:
>>> I = Permutation([0, 1, 2, 3])
>>> all(p == I for p in [
... Permutation(3),
... Permutation(range(4)),
... Permutation([], size=4),
... Permutation(size=4)])
True
Watch out for entering the range *inside* a set of brackets (which is
cycle notation):
>>> I == Permutation([range(4)])
False
Permutation Printing
====================
There are a few things to note about how Permutations are printed.
1) If you prefer one form (array or cycle) over another, you can set
``init_printing`` with the ``perm_cyclic`` flag.
>>> from sympy import init_printing
>>> p = Permutation(1, 2)(4, 5)(3, 4)
>>> p
Permutation([0, 2, 1, 4, 5, 3])
>>> init_printing(perm_cyclic=True, pretty_print=False)
>>> p
(1 2)(3 4 5)
2) Regardless of the setting, a list of elements in the array for cyclic
form can be obtained and either of those can be copied and supplied as
the argument to Permutation:
>>> p.array_form
[0, 2, 1, 4, 5, 3]
>>> p.cyclic_form
[[1, 2], [3, 4, 5]]
>>> Permutation(_) == p
True
3) Printing is economical in that as little as possible is printed while
retaining all information about the size of the permutation:
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> Permutation([1, 0, 2, 3])
Permutation([1, 0, 2, 3])
>>> Permutation([1, 0, 2, 3], size=20)
Permutation([1, 0], size=20)
>>> Permutation([1, 0, 2, 4, 3, 5, 6], size=20)
Permutation([1, 0, 2, 4, 3], size=20)
>>> p = Permutation([1, 0, 2, 3])
>>> init_printing(perm_cyclic=True, pretty_print=False)
>>> p
(3)(0 1)
>>> init_printing(perm_cyclic=False, pretty_print=False)
The 2 was not printed but it is still there as can be seen with the
array_form and size methods:
>>> p.array_form
[1, 0, 2, 3]
>>> p.size
4
Short introduction to other methods
===================================
The permutation can act as a bijective function, telling what element is
located at a given position
>>> q = Permutation([5, 2, 3, 4, 1, 0])
>>> q.array_form[1] # the hard way
2
>>> q(1) # the easy way
2
>>> {i: q(i) for i in range(q.size)} # showing the bijection
{0: 5, 1: 2, 2: 3, 3: 4, 4: 1, 5: 0}
The full cyclic form (including singletons) can be obtained:
>>> p.full_cyclic_form
[[0, 1], [2], [3]]
Any permutation can be factored into transpositions of pairs of elements:
>>> Permutation([[1, 2], [3, 4, 5]]).transpositions()
[(1, 2), (3, 5), (3, 4)]
>>> Permutation.rmul(*[Permutation([ti], size=6) for ti in _]).cyclic_form
[[1, 2], [3, 4, 5]]
The number of permutations on a set of n elements is given by n! and is
called the cardinality.
>>> p.size
4
>>> p.cardinality
24
A given permutation has a rank among all the possible permutations of the
same elements, but what that rank is depends on how the permutations are
enumerated. (There are a number of different methods of doing so.) The
lexicographic rank is given by the rank method and this rank is used to
increment a permutation with addition/subtraction:
>>> p.rank()
6
>>> p + 1
Permutation([1, 0, 3, 2])
>>> p.next_lex()
Permutation([1, 0, 3, 2])
>>> _.rank()
7
>>> p.unrank_lex(p.size, rank=7)
Permutation([1, 0, 3, 2])
The product of two permutations p and q is defined as their composition as
functions, (p*q)(i) = q(p(i)) [6]_.
>>> p = Permutation([1, 0, 2, 3])
>>> q = Permutation([2, 3, 1, 0])
>>> list(q*p)
[2, 3, 0, 1]
>>> list(p*q)
[3, 2, 1, 0]
>>> [q(p(i)) for i in range(p.size)]
[3, 2, 1, 0]
The permutation can be 'applied' to any list-like object, not only
Permutations:
>>> p(['zero', 'one', 'four', 'two'])
['one', 'zero', 'four', 'two']
>>> p('zo42')
['o', 'z', '4', '2']
If you have a list of arbitrary elements, the corresponding permutation
can be found with the from_sequence method:
>>> Permutation.from_sequence('SymPy')
Permutation([1, 3, 2, 0, 4])
See Also
========
Cycle
References
==========
.. [1] Skiena, S. 'Permutations.' 1.1 in Implementing Discrete Mathematics
Combinatorics and Graph Theory with Mathematica. Reading, MA:
Addison-Wesley, pp. 3-16, 1990.
.. [2] Knuth, D. E. The Art of Computer Programming, Vol. 4: Combinatorial
Algorithms, 1st ed. Reading, MA: Addison-Wesley, 2011.
.. [3] Wendy Myrvold and Frank Ruskey. 2001. Ranking and unranking
permutations in linear time. Inf. Process. Lett. 79, 6 (September 2001),
281-284. DOI=10.1016/S0020-0190(01)00141-7
.. [4] D. L. Kreher, D. R. Stinson 'Combinatorial Algorithms'
CRC Press, 1999
.. [5] Graham, R. L.; Knuth, D. E.; and Patashnik, O.
Concrete Mathematics: A Foundation for Computer Science, 2nd ed.
Reading, MA: Addison-Wesley, 1994.
.. [6] https://en.wikipedia.org/wiki/Permutation#Product_and_inverse
.. [7] https://en.wikipedia.org/wiki/Lehmer_code
"""
is_Permutation = True
_array_form = None
_cyclic_form = None
_cycle_structure = None
_size = None
_rank = None
def __new__(cls, *args, size=None, **kwargs):
"""
Constructor for the Permutation object from a list or a
list of lists in which all elements of the permutation may
appear only once.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
Permutations entered in array-form are left unaltered:
>>> Permutation([0, 2, 1])
Permutation([0, 2, 1])
Permutations entered in cyclic form are converted to array form;
singletons need not be entered, but can be entered to indicate the
largest element:
>>> Permutation([[4, 5, 6], [0, 1]])
Permutation([1, 0, 2, 3, 5, 6, 4])
>>> Permutation([[4, 5, 6], [0, 1], [19]])
Permutation([1, 0, 2, 3, 5, 6, 4], size=20)
All manipulation of permutations assumes that the smallest element
is 0 (in keeping with 0-based indexing in Python) so if the 0 is
missing when entering a permutation in array form, an error will be
raised:
>>> Permutation([2, 1])
Traceback (most recent call last):
...
ValueError: Integers 0 through 2 must be present.
If a permutation is entered in cyclic form, it can be entered without
singletons and the ``size`` specified so those values can be filled
in, otherwise the array form will only extend to the maximum value
in the cycles:
>>> Permutation([[1, 4], [3, 5, 2]], size=10)
Permutation([0, 4, 3, 5, 1, 2], size=10)
>>> _.array_form
[0, 4, 3, 5, 1, 2, 6, 7, 8, 9]
"""
if size is not None:
size = int(size)
#a) ()
#b) (1) = identity
#c) (1, 2) = cycle
#d) ([1, 2, 3]) = array form
#e) ([[1, 2]]) = cyclic form
#f) (Cycle) = conversion to permutation
#g) (Permutation) = adjust size or return copy
ok = True
if not args: # a
return cls._af_new(list(range(size or 0)))
elif len(args) > 1: # c
return cls._af_new(Cycle(*args).list(size))
if len(args) == 1:
a = args[0]
if isinstance(a, cls): # g
if size is None or size == a.size:
return a
return cls(a.array_form, size=size)
if isinstance(a, Cycle): # f
return cls._af_new(a.list(size))
if not is_sequence(a): # b
if size is not None and a + 1 > size:
raise ValueError('size is too small when max is %s' % a)
return cls._af_new(list(range(a + 1)))
if has_variety(is_sequence(ai) for ai in a):
ok = False
else:
ok = False
if not ok:
raise ValueError("Permutation argument must be a list of ints, "
"a list of lists, Permutation or Cycle.")
# safe to assume args are valid; this also makes a copy
# of the args
args = list(args[0])
is_cycle = args and is_sequence(args[0])
if is_cycle: # e
args = [[int(i) for i in c] for c in args]
else: # d
args = [int(i) for i in args]
# if there are n elements present, 0, 1, ..., n-1 should be present
# unless a cycle notation has been provided. A 0 will be added
# for convenience in case one wants to enter permutations where
# counting starts from 1.
temp = flatten(args)
if has_dups(temp) and not is_cycle:
raise ValueError('there were repeated elements.')
temp = set(temp)
if not is_cycle:
if any(i not in temp for i in range(len(temp))):
raise ValueError('Integers 0 through %s must be present.' %
max(temp))
if size is not None and temp and max(temp) + 1 > size:
raise ValueError('max element should not exceed %s' % (size - 1))
if is_cycle:
# it's not necessarily canonical so we won't store
# it -- use the array form instead
c = Cycle()
for ci in args:
c = c(*ci)
aform = c.list()
else:
aform = list(args)
if size and size > len(aform):
# don't allow for truncation of permutation which
# might split a cycle and lead to an invalid aform
# but do allow the permutation size to be increased
aform.extend(list(range(len(aform), size)))
return cls._af_new(aform)
@classmethod
def _af_new(cls, perm):
"""A method to produce a Permutation object from a list;
the list is bound to the _array_form attribute, so it must
not be modified; this method is meant for internal use only;
the list ``a`` is supposed to be generated as a temporary value
in a method, so p = Perm._af_new(a) is the only object
to hold a reference to ``a``.
Examples
========
>>> from sympy.combinatorics.permutations import Perm
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> a = [2, 1, 3, 0]
>>> p = Perm._af_new(a)
>>> p
Permutation([2, 1, 3, 0])
"""
p = super().__new__(cls)
p._array_form = perm
p._size = len(perm)
return p
def _hashable_content(self):
# the array_form (a list) is the Permutation arg, so we need to
# return a tuple, instead
return tuple(self.array_form)
@property
def array_form(self):
"""
Return a copy of the attribute _array_form
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([[2, 0], [3, 1]])
>>> p.array_form
[2, 3, 0, 1]
>>> Permutation([[2, 0, 3, 1]]).array_form
[3, 2, 0, 1]
>>> Permutation([2, 0, 3, 1]).array_form
[2, 0, 3, 1]
>>> Permutation([[1, 2], [4, 5]]).array_form
[0, 2, 1, 3, 5, 4]
"""
return self._array_form[:]
def list(self, size=None):
"""Return the permutation as an explicit list, possibly
trimming unmoved elements if size is less than the maximum
element in the permutation; if this is desired, setting
``size=-1`` will guarantee such trimming.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation(2, 3)(4, 5)
>>> p.list()
[0, 1, 3, 2, 5, 4]
>>> p.list(10)
[0, 1, 3, 2, 5, 4, 6, 7, 8, 9]
Passing a length too small will trim trailing, unchanged elements
in the permutation:
>>> Permutation(2, 4)(1, 2, 4).list(-1)
[0, 2, 1]
>>> Permutation(3).list(-1)
[]
"""
if not self and size is None:
raise ValueError('must give size for empty Permutation')
rv = self.array_form
if size is not None:
if size > self.size:
rv.extend(list(range(self.size, size)))
else:
# find first value from rhs where rv[i] != i
i = self.size - 1
while rv:
if rv[-1] != i:
break
rv.pop()
i -= 1
return rv
@property
def cyclic_form(self):
"""
This is used to convert to the cyclic notation
from the canonical notation. Singletons are omitted.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 3, 1, 2])
>>> p.cyclic_form
[[1, 3, 2]]
>>> Permutation([1, 0, 2, 4, 3, 5]).cyclic_form
[[0, 1], [3, 4]]
See Also
========
array_form, full_cyclic_form
"""
if self._cyclic_form is not None:
return list(self._cyclic_form)
array_form = self.array_form
unchecked = [True] * len(array_form)
cyclic_form = []
for i in range(len(array_form)):
if unchecked[i]:
cycle = []
cycle.append(i)
unchecked[i] = False
j = i
while unchecked[array_form[j]]:
j = array_form[j]
cycle.append(j)
unchecked[j] = False
if len(cycle) > 1:
cyclic_form.append(cycle)
assert cycle == list(minlex(cycle, is_set=True))
cyclic_form.sort()
self._cyclic_form = cyclic_form[:]
return cyclic_form
@property
def full_cyclic_form(self):
"""Return permutation in cyclic form including singletons.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> Permutation([0, 2, 1]).full_cyclic_form
[[0], [1, 2]]
"""
need = set(range(self.size)) - set(flatten(self.cyclic_form))
rv = self.cyclic_form
rv.extend([[i] for i in need])
rv.sort()
return rv
@property
def size(self):
"""
Returns the number of elements in the permutation.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation([[3, 2], [0, 1]]).size
4
See Also
========
cardinality, length, order, rank
"""
return self._size
def support(self):
"""Return the elements in permutation, P, for which P[i] != i.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation([[3, 2], [0, 1], [4]])
>>> p.array_form
[1, 0, 3, 2, 4]
>>> p.support()
[0, 1, 2, 3]
"""
a = self.array_form
return [i for i, e in enumerate(a) if e != i]
def __add__(self, other):
"""Return permutation that is other higher in rank than self.
The rank is the lexicographical rank, with the identity permutation
having rank of 0.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> I = Permutation([0, 1, 2, 3])
>>> a = Permutation([2, 1, 3, 0])
>>> I + a.rank() == a
True
See Also
========
__sub__, inversion_vector
"""
rank = (self.rank() + other) % self.cardinality
rv = self.unrank_lex(self.size, rank)
rv._rank = rank
return rv
def __sub__(self, other):
"""Return the permutation that is other lower in rank than self.
See Also
========
__add__
"""
return self.__add__(-other)
@staticmethod
def rmul(*args):
"""
Return product of Permutations [a, b, c, ...] as the Permutation whose
ith value is a(b(c(i))).
a, b, c, ... can be Permutation objects or tuples.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> a, b = [1, 0, 2], [0, 2, 1]
>>> a = Permutation(a); b = Permutation(b)
>>> list(Permutation.rmul(a, b))
[1, 2, 0]
>>> [a(b(i)) for i in range(3)]
[1, 2, 0]
This handles the operands in reverse order compared to the ``*`` operator:
>>> a = Permutation(a); b = Permutation(b)
>>> list(a*b)
[2, 0, 1]
>>> [b(a(i)) for i in range(3)]
[2, 0, 1]
Notes
=====
All items in the sequence will be parsed by Permutation as
necessary as long as the first item is a Permutation:
>>> Permutation.rmul(a, [0, 2, 1]) == Permutation.rmul(a, b)
True
The reverse order of arguments will raise a TypeError.
"""
rv = args[0]
for i in range(1, len(args)):
rv = args[i]*rv
return rv
@classmethod
def rmul_with_af(cls, *args):
"""
same as rmul, but the elements of args are Permutation objects
which have _array_form
"""
a = [x._array_form for x in args]
rv = cls._af_new(_af_rmuln(*a))
return rv
def mul_inv(self, other):
"""
other*~self, self and other have _array_form
"""
a = _af_invert(self._array_form)
b = other._array_form
return self._af_new(_af_rmul(a, b))
def __rmul__(self, other):
"""This is needed to coerce other to Permutation in rmul."""
cls = type(self)
return cls(other)*self
def __mul__(self, other):
"""
Return the product a*b as a Permutation; the ith value is b(a(i)).
Examples
========
>>> from sympy.combinatorics.permutations import _af_rmul, Permutation
>>> a, b = [1, 0, 2], [0, 2, 1]
>>> a = Permutation(a); b = Permutation(b)
>>> list(a*b)
[2, 0, 1]
>>> [b(a(i)) for i in range(3)]
[2, 0, 1]
This handles operands in reverse order compared to _af_rmul and rmul:
>>> al = list(a); bl = list(b)
>>> _af_rmul(al, bl)
[1, 2, 0]
>>> [al[bl[i]] for i in range(3)]
[1, 2, 0]
It is acceptable for the arrays to have different lengths; the shorter
one will be padded to match the longer one:
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> b*Permutation([1, 0])
Permutation([1, 2, 0])
>>> Permutation([1, 0])*b
Permutation([2, 0, 1])
It is also acceptable to allow coercion to handle conversion of a
single list to the left of a Permutation:
>>> [0, 1]*a # no change: 2-element identity
Permutation([1, 0, 2])
>>> [[0, 1]]*a # exchange first two elements
Permutation([0, 1, 2])
You cannot use more than 1 cycle notation in a product of cycles
since coercion can only handle one argument to the left. To handle
multiple cycles it is convenient to use Cycle instead of Permutation:
>>> [[1, 2]]*[[2, 3]]*Permutation([]) # doctest: +SKIP
>>> from sympy.combinatorics.permutations import Cycle
>>> Cycle(1, 2)(2, 3)
(1 3 2)
"""
from sympy.combinatorics.perm_groups import PermutationGroup, Coset
if isinstance(other, PermutationGroup):
return Coset(self, other, dir='-')
a = self.array_form
# __rmul__ makes sure the other is a Permutation
b = other.array_form
if not b:
perm = a
else:
b.extend(list(range(len(b), len(a))))
perm = [b[i] for i in a] + b[len(a):]
return self._af_new(perm)
def commutes_with(self, other):
"""
Checks if the elements are commuting.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> a = Permutation([1, 4, 3, 0, 2, 5])
>>> b = Permutation([0, 1, 2, 3, 4, 5])
>>> a.commutes_with(b)
True
>>> b = Permutation([2, 3, 5, 4, 1, 0])
>>> a.commutes_with(b)
False
"""
a = self.array_form
b = other.array_form
return _af_commutes_with(a, b)
def __pow__(self, n):
"""
Routine for finding powers of a permutation.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> p = Permutation([2, 0, 3, 1])
>>> p.order()
4
>>> p**4
Permutation([0, 1, 2, 3])
"""
if isinstance(n, Permutation):
raise NotImplementedError(
'p**p is not defined; do you mean p^p (conjugate)?')
n = int(n)
return self._af_new(_af_pow(self.array_form, n))
def __rxor__(self, i):
"""Return self(i) when ``i`` is an int.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> p = Permutation(1, 2, 9)
>>> 2^p == p(2) == 9
True
"""
if int(i) == i:
return self(i)
else:
raise NotImplementedError(
"i^p = p(i) when i is an integer, not %s." % i)
def __xor__(self, h):
"""Return the conjugate permutation ``~h*self*h` `.
Explanation
===========
If ``a`` and ``b`` are conjugates, ``a = h*b*~h`` and
``b = ~h*a*h`` and both have the same cycle structure.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation(1, 2, 9)
>>> q = Permutation(6, 9, 8)
>>> p*q != q*p
True
Calculate and check properties of the conjugate:
>>> c = p^q
>>> c == ~q*p*q and p == q*c*~q
True
The expression q^p^r is equivalent to q^(p*r):
>>> r = Permutation(9)(4, 6, 8)
>>> q^p^r == q^(p*r)
True
If the term to the left of the conjugate operator, i, is an integer
then this is interpreted as selecting the ith element from the
permutation to the right:
>>> all(i^p == p(i) for i in range(p.size))
True
Note that the * operator has higher precedence than the ^ operator:
>>> q^r*p^r == q^(r*p)^r == Permutation(9)(1, 6, 4)
True
Notes
=====
In Python the precedence rule is p^q^r = (p^q)^r which differs
in general from p^(q^r)
>>> q^p^r
(9)(1 4 8)
>>> q^(p^r)
(9)(1 8 6)
For a given r and p, both of the following are conjugates of p:
~r*p*r and r*p*~r. But these are not necessarily the same:
>>> ~r*p*r == r*p*~r
True
>>> p = Permutation(1, 2, 9)(5, 6)
>>> ~r*p*r == r*p*~r
False
The conjugate ~r*p*r was chosen so that ``p^q^r`` would be equivalent
to ``p^(q*r)`` rather than ``p^(r*q)``. To obtain r*p*~r, pass ~r to
this method:
>>> p^~r == r*p*~r
True
"""
if self.size != h.size:
raise ValueError("The permutations must be of equal size.")
a = [None]*self.size
h = h._array_form
p = self._array_form
for i in range(self.size):
a[h[i]] = h[p[i]]
return self._af_new(a)
def transpositions(self):
"""
Return the permutation decomposed into a list of transpositions.
Explanation
===========
It is always possible to express a permutation as the product of
transpositions, see [1]
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([[1, 2, 3], [0, 4, 5, 6, 7]])
>>> t = p.transpositions()
>>> t
[(0, 7), (0, 6), (0, 5), (0, 4), (1, 3), (1, 2)]
>>> print(''.join(str(c) for c in t))
(0, 7)(0, 6)(0, 5)(0, 4)(1, 3)(1, 2)
>>> Permutation.rmul(*[Permutation([ti], size=p.size) for ti in t]) == p
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Transposition_%28mathematics%29#Properties
"""
a = self.cyclic_form
res = []
for x in a:
nx = len(x)
if nx == 2:
res.append(tuple(x))
elif nx > 2:
first = x[0]
for y in x[nx - 1:0:-1]:
res.append((first, y))
return res
@classmethod
def from_sequence(cls, i, key=None):
"""Return the permutation needed to obtain ``i`` from the sorted
elements of ``i``. If custom sorting is desired, a key can be given.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.from_sequence('SymPy')
(4)(0 1 3)
>>> _(sorted("SymPy"))
['S', 'y', 'm', 'P', 'y']
>>> Permutation.from_sequence('SymPy', key=lambda x: x.lower())
(4)(0 2)(1 3)
"""
ic = list(zip(i, list(range(len(i)))))
if key:
ic.sort(key=lambda x: key(x[0]))
else:
ic.sort()
return ~Permutation([i[1] for i in ic])
def __invert__(self):
"""
Return the inverse of the permutation.
A permutation multiplied by its inverse is the identity permutation.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> p = Permutation([[2, 0], [3, 1]])
>>> ~p
Permutation([2, 3, 0, 1])
>>> _ == p**-1
True
>>> p*~p == ~p*p == Permutation([0, 1, 2, 3])
True
"""
return self._af_new(_af_invert(self._array_form))
def __iter__(self):
"""Yield elements from array form.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> list(Permutation(range(3)))
[0, 1, 2]
"""
yield from self.array_form
def __repr__(self):
from sympy.printing.repr import srepr
return srepr(self)
def __call__(self, *i):
"""
Allows applying a permutation instance as a bijective function.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([[2, 0], [3, 1]])
>>> p.array_form
[2, 3, 0, 1]
>>> [p(i) for i in range(4)]
[2, 3, 0, 1]
If an array is given then the permutation selects the items
from the array (i.e. the permutation is applied to the array):
>>> from sympy.abc import x
>>> p([x, 1, 0, x**2])
[0, x**2, x, 1]
"""
# list indices can be Integer or int; leave this
# as it is (don't test or convert it) because this
# gets called a lot and should be fast
if len(i) == 1:
i = i[0]
if not isinstance(i, Iterable):
i = as_int(i)
if i < 0 or i >= self.size:
raise TypeError(
"{} should be an integer between 0 and {}"
.format(i, self.size-1))
return self._array_form[i]
# P([a, b, c])
if len(i) != self.size:
raise TypeError(
"{} should have the length {}.".format(i, self.size))
return [i[j] for j in self._array_form]
# P(1, 2, 3)
return self*Permutation(Cycle(*i), size=self.size)
def atoms(self):
"""
Returns all the elements of a permutation
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation([0, 1, 2, 3, 4, 5]).atoms()
{0, 1, 2, 3, 4, 5}
>>> Permutation([[0, 1], [2, 3], [4, 5]]).atoms()
{0, 1, 2, 3, 4, 5}
"""
return set(self.array_form)
def apply(self, i):
r"""Apply the permutation to an expression.
Parameters
==========
i : Expr
It should be an integer between $0$ and $n-1$ where $n$
is the size of the permutation.
If it is a symbol or a symbolic expression that can
have integer values, an ``AppliedPermutation`` object
will be returned which can represent an unevaluated
function.
Notes
=====
Any permutation can be defined as a bijective function
$\sigma : \{ 0, 1, ..., n-1 \} \rightarrow \{ 0, 1, ..., n-1 \}$
where $n$ denotes the size of the permutation.
The definition may even be extended to any set with distinct
elements, so that the permutation could be applied to real
numbers and the like; however, this is not implemented, for
computational reasons and for consistency with the group theory
module.
This function is similar to the ``__call__`` magic; however,
``__call__`` already has other applications, such as permuting
an array or attaching new cycles, which would not always be
mathematically consistent.
This also guarantees that the return type is a SymPy integer,
so it is safe to use with assumptions.
"""
i = _sympify(i)
if i.is_integer is False:
raise NotImplementedError("{} should be an integer.".format(i))
n = self.size
if (i < 0) == True or (i >= n) == True:
raise NotImplementedError(
"{} should be an integer between 0 and {}".format(i, n-1))
if i.is_Integer:
return Integer(self._array_form[i])
return AppliedPermutation(self, i)
def next_lex(self):
"""
Returns the next permutation in lexicographical order.
If self is the last permutation in lexicographical order
it returns None.
See [4] section 2.4.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([2, 3, 1, 0])
>>> p = Permutation([2, 3, 1, 0]); p.rank()
17
>>> p = p.next_lex(); p.rank()
18
See Also
========
rank, unrank_lex
"""
perm = self.array_form[:]
n = len(perm)
i = n - 2
while perm[i + 1] < perm[i]:
i -= 1
if i == -1:
return None
else:
j = n - 1
while perm[j] < perm[i]:
j -= 1
perm[j], perm[i] = perm[i], perm[j]
i += 1
j = n - 1
while i < j:
perm[j], perm[i] = perm[i], perm[j]
i += 1
j -= 1
return self._af_new(perm)
@classmethod
def unrank_nonlex(cls, n, r):
"""
This is a linear time unranking algorithm that does not
respect lexicographic order [3].
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> Permutation.unrank_nonlex(4, 5)
Permutation([2, 0, 3, 1])
>>> Permutation.unrank_nonlex(4, -1)
Permutation([0, 1, 2, 3])
See Also
========
next_nonlex, rank_nonlex
"""
def _unrank1(n, r, a):
if n > 0:
a[n - 1], a[r % n] = a[r % n], a[n - 1]
_unrank1(n - 1, r//n, a)
id_perm = list(range(n))
n = int(n)
r = r % ifac(n)
_unrank1(n, r, id_perm)
return cls._af_new(id_perm)
def rank_nonlex(self, inv_perm=None):
"""
This is a linear time ranking algorithm that does not
enforce lexicographic order [3].
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 1, 2, 3])
>>> p.rank_nonlex()
23
See Also
========
next_nonlex, unrank_nonlex
"""
def _rank1(n, perm, inv_perm):
if n == 1:
return 0
s = perm[n - 1]
t = inv_perm[n - 1]
perm[n - 1], perm[t] = perm[t], s
inv_perm[n - 1], inv_perm[s] = inv_perm[s], t
return s + n*_rank1(n - 1, perm, inv_perm)
if inv_perm is None:
inv_perm = (~self).array_form
if not inv_perm:
return 0
perm = self.array_form[:]
r = _rank1(len(perm), perm, inv_perm)
return r
def next_nonlex(self):
"""
Returns the next permutation in nonlex order [3].
If self is the last permutation in this order it returns None.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> p = Permutation([2, 0, 3, 1]); p.rank_nonlex()
5
>>> p = p.next_nonlex(); p
Permutation([3, 0, 1, 2])
>>> p.rank_nonlex()
6
See Also
========
rank_nonlex, unrank_nonlex
"""
r = self.rank_nonlex()
if r == ifac(self.size) - 1:
return None
return self.unrank_nonlex(self.size, r + 1)
def rank(self):
"""
Returns the lexicographic rank of the permutation.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 1, 2, 3])
>>> p.rank()
0
>>> p = Permutation([3, 2, 1, 0])
>>> p.rank()
23
See Also
========
next_lex, unrank_lex, cardinality, length, order, size
"""
if self._rank is not None:
return self._rank
rank = 0
rho = self.array_form[:]
n = self.size - 1
size = n + 1
psize = int(ifac(n))
for j in range(size - 1):
rank += rho[j]*psize
for i in range(j + 1, size):
if rho[i] > rho[j]:
rho[i] -= 1
psize //= n
n -= 1
self._rank = rank
return rank
@property
def cardinality(self):
"""
Returns the number of all possible permutations.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 1, 2, 3])
>>> p.cardinality
24
See Also
========
length, order, rank, size
"""
return int(ifac(self.size))
def parity(self):
"""
Computes the parity of a permutation.
Explanation
===========
The parity of a permutation reflects the parity of the
number of inversions in the permutation, i.e., the
number of pairs of x and y such that ``x > y`` but ``p[x] < p[y]``.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 1, 2, 3])
>>> p.parity()
0
>>> p = Permutation([3, 2, 0, 1])
>>> p.parity()
1
See Also
========
_af_parity
"""
if self._cyclic_form is not None:
return (self.size - self.cycles) % 2
return _af_parity(self.array_form)
@property
def is_even(self):
"""
Checks if a permutation is even.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 1, 2, 3])
>>> p.is_even
True
>>> p = Permutation([3, 2, 1, 0])
>>> p.is_even
True
See Also
========
is_odd
"""
return not self.is_odd
@property
def is_odd(self):
"""
Checks if a permutation is odd.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 1, 2, 3])
>>> p.is_odd
False
>>> p = Permutation([3, 2, 0, 1])
>>> p.is_odd
True
See Also
========
is_even
"""
return bool(self.parity() % 2)
@property
def is_Singleton(self):
"""
Checks to see if the permutation contains only one number and is
thus the only possible permutation of this set of numbers
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation([0]).is_Singleton
True
>>> Permutation([0, 1]).is_Singleton
False
See Also
========
is_Empty
"""
return self.size == 1
@property
def is_Empty(self):
"""
Checks to see if the permutation is a set with zero elements
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation([]).is_Empty
True
>>> Permutation([0]).is_Empty
False
See Also
========
is_Singleton
"""
return self.size == 0
@property
def is_identity(self):
return self.is_Identity
@property
def is_Identity(self):
"""
Returns True if the Permutation is an identity permutation.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([])
>>> p.is_Identity
True
>>> p = Permutation([[0], [1], [2]])
>>> p.is_Identity
True
>>> p = Permutation([0, 1, 2])
>>> p.is_Identity
True
>>> p = Permutation([0, 2, 1])
>>> p.is_Identity
False
See Also
========
order
"""
af = self.array_form
return not af or all(i == af[i] for i in range(self.size))
def ascents(self):
"""
Returns the positions of ascents in a permutation, i.e., the locations
where p[i] < p[i+1]
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([4, 0, 1, 3, 2])
>>> p.ascents()
[1, 2]
See Also
========
descents, inversions, min, max
"""
a = self.array_form
pos = [i for i in range(len(a) - 1) if a[i] < a[i + 1]]
return pos
def descents(self):
"""
Returns the positions of descents in a permutation, i.e., the locations
where p[i] > p[i+1]
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([4, 0, 1, 3, 2])
>>> p.descents()
[0, 3]
See Also
========
ascents, inversions, min, max
"""
a = self.array_form
pos = [i for i in range(len(a) - 1) if a[i] > a[i + 1]]
return pos
def max(self):
"""
The maximum element moved by the permutation.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([1, 0, 2, 3, 4])
>>> p.max()
1
See Also
========
min, descents, ascents, inversions
"""
max = 0
a = self.array_form
for i in range(len(a)):
if a[i] != i and a[i] > max:
max = a[i]
return max
def min(self):
"""
The minimum element moved by the permutation.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 1, 4, 3, 2])
>>> p.min()
2
See Also
========
max, descents, ascents, inversions
"""
a = self.array_form
min = len(a)
for i in range(len(a)):
if a[i] != i and a[i] < min:
min = a[i]
return min
def inversions(self):
"""
Computes the number of inversions of a permutation.
Explanation
===========
An inversion is where i > j but p[i] < p[j].
For small length of p, it iterates over all i and j
values and calculates the number of inversions.
For large length of p, it uses a variation of merge
sort to calculate the number of inversions.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 1, 2, 3, 4, 5])
>>> p.inversions()
0
>>> Permutation([3, 2, 1, 0]).inversions()
6
See Also
========
descents, ascents, min, max
References
==========
.. [1] http://www.cp.eng.chula.ac.th/~piak/teaching/algo/algo2008/count-inv.htm
"""
inversions = 0
a = self.array_form
n = len(a)
if n < 130:
for i in range(n - 1):
b = a[i]
for c in a[i + 1:]:
if b > c:
inversions += 1
else:
k = 1
right = 0
arr = a[:]
temp = a[:]
while k < n:
i = 0
while i + k < n:
right = i + k * 2 - 1
if right >= n:
right = n - 1
inversions += _merge(arr, temp, i, i + k, right)
i = i + k * 2
k = k * 2
return inversions
def commutator(self, x):
"""Return the commutator of ``self`` and ``x``: ``~x*~self*x*self``
If f and g are part of a group, G, then the commutator of f and g
is the group identity iff f and g commute, i.e. fg == gf.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> p = Permutation([0, 2, 3, 1])
>>> x = Permutation([2, 0, 3, 1])
>>> c = p.commutator(x); c
Permutation([2, 1, 3, 0])
>>> c == ~x*~p*x*p
True
>>> I = Permutation(3)
>>> p = [I + i for i in range(6)]
>>> for i in range(len(p)):
... for j in range(len(p)):
... c = p[i].commutator(p[j])
... if p[i]*p[j] == p[j]*p[i]:
... assert c == I
... else:
... assert c != I
...
References
==========
.. [1] https://en.wikipedia.org/wiki/Commutator
"""
a = self.array_form
b = x.array_form
n = len(a)
if len(b) != n:
raise ValueError("The permutations must be of equal size.")
inva = [None]*n
for i in range(n):
inva[a[i]] = i
invb = [None]*n
for i in range(n):
invb[b[i]] = i
return self._af_new([a[b[inva[i]]] for i in invb])
def signature(self):
"""
Gives the signature of the permutation needed to place the
elements of the permutation in canonical order.
The signature is calculated as (-1)^<number of inversions>
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 1, 2])
>>> p.inversions()
0
>>> p.signature()
1
>>> q = Permutation([0,2,1])
>>> q.inversions()
1
>>> q.signature()
-1
See Also
========
inversions
"""
if self.is_even:
return 1
return -1
def order(self):
"""
Computes the order of a permutation.
When the permutation is raised to the power of its
order it equals the identity permutation.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> p = Permutation([3, 1, 5, 2, 4, 0])
>>> p.order()
4
>>> (p**(p.order()))
Permutation([], size=6)
See Also
========
identity, cardinality, length, rank, size
"""
return reduce(lcm, [len(cycle) for cycle in self.cyclic_form], 1)
def length(self):
"""
Returns the number of integers moved by a permutation.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation([0, 3, 2, 1]).length()
2
>>> Permutation([[0, 1], [2, 3]]).length()
4
See Also
========
min, max, support, cardinality, order, rank, size
"""
return len(self.support())
@property
def cycle_structure(self):
"""Return the cycle structure of the permutation as a dictionary
indicating the multiplicity of each cycle length.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation(3).cycle_structure
{1: 4}
>>> Permutation(0, 4, 3)(1, 2)(5, 6).cycle_structure
{2: 2, 3: 1}
"""
if self._cycle_structure:
rv = self._cycle_structure
else:
rv = defaultdict(int)
singletons = self.size
for c in self.cyclic_form:
rv[len(c)] += 1
singletons -= len(c)
if singletons:
rv[1] = singletons
self._cycle_structure = rv
return dict(rv) # make a copy
@property
def cycles(self):
"""
Returns the number of cycles contained in the permutation
(including singletons).
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation([0, 1, 2]).cycles
3
>>> Permutation([0, 1, 2]).full_cyclic_form
[[0], [1], [2]]
>>> Permutation(0, 1)(2, 3).cycles
2
See Also
========
sympy.functions.combinatorial.numbers.stirling
"""
return len(self.full_cyclic_form)
def index(self):
"""
Returns the index of a permutation.
The index of a permutation is the sum of all subscripts j such
that p[j] is greater than p[j+1].
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([3, 0, 2, 1, 4])
>>> p.index()
2
"""
a = self.array_form
return sum([j for j in range(len(a) - 1) if a[j] > a[j + 1]])
def runs(self):
"""
Returns the runs of a permutation.
An ascending sequence in a permutation is called a run [5].
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([2, 5, 7, 3, 6, 0, 1, 4, 8])
>>> p.runs()
[[2, 5, 7], [3, 6], [0, 1, 4, 8]]
>>> q = Permutation([1,3,2,0])
>>> q.runs()
[[1, 3], [2], [0]]
"""
return runs(self.array_form)
def inversion_vector(self):
"""Return the inversion vector of the permutation.
The inversion vector consists of elements whose value
indicates the number of elements in the permutation
that are smaller than it and lie to its right.
The inversion vector is the same as the Lehmer encoding of a
permutation.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([4, 8, 0, 7, 1, 5, 3, 6, 2])
>>> p.inversion_vector()
[4, 7, 0, 5, 0, 2, 1, 1]
>>> p = Permutation([3, 2, 1, 0])
>>> p.inversion_vector()
[3, 2, 1]
The inversion vector increases lexicographically with the rank
of the permutation, the i-th element from the right cycling through 0..i.
>>> p = Permutation(2)
>>> while p:
... print('%s %s %s' % (p, p.inversion_vector(), p.rank()))
... p = p.next_lex()
(2) [0, 0] 0
(1 2) [0, 1] 1
(2)(0 1) [1, 0] 2
(0 1 2) [1, 1] 3
(0 2 1) [2, 0] 4
(0 2) [2, 1] 5
See Also
========
from_inversion_vector
"""
self_array_form = self.array_form
n = len(self_array_form)
inversion_vector = [0] * (n - 1)
for i in range(n - 1):
val = 0
for j in range(i + 1, n):
if self_array_form[j] < self_array_form[i]:
val += 1
inversion_vector[i] = val
return inversion_vector
def rank_trotterjohnson(self):
"""
Returns the Trotter-Johnson rank, which we get from the minimal
change algorithm. See [4] section 2.4.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 1, 2, 3])
>>> p.rank_trotterjohnson()
0
>>> p = Permutation([0, 2, 1, 3])
>>> p.rank_trotterjohnson()
7
See Also
========
unrank_trotterjohnson, next_trotterjohnson
"""
if self.array_form == [] or self.is_Identity:
return 0
if self.array_form == [1, 0]:
return 1
perm = self.array_form
n = self.size
rank = 0
for j in range(1, n):
k = 1
i = 0
while perm[i] != j:
if perm[i] < j:
k += 1
i += 1
j1 = j + 1
if rank % 2 == 0:
rank = j1*rank + j1 - k
else:
rank = j1*rank + k - 1
return rank
@classmethod
def unrank_trotterjohnson(cls, size, rank):
"""
Trotter-Johnson permutation unranking. See [4] section 2.4.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> Permutation.unrank_trotterjohnson(5, 10)
Permutation([0, 3, 1, 2, 4])
See Also
========
rank_trotterjohnson, next_trotterjohnson
"""
perm = [0]*size
r2 = 0
n = ifac(size)
pj = 1
for j in range(2, size + 1):
pj *= j
r1 = (rank * pj) // n
k = r1 - j*r2
if r2 % 2 == 0:
for i in range(j - 1, j - k - 1, -1):
perm[i] = perm[i - 1]
perm[j - k - 1] = j - 1
else:
for i in range(j - 1, k, -1):
perm[i] = perm[i - 1]
perm[k] = j - 1
r2 = r1
return cls._af_new(perm)
def next_trotterjohnson(self):
"""
Returns the next permutation in Trotter-Johnson order.
If self is the last permutation it returns None.
See [4] section 2.4. If it is desired to generate all such
permutations, they can be generated in order more quickly
with the ``generate_bell`` function.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> p = Permutation([3, 0, 2, 1])
>>> p.rank_trotterjohnson()
4
>>> p = p.next_trotterjohnson(); p
Permutation([0, 3, 2, 1])
>>> p.rank_trotterjohnson()
5
See Also
========
rank_trotterjohnson, unrank_trotterjohnson, sympy.utilities.iterables.generate_bell
"""
pi = self.array_form[:]
n = len(pi)
st = 0
rho = pi[:]
done = False
m = n-1
while m > 0 and not done:
d = rho.index(m)
for i in range(d, m):
rho[i] = rho[i + 1]
par = _af_parity(rho[:m])
if par == 1:
if d == m:
m -= 1
else:
pi[st + d], pi[st + d + 1] = pi[st + d + 1], pi[st + d]
done = True
else:
if d == 0:
m -= 1
st += 1
else:
pi[st + d], pi[st + d - 1] = pi[st + d - 1], pi[st + d]
done = True
if m == 0:
return None
return self._af_new(pi)
def get_precedence_matrix(self):
"""
Gets the precedence matrix. This is used for computing the
distance between two permutations.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> p = Permutation.josephus(3, 6, 1)
>>> p
Permutation([2, 5, 3, 1, 4, 0])
>>> p.get_precedence_matrix()
Matrix([
[0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 0],
[1, 1, 0, 1, 1, 1],
[1, 1, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 1, 0]])
See Also
========
get_precedence_distance, get_adjacency_matrix, get_adjacency_distance
"""
m = zeros(self.size)
perm = self.array_form
for i in range(m.rows):
for j in range(i + 1, m.cols):
m[perm[i], perm[j]] = 1
return m
def get_precedence_distance(self, other):
"""
Computes the precedence distance between two permutations.
Explanation
===========
Suppose p and p' represent n jobs. The precedence metric
counts the number of times a job j is preceded by job i
in both p and p'. This metric is commutative.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([2, 0, 4, 3, 1])
>>> q = Permutation([3, 1, 2, 4, 0])
>>> p.get_precedence_distance(q)
7
>>> q.get_precedence_distance(p)
7
See Also
========
get_precedence_matrix, get_adjacency_matrix, get_adjacency_distance
"""
if self.size != other.size:
raise ValueError("The permutations must be of equal size.")
self_prec_mat = self.get_precedence_matrix()
other_prec_mat = other.get_precedence_matrix()
n_prec = 0
for i in range(self.size):
for j in range(self.size):
if i == j:
continue
if self_prec_mat[i, j] * other_prec_mat[i, j] == 1:
n_prec += 1
d = self.size * (self.size - 1)//2 - n_prec
return d
def get_adjacency_matrix(self):
"""
Computes the adjacency matrix of a permutation.
Explanation
===========
If job i is adjacent to job j in a permutation p
then we set m[i, j] = 1 where m is the adjacency
matrix of p.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation.josephus(3, 6, 1)
>>> p.get_adjacency_matrix()
Matrix([
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0]])
>>> q = Permutation([0, 1, 2, 3])
>>> q.get_adjacency_matrix()
Matrix([
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 0, 0, 0]])
See Also
========
get_precedence_matrix, get_precedence_distance, get_adjacency_distance
"""
m = zeros(self.size)
perm = self.array_form
for i in range(self.size - 1):
m[perm[i], perm[i + 1]] = 1
return m
def get_adjacency_distance(self, other):
"""
Computes the adjacency distance between two permutations.
Explanation
===========
This metric counts the number of times a pair i,j of jobs is
adjacent in both p and p'. If n_adj is this quantity then
the adjacency distance is n - n_adj - 1 [1]
[1] Reeves, Colin R. Landscapes, Operators and Heuristic Search,
Annals of Operations Research, 86, pp. 473-490 (1999).
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 3, 1, 2, 4])
>>> q = Permutation.josephus(4, 5, 2)
>>> p.get_adjacency_distance(q)
3
>>> r = Permutation([0, 2, 1, 4, 3])
>>> p.get_adjacency_distance(r)
4
See Also
========
get_precedence_matrix, get_precedence_distance, get_adjacency_matrix
"""
if self.size != other.size:
raise ValueError("The permutations must be of the same size.")
self_adj_mat = self.get_adjacency_matrix()
other_adj_mat = other.get_adjacency_matrix()
n_adj = 0
for i in range(self.size):
for j in range(self.size):
if i == j:
continue
if self_adj_mat[i, j] * other_adj_mat[i, j] == 1:
n_adj += 1
d = self.size - n_adj - 1
return d
def get_positional_distance(self, other):
"""
Computes the positional distance between two permutations.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> p = Permutation([0, 3, 1, 2, 4])
>>> q = Permutation.josephus(4, 5, 2)
>>> r = Permutation([3, 1, 4, 0, 2])
>>> p.get_positional_distance(q)
12
>>> p.get_positional_distance(r)
12
See Also
========
get_precedence_distance, get_adjacency_distance
"""
a = self.array_form
b = other.array_form
if len(a) != len(b):
raise ValueError("The permutations must be of the same size.")
return sum([abs(a[i] - b[i]) for i in range(len(a))])
@classmethod
def josephus(cls, m, n, s=1):
"""Return as a permutation the shuffling of range(n) using the Josephus
scheme in which every m-th item is selected until all have been chosen.
The returned permutation has elements listed by the order in which they
were selected.
The parameter ``s`` stops the selection process when there are ``s``
items remaining and these are selected by continuing the selection,
counting by 1 rather than by ``m``.
Consider selecting every 3rd item from 6 until only 2 remain::
choices chosen
======== ======
012345
01 345 2
01 34 25
01 4 253
0 4 2531
0 25314
253140
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.josephus(3, 6, 2).array_form
[2, 5, 3, 1, 4, 0]
References
==========
.. [1] https://en.wikipedia.org/wiki/Flavius_Josephus
.. [2] https://en.wikipedia.org/wiki/Josephus_problem
.. [3] http://www.wou.edu/~burtonl/josephus.html
"""
from collections import deque
m -= 1
Q = deque(list(range(n)))
perm = []
while len(Q) > max(s, 1):
for dp in range(m):
Q.append(Q.popleft())
perm.append(Q.popleft())
perm.extend(list(Q))
return cls(perm)
@classmethod
def from_inversion_vector(cls, inversion):
"""
Calculates the permutation from the inversion vector.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> Permutation.from_inversion_vector([3, 2, 1, 0, 0])
Permutation([3, 2, 1, 0, 4, 5])
"""
size = len(inversion)
N = list(range(size + 1))
perm = []
try:
for k in range(size):
val = N[inversion[k]]
perm.append(val)
N.remove(val)
except IndexError:
raise ValueError("The inversion vector is not valid.")
perm.extend(N)
return cls._af_new(perm)
@classmethod
def random(cls, n):
"""
Generates a random permutation of length ``n``.
Uses the underlying Python pseudo-random number generator.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> Permutation.random(2) in (Permutation([1, 0]), Permutation([0, 1]))
True
"""
perm_array = list(range(n))
random.shuffle(perm_array)
return cls._af_new(perm_array)
@classmethod
def unrank_lex(cls, size, rank):
"""
Lexicographic permutation unranking.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> init_printing(perm_cyclic=False, pretty_print=False)
>>> a = Permutation.unrank_lex(5, 10)
>>> a.rank()
10
>>> a
Permutation([0, 2, 4, 1, 3])
See Also
========
rank, next_lex
"""
perm_array = [0] * size
psize = 1
for i in range(size):
new_psize = psize*(i + 1)
d = (rank % new_psize) // psize
rank -= d*psize
perm_array[size - i - 1] = d
for j in range(size - i, size):
if perm_array[j] > d - 1:
perm_array[j] += 1
psize = new_psize
return cls._af_new(perm_array)
def resize(self, n):
"""Resize the permutation to the new size ``n``.
Parameters
==========
n : int
The new size of the permutation.
Raises
======
ValueError
If the permutation cannot be resized to the given size.
This may only happen when resized to a smaller size than
the original.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
Increasing the size of a permutation:
>>> p = Permutation(0, 1, 2)
>>> p = p.resize(5)
>>> p
(4)(0 1 2)
Decreasing the size of the permutation:
>>> p = p.resize(4)
>>> p
(3)(0 1 2)
If resizing to the specific size breaks the cycles:
>>> p.resize(2)
Traceback (most recent call last):
...
ValueError: The permutation can not be resized to 2 because the
cycle (0, 1, 2) may break.
"""
aform = self.array_form
l = len(aform)
if n > l:
aform += list(range(l, n))
return Permutation._af_new(aform)
elif n < l:
cyclic_form = self.full_cyclic_form
new_cyclic_form = []
for cycle in cyclic_form:
cycle_min = min(cycle)
cycle_max = max(cycle)
if cycle_min <= n-1:
if cycle_max > n-1:
raise ValueError(
"The permutation can not be resized to {} "
"because the cycle {} may break."
.format(n, tuple(cycle)))
new_cyclic_form.append(cycle)
return Permutation(new_cyclic_form)
return self
# XXX Deprecated flag
print_cyclic = None
def _merge(arr, temp, left, mid, right):
"""
Merges two sorted arrays and calculates the inversion count.
Helper function for calculating inversions. This method is
for internal use only.
"""
i = k = left
j = mid
inv_count = 0
while i < mid and j <= right:
if arr[i] < arr[j]:
temp[k] = arr[i]
k += 1
i += 1
else:
temp[k] = arr[j]
k += 1
j += 1
inv_count += (mid - i)
while i < mid:
temp[k] = arr[i]
k += 1
i += 1
if j <= right:
k += right - j + 1
j += right - j + 1
arr[left:k + 1] = temp[left:k + 1]
else:
arr[left:right + 1] = temp[left:right + 1]
return inv_count
Perm = Permutation
_af_new = Perm._af_new
class AppliedPermutation(Expr):
"""A permutation applied to a symbolic variable.
Parameters
==========
perm : Permutation
x : Expr
Examples
========
>>> from sympy import Symbol
>>> from sympy.combinatorics import Permutation
Creating a symbolic permutation function application:
>>> x = Symbol('x')
>>> p = Permutation(0, 1, 2)
>>> p.apply(x)
AppliedPermutation((0 1 2), x)
>>> _.subs(x, 1)
2
"""
def __new__(cls, perm, x, evaluate=None):
if evaluate is None:
evaluate = global_parameters.evaluate
perm = _sympify(perm)
x = _sympify(x)
if not isinstance(perm, Permutation):
raise ValueError("{} must be a Permutation instance."
.format(perm))
if evaluate:
if x.is_Integer:
return perm.apply(x)
obj = super().__new__(cls, perm, x)
return obj
@dispatch(Permutation, Permutation)
def _eval_is_eq(lhs, rhs):
if lhs._size != rhs._size:
return None
return lhs._array_form == rhs._array_form
|
dcbff30c138b49b2ba5c8435bedee7ec8b793900e080202fb10db1224d56aa56 | from itertools import combinations
from sympy.combinatorics.graycode import GrayCode
from sympy.core import Basic
class Subset(Basic):
"""
Represents a basic subset object.
Explanation
===========
We generate subsets using essentially two techniques,
binary enumeration and lexicographic enumeration.
The Subset class takes two arguments, the first one
describes the initial subset to consider and the second
describes the superset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.next_binary().subset
['b']
>>> a.prev_binary().subset
['c']
"""
_rank_binary = None
_rank_lex = None
_rank_graycode = None
_subset = None
_superset = None
def __new__(cls, subset, superset):
"""
Default constructor.
It takes the ``subset`` and its ``superset`` as its parameters.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.subset
['c', 'd']
>>> a.superset
['a', 'b', 'c', 'd']
>>> a.size
2
"""
if len(subset) > len(superset):
raise ValueError('Invalid arguments have been provided. The '
'superset must be at least as large as the subset.')
for elem in subset:
if elem not in superset:
raise ValueError('The superset provided is invalid as it does '
'not contain the element {}'.format(elem))
obj = Basic.__new__(cls)
obj._subset = subset
obj._superset = superset
return obj
def iterate_binary(self, k):
"""
This is a helper function. It iterates over the
binary subsets by ``k`` steps. This variable can be
either positive or negative.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.iterate_binary(-2).subset
['d']
>>> a = Subset(['a', 'b', 'c'], ['a', 'b', 'c', 'd'])
>>> a.iterate_binary(2).subset
[]
See Also
========
next_binary, prev_binary
"""
bin_list = Subset.bitlist_from_subset(self.subset, self.superset)
n = (int(''.join(bin_list), 2) + k) % 2**self.superset_size
bits = bin(n)[2:].rjust(self.superset_size, '0')
return Subset.subset_from_bitlist(self.superset, bits)
def next_binary(self):
"""
Generates the next binary ordered subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.next_binary().subset
['b']
>>> a = Subset(['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.next_binary().subset
[]
See Also
========
prev_binary, iterate_binary
"""
return self.iterate_binary(1)
def prev_binary(self):
"""
Generates the previous binary ordered subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset([], ['a', 'b', 'c', 'd'])
>>> a.prev_binary().subset
['a', 'b', 'c', 'd']
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.prev_binary().subset
['c']
See Also
========
next_binary, iterate_binary
"""
return self.iterate_binary(-1)
def next_lexicographic(self):
"""
Generates the next lexicographically ordered subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.next_lexicographic().subset
['d']
>>> a = Subset(['d'], ['a', 'b', 'c', 'd'])
>>> a.next_lexicographic().subset
[]
See Also
========
prev_lexicographic
"""
i = self.superset_size - 1
indices = Subset.subset_indices(self.subset, self.superset)
if i in indices:
if i - 1 in indices:
indices.remove(i - 1)
else:
indices.remove(i)
i = i - 1
while i not in indices and i >= 0:
i = i - 1
if i >= 0:
indices.remove(i)
indices.append(i+1)
else:
while i not in indices and i >= 0:
i = i - 1
indices.append(i + 1)
ret_set = []
super_set = self.superset
for i in indices:
ret_set.append(super_set[i])
return Subset(ret_set, super_set)
def prev_lexicographic(self):
"""
Generates the previous lexicographically ordered subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset([], ['a', 'b', 'c', 'd'])
>>> a.prev_lexicographic().subset
['d']
>>> a = Subset(['c','d'], ['a', 'b', 'c', 'd'])
>>> a.prev_lexicographic().subset
['c']
See Also
========
next_lexicographic
"""
i = self.superset_size - 1
indices = Subset.subset_indices(self.subset, self.superset)
while i not in indices and i >= 0:
i = i - 1
if i - 1 in indices or i == 0:
indices.remove(i)
else:
if i >= 0:
indices.remove(i)
indices.append(i - 1)
indices.append(self.superset_size - 1)
ret_set = []
super_set = self.superset
for i in indices:
ret_set.append(super_set[i])
return Subset(ret_set, super_set)
def iterate_graycode(self, k):
"""
Helper function used for prev_gray and next_gray.
It steps through the Gray code ordering by ``k`` positions to get the
corresponding subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset([1, 2, 3], [1, 2, 3, 4])
>>> a.iterate_graycode(3).subset
[1, 4]
>>> a.iterate_graycode(-2).subset
[1, 2, 4]
See Also
========
next_gray, prev_gray
"""
unranked_code = GrayCode.unrank(self.superset_size,
(self.rank_gray + k) % self.cardinality)
return Subset.subset_from_bitlist(self.superset,
unranked_code)
def next_gray(self):
"""
Generates the next Gray code ordered subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset([1, 2, 3], [1, 2, 3, 4])
>>> a.next_gray().subset
[1, 3]
See Also
========
iterate_graycode, prev_gray
"""
return self.iterate_graycode(1)
def prev_gray(self):
"""
Generates the previous Gray code ordered subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset([2, 3, 4], [1, 2, 3, 4, 5])
>>> a.prev_gray().subset
[2, 3, 4, 5]
See Also
========
iterate_graycode, next_gray
"""
return self.iterate_graycode(-1)
@property
def rank_binary(self):
"""
Computes the binary ordered rank.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset([], ['a','b','c','d'])
>>> a.rank_binary
0
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.rank_binary
3
See Also
========
iterate_binary, unrank_binary
"""
if self._rank_binary is None:
self._rank_binary = int("".join(
Subset.bitlist_from_subset(self.subset,
self.superset)), 2)
return self._rank_binary
@property
def rank_lexicographic(self):
"""
Computes the lexicographic ranking of the subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.rank_lexicographic
14
>>> a = Subset([2, 4, 5], [1, 2, 3, 4, 5, 6])
>>> a.rank_lexicographic
43
"""
if self._rank_lex is None:
def _ranklex(self, subset_index, i, n):
if subset_index == [] or i > n:
return 0
if i in subset_index:
subset_index.remove(i)
return 1 + _ranklex(self, subset_index, i + 1, n)
return 2**(n - i - 1) + _ranklex(self, subset_index, i + 1, n)
indices = Subset.subset_indices(self.subset, self.superset)
self._rank_lex = _ranklex(self, indices, 0, self.superset_size)
return self._rank_lex
@property
def rank_gray(self):
"""
Computes the Gray code ranking of the subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c','d'], ['a','b','c','d'])
>>> a.rank_gray
2
>>> a = Subset([2, 4, 5], [1, 2, 3, 4, 5, 6])
>>> a.rank_gray
27
See Also
========
iterate_graycode, unrank_gray
"""
if self._rank_graycode is None:
bits = Subset.bitlist_from_subset(self.subset, self.superset)
self._rank_graycode = GrayCode(len(bits), start=bits).rank
return self._rank_graycode
@property
def subset(self):
"""
Gets the subset represented by the current instance.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.subset
['c', 'd']
See Also
========
superset, size, superset_size, cardinality
"""
return self._subset
@property
def size(self):
"""
Gets the size of the subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.size
2
See Also
========
subset, superset, superset_size, cardinality
"""
return len(self.subset)
@property
def superset(self):
"""
Gets the superset of the subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.superset
['a', 'b', 'c', 'd']
See Also
========
subset, size, superset_size, cardinality
"""
return self._superset
@property
def superset_size(self):
"""
Returns the size of the superset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.superset_size
4
See Also
========
subset, superset, size, cardinality
"""
return len(self.superset)
@property
def cardinality(self):
"""
Returns the number of all possible subsets.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.cardinality
16
See Also
========
subset, superset, size, superset_size
"""
return 2**(self.superset_size)
@classmethod
def subset_from_bitlist(cls, super_set, bitlist):
"""
Gets the subset defined by the bitlist.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> Subset.subset_from_bitlist(['a', 'b', 'c', 'd'], '0011').subset
['c', 'd']
See Also
========
bitlist_from_subset
"""
if len(super_set) != len(bitlist):
raise ValueError("The sizes of the lists are not equal")
ret_set = []
for i in range(len(bitlist)):
if bitlist[i] == '1':
ret_set.append(super_set[i])
return Subset(ret_set, super_set)
@classmethod
def bitlist_from_subset(self, subset, superset):
"""
Gets the bitlist corresponding to a subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> Subset.bitlist_from_subset(['c', 'd'], ['a', 'b', 'c', 'd'])
'0011'
See Also
========
subset_from_bitlist
"""
bitlist = ['0'] * len(superset)
if type(subset) is Subset:
subset = subset.subset
for i in Subset.subset_indices(subset, superset):
bitlist[i] = '1'
return ''.join(bitlist)
@classmethod
def unrank_binary(self, rank, superset):
"""
Gets the binary ordered subset of the specified rank.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> Subset.unrank_binary(4, ['a', 'b', 'c', 'd']).subset
['b']
See Also
========
iterate_binary, rank_binary
"""
bits = bin(rank)[2:].rjust(len(superset), '0')
return Subset.subset_from_bitlist(superset, bits)
@classmethod
def unrank_gray(self, rank, superset):
"""
Gets the Gray code ordered subset of the specified rank.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> Subset.unrank_gray(4, ['a', 'b', 'c']).subset
['a', 'b']
>>> Subset.unrank_gray(0, ['a', 'b', 'c']).subset
[]
See Also
========
iterate_graycode, rank_gray
"""
graycode_bitlist = GrayCode.unrank(len(superset), rank)
return Subset.subset_from_bitlist(superset, graycode_bitlist)
@classmethod
def subset_indices(self, subset, superset):
"""Return indices of subset in superset in a list; the list is empty
if any element of ``subset`` is not in ``superset``.
Examples
========
>>> from sympy.combinatorics import Subset
>>> superset = [1, 3, 2, 5, 4]
>>> Subset.subset_indices([3, 2, 1], superset)
[1, 2, 0]
>>> Subset.subset_indices([1, 6], superset)
[]
>>> Subset.subset_indices([], superset)
[]
"""
a, b = superset, subset
sb = set(b)
d = {}
for i, ai in enumerate(a):
if ai in sb:
d[ai] = i
sb.remove(ai)
if not sb:
break
else:
return list()
return [d[bi] for bi in b]
def ksubsets(superset, k):
"""
Finds the subsets of size ``k`` in lexicographic order.
This uses the itertools generator.
Examples
========
>>> from sympy.combinatorics.subsets import ksubsets
>>> list(ksubsets([1, 2, 3], 2))
[(1, 2), (1, 3), (2, 3)]
>>> list(ksubsets([1, 2, 3, 4, 5], 2))
[(1, 2), (1, 3), (1, 4), (1, 5), (2, 3), (2, 4), \
(2, 5), (3, 4), (3, 5), (4, 5)]
See Also
========
Subset
"""
return combinations(superset, k)
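# Illustrative note (added sketch, not part of the original module): the number
# of k-subsets of an n-element superset is the binomial coefficient C(n, k), so
# the generator above can be sanity-checked against sympy.binomial, e.g.
#
#     from sympy import binomial
#     assert len(list(ksubsets(range(5), 2))) == binomial(5, 2)  # 10 pairs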
|
20ac75dbfc9626a110ee0fa806e0167795512db08e5b22f42c6e0f42ee37052f | from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.combinatorics.permutations import Permutation
from sympy.utilities.iterables import uniq
_af_new = Permutation._af_new
def DirectProduct(*groups):
"""
Returns the direct product of several groups as a permutation group.
Explanation
===========
This is implemented much like the __mul__ procedure for taking the direct
product of two permutation groups, but the idea of shifting the
generators is realized in the case of an arbitrary number of groups.
A call to DirectProduct(G1, G2, ..., Gn) is generally expected to be faster
than a call to G1*G2*...*Gn (and thus the need for this algorithm).
Examples
========
>>> from sympy.combinatorics.group_constructs import DirectProduct
>>> from sympy.combinatorics.named_groups import CyclicGroup
>>> C = CyclicGroup(4)
>>> G = DirectProduct(C, C, C)
>>> G.order()
64
See Also
========
sympy.combinatorics.perm_groups.PermutationGroup.__mul__
"""
degrees = []
gens_count = []
total_degree = 0
total_gens = 0
for group in groups:
current_deg = group.degree
current_num_gens = len(group.generators)
degrees.append(current_deg)
total_degree += current_deg
gens_count.append(current_num_gens)
total_gens += current_num_gens
array_gens = []
for i in range(total_gens):
array_gens.append(list(range(total_degree)))
current_gen = 0
current_deg = 0
for i in range(len(gens_count)):
for j in range(current_gen, current_gen + gens_count[i]):
gen = ((groups[i].generators)[j - current_gen]).array_form
array_gens[j][current_deg:current_deg + degrees[i]] = \
[x + current_deg for x in gen]
current_gen += gens_count[i]
current_deg += degrees[i]
perm_gens = list(uniq([_af_new(list(a)) for a in array_gens]))
return PermutationGroup(perm_gens, dups=False)
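# Illustrative sketch (not part of the original module): the order of the
# direct product is the product of the component orders, e.g.
#
#     from sympy.combinatorics.named_groups import CyclicGroup
#     assert DirectProduct(CyclicGroup(3), CyclicGroup(5)).order() == 15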
|
e606bdf433557bb0e9363559f9b0d58af0c56dde60895b16bda3bfe1546a88ff | from sympy.combinatorics.permutations import Permutation, _af_rmul, \
_af_invert, _af_new
from sympy.combinatorics.perm_groups import PermutationGroup, _orbit, \
_orbit_transversal
from sympy.combinatorics.util import _distribute_gens_by_base, \
_orbits_transversals_from_bsgs
"""
References for tensor canonicalization:
[1] R. Portugal "Algorithmic simplification of tensor expressions",
J. Phys. A 32 (1999) 7779-7789
[2] R. Portugal, B.F. Svaiter "Group-theoretic Approach for Symbolic
Tensor Manipulation: I. Free Indices"
arXiv:math-ph/0107031v1
[3] L.R.U. Manssur, R. Portugal "Group-theoretic Approach for Symbolic
Tensor Manipulation: II. Dummy Indices"
arXiv:math-ph/0107032v1
[4] xperm.c part of XPerm written by J. M. Martin-Garcia
http://www.xact.es/index.html
"""
def dummy_sgs(dummies, sym, n):
"""
Return the strong generators for dummy indices.
Parameters
==========
dummies : List of dummy indices.
`dummies[2k], dummies[2k+1]` are paired indices.
In base form, the dummy indices are always in
consecutive positions.
sym : symmetry under interchange of contracted dummies::
* None no symmetry
* 0 commuting
* 1 anticommuting
n : number of indices
Examples
========
>>> from sympy.combinatorics.tensor_can import dummy_sgs
>>> dummy_sgs(list(range(2, 8)), 0, 8)
[[0, 1, 3, 2, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 5, 4, 6, 7, 8, 9],
[0, 1, 2, 3, 4, 5, 7, 6, 8, 9], [0, 1, 4, 5, 2, 3, 6, 7, 8, 9],
[0, 1, 2, 3, 6, 7, 4, 5, 8, 9]]
"""
if len(dummies) > n:
raise ValueError("List too large")
res = []
# exchange of contravariant and covariant indices
if sym is not None:
for j in dummies[::2]:
a = list(range(n + 2))
if sym == 1:
a[n] = n + 1
a[n + 1] = n
a[j], a[j + 1] = a[j + 1], a[j]
res.append(a)
# rename dummy indices
for j in dummies[:-3:2]:
a = list(range(n + 2))
a[j:j + 4] = a[j + 2], a[j + 3], a[j], a[j + 1]
res.append(a)
return res
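# Worked note (illustrative, based on the doctest above): for m dummy pairs with
# sym not None, the first loop yields m pair-exchange generators and the second
# loop yields m - 1 adjacent renaming generators, 2*m - 1 in total, e.g.
#
#     assert len(dummy_sgs(list(range(2, 8)), 0, 8)) == 2*3 - 1  # 3 pairs -> 5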
def _min_dummies(dummies, sym, indices):
"""
Return list of minima of the orbits of indices in group of dummies.
See ``double_coset_can_rep`` for the description of ``dummies`` and ``sym``.
``indices`` is the initial list of dummy indices.
Examples
========
>>> from sympy.combinatorics.tensor_can import _min_dummies
>>> _min_dummies([list(range(2, 8))], [0], list(range(10)))
[0, 1, 2, 2, 2, 2, 2, 2, 8, 9]
"""
num_types = len(sym)
m = []
for dx in dummies:
if dx:
m.append(min(dx))
else:
m.append(None)
res = indices[:]
for c, ii in enumerate(indices):
for j in range(num_types):
if ii in dummies[j]:
res[c] = m[j]
break
return res
def _trace_S(s, j, b, S_cosets):
"""
Return the representative h satisfying s[h[b]] == j
If there is not such a representative return None
"""
for h in S_cosets[b]:
if s[h[b]] == j:
return h
return None
def _trace_D(gj, p_i, Dxtrav):
"""
Return the representative h satisfying h[gj] == p_i
If there is not such a representative return None
"""
for h in Dxtrav:
if h[gj] == p_i:
return h
return None
def _dumx_remove(dumx, dumx_flat, p0):
"""
remove p0 from dumx
"""
res = []
for dx in dumx:
if p0 not in dx:
res.append(dx)
continue
k = dx.index(p0)
if k % 2 == 0:
p0_paired = dx[k + 1]
else:
p0_paired = dx[k - 1]
dx.remove(p0)
dx.remove(p0_paired)
dumx_flat.remove(p0)
dumx_flat.remove(p0_paired)
res.append(dx)
def transversal2coset(size, base, transversal):
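# Expand the transversals, which are given only at the base points, into one
# coset list per position; non-base positions get the singleton identity coset
# and trailing identity entries are trimmed.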
a = []
j = 0
for i in range(size):
if i in base:
a.append(sorted(transversal[j].values()))
j += 1
else:
a.append([list(range(size))])
j = len(a) - 1
while a[j] == [list(range(size))]:
j -= 1
return a[:j + 1]
def double_coset_can_rep(dummies, sym, b_S, sgens, S_transversals, g):
"""
Butler-Portugal algorithm for tensor canonicalization with dummy indices.
Parameters
==========
dummies
list of lists of dummy indices,
one list for each type of index;
the dummy indices are put in order contravariant, covariant
[d0, -d0, d1, -d1, ...].
sym
list of the symmetries of the index metric for each type.
possible symmetries of the metrics
* 0 symmetric
* 1 antisymmetric
* None no symmetry
b_S
base of a minimal slot symmetry BSGS.
sgens
generators of the slot symmetry BSGS.
S_transversals
transversals for the slot BSGS.
g
permutation representing the tensor.
Returns
=======
Return 0 if the tensor is zero, else return the array form of
the permutation representing the canonical form of the tensor.
Notes
=====
A tensor with dummy indices can be represented in a number
of equivalent ways which typically grows exponentially with
the number of indices. To be able to establish if two tensors
with many indices are equal becomes computationally very slow
in absence of an efficient algorithm.
The Butler-Portugal algorithm [3] is an efficient algorithm to
put tensors in canonical form, solving the above problem.
Portugal observed that a tensor can be represented by a permutation,
and that the class of tensors equivalent to it under slot and dummy
symmetries is equivalent to the double coset `D*g*S`
(Note: in this documentation we use the conventions for multiplication
of permutations p, q with (p*q)(i) = p[q[i]] which is opposite
to the one used in the Permutation class)
Using the algorithm by Butler to find a representative of the
double coset one can find a canonical form for the tensor.
To see this correspondence,
let `g` be a permutation in array form; a tensor with indices `ind`
(the indices including both the contravariant and the covariant ones)
can be written as
`t = T(ind[g[0]],..., ind[g[n-1]])`,
where `n= len(ind)`;
`g` has size `n + 2`, the last two indices for the sign of the tensor
(trick introduced in [4]).
A slot symmetry transformation `s` is a permutation acting on the slots
`t -> T(ind[(g*s)[0]],..., ind[(g*s)[n-1]])`
A dummy symmetry transformation acts on `ind`
`t -> T(ind[(d*g)[0]],..., ind[(d*g)[n-1]])`
Being interested only in the transformations of the tensor under
these symmetries, one can represent the tensor by `g`, which transforms
as
`g -> d*g*s`, so it belongs to the coset `D*g*S`, or in other words
to the set of all permutations allowed by the slot and dummy symmetries.
Let us explain the conventions by an example.
Given a tensor `T^{d3 d2 d1}{}_{d1 d2 d3}` with the slot symmetries
`T^{a0 a1 a2 a3 a4 a5} = -T^{a2 a1 a0 a3 a4 a5}`
`T^{a0 a1 a2 a3 a4 a5} = -T^{a4 a1 a2 a3 a0 a5}`
and symmetric metric, find the tensor equivalent to it which
is the lowest under the ordering of indices:
lexicographic ordering `d1, d2, d3` and then contravariant
before covariant index; that is the canonical form of the tensor.
The canonical form is `-T^{d1 d2 d3}{}_{d1 d2 d3}`
obtained using `T^{a0 a1 a2 a3 a4 a5} = -T^{a2 a1 a0 a3 a4 a5}`.
To convert this problem in the input for this function,
use the following ordering of the index names
(- for covariant for short) `d1, -d1, d2, -d2, d3, -d3`
`T^{d3 d2 d1}{}_{d1 d2 d3}` corresponds to `g = [4, 2, 0, 1, 3, 5, 6, 7]`
where the last two indices are for the sign
`sgens = [Permutation(0, 2)(6, 7), Permutation(0, 4)(6, 7)]`
sgens[0] is the slot symmetry `-(0, 2)`
`T^{a0 a1 a2 a3 a4 a5} = -T^{a2 a1 a0 a3 a4 a5}`
sgens[1] is the slot symmetry `-(0, 4)`
`T^{a0 a1 a2 a3 a4 a5} = -T^{a4 a1 a2 a3 a0 a5}`
The dummy symmetry group D is generated by the strong base generators
`[(0, 1), (2, 3), (4, 5), (0, 2)(1, 3), (0, 4)(1, 5)]`
where the first three interchange covariant and contravariant
positions of the same index (d1 <-> -d1) and the last two interchange
the dummy indices themselves (d1 <-> d2).
The dummy symmetry acts from the left
`d = [1, 0, 2, 3, 4, 5, 6, 7]` exchange `d1 <-> -d1`
`T^{d3 d2 d1}{}_{d1 d2 d3} == T^{d3 d2}{}_{d1}{}^{d1}{}_{d2 d3}`
`g=[4, 2, 0, 1, 3, 5, 6, 7] -> [4, 2, 1, 0, 3, 5, 6, 7] = _af_rmul(d, g)`
which differs from `_af_rmul(g, d)`.
The slot symmetry acts from the right
`s = [2, 1, 0, 3, 4, 5, 7, 6]` exchanges slots 0 and 2 and changes sign
`T^{d3 d2 d1}{}_{d1 d2 d3} == -T^{d1 d2 d3}{}_{d1 d2 d3}`
`g=[4,2,0,1,3,5,6,7] -> [0, 2, 4, 1, 3, 5, 7, 6] = _af_rmul(g, s)`
Example in which the tensor is zero, same slot symmetries as above:
`T^{d2}{}_{d1 d3}{}^{d1 d3}{}_{d2}`
`= -T^{d3}{}_{d1 d3}{}^{d1 d2}{}_{d2}` under slot symmetry `-(0,4)`;
`= T_{d3 d1}{}^{d3}{}^{d1 d2}{}_{d2}` under slot symmetry `-(0,2)`;
`= T^{d3}{}_{d1 d3}{}^{d1 d2}{}_{d2}` symmetric metric;
`= 0` since two of these lines give tensors that differ only by the sign.
The double coset D*g*S consists of permutations `h = d*g*s` corresponding
to equivalent tensors; if there are two `h` which are the same apart
from the sign, return zero; otherwise
choose as representative the tensor with indices
ordered lexicographically according to `[d1, -d1, d2, -d2, d3, -d3]`
that is `rep = min(D*g*S) = min([d*g*s for d in D for s in S])`
The indices are fixed one by one; first choose the lowest index
for slot 0, then the lowest remaining index for slot 1, etc.
Doing this one obtains a chain of stabilizers
`S -> S_{b0} -> S_{b0,b1} -> ...` and
`D -> D_{p0} -> D_{p0,p1} -> ...`
where `[b0, b1, ...] = range(b)` is a base of the symmetric group;
the strong base `b_S` of S is an ordered sublist of it;
therefore it is sufficient to compute once the
strong base generators of S using the Schreier-Sims algorithm;
the stabilizers of the strong base generators are the
strong base generators of the stabilizer subgroup.
`dbase = [p0, p1, ...]` is not in general in lexicographic order,
so that one must recompute the strong base generators each time;
however this is trivial, there is no need to use the Schreier-Sims
algorithm for D.
The algorithm keeps a TAB of elements `(s_i, d_i, h_i)`
where `h_i = d_i*g*s_i` satisfying `h_i[j] = p_j` for `0 <= j < i`
starting from `s_0 = id, d_0 = id, h_0 = g`.
The equations `h_0[0] = p_0, h_1[1] = p_1,...` are solved in this order,
choosing each time the lowest possible value of p_i
For `j < i`
`d_i*g*s_i*S_{b_0,...,b_{i-1}}*b_j = D_{p_0,...,p_{i-1}}*p_j`
so that for dx in `D_{p_0,...,p_{i-1}}` and sx in
`S_{base[0],...,base[i-1]}` one has `dx*d_i*g*s_i*sx*b_j = p_j`
Search for dx, sx such that this equation holds for `j = i`;
it can be written as `s_i*sx*b_j = J, dx*d_i*g*J = p_j`
`sx*b_j = s_i**-1*J; sx = trace(s_i**-1, S_{b_0,...,b_{i-1}})`
`dx**-1*p_j = d_i*g*J; dx = trace(d_i*g*J, D_{p_0,...,p_{i-1}})`
`s_{i+1} = s_i*trace(s_i**-1*J, S_{b_0,...,b_{i-1}})`
`d_{i+1} = trace(d_i*g*J, D_{p_0,...,p_{i-1}})**-1*d_i`
`h_{i+1}*b_i = d_{i+1}*g*s_{i+1}*b_i = p_i`
`h_n*b_j = p_j` for all j, so that `h_n` is the solution.
Add the found `(s, d, h)` to TAB1.
At the end of the iteration sort TAB1 with respect to the `h`;
if there are two consecutive `h` in TAB1 which differ only for the
sign, the tensor is zero, so return 0;
if there are two consecutive `h` which are equal, keep only one.
Then stabilize the slot generators under `i` and the dummy generators
under `p_i`.
Assign `TAB = TAB1` at the end of the iteration step.
At the end `TAB` contains a unique `(s, d, h)`, since all the slots
of the tensor `h` have been fixed to have the minimum value according
to the symmetries. The algorithm returns `h`.
It is important that the slot BSGS has lexicographic minimal base,
otherwise there is an `i` which does not belong to the slot base
for which `p_i` is fixed by the dummy symmetry only, while `i`
is not invariant from the slot stabilizer, so `p_i` is not in
general the minimal value.
This algorithm differs slightly from the original algorithm [3]:
the canonical form is minimal lexicographically, and
the BSGS has minimal base under lexicographic order.
Equal tensors `h` are eliminated from TAB.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.tensor_can import double_coset_can_rep, get_transversals
>>> gens = [Permutation(x) for x in [[2, 1, 0, 3, 4, 5, 7, 6], [4, 1, 2, 3, 0, 5, 7, 6]]]
>>> base = [0, 2]
>>> g = Permutation([4, 2, 0, 1, 3, 5, 6, 7])
>>> transversals = get_transversals(base, gens)
>>> double_coset_can_rep([list(range(6))], [0], base, gens, transversals, g)
[0, 1, 2, 3, 4, 5, 7, 6]
>>> g = Permutation([4, 1, 3, 0, 5, 2, 6, 7])
>>> double_coset_can_rep([list(range(6))], [0], base, gens, transversals, g)
0
"""
size = g.size
g = g.array_form
num_dummies = size - 2
indices = list(range(num_dummies))
all_metrics_with_sym = all([_ is not None for _ in sym])
num_types = len(sym)
dumx = dummies[:]
dumx_flat = []
for dx in dumx:
dumx_flat.extend(dx)
b_S = b_S[:]
sgensx = [h._array_form for h in sgens]
if b_S:
S_transversals = transversal2coset(size, b_S, S_transversals)
# strong generating set for D
dsgsx = []
for i in range(num_types):
dsgsx.extend(dummy_sgs(dumx[i], sym[i], num_dummies))
idn = list(range(size))
# TAB = list of entries (s, d, h) where h = _af_rmuln(d,g,s)
# for short, in the following d*g*s means _af_rmuln(d,g,s)
TAB = [(idn, idn, g)]
for i in range(size - 2):
b = i
testb = b in b_S and sgensx
if testb:
sgensx1 = [_af_new(_) for _ in sgensx]
deltab = _orbit(size, sgensx1, b)
else:
deltab = {b}
# p1 = min(IMAGES) = min(Union D_p*h*deltab for h in TAB)
if all_metrics_with_sym:
md = _min_dummies(dumx, sym, indices)
else:
md = [min(_orbit(size, [_af_new(
ddx) for ddx in dsgsx], ii)) for ii in range(size - 2)]
p_i = min([min([md[h[x]] for x in deltab]) for s, d, h in TAB])
dsgsx1 = [_af_new(_) for _ in dsgsx]
Dxtrav = _orbit_transversal(size, dsgsx1, p_i, False, af=True) \
if dsgsx else None
if Dxtrav:
Dxtrav = [_af_invert(x) for x in Dxtrav]
# compute the orbit of p_i
for ii in range(num_types):
if p_i in dumx[ii]:
# the orbit is made by all the indices in dum[ii]
if sym[ii] is not None:
deltap = dumx[ii]
else:
# the orbit is made by all the even indices if p_i
# is even, by all the odd indices if p_i is odd
p_i_index = dumx[ii].index(p_i) % 2
deltap = dumx[ii][p_i_index::2]
break
else:
deltap = [p_i]
TAB1 = []
while TAB:
s, d, h = TAB.pop()
if min([md[h[x]] for x in deltab]) != p_i:
continue
deltab1 = [x for x in deltab if md[h[x]] == p_i]
# NEXT = s*deltab1 intersection (d*g)**-1*deltap
dg = _af_rmul(d, g)
dginv = _af_invert(dg)
sdeltab = [s[x] for x in deltab1]
gdeltap = [dginv[x] for x in deltap]
NEXT = [x for x in sdeltab if x in gdeltap]
# d, s satisfy
# d*g*s*base[i-1] = p_{i-1}; using the stabilizers
# d*g*s*S_{base[0],...,base[i-1]}*base[i-1] =
# D_{p_0,...,p_{i-1}}*p_{i-1}
# so that to find d1, s1 satisfying d1*g*s1*b = p_i
# one can look for dx in D_{p_0,...,p_{i-1}} and
# sx in S_{base[0],...,base[i-1]}
# d1 = dx*d; s1 = s*sx
# d1*g*s1*b = dx*d*g*s*sx*b = p_i
for j in NEXT:
if testb:
# solve s1*b = j with s1 = s*sx for some element sx
# of the stabilizer of ..., base[i-1]
# sx*b = s**-1*j; sx = _trace_S(s, j,...)
# s1 = s*trace_S(s**-1*j,...)
s1 = _trace_S(s, j, b, S_transversals)
if not s1:
continue
else:
s1 = [s[ix] for ix in s1]
else:
s1 = s
# assert s1[b] == j # invariant
# solve d1*g*j = p_i with d1 = dx*d for some element dg
# of the stabilizer of ..., p_{i-1}
# dx**-1*p_i = d*g*j; dx**-1 = trace_D(d*g*j,...)
# d1 = trace_D(d*g*j,...)**-1*d
# to save an inversion in the inner loop; notice we did
# Dxtrav = [perm_af_invert(x) for x in Dxtrav] out of the loop
if Dxtrav:
d1 = _trace_D(dg[j], p_i, Dxtrav)
if not d1:
continue
else:
if p_i != dg[j]:
continue
d1 = idn
assert d1[dg[j]] == p_i # invariant
d1 = [d1[ix] for ix in d]
h1 = [d1[g[ix]] for ix in s1]
# assert h1[b] == p_i # invariant
TAB1.append((s1, d1, h1))
# if TAB contains equal permutations, keep only one of them;
# if TAB contains equal permutations up to the sign, return 0
TAB1.sort(key=lambda x: x[-1])
prev = [0] * size
while TAB1:
s, d, h = TAB1.pop()
if h[:-2] == prev[:-2]:
if h[-1] != prev[-1]:
return 0
else:
TAB.append((s, d, h))
prev = h
# stabilize the SGS
sgensx = [h for h in sgensx if h[b] == b]
if b in b_S:
b_S.remove(b)
_dumx_remove(dumx, dumx_flat, p_i)
dsgsx = []
for i in range(num_types):
dsgsx.extend(dummy_sgs(dumx[i], sym[i], num_dummies))
return TAB[0][-1]
def canonical_free(base, gens, g, num_free):
"""
Canonicalization of a tensor with respect to free indices
choosing the minimum with respect to lexicographical ordering
in the free indices.
Explanation
===========
``base``, ``gens`` BSGS for slot permutation group
``g`` permutation representing the tensor
``num_free`` number of free indices
The indices must be ordered with first the free indices
See explanation in double_coset_can_rep
The algorithm is a variation of the one given in [2].
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.tensor_can import canonical_free
>>> gens = [[1, 0, 2, 3, 5, 4], [2, 3, 0, 1, 4, 5],[0, 1, 3, 2, 5, 4]]
>>> gens = [Permutation(h) for h in gens]
>>> base = [0, 2]
>>> g = Permutation([2, 1, 0, 3, 4, 5])
>>> canonical_free(base, gens, g, 4)
[0, 3, 1, 2, 5, 4]
Consider the product of Riemann tensors
``T = R^{a}_{d0}^{d1,d2}*R_{d2,d1}^{d0,b}``
The order of the indices is ``[a, b, d0, -d0, d1, -d1, d2, -d2]``
The permutation corresponding to the tensor is
``g = [0, 3, 4, 6, 7, 5, 2, 1, 8, 9]``
In particular ``a`` is in position ``0`` and ``b`` is in position ``7``.
Use the slot symmetries to get `T` in a form which is minimal
in lexicographic order in the free indices ``a`` and ``b``, e.g.
``-R^{a}_{d0}^{d1,d2}*R^{b,d0}_{d2,d1}`` corresponding to
``[0, 3, 4, 6, 1, 2, 7, 5, 9, 8]``
>>> from sympy.combinatorics.tensor_can import riemann_bsgs, tensor_gens
>>> base, gens = riemann_bsgs
>>> size, sbase, sgens = tensor_gens(base, gens, [[], []], 0)
>>> g = Permutation([0, 3, 4, 6, 7, 5, 2, 1, 8, 9])
>>> canonical_free(sbase, [Permutation(h) for h in sgens], g, 2)
[0, 3, 4, 6, 1, 2, 7, 5, 9, 8]
"""
g = g.array_form
size = len(g)
if not base:
return g[:]
transversals = get_transversals(base, gens)
for x in sorted(g[:-2]):
if x not in base:
base.append(x)
h = g
for i, transv in enumerate(transversals):
h_i = [size]*num_free
# find the element s in transversals[i] such that
# _af_rmul(h, s) has its free elements with the lowest position in h
s = None
for sk in transv.values():
h1 = _af_rmul(h, sk)
hi = [h1.index(ix) for ix in range(num_free)]
if hi < h_i:
h_i = hi
s = sk
if s:
h = _af_rmul(h, s)
return h
def _get_map_slots(size, fixed_slots):
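# Map every slot that is not fixed to its position among the non-fixed slots;
# fixed slots keep their original value and are skipped by the callers.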
res = list(range(size))
pos = 0
for i in range(size):
if i in fixed_slots:
continue
res[i] = pos
pos += 1
return res
def _lift_sgens(size, fixed_slots, free, s):
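# Rebuild a full-size permutation from the reduced permutation ``s``: the fixed
# slots receive their free indices (paired by slot order), while the remaining
# slots receive the entries of ``s`` shifted up by the number of free indices.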
a = []
j = k = 0
fd = list(zip(fixed_slots, free))
fd = [y for x, y in sorted(fd)]
num_free = len(free)
for i in range(size):
if i in fixed_slots:
a.append(fd[k])
k += 1
else:
a.append(s[j] + num_free)
j += 1
return a
def canonicalize(g, dummies, msym, *v):
"""
Canonicalize a tensor formed by a product of component tensors.
Parameters
==========
g : permutation representing the tensor
dummies : list representing the dummy indices
it can be a list of dummy indices of the same type
or a list of lists of dummy indices, one list for each
type of index;
the dummy indices must come after the free indices,
and put in order contravariant, covariant
[d0, -d0, d1,-d1,...]
msym : symmetry of the metric(s)
it can be an integer or a list;
in the first case it is the symmetry of the dummy index metric;
in the second case it is the list of the symmetries of the
index metric for each type
v : list, (base_i, gens_i, n_i, sym_i) for tensors of type `i`
base_i, gens_i : BSGS for tensors of this type.
The BSGS should have minimal base under lexicographic ordering;
if not, an attempt is made to get the minimal BSGS;
in case of failure,
canonicalize_naive is used, which is much slower.
n_i : number of tensors of type `i`.
sym_i : symmetry under exchange of component tensors of type `i`.
Both for msym and sym_i the cases are
* None no symmetry
* 0 commuting
* 1 anticommuting
Returns
=======
0 if the tensor is zero, else return the array form of
the permutation representing the canonical form of the tensor.
Algorithm
=========
First one uses canonical_free to get the minimum tensor under
lexicographic order, using only the slot symmetries.
If the component tensors have not minimal BSGS, it is attempted
to find it; if the attempt fails canonicalize_naive
is used instead.
Compute the residual slot symmetry keeping fixed the free indices
using tensor_gens(base, gens, list_free_indices, sym).
Reduce the problem eliminating the free indices.
Then use double_coset_can_rep and lift back the result reintroducing
the free indices.
Examples
========
one type of index with commuting metric;
`A_{a b}` and `B_{a b}` antisymmetric and commuting
`T = A_{d0 d1} * B^{d0}{}_{d2} * B^{d2 d1}`
`ord = [d0,-d0,d1,-d1,d2,-d2]` order of the indices
g = [1, 3, 0, 5, 4, 2, 6, 7]
`T_c = 0`
>>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, canonicalize, bsgs_direct_product
>>> from sympy.combinatorics import Permutation
>>> base2a, gens2a = get_symmetric_group_sgs(2, 1)
>>> t0 = (base2a, gens2a, 1, 0)
>>> t1 = (base2a, gens2a, 2, 0)
>>> g = Permutation([1, 3, 0, 5, 4, 2, 6, 7])
>>> canonicalize(g, range(6), 0, t0, t1)
0
same as above, but with `B_{a b}` anticommuting
`T_c = -A^{d0 d1} * B_{d0}{}^{d2} * B_{d1 d2}`
can = [0,2,1,4,3,5,7,6]
>>> t1 = (base2a, gens2a, 2, 1)
>>> canonicalize(g, range(6), 0, t0, t1)
[0, 2, 1, 4, 3, 5, 7, 6]
two types of indices `[a,b,c,d,e,f]` and `[m,n]`, in this order,
both with commuting metric
`f^{a b c}` antisymmetric, commuting
`A_{m a}` no symmetry, commuting
`T = f^c{}_{d a} * f^f{}_{e b} * A_m{}^d * A^{m b} * A_n{}^a * A^{n e}`
ord = [c,f,a,-a,b,-b,d,-d,e,-e,m,-m,n,-n]
g = [0,7,3, 1,9,5, 11,6, 10,4, 13,2, 12,8, 14,15]
The canonical tensor is
`T_c = -f^{c a b} * f^{f d e} * A^m{}_a * A_{m d} * A^n{}_b * A_{n e}`
can = [0,2,4, 1,6,8, 10,3, 11,7, 12,5, 13,9, 15,14]
>>> base_f, gens_f = get_symmetric_group_sgs(3, 1)
>>> base1, gens1 = get_symmetric_group_sgs(1)
>>> base_A, gens_A = bsgs_direct_product(base1, gens1, base1, gens1)
>>> t0 = (base_f, gens_f, 2, 0)
>>> t1 = (base_A, gens_A, 4, 0)
>>> dummies = [range(2, 10), range(10, 14)]
>>> g = Permutation([0, 7, 3, 1, 9, 5, 11, 6, 10, 4, 13, 2, 12, 8, 14, 15])
>>> canonicalize(g, dummies, [0, 0], t0, t1)
[0, 2, 4, 1, 6, 8, 10, 3, 11, 7, 12, 5, 13, 9, 15, 14]
"""
from sympy.combinatorics.testutil import canonicalize_naive
if not isinstance(msym, list):
if msym not in [0, 1, None]:
raise ValueError('msym must be 0, 1 or None')
num_types = 1
else:
num_types = len(msym)
if not all(msymx in [0, 1, None] for msymx in msym):
raise ValueError('msym entries must be 0, 1 or None')
if len(dummies) != num_types:
raise ValueError(
'dummies and msym must have the same number of elements')
size = g.size
num_tensors = 0
v1 = []
for i in range(len(v)):
base_i, gens_i, n_i, sym_i = v[i]
# check that the BSGS is minimal;
# this property is used in double_coset_can_rep;
# if it is not minimal use canonicalize_naive
if not _is_minimal_bsgs(base_i, gens_i):
mbsgs = get_minimal_bsgs(base_i, gens_i)
if not mbsgs:
can = canonicalize_naive(g, dummies, msym, *v)
return can
base_i, gens_i = mbsgs
v1.append((base_i, gens_i, [[]] * n_i, sym_i))
num_tensors += n_i
if num_types == 1 and not isinstance(msym, list):
dummies = [dummies]
msym = [msym]
flat_dummies = []
for dumx in dummies:
flat_dummies.extend(dumx)
if flat_dummies and flat_dummies != list(range(flat_dummies[0], flat_dummies[-1] + 1)):
raise ValueError('dummies is not valid')
# slot symmetry of the tensor
size1, sbase, sgens = gens_products(*v1)
if size != size1:
raise ValueError(
'g has size %d, generators have size %d' % (size, size1))
free = [i for i in range(size - 2) if i not in flat_dummies]
num_free = len(free)
# g1 minimal tensor under slot symmetry
g1 = canonical_free(sbase, sgens, g, num_free)
if not flat_dummies:
return g1
# save the sign of g1
sign = 0 if g1[-1] == size - 1 else 1
# the free indices are kept fixed.
# Determine free_i, the list of slots of tensors which are fixed
# since they are occupied by free indices, which are fixed.
start = 0
for i in range(len(v)):
free_i = []
base_i, gens_i, n_i, sym_i = v[i]
len_tens = gens_i[0].size - 2
# for each component tensor get a list of fixed slots
for j in range(n_i):
# get the elements corresponding to the component tensor
h = g1[start:(start + len_tens)]
fr = []
# get the positions of the fixed elements in h
for k in free:
if k in h:
fr.append(h.index(k))
free_i.append(fr)
start += len_tens
v1[i] = (base_i, gens_i, free_i, sym_i)
# BSGS of the tensor with fixed free indices
# if tensor_gens fails in gens_product, use canonicalize_naive
size, sbase, sgens = gens_products(*v1)
# reduce the permutations getting rid of the free indices
pos_free = [g1.index(x) for x in range(num_free)]
size_red = size - num_free
g1_red = [x - num_free for x in g1 if x in flat_dummies]
if sign:
g1_red.extend([size_red - 1, size_red - 2])
else:
g1_red.extend([size_red - 2, size_red - 1])
map_slots = _get_map_slots(size, pos_free)
sbase_red = [map_slots[i] for i in sbase if i not in pos_free]
sgens_red = [_af_new([map_slots[i] for i in y._array_form if i not in pos_free]) for y in sgens]
dummies_red = [[x - num_free for x in y] for y in dummies]
transv_red = get_transversals(sbase_red, sgens_red)
g1_red = _af_new(g1_red)
g2 = double_coset_can_rep(
dummies_red, msym, sbase_red, sgens_red, transv_red, g1_red)
if g2 == 0:
return 0
# lift to the case with the free indices
g3 = _lift_sgens(size, pos_free, free, g2)
return g3
def perm_af_direct_product(gens1, gens2, signed=True):
"""
Direct products of the generators gens1 and gens2.
Examples
========
>>> from sympy.combinatorics.tensor_can import perm_af_direct_product
>>> gens1 = [[1, 0, 2, 3], [0, 1, 3, 2]]
>>> gens2 = [[1, 0]]
>>> perm_af_direct_product(gens1, gens2, False)
[[1, 0, 2, 3, 4, 5], [0, 1, 3, 2, 4, 5], [0, 1, 2, 3, 5, 4]]
>>> gens1 = [[1, 0, 2, 3, 5, 4], [0, 1, 3, 2, 4, 5]]
>>> gens2 = [[1, 0, 2, 3]]
>>> perm_af_direct_product(gens1, gens2, True)
[[1, 0, 2, 3, 4, 5, 7, 6], [0, 1, 3, 2, 4, 5, 6, 7], [0, 1, 2, 3, 5, 4, 6, 7]]
"""
gens1 = [list(x) for x in gens1]
gens2 = [list(x) for x in gens2]
s = 2 if signed else 0
n1 = len(gens1[0]) - s
n2 = len(gens2[0]) - s
start = list(range(n1))
end = list(range(n1, n1 + n2))
if signed:
gens1 = [gen[:-2] + end + [gen[-2] + n2, gen[-1] + n2]
for gen in gens1]
gens2 = [start + [x + n1 for x in gen] for gen in gens2]
else:
gens1 = [gen + end for gen in gens1]
gens2 = [start + [x + n1 for x in gen] for gen in gens2]
res = gens1 + gens2
return res
def bsgs_direct_product(base1, gens1, base2, gens2, signed=True):
"""
Direct product of two BSGS.
Parameters
==========
base1 : base of the first BSGS.
gens1 : strong generating sequence of the first BSGS.
base2, gens2 : similarly for the second BSGS.
signed : flag for signed permutations.
Examples
========
>>> from sympy.combinatorics.tensor_can import (get_symmetric_group_sgs, bsgs_direct_product)
>>> base1, gens1 = get_symmetric_group_sgs(1)
>>> base2, gens2 = get_symmetric_group_sgs(2)
>>> bsgs_direct_product(base1, gens1, base2, gens2)
([1], [(4)(1 2)])
"""
s = 2 if signed else 0
n1 = gens1[0].size - s
base = list(base1)
base += [x + n1 for x in base2]
gens1 = [h._array_form for h in gens1]
gens2 = [h._array_form for h in gens2]
gens = perm_af_direct_product(gens1, gens2, signed)
size = len(gens[0])
id_af = list(range(size))
gens = [h for h in gens if h != id_af]
if not gens:
gens = [id_af]
return base, [_af_new(h) for h in gens]
def get_symmetric_group_sgs(n, antisym=False):
"""
Return base, gens of the minimal BSGS for (anti)symmetric tensor
Parameters
==========
``n``: rank of the tensor
``antisym`` : bool
``antisym = False`` symmetric tensor
``antisym = True`` antisymmetric tensor
Examples
========
>>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs
>>> get_symmetric_group_sgs(3)
([0, 1], [(4)(0 1), (4)(1 2)])
"""
if n == 1:
return [], [_af_new(list(range(3)))]
gens = [Permutation(n - 1)(i, i + 1)._array_form for i in range(n - 1)]
if antisym == 0:
gens = [x + [n, n + 1] for x in gens]
else:
gens = [x + [n + 1, n] for x in gens]
base = list(range(n - 1))
return base, [_af_new(h) for h in gens]
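# Note (illustrative): the last two points track the sign of the tensor; for
# antisym=True each adjacent transposition also swaps them, so e.g.
# get_symmetric_group_sgs(2, True) returns base [0] and a single generator
# whose array form is [1, 0, 3, 2].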
riemann_bsgs = [0, 2], [Permutation(0, 1)(4, 5), Permutation(2, 3)(4, 5),
Permutation(5)(0, 2)(1, 3)]
def get_transversals(base, gens):
"""
Return transversals for the group with BSGS base, gens
"""
if not base:
return []
stabs = _distribute_gens_by_base(base, gens)
orbits, transversals = _orbits_transversals_from_bsgs(base, stabs)
transversals = [{x: h._array_form for x, h in y.items()} for y in
transversals]
return transversals
def _is_minimal_bsgs(base, gens):
"""
Check if the BSGS has minimal base under lexicographic order.
base, gens BSGS
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.tensor_can import riemann_bsgs, _is_minimal_bsgs
>>> _is_minimal_bsgs(*riemann_bsgs)
True
>>> riemann_bsgs1 = ([2, 0], ([Permutation(5)(0, 1)(4, 5), Permutation(5)(0, 2)(1, 3)]))
>>> _is_minimal_bsgs(*riemann_bsgs1)
False
"""
base1 = []
sgs1 = gens[:]
size = gens[0].size
for i in range(size):
if not all(h._array_form[i] == i for h in sgs1):
base1.append(i)
sgs1 = [h for h in sgs1 if h._array_form[i] == i]
return base1 == base
def get_minimal_bsgs(base, gens):
"""
Compute a minimal BSGS
base, gens BSGS
If base, gens is a minimal BSGS return it; else return a minimal BSGS;
if it fails to find one, return None.
TODO: use baseswap in the case in which it fails to find a
minimal BSGS
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.tensor_can import get_minimal_bsgs
>>> riemann_bsgs1 = ([2, 0], ([Permutation(5)(0, 1)(4, 5), Permutation(5)(0, 2)(1, 3)]))
>>> get_minimal_bsgs(*riemann_bsgs1)
([0, 2], [(0 1)(4 5), (5)(0 2)(1 3), (2 3)(4 5)])
"""
G = PermutationGroup(gens)
base, gens = G.schreier_sims_incremental()
if not _is_minimal_bsgs(base, gens):
return None
return base, gens
def tensor_gens(base, gens, list_free_indices, sym=0):
"""
Returns size, res_base, res_gens BSGS for n tensors of the
same type.
Explanation
===========
base, gens BSGS for tensors of this type
list_free_indices list of the slots occupied by fixed indices
for each of the tensors
sym symmetry under commutation of two tensors
sym None no symmetry
sym 0 commuting
sym 1 anticommuting
Examples
========
>>> from sympy.combinatorics.tensor_can import tensor_gens, get_symmetric_group_sgs
two symmetric tensors with 3 indices without free indices
>>> base, gens = get_symmetric_group_sgs(3)
>>> tensor_gens(base, gens, [[], []])
(8, [0, 1, 3, 4], [(7)(0 1), (7)(1 2), (7)(3 4), (7)(4 5), (7)(0 3)(1 4)(2 5)])
two symmetric tensors with 3 indices with free indices in slot 1 and 0
>>> tensor_gens(base, gens, [[1], [0]])
(8, [0, 4], [(7)(0 2), (7)(4 5)])
four symmetric tensors with 3 indices, two of which with free indices
"""
def _get_bsgs(G, base, gens, free_indices):
"""
return the BSGS for G.pointwise_stabilizer(free_indices)
"""
if not free_indices:
return base[:], gens[:]
else:
H = G.pointwise_stabilizer(free_indices)
base, sgs = H.schreier_sims_incremental()
return base, sgs
# if not base there is no slot symmetry for the component tensors
# if list_free_indices.count([]) < 2 there is no commutation symmetry
# so there is no resulting slot symmetry
if not base and list_free_indices.count([]) < 2:
n = len(list_free_indices)
size = n * (gens[0].size - 2) + 2
return size, [], [_af_new(list(range(size)))]
# if any(list_free_indices) one needs to compute the pointwise
# stabilizer, so G is needed
if any(list_free_indices):
G = PermutationGroup(gens)
else:
G = None
# no_free list of lists of indices for component tensors without fixed
# indices
no_free = []
size = gens[0].size
id_af = list(range(size))
num_indices = size - 2
if not list_free_indices[0]:
no_free.append(list(range(num_indices)))
res_base, res_gens = _get_bsgs(G, base, gens, list_free_indices[0])
for i in range(1, len(list_free_indices)):
base1, gens1 = _get_bsgs(G, base, gens, list_free_indices[i])
res_base, res_gens = bsgs_direct_product(res_base, res_gens,
base1, gens1, 1)
if not list_free_indices[i]:
no_free.append(list(range(size - 2, size - 2 + num_indices)))
size += num_indices
nr = size - 2
res_gens = [h for h in res_gens if h._array_form != id_af]
# if sym there are no commuting tensors stop here
if sym is None or not no_free:
if not res_gens:
res_gens = [_af_new(id_af)]
return size, res_base, res_gens
# if the component tensors have minimal BSGS, so does their direct
# product P; the slot symmetry group is S = P*C, where C is the group
# to (anti)commute the component tensors with no free indices
# a stabilizer has the property S_i = P_i*C_i;
# the BSGS of P*C has SGS_P + SGS_C and the base is
# the ordered union of the bases of P and C.
# If P has minimal BSGS, so does S with this base.
base_comm = []
for i in range(len(no_free) - 1):
ind1 = no_free[i]
ind2 = no_free[i + 1]
a = list(range(ind1[0]))
a.extend(ind2)
a.extend(ind1)
base_comm.append(ind1[0])
a.extend(list(range(ind2[-1] + 1, nr)))
if sym == 0:
a.extend([nr, nr + 1])
else:
a.extend([nr + 1, nr])
res_gens.append(_af_new(a))
res_base = list(res_base)
# each base is ordered; order the union of the two bases
for i in base_comm:
if i not in res_base:
res_base.append(i)
res_base.sort()
if not res_gens:
res_gens = [_af_new(id_af)]
return size, res_base, res_gens
def gens_products(*v):
"""
Returns size, res_base, res_gens BSGS for n tensors of different types.
Explanation
===========
v is a sequence of (base_i, gens_i, free_i, sym_i)
where
base_i, gens_i BSGS of tensor of type `i`
free_i list of the fixed slots for each of the tensors
of type `i`; if there are `n_i` tensors of type `i`
and none of them have fixed slots, `free = [[]]*n_i`
sym 0 (1) if the tensors of type `i` (anti)commute among themselves
Examples
========
>>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, gens_products
>>> base, gens = get_symmetric_group_sgs(2)
>>> gens_products((base, gens, [[], []], 0))
(6, [0, 2], [(5)(0 1), (5)(2 3), (5)(0 2)(1 3)])
>>> gens_products((base, gens, [[1], []], 0))
(6, [2], [(5)(2 3)])
"""
res_size, res_base, res_gens = tensor_gens(*v[0])
for i in range(1, len(v)):
size, base, gens = tensor_gens(*v[i])
res_base, res_gens = bsgs_direct_product(res_base, res_gens, base,
gens, 1)
res_size = res_gens[0].size
id_af = list(range(res_size))
res_gens = [h for h in res_gens if h != id_af]
if not res_gens:
res_gens = [id_af]
return res_size, res_base, res_gens
|
7980f821d4c2d10739349db72691d4e9292c8317bb12a550f1a34a0466e180f3 | from sympy import isprime
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.printing.defaults import DefaultPrinting
from sympy.combinatorics.free_groups import free_group
class PolycyclicGroup(DefaultPrinting):
is_group = True
is_solvable = True
def __init__(self, pc_sequence, pc_series, relative_order, collector=None):
"""
Parameters
==========
pc_sequence : list
A sequence of elements whose classes generate the cyclic factor
groups of pc_series.
pc_series : list
A subnormal sequence of subgroups where each factor group is cyclic.
relative_order : list
The orders of factor groups of pc_series.
collector : Collector
By default, it is None. Collector class provides the
polycyclic presentation with various other functionalities.
"""
self.pcgs = pc_sequence
self.pc_series = pc_series
self.relative_order = relative_order
self.collector = Collector(self.pcgs, pc_series, relative_order) if not collector else collector
def is_prime_order(self):
return all(isprime(order) for order in self.relative_order)
def length(self):
return len(self.pcgs)
class Collector(DefaultPrinting):
"""
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.
"Handbook of Computational Group Theory"
Section 8.1.3
"""
def __init__(self, pcgs, pc_series, relative_order, free_group_=None, pc_presentation=None):
"""
Most of the parameters for the Collector class are the same as for PolycyclicGroup.
Others are described below.
Parameters
==========
free_group_ : tuple
free_group_ provides the mapping of polycyclic generating
sequence with the free group elements.
pc_presentation : dict
Provides the presentation of polycyclic groups with the
help of power and conjugate relators.
See Also
========
PolycyclicGroup
"""
self.pcgs = pcgs
self.pc_series = pc_series
self.relative_order = relative_order
self.free_group = free_group('x:{}'.format(len(pcgs)))[0] if not free_group_ else free_group_
self.index = {s: i for i, s in enumerate(self.free_group.symbols)}
self.pc_presentation = self.pc_relators()
def minimal_uncollected_subword(self, word):
r"""
Returns the minimal uncollected subwords.
Explanation
===========
A word ``v`` defined on generators in ``X`` is a minimal
uncollected subword of the word ``w`` if ``v`` is a subword
of ``w`` and it has one of the following forms
* `v = {x_{i+1}}^{a_j}x_i`
* `v = {x_{i+1}}^{a_j}{x_i}^{-1}`
* `v = {x_i}^{a_j}`
for `a_j` not in `\{1, \ldots, s-1\}`, where ``s`` is the power
exponent of the corresponding generator.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.free_groups import free_group
>>> G = SymmetricGroup(4)
>>> PcGroup = G.polycyclic_group()
>>> collector = PcGroup.collector
>>> F, x1, x2 = free_group("x1, x2")
>>> word = x2**2*x1**7
>>> collector.minimal_uncollected_subword(word)
((x2, 2),)
"""
# To handle the case word = <identity>
if not word:
return None
array = word.array_form
re = self.relative_order
index = self.index
for i in range(len(array)):
s1, e1 = array[i]
if re[index[s1]] and (e1 < 0 or e1 > re[index[s1]]-1):
return ((s1, e1), )
for i in range(len(array)-1):
s1, e1 = array[i]
s2, e2 = array[i+1]
if index[s1] > index[s2]:
e = 1 if e2 > 0 else -1
return ((s1, e1), (s2, e))
return None
def relations(self):
"""
Separates the given relators of pc presentation in power and
conjugate relations.
Returns
=======
(power_rel, conj_rel)
Separates pc presentation into power and conjugate relations.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> G = SymmetricGroup(3)
>>> PcGroup = G.polycyclic_group()
>>> collector = PcGroup.collector
>>> power_rel, conj_rel = collector.relations()
>>> power_rel
{x0**2: (), x1**3: ()}
>>> conj_rel
{x0**-1*x1*x0: x1**2}
See Also
========
pc_relators
"""
power_relators = {}
conjugate_relators = {}
for key, value in self.pc_presentation.items():
if len(key.array_form) == 1:
power_relators[key] = value
else:
conjugate_relators[key] = value
return power_relators, conjugate_relators
def subword_index(self, word, w):
"""
Returns the start and ending index of a given
subword in a word.
Parameters
==========
word : FreeGroupElement
word defined on free group elements for a
polycyclic group.
w : FreeGroupElement
subword of a given word, whose starting and
ending index to be computed.
Returns
=======
(i, j)
A tuple containing starting and ending index of ``w``
in the given word.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.free_groups import free_group
>>> G = SymmetricGroup(4)
>>> PcGroup = G.polycyclic_group()
>>> collector = PcGroup.collector
>>> F, x1, x2 = free_group("x1, x2")
>>> word = x2**2*x1**7
>>> w = x2**2*x1
>>> collector.subword_index(word, w)
(0, 3)
>>> w = x1**7
>>> collector.subword_index(word, w)
(2, 9)
"""
low = -1
high = -1
for i in range(len(word)-len(w)+1):
if word.subword(i, i+len(w)) == w:
low = i
high = i+len(w)
break
if low == high == -1:
return -1, -1
return low, high
def map_relation(self, w):
"""
Return a conjugate relation.
Explanation
===========
Given a word formed by two free group elements, the
corresponding conjugate relation with those free
group elements is formed and mapped with the collected
word in the polycyclic presentation.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.free_groups import free_group
>>> G = SymmetricGroup(3)
>>> PcGroup = G.polycyclic_group()
>>> collector = PcGroup.collector
>>> F, x0, x1 = free_group("x0, x1")
>>> w = x1*x0
>>> collector.map_relation(w)
x1**2
See Also
========
pc_presentation
"""
array = w.array_form
s1 = array[0][0]
s2 = array[1][0]
key = ((s2, -1), (s1, 1), (s2, 1))
key = self.free_group.dtype(key)
return self.pc_presentation[key]
def collected_word(self, word):
r"""
Return the collected form of a word.
Explanation
===========
A word ``w`` is called collected, if `w = {x_{i_1}}^{a_1} * \ldots *
{x_{i_r}}^{a_r}` with `i_1 < i_2< \ldots < i_r` and `a_j` is in
`\{1, \ldots, {s_j}-1\}`.
Otherwise w is uncollected.
Parameters
==========
word : FreeGroupElement
An uncollected word.
Returns
=======
word
A collected word of form `w = {x_{i_1}}^{a_1}, \ldots,
{x_{i_r}}^{a_r}` with `i_1, i_2, \ldots, i_r` and `a_j \in
\{1, \ldots, {s_j}-1\}`.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.free_groups import free_group
>>> G = SymmetricGroup(4)
>>> PcGroup = G.polycyclic_group()
>>> collector = PcGroup.collector
>>> F, x0, x1, x2, x3 = free_group("x0, x1, x2, x3")
>>> word = x3*x2*x1*x0
>>> collected_word = collector.collected_word(word)
>>> free_to_perm = {}
>>> free_group = collector.free_group
>>> for sym, gen in zip(free_group.symbols, collector.pcgs):
... free_to_perm[sym] = gen
>>> G1 = PermutationGroup()
>>> for w in word:
... sym = w[0]
... perm = free_to_perm[sym]
... G1 = PermutationGroup([perm] + G1.generators)
>>> G2 = PermutationGroup()
>>> for w in collected_word:
... sym = w[0]
... perm = free_to_perm[sym]
... G2 = PermutationGroup([perm] + G2.generators)
>>> G1 == G2
True
See Also
========
minimal_uncollected_subword
"""
free_group = self.free_group
while True:
w = self.minimal_uncollected_subword(word)
if not w:
break
low, high = self.subword_index(word, free_group.dtype(w))
if low == -1:
continue
s1, e1 = w[0]
if len(w) == 1:
re = self.relative_order[self.index[s1]]
q = e1 // re
r = e1-q*re
key = ((w[0][0], re), )
key = free_group.dtype(key)
if self.pc_presentation[key]:
presentation = self.pc_presentation[key].array_form
sym, exp = presentation[0]
word_ = ((w[0][0], r), (sym, q*exp))
word_ = free_group.dtype(word_)
else:
if r != 0:
word_ = ((w[0][0], r), )
word_ = free_group.dtype(word_)
else:
word_ = None
word = word.eliminate_word(free_group.dtype(w), word_)
if len(w) == 2 and w[1][1] > 0:
s2, e2 = w[1]
s2 = ((s2, 1), )
s2 = free_group.dtype(s2)
word_ = self.map_relation(free_group.dtype(w))
word_ = s2*word_**e1
word_ = free_group.dtype(word_)
word = word.substituted_word(low, high, word_)
elif len(w) == 2 and w[1][1] < 0:
s2, e2 = w[1]
s2 = ((s2, 1), )
s2 = free_group.dtype(s2)
word_ = self.map_relation(free_group.dtype(w))
word_ = s2**-1*word_**e1
word_ = free_group.dtype(word_)
word = word.substituted_word(low, high, word_)
return word
def pc_relators(self):
r"""
Return the polycyclic presentation.
Explanation
===========
There are two types of relations used in polycyclic
presentation.
* ``Power relations`` : Power relators are of the form `x_i^{re_i}`,
where `i \in \{0, \ldots, \mathrm{len(pcgs)}\}`, ``x`` represents a polycyclic
generator and ``re`` is the corresponding relative order.
* ``Conjugate relations`` : Conjugate relators are of the form `x_j^{-1}x_ix_j`,
where `j < i \in \{0, \ldots, \mathrm{len(pcgs)}\}`.
Returns
=======
A dictionary with power and conjugate relations as key and
their collected form as corresponding values.
Notes
=====
Identity Permutation is mapped with empty ``()``.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> S = SymmetricGroup(49).sylow_subgroup(7)
>>> der = S.derived_series()
>>> G = der[len(der)-2]
>>> PcGroup = G.polycyclic_group()
>>> collector = PcGroup.collector
>>> pcgs = PcGroup.pcgs
>>> len(pcgs)
6
>>> free_group = collector.free_group
>>> pc_presentation = collector.pc_presentation
>>> free_to_perm = {}
>>> for s, g in zip(free_group.symbols, pcgs):
... free_to_perm[s] = g
>>> for k, v in pc_presentation.items():
... k_array = k.array_form
... if v != ():
... v_array = v.array_form
... lhs = Permutation()
... for gen in k_array:
... s = gen[0]
... e = gen[1]
... lhs = lhs*free_to_perm[s]**e
... if v == ():
... assert lhs.is_identity
... continue
... rhs = Permutation()
... for gen in v_array:
... s = gen[0]
... e = gen[1]
... rhs = rhs*free_to_perm[s]**e
... assert lhs == rhs
"""
free_group = self.free_group
rel_order = self.relative_order
pc_relators = {}
perm_to_free = {}
pcgs = self.pcgs
for gen, s in zip(pcgs, free_group.generators):
perm_to_free[gen**-1] = s**-1
perm_to_free[gen] = s
pcgs = pcgs[::-1]
series = self.pc_series[::-1]
rel_order = rel_order[::-1]
collected_gens = []
for i, gen in enumerate(pcgs):
re = rel_order[i]
relation = perm_to_free[gen]**re
G = series[i]
l = G.generator_product(gen**re, original = True)
l.reverse()
word = free_group.identity
for g in l:
word = word*perm_to_free[g]
word = self.collected_word(word)
pc_relators[relation] = word if word else ()
self.pc_presentation = pc_relators
collected_gens.append(gen)
if len(collected_gens) > 1:
conj = collected_gens[len(collected_gens)-1]
conjugator = perm_to_free[conj]
for j in range(len(collected_gens)-1):
conjugated = perm_to_free[collected_gens[j]]
relation = conjugator**-1*conjugated*conjugator
gens = conj**-1*collected_gens[j]*conj
l = G.generator_product(gens, original = True)
l.reverse()
word = free_group.identity
for g in l:
word = word*perm_to_free[g]
word = self.collected_word(word)
pc_relators[relation] = word if word else ()
self.pc_presentation = pc_relators
return pc_relators
def exponent_vector(self, element):
r"""
Return the exponent vector of length equal to the
length of polycyclic generating sequence.
Explanation
===========
For a given generator/element ``g`` of the polycyclic group,
it can be represented as `g = {x_1}^{e_1}, \ldots, {x_n}^{e_n}`,
where `x_i` represents polycyclic generators and ``n`` is
the number of generators in the free_group equal to the length
of pcgs.
Parameters
==========
element : Permutation
Generator of a polycyclic group.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> G = SymmetricGroup(4)
>>> PcGroup = G.polycyclic_group()
>>> collector = PcGroup.collector
>>> pcgs = PcGroup.pcgs
>>> collector.exponent_vector(G[0])
[1, 0, 0, 0]
>>> exp = collector.exponent_vector(G[1])
>>> g = Permutation()
>>> for i in range(len(exp)):
... g = g*pcgs[i]**exp[i] if exp[i] else g
>>> assert g == G[1]
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.
"Handbook of Computational Group Theory"
Section 8.1.1, Definition 8.4
"""
free_group = self.free_group
G = PermutationGroup()
for g in self.pcgs:
G = PermutationGroup([g] + G.generators)
gens = G.generator_product(element, original = True)
gens.reverse()
perm_to_free = {}
for sym, g in zip(free_group.generators, self.pcgs):
perm_to_free[g**-1] = sym**-1
perm_to_free[g] = sym
w = free_group.identity
for g in gens:
w = w*perm_to_free[g]
word = self.collected_word(w)
index = self.index
exp_vector = [0]*len(free_group)
word = word.array_form
for t in word:
exp_vector[index[t[0]]] = t[1]
return exp_vector
def depth(self, element):
r"""
Return the depth of a given element.
Explanation
===========
The depth of a given element ``g`` is defined by
`\mathrm{dep}[g] = i` if `e_1 = e_2 = \ldots = e_{i-1} = 0`
and `e_i != 0`, where ``e`` represents the exponent-vector.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> G = SymmetricGroup(3)
>>> PcGroup = G.polycyclic_group()
>>> collector = PcGroup.collector
>>> collector.depth(G[0])
2
>>> collector.depth(G[1])
1
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.
"Handbook of Computational Group Theory"
Section 8.1.1, Definition 8.5
"""
exp_vector = self.exponent_vector(element)
return next((i+1 for i, x in enumerate(exp_vector) if x), len(self.pcgs)+1)
def leading_exponent(self, element):
r"""
Return the leading non-zero exponent.
Explanation
===========
The leading exponent for a given element `g` is defined
by `\mathrm{leading\_exponent}[g]` `= e_i`, if `\mathrm{depth}[g] = i`.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> G = SymmetricGroup(3)
>>> PcGroup = G.polycyclic_group()
>>> collector = PcGroup.collector
>>> collector.leading_exponent(G[1])
1
"""
exp_vector = self.exponent_vector(element)
depth = self.depth(element)
if depth != len(self.pcgs)+1:
return exp_vector[depth-1]
return None
def _sift(self, z, g):
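# Reduce ``g`` against the partial induced pcgs ``z``: repeatedly divide out
# the entry of ``z`` sitting at the same depth as the current element
# (matching leading exponents modulo the relative order), so the result is
# either trivial or has a depth not yet occupied in ``z``.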
h = g
d = self.depth(h)
while d < len(self.pcgs) and z[d-1] != 1:
k = z[d-1]
e = self.leading_exponent(h)*(self.leading_exponent(k))**-1
e = e % self.relative_order[d-1]
h = k**-e*h
d = self.depth(h)
return h
def induced_pcgs(self, gens):
"""
Parameters
==========
gens : list
A list of generators on which polycyclic subgroup
is to be defined.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(8)
>>> G = S.sylow_subgroup(2)
>>> PcGroup = G.polycyclic_group()
>>> collector = PcGroup.collector
>>> gens = [G[0], G[1]]
>>> ipcgs = collector.induced_pcgs(gens)
>>> [gen.order() for gen in ipcgs]
[2, 2, 2]
>>> G = S.sylow_subgroup(3)
>>> PcGroup = G.polycyclic_group()
>>> collector = PcGroup.collector
>>> gens = [G[0], G[1]]
>>> ipcgs = collector.induced_pcgs(gens)
>>> [gen.order() for gen in ipcgs]
[3]
"""
z = [1]*len(self.pcgs)
G = gens
while G:
g = G.pop(0)
h = self._sift(z, g)
d = self.depth(h)
if d < len(self.pcgs):
for gen in z:
if gen != 1:
G.append(h**-1*gen**-1*h*gen)
z[d-1] = h
z = [gen for gen in z if gen != 1]
return z
def constructive_membership_test(self, ipcgs, g):
"""
Return the exponent vector for induced pcgs.
"""
e = [0]*len(ipcgs)
h = g
d = self.depth(h)
for i, gen in enumerate(ipcgs):
while self.depth(gen) == d:
f = self.leading_exponent(h)*self.leading_exponent(gen)
f = f % self.relative_order[d-1]
h = gen**(-f)*h
e[i] = f
d = self.depth(h)
if h == 1:
return e
return False
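# Illustrative usage sketch (assumed workflow, mirroring induced_pcgs above):
#
#     ipcgs = collector.induced_pcgs(gens)
#     e = collector.constructive_membership_test(ipcgs, g)
#
# ``e`` is the exponent vector of ``g`` with respect to ``ipcgs`` when ``g``
# lies in the subgroup they generate, and False otherwise.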
|
b356a6b760fffb62e9f4a9615d26e6716ea5c1c47add792a123d95aba509a5d6 | from sympy.combinatorics import Permutation as Perm
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.core import Basic, Tuple
from sympy.core.compatibility import as_int
from sympy.sets import FiniteSet
from sympy.utilities.iterables import (minlex, unflatten, flatten)
rmul = Perm.rmul
class Polyhedron(Basic):
"""
Represents the polyhedral symmetry group (PSG).
Explanation
===========
The PSG is one of the symmetry groups of the Platonic solids.
There are three polyhedral groups: the tetrahedral group
of order 12, the octahedral group of order 24, and the
icosahedral group of order 60.
All doctests have been given in the docstring of the
constructor of the object.
References
==========
.. [1] http://mathworld.wolfram.com/PolyhedralGroup.html
"""
_edges = None
def __new__(cls, corners, faces=[], pgroup=[]):
"""
The constructor of the Polyhedron group object.
Explanation
===========
It takes up to three parameters: the corners, faces, and
allowed transformations.
The corners/vertices are entered as a list of arbitrary
expressions that are used to identify each vertex.
The faces are entered as a list of tuples of indices; a tuple
of indices identifies the vertices which define the face. They
should be entered in a cw or ccw order; they will be standardized
by reversal and rotation to give the lowest lexical ordering.
If no faces are given then no edges will be computed.
>>> from sympy.combinatorics.polyhedron import Polyhedron
>>> Polyhedron(list('abc'), [(1, 2, 0)]).faces
FiniteSet((0, 1, 2))
>>> Polyhedron(list('abc'), [(1, 0, 2)]).faces
FiniteSet((0, 1, 2))
The allowed transformations are entered as allowable permutations
of the vertices for the polyhedron. Instances of Permutation
(as with faces) should refer to the supplied vertices by index.
These permutations are stored as a PermutationGroup.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> from sympy.abc import w, x, y, z
>>> init_printing(pretty_print=False, perm_cyclic=False)
Here we construct the Polyhedron object for a tetrahedron.
>>> corners = [w, x, y, z]
>>> faces = [(0, 1, 2), (0, 2, 3), (0, 3, 1), (1, 2, 3)]
Next, allowed transformations of the polyhedron must be given. This
is given as permutations of vertices.
Although the vertices of a tetrahedron can be numbered in 24 (4!)
different ways, there are only 12 different orientations for a
physical tetrahedron. The following permutations, applied once or
twice, will generate all 12 of the orientations. (The identity
permutation, Permutation(range(4)), is not included since it does
not change the orientation of the vertices.)
>>> pgroup = [Permutation([[0, 1, 2], [3]]), \
Permutation([[0, 1, 3], [2]]), \
Permutation([[0, 2, 3], [1]]), \
Permutation([[1, 2, 3], [0]]), \
Permutation([[0, 1], [2, 3]]), \
Permutation([[0, 2], [1, 3]]), \
Permutation([[0, 3], [1, 2]])]
The Polyhedron is now constructed and demonstrated:
>>> tetra = Polyhedron(corners, faces, pgroup)
>>> tetra.size
4
>>> tetra.edges
FiniteSet((0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3))
>>> tetra.corners
(w, x, y, z)
It can be rotated with an arbitrary permutation of vertices, e.g.
the following permutation is not in the pgroup:
>>> tetra.rotate(Permutation([0, 1, 3, 2]))
>>> tetra.corners
(w, x, z, y)
An allowed permutation of the vertices can be constructed by
repeatedly applying permutations from the pgroup to the vertices.
Here is a demonstration that applying p and p**2 for every p in
pgroup generates all the orientations of a tetrahedron and no others:
>>> all = ( (w, x, y, z), \
(x, y, w, z), \
(y, w, x, z), \
(w, z, x, y), \
(z, w, y, x), \
(w, y, z, x), \
(y, z, w, x), \
(x, z, y, w), \
(z, y, x, w), \
(y, x, z, w), \
(x, w, z, y), \
(z, x, w, y) )
>>> got = []
>>> for p in (pgroup + [p**2 for p in pgroup]):
... h = Polyhedron(corners)
... h.rotate(p)
... got.append(h.corners)
...
>>> set(got) == set(all)
True
The make_perm method of a PermutationGroup will randomly pick
permutations, multiply them together, and return the permutation that
can be applied to the polyhedron to give the orientation produced
by those individual permutations.
Here, 3 permutations are used:
>>> tetra.pgroup.make_perm(3) # doctest: +SKIP
Permutation([0, 3, 1, 2])
To select the permutations that should be used, supply a list
of indices to the permutations in pgroup in the order they should
be applied:
>>> use = [0, 0, 2]
>>> p002 = tetra.pgroup.make_perm(3, use)
>>> p002
Permutation([1, 0, 3, 2])
Apply them one at a time:
>>> tetra.reset()
>>> for i in use:
... tetra.rotate(pgroup[i])
...
>>> tetra.vertices
(x, w, z, y)
>>> sequentially = tetra.vertices
Apply the composite permutation:
>>> tetra.reset()
>>> tetra.rotate(p002)
>>> tetra.corners
(x, w, z, y)
>>> tetra.corners in all and tetra.corners == sequentially
True
Notes
=====
Defining permutation groups
---------------------------
It is not necessary to enter any permutations, nor is it necessary to
enter a complete set of transformations. In fact, for a polyhedron,
all configurations can be constructed from just two permutations.
For example, the orientations of a tetrahedron can be generated from
an axis passing through a vertex and face and another axis passing
through a different vertex or from an axis passing through the
midpoints of two edges opposite of each other.
For simplicity of presentation, consider a square --
not a cube -- with vertices 1, 2, 3, and 4:
1-----2 We could think of axes of rotation being:
| | 1) through the face
| | 2) from midpoint 1-2 to 3-4 or 1-3 to 2-4
3-----4 3) lines 1-4 or 2-3
To determine how to write the permutations, imagine 4 cameras,
one at each corner, labeled A-D:
A B A B
1-----2 1-----3 vertex index:
| | | | 1 0
| | | | 2 1
3-----4 2-----4 3 2
C D C D 4 3
original after rotation
along 1-4
A diagonal and a face axis will be chosen for the "permutation group"
from which any orientation can be constructed.
>>> pgroup = []
Imagine a clockwise rotation when viewing 1-4 from camera A. The new
orientation is (in camera-order): 1, 3, 2, 4 so the permutation is
given using the *indices* of the vertices as:
>>> pgroup.append(Permutation((0, 2, 1, 3)))
Now imagine rotating clockwise when looking down an axis entering the
center of the square as viewed. The new camera-order would be
3, 1, 4, 2 so the permutation is (using indices):
>>> pgroup.append(Permutation((2, 0, 3, 1)))
The square can now be constructed:
** use real-world labels for the vertices, entering them in
camera order
** for the faces we use zero-based indices of the vertices
in *edge-order* as the face is traversed; neither the
direction nor the starting point matter -- the faces are
only used to define edges (if so desired).
>>> square = Polyhedron((1, 2, 3, 4), [(0, 1, 3, 2)], pgroup)
To rotate the square with a single permutation we can do:
>>> square.rotate(square.pgroup[0])
>>> square.corners
(1, 3, 2, 4)
To use more than one permutation (or to use one permutation more
than once) it is more convenient to use the make_perm method:
>>> p011 = square.pgroup.make_perm([0, 1, 1]) # diag flip + 2 rotations
>>> square.reset() # return to initial orientation
>>> square.rotate(p011)
>>> square.corners
(4, 2, 3, 1)
Thinking outside the box
------------------------
Although the Polyhedron object has a direct physical meaning, it
actually has broader application. In the most general sense it is
just a decorated PermutationGroup, allowing one to connect the
permutations to something physical. For example, a Rubik's cube is
not a proper polyhedron, but the Polyhedron class can be used to
represent it in a way that helps to visualize the Rubik's cube.
>>> from sympy.utilities.iterables import flatten, unflatten
>>> from sympy import symbols
>>> from sympy.combinatorics import RubikGroup
>>> facelets = flatten([symbols(s+'1:5') for s in 'UFRBLD'])
>>> def show():
... pairs = unflatten(r2.corners, 2)
... print(pairs[::2])
... print(pairs[1::2])
...
>>> r2 = Polyhedron(facelets, pgroup=RubikGroup(2))
>>> show()
[(U1, U2), (F1, F2), (R1, R2), (B1, B2), (L1, L2), (D1, D2)]
[(U3, U4), (F3, F4), (R3, R4), (B3, B4), (L3, L4), (D3, D4)]
>>> r2.rotate(0) # cw rotation of F
>>> show()
[(U1, U2), (F3, F1), (U3, R2), (B1, B2), (L1, D1), (R3, R1)]
[(L4, L2), (F4, F2), (U4, R4), (B3, B4), (L3, D2), (D3, D4)]
Predefined Polyhedra
====================
For convenience, the vertices and faces are defined for the following
standard solids along with a permutation group for transformations.
When the polyhedron is oriented as indicated below, the vertices in
a given horizontal plane are numbered in ccw direction, starting from
the vertex that will give the lowest indices in a given face. (In the
net of the vertices, indices preceded by "-" indicate replication of
the lhs index in the net.)
tetrahedron, tetrahedron_faces
------------------------------
4 vertices (vertex up) net:
0 0-0
1 2 3-1
4 faces:
(0, 1, 2) (0, 2, 3) (0, 3, 1) (1, 2, 3)
cube, cube_faces
----------------
8 vertices (face up) net:
0 1 2 3-0
4 5 6 7-4
6 faces:
(0, 1, 2, 3)
(0, 1, 5, 4) (1, 2, 6, 5) (2, 3, 7, 6) (0, 3, 7, 4)
(4, 5, 6, 7)
octahedron, octahedron_faces
----------------------------
6 vertices (vertex up) net:
0 0 0-0
1 2 3 4-1
5 5 5-5
8 faces:
(0, 1, 2) (0, 2, 3) (0, 3, 4) (0, 1, 4)
(1, 2, 5) (2, 3, 5) (3, 4, 5) (1, 4, 5)
dodecahedron, dodecahedron_faces
--------------------------------
20 vertices (vertex up) net:
0 1 2 3 4 -0
5 6 7 8 9 -5
14 10 11 12 13-14
15 16 17 18 19-15
12 faces:
(0, 1, 2, 3, 4) (0, 1, 6, 10, 5) (1, 2, 7, 11, 6)
(2, 3, 8, 12, 7) (3, 4, 9, 13, 8) (0, 4, 9, 14, 5)
(5, 10, 16, 15, 14) (6, 10, 16, 17, 11) (7, 11, 17, 18, 12)
(8, 12, 18, 19, 13) (9, 13, 19, 15, 14)(15, 16, 17, 18, 19)
icosahedron, icosahedron_faces
------------------------------
12 vertices (face up) net:
0 0 0 0 -0
1 2 3 4 5 -1
6 7 8 9 10 -6
11 11 11 11 -11
20 faces:
(0, 1, 2) (0, 2, 3) (0, 3, 4)
(0, 4, 5) (0, 1, 5) (1, 2, 6)
(2, 3, 7) (3, 4, 8) (4, 5, 9)
(1, 5, 10) (2, 6, 7) (3, 7, 8)
(4, 8, 9) (5, 9, 10) (1, 6, 10)
(6, 7, 11) (7, 8, 11) (8, 9, 11)
(9, 10, 11) (6, 10, 11)
>>> from sympy.combinatorics.polyhedron import cube
>>> cube.edges
FiniteSet((0, 1), (0, 3), (0, 4), (1, 2), (1, 5), (2, 3), (2, 6), (3, 7), (4, 5), (4, 7), (5, 6), (6, 7))
If you want to use letters or other names for the corners you
can still use the pre-calculated faces:
>>> corners = list('abcdefgh')
>>> Polyhedron(corners, cube.faces).corners
(a, b, c, d, e, f, g, h)
References
==========
.. [1] www.ocf.berkeley.edu/~wwu/articles/platonicsolids.pdf
"""
faces = [minlex(f, directed=False, is_set=True) for f in faces]
corners, faces, pgroup = args = \
[Tuple(*a) for a in (corners, faces, pgroup)]
obj = Basic.__new__(cls, *args)
obj._corners = tuple(corners) # in order given
obj._faces = FiniteSet(*faces)
if pgroup and pgroup[0].size != len(corners):
raise ValueError("Permutation size unequal to number of corners.")
# use the identity permutation if none are given
obj._pgroup = PermutationGroup(
pgroup or [Perm(range(len(corners)))] )
return obj
@property
def corners(self):
"""
Get the corners of the Polyhedron.
The method ``vertices`` is an alias for ``corners``.
Examples
========
>>> from sympy.combinatorics import Polyhedron
>>> from sympy.abc import a, b, c, d
>>> p = Polyhedron(list('abcd'))
>>> p.corners == p.vertices == (a, b, c, d)
True
See Also
========
array_form, cyclic_form
"""
return self._corners
vertices = corners
@property
def array_form(self):
"""Return the indices of the corners.
The indices are given relative to the original position of corners.
Examples
========
>>> from sympy.combinatorics.polyhedron import tetrahedron
>>> tetrahedron = tetrahedron.copy()
>>> tetrahedron.array_form
[0, 1, 2, 3]
>>> tetrahedron.rotate(0)
>>> tetrahedron.array_form
[0, 2, 3, 1]
>>> tetrahedron.pgroup[0].array_form
[0, 2, 3, 1]
See Also
========
corners, cyclic_form
"""
corners = list(self.args[0])
return [corners.index(c) for c in self.corners]
@property
def cyclic_form(self):
"""Return the indices of the corners in cyclic notation.
The indices are given relative to the original position of corners.
See Also
========
corners, array_form
"""
return Perm._af_new(self.array_form).cyclic_form
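# Illustrative note (added, not part of the original source): continuing the
# ``array_form`` example above, after ``tetrahedron.rotate(0)`` the array
# form is [0, 2, 3, 1], so ``tetrahedron.cyclic_form`` would give [[1, 2, 3]].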
@property
def size(self):
"""
Get the number of corners of the Polyhedron.
"""
return len(self._corners)
@property
def faces(self):
"""
Get the faces of the Polyhedron.
"""
return self._faces
@property
def pgroup(self):
"""
Get the permutations of the Polyhedron.
"""
return self._pgroup
@property
def edges(self):
"""
Given the faces of the polyhedron we can get the edges.
Examples
========
>>> from sympy.combinatorics import Polyhedron
>>> from sympy.abc import a, b, c
>>> corners = (a, b, c)
>>> faces = [(0, 1, 2)]
>>> Polyhedron(corners, faces).edges
FiniteSet((0, 1), (0, 2), (1, 2))
"""
if self._edges is None:
output = set()
for face in self.faces:
for i in range(len(face)):
edge = tuple(sorted([face[i], face[i - 1]]))
output.add(edge)
self._edges = FiniteSet(*output)
return self._edges
def rotate(self, perm):
"""
Apply a permutation to the polyhedron *in place*. The permutation
may be given as a Permutation instance or an integer indicating
which permutation from pgroup of the Polyhedron should be
applied.
This is an operation that is analogous to rotation about
an axis by a fixed increment.
Notes
=====
When a Permutation is applied, no check is done to see if that
is a valid permutation for the Polyhedron. For example, a cube
could be given a permutation which effectively swaps only 2
vertices. A valid permutation (that rotates the object in a
physical way) will be obtained if one only uses
permutations from the ``pgroup`` of the Polyhedron. On the other
hand, allowing arbitrary rotations (applications of permutations)
gives a way to follow named elements rather than indices since
Polyhedron allows vertices to be named while Permutation works
only with indices.
Examples
========
>>> from sympy.combinatorics import Polyhedron, Permutation
>>> from sympy.combinatorics.polyhedron import cube
>>> cube = cube.copy()
>>> cube.corners
(0, 1, 2, 3, 4, 5, 6, 7)
>>> cube.rotate(0)
>>> cube.corners
(1, 2, 3, 0, 5, 6, 7, 4)
A non-physical "rotation" that is not prohibited by this method:
>>> cube.reset()
>>> cube.rotate(Permutation([[1, 2]], size=8))
>>> cube.corners
(0, 2, 1, 3, 4, 5, 6, 7)
Polyhedron can be used to follow elements of set that are
identified by letters instead of integers:
>>> shadow = h5 = Polyhedron(list('abcde'))
>>> p = Permutation([3, 0, 1, 2, 4])
>>> h5.rotate(p)
>>> h5.corners
(d, a, b, c, e)
>>> _ == shadow.corners
True
>>> copy = h5.copy()
>>> h5.rotate(p)
>>> h5.corners == copy.corners
False
"""
if not isinstance(perm, Perm):
perm = self.pgroup[perm]
# and we know it's valid
else:
if perm.size != self.size:
raise ValueError('Polyhedron and Permutation sizes differ.')
a = perm.array_form
corners = [self.corners[a[i]] for i in range(len(self.corners))]
self._corners = tuple(corners)
def reset(self):
"""Return corners to their original positions.
Examples
========
>>> from sympy.combinatorics.polyhedron import tetrahedron as T
>>> T = T.copy()
>>> T.corners
(0, 1, 2, 3)
>>> T.rotate(0)
>>> T.corners
(0, 2, 3, 1)
>>> T.reset()
>>> T.corners
(0, 1, 2, 3)
"""
self._corners = self.args[0]
def _pgroup_calcs():
"""Return the permutation groups for each of the polyhedra and the face
definitions: tetrahedron, cube, octahedron, dodecahedron, icosahedron,
tetrahedron_faces, cube_faces, octahedron_faces, dodecahedron_faces,
icosahedron_faces
Explanation
===========
(This author didn't find and didn't know of a better way to do it though
there likely is such a way.)
Although only 2 permutations are needed for a polyhedron in order to
generate all the possible orientations, a group of permutations is
provided instead. A set of permutations is called a "group" if::
a*b = c (for any pair of permutations in the group, a and b, their
product, c, is in the group)
a*(b*c) = (a*b)*c (for any 3 permutations in the group associativity holds)
there is an identity permutation, I, such that I*a = a*I for all elements
in the group
a*b = I (the inverse of each permutation is also in the group)
None of the polyhedron groups defined follow these definitions of a group.
Instead, they are selected to contain those permutations whose powers
alone will construct all orientations of the polyhedron, i.e. for
permutations ``a``, ``b``, etc... in the group, ``a, a**2, ..., a**o_a``,
``b, b**2, ..., b**o_b``, etc... (where ``o_i`` is the order of
permutation ``i``) generate all permutations of the polyhedron instead of
mixed products like ``a*b``, ``a*b**2``, etc....
Note that for a polyhedron with n vertices, the valid permutations of the
vertices exclude those that do not maintain its faces. e.g. the
permutation BCDE of a square's four corners, ABCD, is a valid
permutation while CBDE is not (because this would twist the square).
Examples
========
The is_group checks for: closure, the presence of the Identity permutation,
and the presence of the inverse for each of the elements in the group. Since
``pgroup`` stores the PermutationGroup *generated* by the supplied
permutations (the supplied permutations alone are not closed under
multiplication), the check succeeds for every polyhedron:
>>> from sympy.combinatorics.polyhedron import (
... tetrahedron, cube, octahedron, dodecahedron, icosahedron)
...
>>> polyhedra = (tetrahedron, cube, octahedron, dodecahedron, icosahedron)
>>> [h.pgroup.is_group for h in polyhedra]
...
[True, True, True, True, True]
Although tests in polyhedron's test suite check that powers of the
permutations in the groups generate all permutations of the vertices
of the polyhedron, here we also demonstrate the powers of the given
permutations create a complete group for the tetrahedron:
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> for h in polyhedra[:1]:
... G = h.pgroup
... perms = set()
... for g in G:
... for e in range(g.order()):
... p = tuple((g**e).array_form)
... perms.add(p)
...
... perms = [Permutation(p) for p in perms]
... assert PermutationGroup(perms).is_group
In addition to doing the above, the tests in the suite confirm that the
faces are all present after the application of each permutation.
References
==========
.. [1] http://dogschool.tripod.com/trianglegroup.html
"""
def _pgroup_of_double(polyh, ordered_faces, pgroup):
n = len(ordered_faces[0])
# the vertices of the double which sits inside a given polyhedron
# can be found by tracking the faces of the outer polyhedron.
# A map between face and the vertex of the double is made so that
# after rotation the position of the vertices can be located
fmap = dict(zip(ordered_faces,
range(len(ordered_faces))))
flat_faces = flatten(ordered_faces)
new_pgroup = []
for i, p in enumerate(pgroup):
h = polyh.copy()
h.rotate(p)
c = h.corners
# reorder corners in the order they should appear when
# enumerating the faces
reorder = unflatten([c[j] for j in flat_faces], n)
# make them canonical
reorder = [tuple(map(as_int,
minlex(f, directed=False, is_set=True)))
for f in reorder]
# map face to vertex: the resulting list of vertices are the
# permutation that we seek for the double
new_pgroup.append(Perm([fmap[f] for f in reorder]))
return new_pgroup
tetrahedron_faces = [
(0, 1, 2), (0, 2, 3), (0, 3, 1), # upper 3
(1, 2, 3), # bottom
]
# cw from top
#
_t_pgroup = [
Perm([[1, 2, 3], [0]]), # cw from top
Perm([[0, 1, 2], [3]]), # cw from front face
Perm([[0, 3, 2], [1]]), # cw from back right face
Perm([[0, 3, 1], [2]]), # cw from back left face
Perm([[0, 1], [2, 3]]), # through front left edge
Perm([[0, 2], [1, 3]]), # through front right edge
Perm([[0, 3], [1, 2]]), # through back edge
]
tetrahedron = Polyhedron(
range(4),
tetrahedron_faces,
_t_pgroup)
cube_faces = [
(0, 1, 2, 3), # upper
(0, 1, 5, 4), (1, 2, 6, 5), (2, 3, 7, 6), (0, 3, 7, 4), # middle 4
(4, 5, 6, 7), # lower
]
# U, D, F, B, L, R = up, down, front, back, left, right
_c_pgroup = [Perm(p) for p in
[
[1, 2, 3, 0, 5, 6, 7, 4], # cw from top, U
[4, 0, 3, 7, 5, 1, 2, 6], # cw from F face
[4, 5, 1, 0, 7, 6, 2, 3], # cw from R face
[1, 0, 4, 5, 2, 3, 7, 6], # cw through UF edge
[6, 2, 1, 5, 7, 3, 0, 4], # cw through UR edge
[6, 7, 3, 2, 5, 4, 0, 1], # cw through UB edge
[3, 7, 4, 0, 2, 6, 5, 1], # cw through UL edge
[4, 7, 6, 5, 0, 3, 2, 1], # cw through FL edge
[6, 5, 4, 7, 2, 1, 0, 3], # cw through FR edge
[0, 3, 7, 4, 1, 2, 6, 5], # cw through UFL vertex
[5, 1, 0, 4, 6, 2, 3, 7], # cw through UFR vertex
[5, 6, 2, 1, 4, 7, 3, 0], # cw through UBR vertex
[7, 4, 0, 3, 6, 5, 1, 2], # cw through UBL
]]
cube = Polyhedron(
range(8),
cube_faces,
_c_pgroup)
octahedron_faces = [
(0, 1, 2), (0, 2, 3), (0, 3, 4), (0, 1, 4), # top 4
(1, 2, 5), (2, 3, 5), (3, 4, 5), (1, 4, 5), # bottom 4
]
octahedron = Polyhedron(
range(6),
octahedron_faces,
_pgroup_of_double(cube, cube_faces, _c_pgroup))
dodecahedron_faces = [
(0, 1, 2, 3, 4), # top
(0, 1, 6, 10, 5), (1, 2, 7, 11, 6), (2, 3, 8, 12, 7), # upper 5
(3, 4, 9, 13, 8), (0, 4, 9, 14, 5),
(5, 10, 16, 15, 14), (6, 10, 16, 17, 11), (7, 11, 17, 18,
12), # lower 5
(8, 12, 18, 19, 13), (9, 13, 19, 15, 14),
(15, 16, 17, 18, 19) # bottom
]
def _string_to_perm(s):
rv = [Perm(range(20))]
p = None
for si in s:
if si not in '01':
count = int(si) - 1
else:
count = 1
if si == '0':
p = _f0
elif si == '1':
p = _f1
rv.extend([p]*count)
return Perm.rmul(*rv)
# top face cw
_f0 = Perm([
1, 2, 3, 4, 0, 6, 7, 8, 9, 5, 11,
12, 13, 14, 10, 16, 17, 18, 19, 15])
# front face cw
_f1 = Perm([
5, 0, 4, 9, 14, 10, 1, 3, 13, 15,
6, 2, 8, 19, 16, 17, 11, 7, 12, 18])
# the strings below, like 0104 are shorthand for F0*F1*F0**4 and are
# the remaining 4 face rotations, 15 edge permutations, and the
# 10 vertex rotations.
_dodeca_pgroup = [_f0, _f1] + [_string_to_perm(s) for s in '''
0104 140 014 0410
010 1403 03104 04103 102
120 1304 01303 021302 03130
0412041 041204103 04120410 041204104 041204102
10 01 1402 0140 04102 0412 1204 1302 0130 03120'''.strip().split()]
dodecahedron = Polyhedron(
range(20),
dodecahedron_faces,
_dodeca_pgroup)
icosahedron_faces = [
(0, 1, 2), (0, 2, 3), (0, 3, 4), (0, 4, 5), (0, 1, 5),
(1, 6, 7), (1, 2, 7), (2, 7, 8), (2, 3, 8), (3, 8, 9),
(3, 4, 9), (4, 9, 10), (4, 5, 10), (5, 6, 10), (1, 5, 6),
(6, 7, 11), (7, 8, 11), (8, 9, 11), (9, 10, 11), (6, 10, 11)]
icosahedron = Polyhedron(
range(12),
icosahedron_faces,
_pgroup_of_double(
dodecahedron, dodecahedron_faces, _dodeca_pgroup))
return (tetrahedron, cube, octahedron, dodecahedron, icosahedron,
tetrahedron_faces, cube_faces, octahedron_faces,
dodecahedron_faces, icosahedron_faces)
# -----------------------------------------------------------------------
# Standard Polyhedron groups
#
# These are generated using _pgroup_calcs() above. However to save
# import time we encode them explicitly here.
# -----------------------------------------------------------------------
tetrahedron = Polyhedron(
Tuple(0, 1, 2, 3),
Tuple(
Tuple(0, 1, 2),
Tuple(0, 2, 3),
Tuple(0, 1, 3),
Tuple(1, 2, 3)),
Tuple(
Perm(1, 2, 3),
Perm(3)(0, 1, 2),
Perm(0, 3, 2),
Perm(0, 3, 1),
Perm(0, 1)(2, 3),
Perm(0, 2)(1, 3),
Perm(0, 3)(1, 2)
))
cube = Polyhedron(
Tuple(0, 1, 2, 3, 4, 5, 6, 7),
Tuple(
Tuple(0, 1, 2, 3),
Tuple(0, 1, 5, 4),
Tuple(1, 2, 6, 5),
Tuple(2, 3, 7, 6),
Tuple(0, 3, 7, 4),
Tuple(4, 5, 6, 7)),
Tuple(
Perm(0, 1, 2, 3)(4, 5, 6, 7),
Perm(0, 4, 5, 1)(2, 3, 7, 6),
Perm(0, 4, 7, 3)(1, 5, 6, 2),
Perm(0, 1)(2, 4)(3, 5)(6, 7),
Perm(0, 6)(1, 2)(3, 5)(4, 7),
Perm(0, 6)(1, 7)(2, 3)(4, 5),
Perm(0, 3)(1, 7)(2, 4)(5, 6),
Perm(0, 4)(1, 7)(2, 6)(3, 5),
Perm(0, 6)(1, 5)(2, 4)(3, 7),
Perm(1, 3, 4)(2, 7, 5),
Perm(7)(0, 5, 2)(3, 4, 6),
Perm(0, 5, 7)(1, 6, 3),
Perm(0, 7, 2)(1, 4, 6)))
octahedron = Polyhedron(
Tuple(0, 1, 2, 3, 4, 5),
Tuple(
Tuple(0, 1, 2),
Tuple(0, 2, 3),
Tuple(0, 3, 4),
Tuple(0, 1, 4),
Tuple(1, 2, 5),
Tuple(2, 3, 5),
Tuple(3, 4, 5),
Tuple(1, 4, 5)),
Tuple(
Perm(5)(1, 2, 3, 4),
Perm(0, 4, 5, 2),
Perm(0, 1, 5, 3),
Perm(0, 1)(2, 4)(3, 5),
Perm(0, 2)(1, 3)(4, 5),
Perm(0, 3)(1, 5)(2, 4),
Perm(0, 4)(1, 3)(2, 5),
Perm(0, 5)(1, 4)(2, 3),
Perm(0, 5)(1, 2)(3, 4),
Perm(0, 4, 1)(2, 3, 5),
Perm(0, 1, 2)(3, 4, 5),
Perm(0, 2, 3)(1, 5, 4),
Perm(0, 4, 3)(1, 5, 2)))
dodecahedron = Polyhedron(
Tuple(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19),
Tuple(
Tuple(0, 1, 2, 3, 4),
Tuple(0, 1, 6, 10, 5),
Tuple(1, 2, 7, 11, 6),
Tuple(2, 3, 8, 12, 7),
Tuple(3, 4, 9, 13, 8),
Tuple(0, 4, 9, 14, 5),
Tuple(5, 10, 16, 15, 14),
Tuple(6, 10, 16, 17, 11),
Tuple(7, 11, 17, 18, 12),
Tuple(8, 12, 18, 19, 13),
Tuple(9, 13, 19, 15, 14),
Tuple(15, 16, 17, 18, 19)),
Tuple(
Perm(0, 1, 2, 3, 4)(5, 6, 7, 8, 9)(10, 11, 12, 13, 14)(15, 16, 17, 18, 19),
Perm(0, 5, 10, 6, 1)(2, 4, 14, 16, 11)(3, 9, 15, 17, 7)(8, 13, 19, 18, 12),
Perm(0, 10, 17, 12, 3)(1, 6, 11, 7, 2)(4, 5, 16, 18, 8)(9, 14, 15, 19, 13),
Perm(0, 6, 17, 19, 9)(1, 11, 18, 13, 4)(2, 7, 12, 8, 3)(5, 10, 16, 15, 14),
Perm(0, 2, 12, 19, 14)(1, 7, 18, 15, 5)(3, 8, 13, 9, 4)(6, 11, 17, 16, 10),
Perm(0, 4, 9, 14, 5)(1, 3, 13, 15, 10)(2, 8, 19, 16, 6)(7, 12, 18, 17, 11),
Perm(0, 1)(2, 5)(3, 10)(4, 6)(7, 14)(8, 16)(9, 11)(12, 15)(13, 17)(18, 19),
Perm(0, 7)(1, 2)(3, 6)(4, 11)(5, 12)(8, 10)(9, 17)(13, 16)(14, 18)(15, 19),
Perm(0, 12)(1, 8)(2, 3)(4, 7)(5, 18)(6, 13)(9, 11)(10, 19)(14, 17)(15, 16),
Perm(0, 8)(1, 13)(2, 9)(3, 4)(5, 12)(6, 19)(7, 14)(10, 18)(11, 15)(16, 17),
Perm(0, 4)(1, 9)(2, 14)(3, 5)(6, 13)(7, 15)(8, 10)(11, 19)(12, 16)(17, 18),
Perm(0, 5)(1, 14)(2, 15)(3, 16)(4, 10)(6, 9)(7, 19)(8, 17)(11, 13)(12, 18),
Perm(0, 11)(1, 6)(2, 10)(3, 16)(4, 17)(5, 7)(8, 15)(9, 18)(12, 14)(13, 19),
Perm(0, 18)(1, 12)(2, 7)(3, 11)(4, 17)(5, 19)(6, 8)(9, 16)(10, 13)(14, 15),
Perm(0, 18)(1, 19)(2, 13)(3, 8)(4, 12)(5, 17)(6, 15)(7, 9)(10, 16)(11, 14),
Perm(0, 13)(1, 19)(2, 15)(3, 14)(4, 9)(5, 8)(6, 18)(7, 16)(10, 12)(11, 17),
Perm(0, 16)(1, 15)(2, 19)(3, 18)(4, 17)(5, 10)(6, 14)(7, 13)(8, 12)(9, 11),
Perm(0, 18)(1, 17)(2, 16)(3, 15)(4, 19)(5, 12)(6, 11)(7, 10)(8, 14)(9, 13),
Perm(0, 15)(1, 19)(2, 18)(3, 17)(4, 16)(5, 14)(6, 13)(7, 12)(8, 11)(9, 10),
Perm(0, 17)(1, 16)(2, 15)(3, 19)(4, 18)(5, 11)(6, 10)(7, 14)(8, 13)(9, 12),
Perm(0, 19)(1, 18)(2, 17)(3, 16)(4, 15)(5, 13)(6, 12)(7, 11)(8, 10)(9, 14),
Perm(1, 4, 5)(2, 9, 10)(3, 14, 6)(7, 13, 16)(8, 15, 11)(12, 19, 17),
Perm(19)(0, 6, 2)(3, 5, 11)(4, 10, 7)(8, 14, 17)(9, 16, 12)(13, 15, 18),
Perm(0, 11, 8)(1, 7, 3)(4, 6, 12)(5, 17, 13)(9, 10, 18)(14, 16, 19),
Perm(0, 7, 13)(1, 12, 9)(2, 8, 4)(5, 11, 19)(6, 18, 14)(10, 17, 15),
Perm(0, 3, 9)(1, 8, 14)(2, 13, 5)(6, 12, 15)(7, 19, 10)(11, 18, 16),
Perm(0, 14, 10)(1, 9, 16)(2, 13, 17)(3, 19, 11)(4, 15, 6)(7, 8, 18),
Perm(0, 16, 7)(1, 10, 11)(2, 5, 17)(3, 14, 18)(4, 15, 12)(8, 9, 19),
Perm(0, 16, 13)(1, 17, 8)(2, 11, 12)(3, 6, 18)(4, 10, 19)(5, 15, 9),
Perm(0, 11, 15)(1, 17, 14)(2, 18, 9)(3, 12, 13)(4, 7, 19)(5, 6, 16),
Perm(0, 8, 15)(1, 12, 16)(2, 18, 10)(3, 19, 5)(4, 13, 14)(6, 7, 17)))
icosahedron = Polyhedron(
Tuple(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
Tuple(
Tuple(0, 1, 2),
Tuple(0, 2, 3),
Tuple(0, 3, 4),
Tuple(0, 4, 5),
Tuple(0, 1, 5),
Tuple(1, 6, 7),
Tuple(1, 2, 7),
Tuple(2, 7, 8),
Tuple(2, 3, 8),
Tuple(3, 8, 9),
Tuple(3, 4, 9),
Tuple(4, 9, 10),
Tuple(4, 5, 10),
Tuple(5, 6, 10),
Tuple(1, 5, 6),
Tuple(6, 7, 11),
Tuple(7, 8, 11),
Tuple(8, 9, 11),
Tuple(9, 10, 11),
Tuple(6, 10, 11)),
Tuple(
Perm(11)(1, 2, 3, 4, 5)(6, 7, 8, 9, 10),
Perm(0, 5, 6, 7, 2)(3, 4, 10, 11, 8),
Perm(0, 1, 7, 8, 3)(4, 5, 6, 11, 9),
Perm(0, 2, 8, 9, 4)(1, 7, 11, 10, 5),
Perm(0, 3, 9, 10, 5)(1, 2, 8, 11, 6),
Perm(0, 4, 10, 6, 1)(2, 3, 9, 11, 7),
Perm(0, 1)(2, 5)(3, 6)(4, 7)(8, 10)(9, 11),
Perm(0, 2)(1, 3)(4, 7)(5, 8)(6, 9)(10, 11),
Perm(0, 3)(1, 9)(2, 4)(5, 8)(6, 11)(7, 10),
Perm(0, 4)(1, 9)(2, 10)(3, 5)(6, 8)(7, 11),
Perm(0, 5)(1, 4)(2, 10)(3, 6)(7, 9)(8, 11),
Perm(0, 6)(1, 5)(2, 10)(3, 11)(4, 7)(8, 9),
Perm(0, 7)(1, 2)(3, 6)(4, 11)(5, 8)(9, 10),
Perm(0, 8)(1, 9)(2, 3)(4, 7)(5, 11)(6, 10),
Perm(0, 9)(1, 11)(2, 10)(3, 4)(5, 8)(6, 7),
Perm(0, 10)(1, 9)(2, 11)(3, 6)(4, 5)(7, 8),
Perm(0, 11)(1, 6)(2, 10)(3, 9)(4, 8)(5, 7),
Perm(0, 11)(1, 8)(2, 7)(3, 6)(4, 10)(5, 9),
Perm(0, 11)(1, 10)(2, 9)(3, 8)(4, 7)(5, 6),
Perm(0, 11)(1, 7)(2, 6)(3, 10)(4, 9)(5, 8),
Perm(0, 11)(1, 9)(2, 8)(3, 7)(4, 6)(5, 10),
Perm(0, 5, 1)(2, 4, 6)(3, 10, 7)(8, 9, 11),
Perm(0, 1, 2)(3, 5, 7)(4, 6, 8)(9, 10, 11),
Perm(0, 2, 3)(1, 8, 4)(5, 7, 9)(6, 11, 10),
Perm(0, 3, 4)(1, 8, 10)(2, 9, 5)(6, 7, 11),
Perm(0, 4, 5)(1, 3, 10)(2, 9, 6)(7, 8, 11),
Perm(0, 10, 7)(1, 5, 6)(2, 4, 11)(3, 9, 8),
Perm(0, 6, 8)(1, 7, 2)(3, 5, 11)(4, 10, 9),
Perm(0, 7, 9)(1, 11, 4)(2, 8, 3)(5, 6, 10),
Perm(0, 8, 10)(1, 7, 6)(2, 11, 5)(3, 9, 4),
Perm(0, 9, 6)(1, 3, 11)(2, 8, 7)(4, 10, 5)))
tetrahedron_faces = list(tuple(arg) for arg in tetrahedron.faces)
cube_faces = list(tuple(arg) for arg in cube.faces)
octahedron_faces = list(tuple(arg) for arg in octahedron.faces)
dodecahedron_faces = list(tuple(arg) for arg in dodecahedron.faces)
icosahedron_faces = list(tuple(arg) for arg in icosahedron.faces)
import itertools
from sympy.combinatorics.fp_groups import FpGroup, FpSubgroup, simplify_presentation
from sympy.combinatorics.free_groups import FreeGroup
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.core.numbers import igcd
from sympy.ntheory.factor_ import totient
from sympy import S
class GroupHomomorphism:
'''
A class representing group homomorphisms. Instantiate using `homomorphism()`.
References
==========
.. [1] Holt, D., Eick, B. and O'Brien, E. (2005). Handbook of computational group theory.
'''
def __init__(self, domain, codomain, images):
self.domain = domain
self.codomain = codomain
self.images = images
self._inverses = None
self._kernel = None
self._image = None
def _invs(self):
'''
Return a dictionary with `{gen: inverse}` where `gen` is a rewriting
generator of `codomain` (e.g. strong generator for permutation groups)
and `inverse` is an element of its preimage
'''
image = self.image()
inverses = {}
for k in list(self.images.keys()):
v = self.images[k]
if not (v in inverses
or v.is_identity):
inverses[v] = k
if isinstance(self.codomain, PermutationGroup):
gens = image.strong_gens
else:
gens = image.generators
for g in gens:
if g in inverses or g.is_identity:
continue
w = self.domain.identity
if isinstance(self.codomain, PermutationGroup):
parts = image._strong_gens_slp[g][::-1]
else:
parts = g
for s in parts:
if s in inverses:
w = w*inverses[s]
else:
w = w*inverses[s**-1]**-1
inverses[g] = w
return inverses
def invert(self, g):
'''
Return an element of the preimage of ``g`` or of each element
of ``g`` if ``g`` is a list.
Explanation
===========
If the codomain is an FpGroup, the inverse for equal
elements might not always be the same unless the FpGroup's
rewriting system is confluent. However, making a system
confluent can be time-consuming. If it's important, try
`self.codomain.make_confluent()` first.
'''
from sympy.combinatorics import Permutation
from sympy.combinatorics.free_groups import FreeGroupElement
if isinstance(g, (Permutation, FreeGroupElement)):
if isinstance(self.codomain, FpGroup):
g = self.codomain.reduce(g)
if self._inverses is None:
self._inverses = self._invs()
image = self.image()
w = self.domain.identity
if isinstance(self.codomain, PermutationGroup):
gens = image.generator_product(g)[::-1]
else:
gens = g
# the following can't be "for s in gens:"
# because that would be equivalent to
# "for s in gens.array_form:" when g is
# a FreeGroupElement. On the other hand,
# when you call gens by index, the generator
# (or inverse) at position i is returned.
for i in range(len(gens)):
s = gens[i]
if s.is_identity:
continue
if s in self._inverses:
w = w*self._inverses[s]
else:
w = w*self._inverses[s**-1]**-1
return w
elif isinstance(g, list):
return [self.invert(e) for e in g]
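# Usage note (an added sketch, not from the original source): for a
# homomorphism ``T`` and a domain element ``g``,
#
#     w = T.invert(T(g))
#
# returns *some* preimage of ``T(g)``: ``T(w)`` equals ``T(g)`` (for FpGroup
# codomains only up to the rewriting system, see the caveat above), while
# ``w`` itself agrees with ``g`` only up to an element of ``T.kernel()``.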
def kernel(self):
'''
Compute the kernel of `self`.
'''
if self._kernel is None:
self._kernel = self._compute_kernel()
return self._kernel
def _compute_kernel(self):
from sympy import S
G = self.domain
G_order = G.order()
if G_order is S.Infinity:
raise NotImplementedError(
"Kernel computation is not implemented for infinite groups")
gens = []
if isinstance(G, PermutationGroup):
K = PermutationGroup(G.identity)
else:
K = FpSubgroup(G, gens, normal=True)
i = self.image().order()
while K.order()*i != G_order:
r = G.random()
k = r*self.invert(self(r))**-1
if k not in K:
gens.append(k)
if isinstance(G, PermutationGroup):
K = PermutationGroup(gens)
else:
K = FpSubgroup(G, gens, normal=True)
return K
def image(self):
'''
Compute the image of `self`.
'''
if self._image is None:
values = list(set(self.images.values()))
if isinstance(self.codomain, PermutationGroup):
self._image = self.codomain.subgroup(values)
else:
self._image = FpSubgroup(self.codomain, values)
return self._image
def _apply(self, elem):
'''
Apply `self` to `elem`.
'''
if elem not in self.domain:
if isinstance(elem, (list, tuple)):
return [self._apply(e) for e in elem]
raise ValueError("The supplied element doesn't belong to the domain")
if elem.is_identity:
return self.codomain.identity
else:
images = self.images
value = self.codomain.identity
if isinstance(self.domain, PermutationGroup):
gens = self.domain.generator_product(elem, original=True)
for g in gens:
if g in self.images:
value = images[g]*value
else:
value = images[g**-1]**-1*value
else:
i = 0
for _, p in elem.array_form:
if p < 0:
g = elem[i]**-1
else:
g = elem[i]
value = value*images[g]**p
i += abs(p)
return value
def __call__(self, elem):
return self._apply(elem)
def is_injective(self):
'''
Check if the homomorphism is injective
'''
return self.kernel().order() == 1
def is_surjective(self):
'''
Check if the homomorphism is surjective
'''
from sympy import S
im = self.image().order()
oth = self.codomain.order()
if im is S.Infinity and oth is S.Infinity:
return None
else:
return im == oth
def is_isomorphism(self):
'''
Check if `self` is an isomorphism.
'''
return self.is_injective() and self.is_surjective()
def is_trivial(self):
'''
Check if `self` is a trivial homomorphism, i.e. all elements
are mapped to the identity.
'''
return self.image().order() == 1
def compose(self, other):
'''
Return the composition of `self` and `other`, i.e.
the homomorphism phi such that for all g in the domain
of `other`, phi(g) = self(other(g))
'''
if not other.image().is_subgroup(self.domain):
raise ValueError("The image of `other` must be a subgroup of "
"the domain of `self`")
images = {g: self(other(g)) for g in other.images}
return GroupHomomorphism(other.domain, self.codomain, images)
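# Sketch (added illustration; assumes ``phi`` and ``psi`` are
# GroupHomomorphism instances with psi.image() a subgroup of phi.domain):
#
#     chi = phi.compose(psi)
#     # chi.domain is psi.domain, chi.codomain is phi.codomain, and
#     # chi(g) == phi(psi(g)) for every g in psi.domain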
def restrict_to(self, H):
'''
Return the restriction of the homomorphism to the subgroup `H`
of the domain.
'''
if not isinstance(H, PermutationGroup) or not H.is_subgroup(self.domain):
raise ValueError("Given H is not a subgroup of the domain")
domain = H
images = {g: self(g) for g in H.generators}
return GroupHomomorphism(domain, self.codomain, images)
def invert_subgroup(self, H):
'''
Return the subgroup of the domain that is the inverse image
of the subgroup ``H`` of the homomorphism image
'''
if not H.is_subgroup(self.image()):
raise ValueError("Given H is not a subgroup of the image")
gens = []
P = PermutationGroup(self.image().identity)
for h in H.generators:
h_i = self.invert(h)
if h_i not in P:
gens.append(h_i)
P = PermutationGroup(gens)
for k in self.kernel().generators:
if k*h_i not in P:
gens.append(k*h_i)
P = PermutationGroup(gens)
return P
def homomorphism(domain, codomain, gens, images=[], check=True):
'''
Create (if possible) a group homomorphism from the group ``domain``
to the group ``codomain`` defined by the images of the domain's
generators ``gens``. ``gens`` and ``images`` can be either lists or tuples
of equal sizes. If ``gens`` is a proper subset of the group's generators,
the unspecified generators will be mapped to the identity. If the
images are not specified, a trivial homomorphism will be created.
If the given images of the generators do not define a homomorphism,
an exception is raised.
If ``check`` is ``False``, don't check whether the given images actually
define a homomorphism.
'''
if not isinstance(domain, (PermutationGroup, FpGroup, FreeGroup)):
raise TypeError("The domain must be a group")
if not isinstance(codomain, (PermutationGroup, FpGroup, FreeGroup)):
raise TypeError("The codomain must be a group")
generators = domain.generators
if any([g not in generators for g in gens]):
raise ValueError("The supplied generators must be a subset of the domain's generators")
if any([g not in codomain for g in images]):
raise ValueError("The images must be elements of the codomain")
if images and len(images) != len(gens):
raise ValueError("The number of images must be equal to the number of generators")
gens = list(gens)
images = list(images)
images.extend([codomain.identity]*(len(generators)-len(images)))
gens.extend([g for g in generators if g not in gens])
images = dict(zip(gens,images))
if check and not _check_homomorphism(domain, codomain, images):
raise ValueError("The given images do not define a homomorphism")
return GroupHomomorphism(domain, codomain, images)
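# A minimal usage sketch (added; not a doctest from the original module):
#
#     from sympy.combinatorics import Permutation, PermutationGroup
#     from sympy.combinatorics.homomorphisms import homomorphism
#     C4 = PermutationGroup(Permutation(0, 1, 2, 3))
#     T = homomorphism(C4, C4, C4.generators, C4.generators)  # identity map
#     g = Permutation(0, 1, 2, 3)**2
#     T(g) == g    # -> True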
def _check_homomorphism(domain, codomain, images):
if hasattr(domain, 'relators'):
rels = domain.relators
else:
gens = domain.presentation().generators
rels = domain.presentation().relators
identity = codomain.identity
def _image(r):
if r.is_identity:
return identity
else:
w = identity
r_arr = r.array_form
i = 0
j = 0
# i is the index for r and j is for
# r_arr. r_arr[j] is the tuple (sym, p)
# where sym is the generator symbol
# and p is the power to which it is
# raised while r[i] is a generator
# (not just its symbol) or the inverse of
# a generator - hence the need for
# both indices
while i < len(r):
power = r_arr[j][1]
if isinstance(domain, PermutationGroup) and r[i] in gens:
s = domain.generators[gens.index(r[i])]
else:
s = r[i]
if s in images:
w = w*images[s]**power
elif s**-1 in images:
w = w*images[s**-1]**power
i += abs(power)
j += 1
return w
for r in rels:
if isinstance(codomain, FpGroup):
s = codomain.equals(_image(r), identity)
if s is None:
# only try to make the rewriting system
# confluent when it can't determine the
# truth of equality otherwise
success = codomain.make_confluent()
s = codomain.equals(_image(r), identity)
if s is None and not success:
raise RuntimeError("Can't determine if the images "
"define a homomorphism. Try increasing "
"the maximum number of rewriting rules "
"(group._rewriting_system.set_max(new_value); "
"the current value is stored in group._rewriting"
"_system.maxeqns)")
else:
s = _image(r).is_identity
if not s:
return False
return True
def orbit_homomorphism(group, omega):
'''
Return the homomorphism induced by the action of the permutation
group ``group`` on the set ``omega`` that is closed under the action.
'''
from sympy.combinatorics import Permutation
from sympy.combinatorics.named_groups import SymmetricGroup
codomain = SymmetricGroup(len(omega))
identity = codomain.identity
omega = list(omega)
images = {g: identity*Permutation([omega.index(o^g) for o in omega]) for g in group.generators}
group._schreier_sims(base=omega)
H = GroupHomomorphism(group, codomain, images)
if len(group.basic_stabilizers) > len(omega):
H._kernel = group.basic_stabilizers[len(omega)]
else:
H._kernel = PermutationGroup([group.identity])
return H
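# Sketch (added example; assumes the full vertex set {0, 1, 2, 3} of the
# square, which is closed under the dihedral action; that action is
# faithful, so the kernel is trivial):
#
#     from sympy.combinatorics.named_groups import DihedralGroup
#     G = DihedralGroup(4)
#     H = orbit_homomorphism(G, [0, 1, 2, 3])
#     H.kernel().order()    # -> 1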
def block_homomorphism(group, blocks):
'''
Return the homomorphism induced by the action of the permutation
group ``group`` on the block system ``blocks``. The latter should be
of the same form as returned by the ``minimal_block`` method for
permutation groups, namely a list of length ``group.degree`` where
the i-th entry is a representative of the block i belongs to.
'''
from sympy.combinatorics import Permutation
from sympy.combinatorics.named_groups import SymmetricGroup
n = len(blocks)
# number the blocks; m is the total number,
# b is such that b[i] is the number of the block i belongs to,
# p is the list of length m such that p[i] is the representative
# of the i-th block
m = 0
p = []
b = [None]*n
for i in range(n):
if blocks[i] == i:
p.append(i)
b[i] = m
m += 1
for i in range(n):
b[i] = b[blocks[i]]
codomain = SymmetricGroup(m)
# the list corresponding to the identity permutation in codomain
identity = range(m)
images = {g: Permutation([b[p[i]^g] for i in identity]) for g in group.generators}
H = GroupHomomorphism(group, codomain, images)
return H
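# Sketch (added example, assuming the diagonal block system {0, 2}, {1, 3}
# of the square obtained with ``minimal_block`` as described above):
#
#     from sympy.combinatorics.named_groups import DihedralGroup
#     G = DihedralGroup(4)
#     blocks = G.minimal_block([0, 2])   # one representative per point
#     H = block_homomorphism(G, blocks)
#     H.codomain.degree                  # -> 2, the number of blocks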
def group_isomorphism(G, H, isomorphism=True):
'''
Compute an isomorphism between 2 given groups.
Parameters
==========
G : A finite ``FpGroup`` or a ``PermutationGroup``.
First group.
H : A finite ``FpGroup`` or a ``PermutationGroup``
Second group.
isomorphism : bool
This is used to avoid the computation of homomorphism
when the user only wants to check if there exists
an isomorphism between the groups.
Returns
=======
If isomorphism = False -- Returns a boolean.
If isomorphism = True -- Returns a boolean and an isomorphism between `G` and `H`.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup
>>> from sympy.combinatorics.homomorphisms import group_isomorphism
>>> from sympy.combinatorics.named_groups import DihedralGroup, AlternatingGroup
>>> D = DihedralGroup(8)
>>> p = Permutation(0, 1, 2, 3, 4, 5, 6, 7)
>>> P = PermutationGroup(p)
>>> group_isomorphism(D, P)
(False, None)
>>> F, a, b = free_group("a, b")
>>> G = FpGroup(F, [a**3, b**3, (a*b)**2])
>>> H = AlternatingGroup(4)
>>> (check, T) = group_isomorphism(G, H)
>>> check
True
>>> T(b*a*b**-1*a**-1*b**-1)
(0 2 3)
Notes
=====
Uses the approach suggested by Robert Tarjan to compute the isomorphism between two groups.
First, the generators of ``G`` are mapped to the elements of ``H`` and
we check if the mapping induces an isomorphism.
'''
if not isinstance(G, (PermutationGroup, FpGroup)):
raise TypeError("The group must be a PermutationGroup or an FpGroup")
if not isinstance(H, (PermutationGroup, FpGroup)):
raise TypeError("The group must be a PermutationGroup or an FpGroup")
if isinstance(G, FpGroup) and isinstance(H, FpGroup):
G = simplify_presentation(G)
H = simplify_presentation(H)
# Two infinite FpGroups with the same generators are isomorphic
# when they have the same relators, even if the relators are
# listed in a different order.
if G.generators == H.generators and sorted(G.relators) == sorted(H.relators):
if not isomorphism:
return True
return (True, homomorphism(G, H, G.generators, H.generators))
# `_H` is the permutation group isomorphic to `H`.
_H = H
g_order = G.order()
h_order = H.order()
if g_order is S.Infinity:
raise NotImplementedError("Isomorphism methods are not implemented for infinite groups.")
if isinstance(H, FpGroup):
if h_order is S.Infinity:
raise NotImplementedError("Isomorphism methods are not implemented for infinite groups.")
_H, h_isomorphism = H._to_perm_group()
if (g_order != h_order) or (G.is_abelian != H.is_abelian):
if not isomorphism:
return False
return (False, None)
if not isomorphism:
# If the common order n is a cyclic number, i.e. gcd(n, totient(n)) == 1,
# then every group of order n is cyclic, so the two groups are isomorphic.
n = g_order
if (igcd(n, totient(n))) == 1:
return True
# Match the generators of `G` with subsets of `_H`
gens = list(G.generators)
for subset in itertools.permutations(_H, len(gens)):
images = list(subset)
images.extend([_H.identity]*(len(G.generators)-len(images)))
_images = dict(zip(gens,images))
if _check_homomorphism(G, _H, _images):
if isinstance(H, FpGroup):
images = h_isomorphism.invert(images)
T = homomorphism(G, H, G.generators, images, check=False)
if T.is_isomorphism():
# It is a valid isomorphism
if not isomorphism:
return True
return (True, T)
if not isomorphism:
return False
return (False, None)
def is_isomorphic(G, H):
'''
Check if the groups are isomorphic to each other
Parameters
==========
G : A finite ``FpGroup`` or a ``PermutationGroup``
First group.
H : A finite ``FpGroup`` or a ``PermutationGroup``
Second group.
Returns
=======
boolean
'''
return group_isomorphism(G, H, isomorphism=False)
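# Sketch (added): the dihedral group of order 6 is isomorphic to the
# symmetric group on three letters, so the following would hold:
#
#     from sympy.combinatorics.named_groups import DihedralGroup, SymmetricGroup
#     is_isomorphic(DihedralGroup(3), SymmetricGroup(3))    # -> True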
from sympy.core import Basic, Dict, sympify
from sympy.core.compatibility import as_int, default_sort_key
from sympy.core.sympify import _sympify
from sympy.functions.combinatorial.numbers import bell
from sympy.matrices import zeros
from sympy.sets.sets import FiniteSet, Union
from sympy.utilities.iterables import flatten, group
from collections import defaultdict
class Partition(FiniteSet):
"""
This class represents an abstract partition.
A partition is a set of disjoint sets whose union equals a given set.
See Also
========
sympy.utilities.iterables.partitions,
sympy.utilities.iterables.multiset_partitions
"""
_rank = None
_partition = None
def __new__(cls, *partition):
"""
Generates a new partition object.
This method also verifies if the arguments passed are
valid and raises a ValueError if they are not.
Examples
========
Creating Partition from Python lists:
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3])
>>> a
Partition(FiniteSet(1, 2), FiniteSet(3))
>>> a.partition
[[1, 2], [3]]
>>> len(a)
2
>>> a.members
(1, 2, 3)
Creating Partition from Python sets:
>>> Partition({1, 2, 3}, {4, 5})
Partition(FiniteSet(1, 2, 3), FiniteSet(4, 5))
Creating Partition from SymPy finite sets:
>>> from sympy.sets.sets import FiniteSet
>>> a = FiniteSet(1, 2, 3)
>>> b = FiniteSet(4, 5)
>>> Partition(a, b)
Partition(FiniteSet(1, 2, 3), FiniteSet(4, 5))
"""
args = []
dups = False
for arg in partition:
if isinstance(arg, list):
as_set = set(arg)
if len(as_set) < len(arg):
dups = True
break # error below
arg = as_set
args.append(_sympify(arg))
if not all(isinstance(part, FiniteSet) for part in args):
raise ValueError(
"Each argument to Partition should be " \
"a list, set, or a FiniteSet")
# sort so we have a canonical reference for RGS
U = Union(*args)
if dups or len(U) < sum(len(arg) for arg in args):
raise ValueError("Partition contained duplicate elements.")
obj = FiniteSet.__new__(cls, *args)
obj.members = tuple(U)
obj.size = len(U)
return obj
def sort_key(self, order=None):
"""Return a canonical key that can be used for sorting.
Ordering is based on the size and sorted elements of the partition
and ties are broken with the rank.
Examples
========
>>> from sympy.utilities.iterables import default_sort_key
>>> from sympy.combinatorics.partitions import Partition
>>> from sympy.abc import x
>>> a = Partition([1, 2])
>>> b = Partition([3, 4])
>>> c = Partition([1, x])
>>> d = Partition(list(range(4)))
>>> l = [d, b, a + 1, a, c]
>>> l.sort(key=default_sort_key); l
[Partition(FiniteSet(1, 2)), Partition(FiniteSet(1), FiniteSet(2)), Partition(FiniteSet(1, x)), Partition(FiniteSet(3, 4)), Partition(FiniteSet(0, 1, 2, 3))]
"""
if order is None:
members = self.members
else:
members = tuple(sorted(self.members,
key=lambda w: default_sort_key(w, order)))
return tuple(map(default_sort_key, (self.size, members, self.rank)))
@property
def partition(self):
"""Return partition as a sorted list of lists.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> Partition([1], [2, 3]).partition
[[1], [2, 3]]
"""
if self._partition is None:
self._partition = sorted([sorted(p, key=default_sort_key)
for p in self.args])
return self._partition
def __add__(self, other):
"""
Return a partition whose rank is ``other`` greater than the current rank
(mod the maximum rank for the set).
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3])
>>> a.rank
1
>>> (a + 1).rank
2
>>> (a + 100).rank
1
"""
other = as_int(other)
offset = self.rank + other
result = RGS_unrank((offset) %
RGS_enum(self.size),
self.size)
return Partition.from_rgs(result, self.members)
def __sub__(self, other):
"""
Return a partition whose rank is ``other`` less than the current rank
(mod the maximum rank for the set).
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3])
>>> a.rank
1
>>> (a - 1).rank
0
>>> (a - 100).rank
1
"""
return self.__add__(-other)
def __le__(self, other):
"""
Checks if a partition is less than or equal to
the other based on rank.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3, 4, 5])
>>> b = Partition([1], [2, 3], [4], [5])
>>> a.rank, b.rank
(9, 34)
>>> a <= a
True
>>> a <= b
True
"""
return self.sort_key() <= sympify(other).sort_key()
def __lt__(self, other):
"""
Checks if a partition is less than the other.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3, 4, 5])
>>> b = Partition([1], [2, 3], [4], [5])
>>> a.rank, b.rank
(9, 34)
>>> a < b
True
"""
return self.sort_key() < sympify(other).sort_key()
@property
def rank(self):
"""
Gets the rank of a partition.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3], [4, 5])
>>> a.rank
13
"""
if self._rank is not None:
return self._rank
self._rank = RGS_rank(self.RGS)
return self._rank
@property
def RGS(self):
"""
Returns the "restricted growth string" of the partition.
Explanation
===========
The RGS is returned as a list of indices, L, where L[i] indicates
the block in which element i appears. For example, in a partition
of 3 elements (a, b, c) into 2 blocks ([c], [a, b]) the RGS is
[1, 1, 0]: "a" is in block 1, "b" is in block 1 and "c" is in block 0.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3], [4, 5])
>>> a.members
(1, 2, 3, 4, 5)
>>> a.RGS
(0, 0, 1, 2, 2)
>>> a + 1
Partition(FiniteSet(1, 2), FiniteSet(3), FiniteSet(4), FiniteSet(5))
>>> _.RGS
(0, 0, 1, 2, 3)
"""
rgs = {}
partition = self.partition
for i, part in enumerate(partition):
for j in part:
rgs[j] = i
return tuple([rgs[i] for i in sorted(
[i for p in partition for i in p], key=default_sort_key)])
@classmethod
def from_rgs(self, rgs, elements):
"""
Creates a set partition from a restricted growth string.
Explanation
===========
The indices given in rgs are assumed to be the index
of the element as given in elements *as provided* (the
elements are not sorted by this routine). Block numbering
starts from 0. If any block was not referenced in ``rgs``
an error will be raised.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> Partition.from_rgs([0, 1, 2, 0, 1], list('abcde'))
Partition(FiniteSet(c), FiniteSet(a, d), FiniteSet(b, e))
>>> Partition.from_rgs([0, 1, 2, 0, 1], list('cbead'))
Partition(FiniteSet(e), FiniteSet(a, c), FiniteSet(b, d))
>>> a = Partition([1, 4], [2], [3, 5])
>>> Partition.from_rgs(a.RGS, a.members)
Partition(FiniteSet(1, 4), FiniteSet(2), FiniteSet(3, 5))
"""
if len(rgs) != len(elements):
raise ValueError('mismatch in rgs and element lengths')
max_elem = max(rgs) + 1
partition = [[] for i in range(max_elem)]
j = 0
for i in rgs:
partition[i].append(elements[j])
j += 1
if not all(p for p in partition):
raise ValueError('some blocks of the partition were empty.')
return Partition(*partition)
class IntegerPartition(Basic):
"""
This class represents an integer partition.
Explanation
===========
In number theory and combinatorics, a partition of a positive integer,
``n``, also called an integer partition, is a way of writing ``n`` as a
list of positive integers that sum to n. Two partitions that differ only
in the order of summands are considered to be the same partition; if order
matters then the partitions are referred to as compositions. For example,
4 has five partitions: [4], [3, 1], [2, 2], [2, 1, 1], and [1, 1, 1, 1];
the compositions [1, 2, 1] and [1, 1, 2] are the same as partition
[2, 1, 1].
See Also
========
sympy.utilities.iterables.partitions,
sympy.utilities.iterables.multiset_partitions
References
==========
https://en.wikipedia.org/wiki/Partition_%28number_theory%29
"""
_dict = None
_keys = None
def __new__(cls, partition, integer=None):
"""
Generates a new IntegerPartition object from a list or dictionary.
Explanation
==========
The partition can be given as a list of positive integers or a
dictionary of (integer, multiplicity) items. If the partition is
preceded by an integer an error will be raised if the partition
does not sum to that given integer.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> a = IntegerPartition([5, 4, 3, 1, 1])
>>> a
IntegerPartition(14, (5, 4, 3, 1, 1))
>>> print(a)
[5, 4, 3, 1, 1]
>>> IntegerPartition({1:3, 2:1})
IntegerPartition(5, (2, 1, 1, 1))
If the value that the partition should sum to is given first, a check
will be made and an error will be raised if there is a discrepancy:
>>> IntegerPartition(10, [5, 4, 3, 1])
Traceback (most recent call last):
...
ValueError: The partition is not valid
"""
if integer is not None:
integer, partition = partition, integer
if isinstance(partition, (dict, Dict)):
_ = []
for k, v in sorted(list(partition.items()), reverse=True):
if not v:
continue
k, v = as_int(k), as_int(v)
_.extend([k]*v)
partition = tuple(_)
else:
partition = tuple(sorted(map(as_int, partition), reverse=True))
sum_ok = False
if integer is None:
integer = sum(partition)
sum_ok = True
else:
integer = as_int(integer)
if not sum_ok and sum(partition) != integer:
raise ValueError("Partition did not add to %s" % integer)
if any(i < 1 for i in partition):
raise ValueError("The summands must all be positive.")
obj = Basic.__new__(cls, integer, partition)
obj.partition = list(partition)
obj.integer = integer
return obj
def prev_lex(self):
"""Return the previous partition of the integer, n, in lexical order,
wrapping around to [1, ..., 1] if the partition is [n].
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> p = IntegerPartition([4])
>>> print(p.prev_lex())
[3, 1]
>>> p.partition > p.prev_lex().partition
True
"""
d = defaultdict(int)
d.update(self.as_dict())
keys = self._keys
if keys == [1]:
return IntegerPartition({self.integer: 1})
if keys[-1] != 1:
d[keys[-1]] -= 1
if keys[-1] == 2:
d[1] = 2
else:
d[keys[-1] - 1] = d[1] = 1
else:
d[keys[-2]] -= 1
left = d[1] + keys[-2]
new = keys[-2]
d[1] = 0
while left:
new -= 1
if left - new >= 0:
d[new] += left//new
left -= d[new]*new
return IntegerPartition(self.integer, d)
def next_lex(self):
"""Return the next partition of the integer, n, in lexical order,
wrapping around to [n] if the partition is [1, ..., 1].
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> p = IntegerPartition([3, 1])
>>> print(p.next_lex())
[4]
>>> p.partition < p.next_lex().partition
True
"""
d = defaultdict(int)
d.update(self.as_dict())
key = self._keys
a = key[-1]
if a == self.integer:
d.clear()
d[1] = self.integer
elif a == 1:
if d[a] > 1:
d[a + 1] += 1
d[a] -= 2
else:
b = key[-2]
d[b + 1] += 1
d[1] = (d[b] - 1)*b
d[b] = 0
else:
if d[a] > 1:
if len(key) == 1:
d.clear()
d[a + 1] = 1
d[1] = self.integer - a - 1
else:
a1 = a + 1
d[a1] += 1
d[1] = d[a]*a - a1
d[a] = 0
else:
b = key[-2]
b1 = b + 1
d[b1] += 1
need = d[b]*b + d[a]*a - b1
d[a] = d[b] = 0
d[1] = need
return IntegerPartition(self.integer, d)
def as_dict(self):
"""Return the partition as a dictionary whose keys are the
partition integers and the values are the multiplicity of that
integer.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> IntegerPartition([1]*3 + [2] + [3]*4).as_dict()
{1: 3, 2: 1, 3: 4}
"""
if self._dict is None:
groups = group(self.partition, multiple=False)
self._keys = [g[0] for g in groups]
self._dict = dict(groups)
return self._dict
@property
def conjugate(self):
"""
Computes the conjugate partition of itself.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> a = IntegerPartition([6, 3, 3, 2, 1])
>>> a.conjugate
[5, 4, 3, 1, 1, 1]
"""
j = 1
temp_arr = list(self.partition) + [0]
k = temp_arr[0]
b = [0]*k
while k > 0:
while k > temp_arr[j]:
b[k - 1] = j
k -= 1
j += 1
return b
def __lt__(self, other):
"""Return True if self is less than other when the partition
is listed from smallest to biggest.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> a = IntegerPartition([3, 1])
>>> a < a
False
>>> b = a.next_lex()
>>> a < b
True
>>> a == b
False
"""
return list(reversed(self.partition)) < list(reversed(other.partition))
def __le__(self, other):
"""Return True if self is less than other when the partition
is listed from smallest to biggest.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> a = IntegerPartition([4])
>>> a <= a
True
"""
return list(reversed(self.partition)) <= list(reversed(other.partition))
def as_ferrers(self, char='#'):
"""
Prints the Ferrers diagram of a partition.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> print(IntegerPartition([1, 1, 5]).as_ferrers())
#####
#
#
"""
return "\n".join([char*i for i in self.partition])
def __str__(self):
return str(list(self.partition))
def random_integer_partition(n, seed=None):
"""
Generates a random integer partition summing to ``n`` as a list
of reverse-sorted integers.
Examples
========
>>> from sympy.combinatorics.partitions import random_integer_partition
For the following, a seed is given so a known value can be shown; in
practice, the seed would not be given.
>>> random_integer_partition(100, seed=[1, 1, 12, 1, 2, 1, 85, 1])
[85, 12, 2, 1]
>>> random_integer_partition(10, seed=[1, 2, 3, 1, 5, 1])
[5, 3, 1, 1]
>>> random_integer_partition(1)
[1]
"""
from sympy.testing.randtest import _randint
n = as_int(n)
if n < 1:
raise ValueError('n must be a positive integer')
randint = _randint(seed)
partition = []
while (n > 0):
k = randint(1, n)
mult = randint(1, n//k)
partition.append((k, mult))
n -= k*mult
partition.sort(reverse=True)
partition = flatten([[k]*m for k, m in partition])
return partition
def RGS_generalized(m):
"""
Computes the m + 1 generalized unrestricted growth strings
and returns them as rows of a matrix.
Examples
========
>>> from sympy.combinatorics.partitions import RGS_generalized
>>> RGS_generalized(6)
Matrix([
[ 1, 1, 1, 1, 1, 1, 1],
[ 1, 2, 3, 4, 5, 6, 0],
[ 2, 5, 10, 17, 26, 0, 0],
[ 5, 15, 37, 77, 0, 0, 0],
[ 15, 52, 151, 0, 0, 0, 0],
[ 52, 203, 0, 0, 0, 0, 0],
[203, 0, 0, 0, 0, 0, 0]])
"""
d = zeros(m + 1)
for i in range(0, m + 1):
d[0, i] = 1
for i in range(1, m + 1):
for j in range(m):
if j <= m - i:
d[i, j] = j * d[i - 1, j] + d[i - 1, j + 1]
else:
d[i, j] = 0
return d
def RGS_enum(m):
"""
RGS_enum computes the total number of restricted growth strings
possible for a superset of size m.
Examples
========
>>> from sympy.combinatorics.partitions import RGS_enum
>>> from sympy.combinatorics.partitions import Partition
>>> RGS_enum(4)
15
>>> RGS_enum(5)
52
>>> RGS_enum(6)
203
We can check that the enumeration is correct by actually generating
the partitions. Here, the 15 partitions of 4 items are generated:
>>> a = Partition(list(range(4)))
>>> s = set()
>>> for i in range(20):
... s.add(a)
... a += 1
...
>>> assert len(s) == 15
"""
if (m < 1):
return 0
elif (m == 1):
return 1
else:
return bell(m)
def RGS_unrank(rank, m):
"""
Gives the unranked restricted growth string for a given
superset size.
Examples
========
>>> from sympy.combinatorics.partitions import RGS_unrank
>>> RGS_unrank(14, 4)
[0, 1, 2, 3]
>>> RGS_unrank(0, 4)
[0, 0, 0, 0]
"""
if m < 1:
raise ValueError("The superset size must be >= 1")
if rank < 0 or RGS_enum(m) <= rank:
raise ValueError("Invalid arguments")
L = [1] * (m + 1)
j = 1
D = RGS_generalized(m)
for i in range(2, m + 1):
v = D[m - i, j]
cr = j*v
if cr <= rank:
L[i] = j + 1
rank -= cr
j += 1
else:
L[i] = int(rank / v + 1)
rank %= v
return [x - 1 for x in L[1:]]
def RGS_rank(rgs):
"""
Computes the rank of a restricted growth string.
Examples
========
>>> from sympy.combinatorics.partitions import RGS_rank, RGS_unrank
>>> RGS_rank([0, 1, 2, 1, 3])
42
>>> RGS_rank(RGS_unrank(4, 7))
4
"""
rgs_size = len(rgs)
rank = 0
D = RGS_generalized(rgs_size)
for i in range(1, rgs_size):
n = len(rgs[(i + 1):])
m = max(rgs[0:i])
rank += D[n, m + 1] * rgs[i]
return rank
|
8623821dff0c0e6e9e487fee93f54fbf2677ac5735901ac5dd6cc51e85fff678 | from sympy.combinatorics.permutations import Permutation, _af_invert, _af_rmul
from sympy.ntheory import isprime
rmul = Permutation.rmul
_af_new = Permutation._af_new
############################################
#
# Utilities for computational group theory
#
############################################
def _base_ordering(base, degree):
r"""
Order `\{0, 1, ..., n-1\}` so that base points come first and in order.
Parameters
==========
``base`` : the base
``degree`` : the degree of the associated permutation group
Returns
=======
A list ``base_ordering`` such that ``base_ordering[point]`` is the
position of ``point`` in the ordering.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.util import _base_ordering
>>> S = SymmetricGroup(4)
>>> S.schreier_sims()
>>> _base_ordering(S.base, S.degree)
[0, 1, 2, 3]
Notes
=====
This is used in backtrack searches, when we define a relation `<<` on
the underlying set for a permutation group of degree `n`,
`\{0, 1, ..., n-1\}`, so that if `(b_1, b_2, ..., b_k)` is a base we
have `b_i << b_j` whenever `i<j` and `b_i << a` for all
`i\in\{1,2, ..., k\}` and `a` is not in the base. The idea is developed
and applied to backtracking algorithms in [1], pp.108-132. The points
that are not in the base are taken in increasing order.
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
"""
base_len = len(base)
ordering = [0]*degree
for i in range(base_len):
ordering[base[i]] = i
current = base_len
for i in range(degree):
if i not in base:
ordering[i] = current
current += 1
return ordering
def _check_cycles_alt_sym(perm):
"""
Checks for cycles of prime length p with n/2 < p < n-2.
Explanation
===========
Here `n` is the degree of the permutation. This is a helper function for
the function is_alt_sym from sympy.combinatorics.perm_groups.
Examples
========
>>> from sympy.combinatorics.util import _check_cycles_alt_sym
>>> from sympy.combinatorics.permutations import Permutation
>>> a = Permutation([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [11, 12]])
>>> _check_cycles_alt_sym(a)
False
>>> b = Permutation([[0, 1, 2, 3, 4, 5, 6], [7, 8, 9, 10]])
>>> _check_cycles_alt_sym(b)
True
See Also
========
sympy.combinatorics.perm_groups.PermutationGroup.is_alt_sym
"""
n = perm.size
af = perm.array_form
current_len = 0
total_len = 0
used = set()
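# Trace each cycle starting from an unvisited point, counting its length;
# stop as soon as a cycle of prime length strictly between n/2 and n - 2
# is found.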
for i in range(n//2):
if i not in used and i < n//2 - total_len:
current_len = 1
used.add(i)
j = i
while af[j] != i:
current_len += 1
j = af[j]
used.add(j)
total_len += current_len
if current_len > n//2 and current_len < n - 2 and isprime(current_len):
return True
return False
def _distribute_gens_by_base(base, gens):
r"""
Distribute the group elements ``gens`` by membership in basic stabilizers.
Explanation
===========
Notice that for a base `(b_1, b_2, ..., b_k)`, the basic stabilizers
are defined as `G^{(i)} = G_{b_1, ..., b_{i-1}}` for
`i \in\{1, 2, ..., k\}`.
Parameters
==========
``base`` : a sequence of points in `\{0, 1, ..., n-1\}`
``gens`` : a list of elements of a permutation group of degree `n`.
Returns
=======
List of length `k`, where `k` is
the length of ``base``. The `i`-th entry contains those elements in
``gens`` which fix the first `i` elements of ``base`` (so that the
`0`-th entry is equal to ``gens`` itself). If no element fixes the first
`i` elements of ``base``, the `i`-th element is set to a list containing
the identity element.
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> from sympy.combinatorics.util import _distribute_gens_by_base
>>> D = DihedralGroup(3)
>>> D.schreier_sims()
>>> D.strong_gens
[(0 1 2), (0 2), (1 2)]
>>> D.base
[0, 1]
>>> _distribute_gens_by_base(D.base, D.strong_gens)
[[(0 1 2), (0 2), (1 2)],
[(1 2)]]
See Also
========
_strong_gens_from_distr, _orbits_transversals_from_bsgs,
_handle_precomputed_bsgs
"""
base_len = len(base)
degree = gens[0].size
stabs = [[] for _ in range(base_len)]
max_stab_index = 0
for gen in gens:
j = 0
while j < base_len - 1 and gen._array_form[base[j]] == base[j]:
j += 1
if j > max_stab_index:
max_stab_index = j
for k in range(j + 1):
stabs[k].append(gen)
for i in range(max_stab_index + 1, base_len):
stabs[i].append(_af_new(list(range(degree))))
return stabs
def _handle_precomputed_bsgs(base, strong_gens, transversals=None,
basic_orbits=None, strong_gens_distr=None):
"""
Calculate BSGS-related structures from those present.
Explanation
===========
The base and strong generating set must be provided; if any of the
transversals, basic orbits or distributed strong generators are not
provided, they will be calculated from the base and strong generating set.
Parameters
==========
``base`` - the base
``strong_gens`` - the strong generators
``transversals`` - basic transversals
``basic_orbits`` - basic orbits
``strong_gens_distr`` - strong generators distributed by membership in basic
stabilizers
Returns
=======
``(transversals, basic_orbits, strong_gens_distr)`` where ``transversals``
are the basic transversals, ``basic_orbits`` are the basic orbits, and
``strong_gens_distr`` are the strong generators distributed by membership
in basic stabilizers.
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> from sympy.combinatorics.util import _handle_precomputed_bsgs
>>> D = DihedralGroup(3)
>>> D.schreier_sims()
>>> _handle_precomputed_bsgs(D.base, D.strong_gens,
... basic_orbits=D.basic_orbits)
([{0: (2), 1: (0 1 2), 2: (0 2)}, {1: (2), 2: (1 2)}], [[0, 1, 2], [1, 2]], [[(0 1 2), (0 2), (1 2)], [(1 2)]])
See Also
========
_orbits_transversals_from_bsgs, _distribute_gens_by_base
"""
if strong_gens_distr is None:
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
if transversals is None:
if basic_orbits is None:
basic_orbits, transversals = \
_orbits_transversals_from_bsgs(base, strong_gens_distr)
else:
transversals = \
_orbits_transversals_from_bsgs(base, strong_gens_distr,
transversals_only=True)
else:
if basic_orbits is None:
base_len = len(base)
basic_orbits = [None]*base_len
for i in range(base_len):
basic_orbits[i] = list(transversals[i].keys())
return transversals, basic_orbits, strong_gens_distr
def _orbits_transversals_from_bsgs(base, strong_gens_distr,
transversals_only=False, slp=False):
"""
Compute basic orbits and transversals from a base and strong generating set.
Explanation
===========
The generators are provided as distributed across the basic stabilizers.
If the optional argument ``transversals_only`` is set to True, only the
transversals are returned.
Parameters
==========
``base`` - The base.
``strong_gens_distr`` - Strong generators distributed by membership in basic
stabilizers.
``transversals_only`` - bool
A flag switching between returning only the
transversals and both orbits and transversals.
``slp`` -
If ``True``, return a list of dictionaries containing the
generator presentations of the elements of the transversals,
i.e. the list of indices of generators from ``strong_gens_distr[i]``
such that their product is the relevant transversal element.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.util import _distribute_gens_by_base
>>> S = SymmetricGroup(3)
>>> S.schreier_sims()
>>> strong_gens_distr = _distribute_gens_by_base(S.base, S.strong_gens)
>>> (S.base, strong_gens_distr)
([0, 1], [[(0 1 2), (2)(0 1), (1 2)], [(1 2)]])
See Also
========
_distribute_gens_by_base, _handle_precomputed_bsgs
"""
from sympy.combinatorics.perm_groups import _orbit_transversal
base_len = len(base)
degree = strong_gens_distr[0][0].size
transversals = [None]*base_len
slps = [None]*base_len
if transversals_only is False:
basic_orbits = [None]*base_len
for i in range(base_len):
transversals[i], slps[i] = _orbit_transversal(degree, strong_gens_distr[i],
base[i], pairs=True, slp=True)
transversals[i] = dict(transversals[i])
if transversals_only is False:
basic_orbits[i] = list(transversals[i].keys())
if transversals_only:
return transversals
else:
if not slp:
return basic_orbits, transversals
return basic_orbits, transversals, slps
def _remove_gens(base, strong_gens, basic_orbits=None, strong_gens_distr=None):
"""
Remove redundant generators from a strong generating set.
Parameters
==========
``base`` - a base
``strong_gens`` - a strong generating set relative to ``base``
``basic_orbits`` - basic orbits
``strong_gens_distr`` - strong generators distributed by membership in basic
stabilizers
Returns
=======
A strong generating set with respect to ``base`` which is a subset of
``strong_gens``.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.util import _remove_gens
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> S = SymmetricGroup(15)
>>> base, strong_gens = S.schreier_sims_incremental()
>>> new_gens = _remove_gens(base, strong_gens)
>>> len(new_gens)
14
>>> _verify_bsgs(S, base, new_gens)
True
Notes
=====
This procedure is outlined in [1], p. 95.
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
"""
from sympy.combinatorics.perm_groups import _orbit
base_len = len(base)
degree = strong_gens[0].size
if strong_gens_distr is None:
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
if basic_orbits is None:
basic_orbits = []
for i in range(base_len):
basic_orbit = _orbit(degree, strong_gens_distr[i], base[i])
basic_orbits.append(basic_orbit)
strong_gens_distr.append([])
res = strong_gens[:]
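# Work from the bottom of the stabilizer chain upwards: a generator present
# at level i but not at level i + 1 is redundant (and removed) if the
# remaining generators still produce the same basic orbit of base[i].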
for i in range(base_len - 1, -1, -1):
gens_copy = strong_gens_distr[i][:]
for gen in strong_gens_distr[i]:
if gen not in strong_gens_distr[i + 1]:
temp_gens = gens_copy[:]
temp_gens.remove(gen)
if temp_gens == []:
continue
temp_orbit = _orbit(degree, temp_gens, base[i])
if temp_orbit == basic_orbits[i]:
gens_copy.remove(gen)
res.remove(gen)
return res
def _strip(g, base, orbits, transversals):
"""
Attempt to decompose a permutation using a (possibly partial) BSGS
structure.
Explanation
===========
This is done by treating the sequence ``base`` as an actual base, and
the orbits ``orbits`` and transversals ``transversals`` as basic orbits and
transversals relative to it.
This process is called "sifting". A sift is unsuccessful when a certain
orbit element is not found or when after the sift the decomposition
doesn't end with the identity element.
The argument ``transversals`` is a list of dictionaries that provides
transversal elements for the orbits ``orbits``.
Parameters
==========
``g`` - permutation to be decomposed
``base`` - sequence of points
``orbits`` - a list in which the ``i``-th entry is an orbit of ``base[i]``
under some subgroup of the pointwise stabilizer of
``base[0], base[1], ..., base[i - 1]``. The groups themselves are implicit
in this function since the only information we need is encoded in the orbits
and transversals.
``transversals`` - a list of orbit transversals associated with the orbits
``orbits``.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.util import _strip
>>> S = SymmetricGroup(5)
>>> S.schreier_sims()
>>> g = Permutation([0, 2, 3, 1, 4])
>>> _strip(g, S.base, S.basic_orbits, S.basic_transversals)
((4), 5)
Notes
=====
The algorithm is described in [1], pp. 89-90. The reason for returning
both the current state of the element being decomposed and the level
at which the sifting ends is that they provide important information for
the randomized version of the Schreier-Sims algorithm.
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E."Handbook of computational group theory"
See Also
========
sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims
sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims_random
"""
h = g._array_form
base_len = len(base)
for i in range(base_len):
beta = h[base[i]]
if beta == base[i]:
continue
if beta not in orbits[i]:
return _af_new(h), i + 1
u = transversals[i][beta]._array_form
h = _af_rmul(_af_invert(u), h)
return _af_new(h), base_len + 1
def _strip_af(h, base, orbits, transversals, j, slp=[], slps={}):
"""
optimized _strip, with h, transversals and result in array form
if the stripped elements is the identity, it returns False, base_len + 1
j h[base[i]] == base[i] for i <= j
"""
base_len = len(base)
for i in range(j+1, base_len):
beta = h[base[i]]
if beta == base[i]:
continue
if beta not in orbits[i]:
if not slp:
return h, i + 1
return h, i + 1, slp
u = transversals[i][beta]
if h == u:
if not slp:
return False, base_len + 1
return False, base_len + 1, slp
h = _af_rmul(_af_invert(u), h)
if slp:
u_slp = slps[i][beta][:]
u_slp.reverse()
u_slp = [(i, (g,)) for g in u_slp]
slp = u_slp + slp
if not slp:
return h, base_len + 1
return h, base_len + 1, slp
def _strong_gens_from_distr(strong_gens_distr):
"""
Retrieve strong generating set from generators of basic stabilizers.
This is just the union of the generators of the first and second basic
stabilizers.
Parameters
==========
``strong_gens_distr`` - strong generators distributed by membership in basic
stabilizers
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.util import (_strong_gens_from_distr,
... _distribute_gens_by_base)
>>> S = SymmetricGroup(3)
>>> S.schreier_sims()
>>> S.strong_gens
[(0 1 2), (2)(0 1), (1 2)]
>>> strong_gens_distr = _distribute_gens_by_base(S.base, S.strong_gens)
>>> _strong_gens_from_distr(strong_gens_distr)
[(0 1 2), (2)(0 1), (1 2)]
See Also
========
_distribute_gens_by_base
"""
if len(strong_gens_distr) == 1:
return strong_gens_distr[0][:]
else:
result = strong_gens_distr[0]
for gen in strong_gens_distr[1]:
if gen not in result:
result.append(gen)
return result
|
8026745e91aed7a9e06c1461e66801e858648459e3b3010d6f91f590b762c781 | from sympy.combinatorics.group_constructs import DirectProduct
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.combinatorics.permutations import Permutation
_af_new = Permutation._af_new
def AbelianGroup(*cyclic_orders):
"""
Returns the direct product of cyclic groups with the given orders.
Explanation
===========
According to the structure theorem for finite abelian groups ([1]),
every finite abelian group can be written as the direct product of
finitely many cyclic groups.
Examples
========
>>> from sympy.combinatorics.named_groups import AbelianGroup
>>> AbelianGroup(3, 4)
PermutationGroup([
(6)(0 1 2),
(3 4 5 6)])
>>> _.is_group
True
See Also
========
DirectProduct
References
==========
.. [1] http://groupprops.subwiki.org/wiki/Structure_theorem_for_finitely_generated_abelian_groups
"""
groups = []
degree = 0
order = 1
for size in cyclic_orders:
degree += size
order *= size
groups.append(CyclicGroup(size))
G = DirectProduct(*groups)
G._is_abelian = True
G._degree = degree
G._order = order
return G
def AlternatingGroup(n):
"""
Generates the alternating group on ``n`` elements as a permutation group.
Explanation
===========
For ``n > 2``, the generators taken are ``(0 1 2), (0 1 2 ... n-1)`` for
``n`` odd
and ``(0 1 2), (1 2 ... n-1)`` for ``n`` even (See [1], p.31, ex.6.9.).
After the group is generated, some of its basic properties are set.
The cases ``n = 1, 2`` are handled separately.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> G = AlternatingGroup(4)
>>> G.is_group
True
>>> a = list(G.generate_dimino())
>>> len(a)
12
>>> all(perm.is_even for perm in a)
True
See Also
========
SymmetricGroup, CyclicGroup, DihedralGroup
References
==========
.. [1] Armstrong, M. "Groups and Symmetry"
"""
# small cases are special
if n in (1, 2):
return PermutationGroup([Permutation([0])])
a = list(range(n))
a[0], a[1], a[2] = a[1], a[2], a[0]
gen1 = a
if n % 2:
a = list(range(1, n))
a.append(0)
gen2 = a
else:
a = list(range(2, n))
a.append(1)
a.insert(0, 0)
gen2 = a
gens = [gen1, gen2]
if gen1 == gen2:
gens = gens[:1]
G = PermutationGroup([_af_new(a) for a in gens], dups=False)
if n < 4:
G._is_abelian = True
G._is_nilpotent = True
else:
G._is_abelian = False
G._is_nilpotent = False
if n < 5:
G._is_solvable = True
else:
G._is_solvable = False
G._degree = n
G._is_transitive = True
G._is_alt = True
return G
def CyclicGroup(n):
"""
Generates the cyclic group of order ``n`` as a permutation group.
Explanation
===========
The generator taken is the ``n``-cycle ``(0 1 2 ... n-1)``
(in cycle notation). After the group is generated, some of its basic
properties are set.
Examples
========
>>> from sympy.combinatorics.named_groups import CyclicGroup
>>> G = CyclicGroup(6)
>>> G.is_group
True
>>> G.order()
6
>>> list(G.generate_schreier_sims(af=True))
[[0, 1, 2, 3, 4, 5], [1, 2, 3, 4, 5, 0], [2, 3, 4, 5, 0, 1],
[3, 4, 5, 0, 1, 2], [4, 5, 0, 1, 2, 3], [5, 0, 1, 2, 3, 4]]
See Also
========
SymmetricGroup, DihedralGroup, AlternatingGroup
"""
a = list(range(1, n))
a.append(0)
gen = _af_new(a)
G = PermutationGroup([gen])
G._is_abelian = True
G._is_nilpotent = True
G._is_solvable = True
G._degree = n
G._is_transitive = True
G._order = n
return G
def DihedralGroup(n):
r"""
Generates the dihedral group `D_n` as a permutation group.
Explanation
===========
The dihedral group `D_n` is the group of symmetries of the regular
``n``-gon. The generators taken are the ``n``-cycle ``a = (0 1 2 ... n-1)``
(a rotation of the ``n``-gon) and ``b = (0 n-1)(1 n-2)...``
(a reflection of the ``n``-gon) in cycle notation. It is easy to see that
these satisfy ``a**n = b**2 = 1`` and ``bab = ~a`` so they indeed generate
`D_n` (See [1]). After the group is generated, some of its basic properties
are set.
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> G = DihedralGroup(5)
>>> G.is_group
True
>>> a = list(G.generate_dimino())
>>> [perm.cyclic_form for perm in a]
[[], [[0, 1, 2, 3, 4]], [[0, 2, 4, 1, 3]],
[[0, 3, 1, 4, 2]], [[0, 4, 3, 2, 1]], [[0, 4], [1, 3]],
[[1, 4], [2, 3]], [[0, 1], [2, 4]], [[0, 2], [3, 4]],
[[0, 3], [1, 2]]]
See Also
========
SymmetricGroup, CyclicGroup, AlternatingGroup
References
==========
.. [1] https://en.wikipedia.org/wiki/Dihedral_group
"""
# small cases are special
if n == 1:
return PermutationGroup([Permutation([1, 0])])
if n == 2:
return PermutationGroup([Permutation([1, 0, 3, 2]),
Permutation([2, 3, 0, 1]), Permutation([3, 2, 1, 0])])
a = list(range(1, n))
a.append(0)
gen1 = _af_new(a)
a = list(range(n))
a.reverse()
gen2 = _af_new(a)
G = PermutationGroup([gen1, gen2])
# if n is a power of 2, group is nilpotent
if n & (n-1) == 0:
G._is_nilpotent = True
else:
G._is_nilpotent = False
G._is_abelian = False
G._is_solvable = True
G._degree = n
G._is_transitive = True
G._order = 2*n
return G
def SymmetricGroup(n):
"""
Generates the symmetric group on ``n`` elements as a permutation group.
Explanation
===========
The generators taken are the ``n``-cycle
``(0 1 2 ... n-1)`` and the transposition ``(0 1)`` (in cycle notation).
(See [1]). After the group is generated, some of its basic properties
are set.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> G = SymmetricGroup(4)
>>> G.is_group
True
>>> G.order()
24
>>> list(G.generate_schreier_sims(af=True))
[[0, 1, 2, 3], [1, 2, 3, 0], [2, 3, 0, 1], [3, 1, 2, 0], [0, 2, 3, 1],
[1, 3, 0, 2], [2, 0, 1, 3], [3, 2, 0, 1], [0, 3, 1, 2], [1, 0, 2, 3],
[2, 1, 3, 0], [3, 0, 1, 2], [0, 1, 3, 2], [1, 2, 0, 3], [2, 3, 1, 0],
[3, 1, 0, 2], [0, 2, 1, 3], [1, 3, 2, 0], [2, 0, 3, 1], [3, 2, 1, 0],
[0, 3, 2, 1], [1, 0, 3, 2], [2, 1, 0, 3], [3, 0, 2, 1]]
See Also
========
CyclicGroup, DihedralGroup, AlternatingGroup
References
==========
.. [1] https://en.wikipedia.org/wiki/Symmetric_group#Generators_and_relations
"""
if n == 1:
G = PermutationGroup([Permutation([0])])
elif n == 2:
G = PermutationGroup([Permutation([1, 0])])
else:
a = list(range(1, n))
a.append(0)
gen1 = _af_new(a)
a = list(range(n))
a[0], a[1] = a[1], a[0]
gen2 = _af_new(a)
G = PermutationGroup([gen1, gen2])
if n < 3:
G._is_abelian = True
G._is_nilpotent = True
else:
G._is_abelian = False
G._is_nilpotent = False
if n < 5:
G._is_solvable = True
else:
G._is_solvable = False
G._degree = n
G._is_transitive = True
G._is_sym = True
return G
def RubikGroup(n):
"""Return a group of Rubik's cube generators
>>> from sympy.combinatorics.named_groups import RubikGroup
>>> RubikGroup(2).is_group
True
"""
from sympy.combinatorics.generators import rubik
if n <= 1:
raise ValueError("Invalid cube. n has to be greater than 1")
return PermutationGroup(rubik(n))
|
b469c409285f40e99271b4fd2d8158e2101659962af8d7ff68794a281de4705b | from sympy import Integer
from sympy.core import Symbol
from sympy.utilities import public
@public
def approximants(l, X=Symbol('x'), simplify=False):
"""
Return a generator for consecutive Pade approximants for a series.
It can also be used for computing the rational generating function of a
series when possible, since the last approximant returned by the generator
will be the generating function (if any).
The input list can contain more complex expressions than integer or rational
numbers; symbols may also be involved in the computation. An example below
shows how to compute the generating function of the whole Pascal triangle.
The generator can be asked to apply the sympy.simplify function on each
generated term, which will make the computation slower; however it may be
useful when symbols are involved in the expressions.
Examples
========
>>> from sympy.series import approximants
>>> from sympy import lucas, fibonacci, symbols, binomial
>>> g = [lucas(k) for k in range(16)]
>>> [e for e in approximants(g)]
[2, -4/(x - 2), (5*x - 2)/(3*x - 1), (x - 2)/(x**2 + x - 1)]
>>> h = [fibonacci(k) for k in range(16)]
>>> [e for e in approximants(h)]
[x, -x/(x - 1), (x**2 - x)/(2*x - 1), -x/(x**2 + x - 1)]
>>> x, t = symbols("x,t")
>>> p=[sum(binomial(k,i)*x**i for i in range(k+1)) for k in range(16)]
>>> y = approximants(p, t)
>>> for k in range(3): print(next(y))
1
(x + 1)/((-x - 1)*(t*(x + 1) + (x + 1)/(-x - 1)))
nan
>>> y = approximants(p, t, simplify=True)
>>> for k in range(3): print(next(y))
1
-1/(t*(x + 1) - 1)
nan
See Also
========
See function sympy.concrete.guess.guess_generating_function_rational and
function mpmath.pade
"""
p1, q1 = [Integer(1)], [Integer(0)]
p2, q2 = [Integer(0)], [Integer(1)]
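# p1/q1 and p2/q2 hold the two most recent approximants as coefficient
# lists.  Each pass inverts the remaining series (a continued-fraction style
# step), then forms the next approximant as a*p2 + X**b*p1 over
# a*q2 + X**b*q1 before yielding it with denominators cleared.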
while len(l):
b = 0
while l[b]==0:
b += 1
if b == len(l):
return
m = [Integer(1)/l[b]]
for k in range(b+1, len(l)):
s = 0
for j in range(b, k):
s -= l[j+1] * m[b-j-1]
m.append(s/l[b])
l = m
a, l[0] = l[0], 0
p = [0] * max(len(p2), b+len(p1))
q = [0] * max(len(q2), b+len(q1))
for k in range(len(p2)):
p[k] = a*p2[k]
for k in range(b, b+len(p1)):
p[k] += p1[k-b]
for k in range(len(q2)):
q[k] = a*q2[k]
for k in range(b, b+len(q1)):
q[k] += q1[k-b]
while p[-1]==0: p.pop()
while q[-1]==0: q.pop()
p1, p2 = p2, p
q1, q2 = q2, q
# yield result
from sympy import denom, lcm, simplify as simp
c = 1
for x in p:
c = lcm(c, denom(x))
for x in q:
c = lcm(c, denom(x))
out = ( sum(c*e*X**k for k, e in enumerate(p))
/ sum(c*e*X**k for k, e in enumerate(q)) )
if simplify: yield(simp(out))
else: yield out
return
|
d37dcd27945ef004b87aaf9c8667dcb77f8bcf6bc1a9d16a5ed9eb6f1b304f1d | """
Contains the base class for series.
Designed with sequences in mind.
"""
from sympy.core.expr import Expr
from sympy.core.singleton import S
from sympy.core.cache import cacheit
class SeriesBase(Expr):
"""Base Class for series"""
@property
def interval(self):
"""The interval on which the series is defined"""
raise NotImplementedError("(%s).interval" % self)
@property
def start(self):
"""The starting point of the series. This point is included"""
raise NotImplementedError("(%s).start" % self)
@property
def stop(self):
"""The ending point of the series. This point is included"""
raise NotImplementedError("(%s).stop" % self)
@property
def length(self):
"""Length of the series expansion"""
raise NotImplementedError("(%s).length" % self)
@property
def variables(self):
"""Returns a tuple of variables that are bounded"""
return ()
@property
def free_symbols(self):
"""
This method returns the symbols in the object, excluding those
that take on a specific value (i.e. the dummy symbols).
"""
return ({j for i in self.args for j in i.free_symbols}
.difference(self.variables))
@cacheit
def term(self, pt):
"""Term at point pt of a series"""
if pt < self.start or pt > self.stop:
raise IndexError("Index %s out of bounds %s" % (pt, self.interval))
return self._eval_term(pt)
def _eval_term(self, pt):
raise NotImplementedError("The _eval_term method should be added to"
"%s to return series term so it is available"
"when 'term' calls it."
% self.func)
def _ith_point(self, i):
"""
Returns the i'th point of a series
If start point is negative infinity, point is returned from the end.
Assumes the first point to be indexed zero.
Examples
========
TODO
"""
if self.start is S.NegativeInfinity:
initial = self.stop
step = -1
else:
initial = self.start
step = 1
return initial + i*step
def __iter__(self):
i = 0
while i < self.length:
pt = self._ith_point(i)
yield self.term(pt)
i += 1
def __getitem__(self, index):
if isinstance(index, int):
index = self._ith_point(index)
return self.term(index)
elif isinstance(index, slice):
start, stop = index.start, index.stop
if start is None:
start = 0
if stop is None:
stop = self.length
return [self.term(self._ith_point(i)) for i in
range(start, stop, index.step or 1)]
|
72b4a440ad7003a15000a2358ac946eaba40cef5a5cf581964fc76030a78dd08 | from sympy.core.sympify import sympify
def series(expr, x=None, x0=0, n=6, dir="+"):
"""Series expansion of expr around point `x = x0`.
Parameters
==========
expr : Expression
The expression whose series is to be expanded.
x : Symbol
It is the variable of the expression to be calculated.
x0 : Value
The value around which ``x`` is calculated. Can be any value
from ``-oo`` to ``oo``.
n : Value
The number of terms up to which the series is to be expanded.
dir : String, optional
The series-expansion can be bi-directional. If ``dir="+"``,
then (x->x0+). If ``dir="-"``, then (x->x0-). For infinite
``x0`` (``oo`` or ``-oo``), the ``dir`` argument is determined
from the direction of the infinity (i.e., ``dir="-"`` for
``oo``).
Examples
========
>>> from sympy import series, tan, oo
>>> from sympy.abc import x
>>> f = tan(x)
>>> series(f, x, 2, 6, "+")
tan(2) + (1 + tan(2)**2)*(x - 2) + (x - 2)**2*(tan(2)**3 + tan(2)) +
(x - 2)**3*(1/3 + 4*tan(2)**2/3 + tan(2)**4) + (x - 2)**4*(tan(2)**5 +
5*tan(2)**3/3 + 2*tan(2)/3) + (x - 2)**5*(2/15 + 17*tan(2)**2/15 +
2*tan(2)**4 + tan(2)**6) + O((x - 2)**6, (x, 2))
>>> series(f, x, 2, 3, "-")
tan(2) + (2 - x)*(-tan(2)**2 - 1) + (2 - x)**2*(tan(2)**3 + tan(2))
+ O((x - 2)**3, (x, 2))
>>> series(f, x, 2, oo, "+")
Traceback (most recent call last):
...
TypeError: 'Infinity' object cannot be interpreted as an integer
Returns
=======
Expr
Series expansion of the expression about x0
See Also
========
sympy.core.expr.Expr.series: See the docstring of Expr.series() for complete details of this wrapper.
"""
expr = sympify(expr)
return expr.series(x, x0, n, dir)
|
7075464506b36b86b2a88ea38ba38736475010c57b064a387f8bee8366ee1106 | from sympy.core.sympify import sympify
def aseries(expr, x=None, n=6, bound=0, hir=False):
"""
See the docstring of Expr.aseries() for complete details of this wrapper.
"""
expr = sympify(expr)
return expr.aseries(x, n, bound, hir)
|
ecbf10de75ef3b36145a643cbaec3bd045061e5f9afb83f83953214eb1a9337d | """
Convergence acceleration / extrapolation methods for series and
sequences.
References:
Carl M. Bender & Steven A. Orszag, "Advanced Mathematical Methods for
Scientists and Engineers: Asymptotic Methods and Perturbation Theory",
Springer 1999. (Shanks transformation: pp. 368-375, Richardson
extrapolation: pp. 375-377.)
"""
from sympy import factorial, Integer, S
def richardson(A, k, n, N):
"""
Calculate an approximation for lim k->oo A(k) using Richardson
extrapolation with the terms A(n), A(n+1), ..., A(n+N+1).
Choosing N ~= 2*n often gives good results.
A simple example is to calculate exp(1) using the limit definition.
This limit converges slowly; n = 100 only produces two accurate
digits:
>>> from sympy.abc import n
>>> e = (1 + 1/n)**n
>>> print(round(e.subs(n, 100).evalf(), 10))
2.7048138294
Richardson extrapolation with 11 appropriately chosen terms gives
a value that is accurate to the indicated precision:
>>> from sympy import E
>>> from sympy.series.acceleration import richardson
>>> print(round(richardson(e, n, 10, 20).evalf(), 10))
2.7182818285
>>> print(round(E.evalf(), 10))
2.7182818285
Another useful application is to speed up convergence of series.
Computing 100 terms of the zeta(2) series 1/k**2 yields only
two accurate digits:
>>> from sympy.abc import k, n
>>> from sympy import Sum
>>> A = Sum(k**-2, (k, 1, n))
>>> print(round(A.subs(n, 100).evalf(), 10))
1.6349839002
Richardson extrapolation performs much better:
>>> from sympy import pi
>>> print(round(richardson(A, n, 10, 20).evalf(), 10))
1.6449340668
>>> print(round(((pi**2)/6).evalf(), 10)) # Exact value
1.6449340668
"""
s = S.Zero
for j in range(0, N + 1):
s += A.subs(k, Integer(n + j)).doit() * (n + j)**N * (-1)**(j + N) / \
(factorial(j) * factorial(N - j))
return s
def shanks(A, k, n, m=1):
"""
Calculate an approximation for lim k->oo A(k) using the n-term Shanks
transformation S(A)(n). With m > 1, calculate the m-fold recursive
Shanks transformation S(S(...S(A)...))(n).
The Shanks transformation is useful for summing Taylor series that
converge slowly near a pole or singularity, e.g. for log(2):
>>> from sympy.abc import k, n
>>> from sympy import Sum, Integer
>>> from sympy.series.acceleration import shanks
>>> A = Sum(Integer(-1)**(k+1) / k, (k, 1, n))
>>> print(round(A.subs(n, 100).doit().evalf(), 10))
0.6881721793
>>> print(round(shanks(A, n, 25).evalf(), 10))
0.6931396564
>>> print(round(shanks(A, n, 25, 5).evalf(), 10))
0.6931471806
The correct value is 0.6931471805599453094172321215.
"""
table = [A.subs(k, Integer(j)).doit() for j in range(n + m + 2)]
table2 = table[:]
for i in range(1, m + 1):
for j in range(i, n + m + 1):
x, y, z = table[j - 1], table[j], table[j + 1]
table2[j] = (z*x - y**2) / (z + x - 2*y)
table = table2[:]
return table[n]
|
7e6503609ee98d51720d7e261f57f930c5a147cf7bbcad96b064446db53698d3 | """
Limits
======
Implemented according to the PhD thesis
http://www.cybertester.com/data/gruntz.pdf, which contains very thorough
descriptions of the algorithm including many examples. We summarize here
the gist of it.
All functions are sorted according to how rapidly varying they are at
infinity using the following rules. Any two functions f and g can be
compared using the properties of L:
L=lim log|f(x)| / log|g(x)| (for x -> oo)
We define >, <, ~ according to::
1. f > g .... L=+-oo
we say that:
- f is greater than any power of g
- f is more rapidly varying than g
- f goes to infinity/zero faster than g
2. f < g .... L=0
we say that:
- f is lower than any power of g
3. f ~ g .... L!=0, +-oo
we say that:
- both f and g are bounded from above and below by suitable integral
powers of the other
Examples
========
::
2 < x < exp(x) < exp(x**2) < exp(exp(x))
2 ~ 3 ~ -5
x ~ x**2 ~ x**3 ~ 1/x ~ x**m ~ -x
exp(x) ~ exp(-x) ~ exp(2x) ~ exp(x)**2 ~ exp(x+exp(-x))
f ~ 1/f
So we can divide all the functions into comparability classes (x and x^2
belong to one class, exp(x) and exp(-x) belong to some other class). In
principle, we could compare any two functions, but in our algorithm, we
don't compare anything below the class 2~3~-5 (for example log(x) is
below this), so we set 2~3~-5 as the lowest comparability class.
Given the function f, we find the list of most rapidly varying (mrv set)
subexpressions of it. This list belongs to the same comparability class.
Let's say it is {exp(x), exp(2x)}. Using the rule f ~ 1/f we find an
element "w" (either from the list or a new one) from the same
comparability class which goes to zero at infinity. In our example we
set w=exp(-x) (but we could also set w=exp(-2x) or w=exp(-3x) ...). We
rewrite the mrv set using w, in our case {1/w, 1/w^2}, and substitute it
into f. Then we expand f into a series in w::
f = c0*w^e0 + c1*w^e1 + ... + O(w^en), where e0<e1<...<en, c0!=0
but for x->oo, lim f = lim c0*w^e0, because all the other terms go to zero,
because w goes to zero faster than the ci and ei. So::
for e0>0, lim f = 0
for e0<0, lim f = +-oo (the sign depends on the sign of c0)
for e0=0, lim f = lim c0
We need to recursively compute limits at several places of the algorithm, but
as is shown in the PhD thesis, it always finishes.
Important functions from the implementation:
compare(a, b, x) compares "a" and "b" by computing the limit L.
mrv(e, x) returns list of most rapidly varying (mrv) subexpressions of "e"
rewrite(e, Omega, x, wsym) rewrites "e" in terms of w
leadterm(f, x) returns the lowest power term in the series of f
mrv_leadterm(e, x) returns the lead term (c0, e0) for e
limitinf(e, x) computes lim e (for x->oo)
limit(e, z, z0) computes any limit by converting it to the case x->oo
All the functions are really simple and straightforward except
rewrite(), which is the most difficult/complex part of the algorithm.
When the algorithm fails, the bugs are usually in the series expansion
(i.e. in SymPy) or in rewrite.
This code is an almost exact rewrite of the Maple code inside the Gruntz
thesis.
Debugging
---------
Because the gruntz algorithm is highly recursive, it's difficult to
figure out what went wrong inside a debugger. Instead, turn on nice
debug prints by defining the environment variable SYMPY_DEBUG. For
example:
[user@localhost]: SYMPY_DEBUG=True ./bin/isympy
In [1]: limit(sin(x)/x, x, 0)
limitinf(_x*sin(1/_x), _x) = 1
+-mrv_leadterm(_x*sin(1/_x), _x) = (1, 0)
| +-mrv(_x*sin(1/_x), _x) = set([_x])
| | +-mrv(_x, _x) = set([_x])
| | +-mrv(sin(1/_x), _x) = set([_x])
| | +-mrv(1/_x, _x) = set([_x])
| | +-mrv(_x, _x) = set([_x])
| +-mrv_leadterm(exp(_x)*sin(exp(-_x)), _x, set([exp(_x)])) = (1, 0)
| +-rewrite(exp(_x)*sin(exp(-_x)), set([exp(_x)]), _x, _w) = (1/_w*sin(_w), -_x)
| +-sign(_x, _x) = 1
| +-mrv_leadterm(1, _x) = (1, 0)
+-sign(0, _x) = 0
+-limitinf(1, _x) = 1
And check manually which line is wrong. Then go to the source code and
debug this function to figure out the exact problem.
"""
from sympy import cacheit
from sympy.core import Basic, S, oo, I, Dummy, Wild, Mul
from sympy.core.compatibility import reduce
from sympy.functions import log, exp
from sympy.series.order import Order
from sympy.simplify.powsimp import powsimp, powdenest
from sympy.utilities.misc import debug_decorator as debug
from sympy.utilities.timeutils import timethis
timeit = timethis('gruntz')
def compare(a, b, x):
"""Returns "<" if a<b, "=" for a == b, ">" for a>b"""
# log(exp(...)) must always be simplified here for termination
la, lb = log(a), log(b)
if isinstance(a, Basic) and isinstance(a, exp):
la = a.args[0]
if isinstance(b, Basic) and isinstance(b, exp):
lb = b.args[0]
c = limitinf(la/lb, x)
if c == 0:
return "<"
elif c.is_infinite:
return ">"
else:
return "="
class SubsSet(dict):
"""
Stores (expr, dummy) pairs, and how to rewrite expr-s.
The gruntz algorithm needs to rewrite certain expressions in term of a new
variable w. We cannot use subs, because it is just too smart for us. For
example::
> Omega=[exp(exp(_p - exp(-_p))/(1 - 1/_p)), exp(exp(_p))]
> O2=[exp(-exp(_p) + exp(-exp(-_p))*exp(_p)/(1 - 1/_p))/_w, 1/_w]
> e = exp(exp(_p - exp(-_p))/(1 - 1/_p)) - exp(exp(_p))
> e.subs(Omega[0],O2[0]).subs(Omega[1],O2[1])
-1/w + exp(exp(p)*exp(-exp(-p))/(1 - 1/p))
is really not what we want!
So we do it the hard way and keep track of all the things we potentially
want to substitute by dummy variables. Consider the expression::
exp(x - exp(-x)) + exp(x) + x.
The mrv set is {exp(x), exp(-x), exp(x - exp(-x))}.
We introduce corresponding dummy variables d1, d2, d3 and rewrite::
d3 + d1 + x.
This class first of all keeps track of the mapping expr->variable, i.e.
will at this stage be a dictionary::
{exp(x): d1, exp(-x): d2, exp(x - exp(-x)): d3}.
[It turns out to be more convenient this way round.]
But sometimes expressions in the mrv set have other expressions from the
mrv set as subexpressions, and we need to keep track of that as well. In
this case, d3 is really exp(x - d2), so rewrites at this stage is::
{d3: exp(x-d2)}.
The function rewrite uses all this information to correctly rewrite our
expression in terms of w. In this case w can be chosen to be exp(-x),
i.e. d2. The correct rewriting then is::
exp(-w)/w + 1/w + x.
"""
def __init__(self):
self.rewrites = {}
def __repr__(self):
return super().__repr__() + ', ' + self.rewrites.__repr__()
def __getitem__(self, key):
if key not in self:
self[key] = Dummy()
return dict.__getitem__(self, key)
def do_subs(self, e):
"""Substitute the variables with expressions"""
for expr, var in self.items():
e = e.xreplace({var: expr})
return e
def meets(self, s2):
"""Tell whether or not self and s2 have non-empty intersection"""
return set(self.keys()).intersection(list(s2.keys())) != set()
def union(self, s2, exps=None):
"""Compute the union of self and s2, adjusting exps"""
res = self.copy()
tr = {}
for expr, var in s2.items():
if expr in self:
if exps:
exps = exps.xreplace({var: res[expr]})
tr[var] = res[expr]
else:
res[expr] = var
for var, rewr in s2.rewrites.items():
res.rewrites[var] = rewr.xreplace(tr)
return res, exps
def copy(self):
"""Create a shallow copy of SubsSet"""
r = SubsSet()
r.rewrites = self.rewrites.copy()
for expr, var in self.items():
r[expr] = var
return r
@debug
def mrv(e, x):
"""Returns a SubsSet of most rapidly varying (mrv) subexpressions of 'e',
and e rewritten in terms of these"""
e = powsimp(e, deep=True, combine='exp')
if not isinstance(e, Basic):
raise TypeError("e should be an instance of Basic")
if not e.has(x):
return SubsSet(), e
elif e == x:
s = SubsSet()
return s, s[x]
elif e.is_Mul or e.is_Add:
i, d = e.as_independent(x) # throw away x-independent terms
if d.func != e.func:
s, expr = mrv(d, x)
return s, e.func(i, expr)
a, b = d.as_two_terms()
s1, e1 = mrv(a, x)
s2, e2 = mrv(b, x)
return mrv_max1(s1, s2, e.func(i, e1, e2), x)
elif e.is_Pow:
e1 = S.One
while e.is_Pow:
b1 = e.base
e1 *= e.exp
e = b1
if b1 == 1:
return SubsSet(), b1
if e1.has(x):
base_lim = limitinf(b1, x)
if base_lim is S.One:
return mrv(exp(e1 * (b1 - 1)), x)
return mrv(exp(e1 * log(b1)), x)
else:
s, expr = mrv(b1, x)
return s, expr**e1
elif isinstance(e, log):
s, expr = mrv(e.args[0], x)
return s, log(expr)
elif isinstance(e, exp):
# We know from the theory of this algorithm that exp(log(...)) may always
# be simplified here, and doing so is vital for termination.
if isinstance(e.args[0], log):
return mrv(e.args[0].args[0], x)
# if a product has an infinite factor the result will be
# infinite if there is no zero, otherwise NaN; here, we
# consider the result infinite if any factor is infinite
li = limitinf(e.args[0], x)
if any(_.is_infinite for _ in Mul.make_args(li)):
s1 = SubsSet()
e1 = s1[e]
s2, e2 = mrv(e.args[0], x)
su = s1.union(s2)[0]
su.rewrites[e1] = exp(e2)
return mrv_max3(s1, e1, s2, exp(e2), su, e1, x)
else:
s, expr = mrv(e.args[0], x)
return s, exp(expr)
elif e.is_Function:
l = [mrv(a, x) for a in e.args]
l2 = [s for (s, _) in l if s != SubsSet()]
if len(l2) != 1:
# e.g. something like BesselJ(x, x)
raise NotImplementedError("MRV set computation for functions in"
" several variables not implemented.")
s, ss = l2[0], SubsSet()
args = [ss.do_subs(x[1]) for x in l]
return s, e.func(*args)
elif e.is_Derivative:
raise NotImplementedError("MRV set computation for derviatives"
" not implemented yet.")
return mrv(e.args[0], x)
raise NotImplementedError(
"Don't know how to calculate the mrv of '%s'" % e)
def mrv_max3(f, expsf, g, expsg, union, expsboth, x):
"""Computes the maximum of two sets of expressions f and g, which
are in the same comparability class, i.e. max() compares (two elements of)
f and g and returns either (f, expsf) [if f is larger], (g, expsg)
[if g is larger] or (union, expsboth) [if f, g are of the same class].
"""
if not isinstance(f, SubsSet):
raise TypeError("f should be an instance of SubsSet")
if not isinstance(g, SubsSet):
raise TypeError("g should be an instance of SubsSet")
if f == SubsSet():
return g, expsg
elif g == SubsSet():
return f, expsf
elif f.meets(g):
return union, expsboth
c = compare(list(f.keys())[0], list(g.keys())[0], x)
if c == ">":
return f, expsf
elif c == "<":
return g, expsg
else:
if c != "=":
raise ValueError("c should be =")
return union, expsboth
def mrv_max1(f, g, exps, x):
"""Computes the maximum of two sets of expressions f and g, which
are in the same comparability class, i.e. mrv_max1() compares (two elements of)
f and g and returns the set, which is in the higher comparability class
of the union of both, if they have the same order of variation.
Also returns exps, with the appropriate substitutions made.
"""
u, b = f.union(g, exps)
return mrv_max3(f, g.do_subs(exps), g, f.do_subs(exps),
u, b, x)
@debug
@cacheit
@timeit
def sign(e, x):
"""
Returns a sign of an expression e(x) for x->oo.
::
e > 0 for x sufficiently large ... 1
e == 0 for x sufficiently large ... 0
e < 0 for x sufficiently large ... -1
The result of this function is currently undefined if e changes sign
arbitrarily often for arbitrarily large x (e.g. sin(x)).
Note that this returns zero only if e is *constantly* zero
for x sufficiently large. [If e is constant, of course, this is just
the same thing as the sign of e.]
"""
from sympy import sign as _sign
if not isinstance(e, Basic):
raise TypeError("e should be an instance of Basic")
if e.is_positive:
return 1
elif e.is_negative:
return -1
elif e.is_zero:
return 0
elif not e.has(x):
return _sign(e)
elif e == x:
return 1
elif e.is_Mul:
a, b = e.as_two_terms()
sa = sign(a, x)
if not sa:
return 0
return sa * sign(b, x)
elif isinstance(e, exp):
return 1
elif e.is_Pow:
s = sign(e.base, x)
if s == 1:
return 1
if e.exp.is_Integer:
return s**e.exp
elif isinstance(e, log):
return sign(e.args[0] - 1, x)
# if all else fails, do it the hard way
c0, e0 = mrv_leadterm(e, x)
return sign(c0, x)
@debug
@timeit
@cacheit
def limitinf(e, x, leadsimp=False):
"""Limit e(x) for x-> oo.
If ``leadsimp`` is True, an attempt is made to simplify the leading
term of the series expansion of ``e``. That may succeed even if
``e`` cannot be simplified.
"""
# rewrite e in terms of tractable functions only
if not e.has(x):
return e # e is a constant
if e.has(Order):
e = e.expand().removeO()
if not x.is_positive or x.is_integer:
# We make sure that x.is_positive is True and x.is_integer is None
# so we get all the correct mathematical behavior from the expression.
# We need a fresh variable.
p = Dummy('p', positive=True)
e = e.subs(x, p)
x = p
e = e.rewrite('tractable', deep=True, limitvar=x)
e = powdenest(e)
c0, e0 = mrv_leadterm(e, x)
sig = sign(e0, x)
if sig == 1:
return S.Zero # e0>0: lim f = 0
elif sig == -1: # e0<0: lim f = +-oo (the sign depends on the sign of c0)
if c0.match(I*Wild("a", exclude=[I])):
return c0*oo
s = sign(c0, x)
# the leading term shouldn't be 0:
if s == 0:
raise ValueError("Leading term should not be 0")
return s*oo
elif sig == 0:
if leadsimp:
c0 = c0.simplify()
return limitinf(c0, x, leadsimp) # e0=0: lim f = lim c0
else:
raise ValueError("{} could not be evaluated".format(sig))
def moveup2(s, x):
r = SubsSet()
for expr, var in s.items():
r[expr.xreplace({x: exp(x)})] = var
for var, expr in s.rewrites.items():
r.rewrites[var] = s.rewrites[var].xreplace({x: exp(x)})
return r
def moveup(l, x):
return [e.xreplace({x: exp(x)}) for e in l]
@debug
@timeit
def calculate_series(e, x, logx=None):
""" Calculates at least one term of the series of "e" in "x".
This is a place that fails most often, so it is in its own function.
"""
from sympy.polys import cancel
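# Walk the lazy series term by term and keep the first term that does not
# simplify to zero; powdenest helps recombine mixed exp/log factors first.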
for t in e.lseries(x, logx=logx):
t = cancel(t)
if t.has(exp) and t.has(log):
t = powdenest(t)
if t.simplify():
break
return t
@debug
@timeit
@cacheit
def mrv_leadterm(e, x):
"""Returns (c0, e0) for e."""
Omega = SubsSet()
if not e.has(x):
return (e, S.Zero)
if Omega == SubsSet():
Omega, exps = mrv(e, x)
if not Omega:
# e really does not depend on x after simplification
return exps, S.Zero
if x in Omega:
# move the whole omega up (exponentiate each term):
Omega_up = moveup2(Omega, x)
e_up = moveup([e], x)[0]
exps_up = moveup([exps], x)[0]
# NOTE: there is no need to move this down!
e = e_up
Omega = Omega_up
exps = exps_up
#
# The positive dummy, w, is used here so log(w*2) etc. will expand;
# a unique dummy is needed in this algorithm
#
# For limits of complex functions, the algorithm would have to be
# improved, or just find limits of Re and Im components separately.
#
w = Dummy("w", real=True, positive=True)
f, logw = rewrite(exps, Omega, x, w)
series = calculate_series(f, w, logx=logw)
return series.leadterm(w)
def build_expression_tree(Omega, rewrites):
r""" Helper function for rewrite.
We need to sort Omega (mrv set) so that we replace an expression before
we replace any expression in terms of which it has to be rewritten::
e1 ---> e2 ---> e3
\
-> e4
Here we can do e1, e2, e3, e4 or e1, e2, e4, e3.
To do this we assemble the nodes into a tree, and sort them by height.
This function builds the tree, rewrites then sorts the nodes.
"""
class Node:
def ht(self):
return reduce(lambda x, y: x + y,
[x.ht() for x in self.before], 1)
nodes = {}
for expr, v in Omega:
n = Node()
n.before = []
n.var = v
n.expr = expr
nodes[v] = n
for _, v in Omega:
if v in rewrites:
n = nodes[v]
r = rewrites[v]
for _, v2 in Omega:
if r.has(v2):
n.before.append(nodes[v2])
return nodes
@debug
@timeit
def rewrite(e, Omega, x, wsym):
"""e(x) ... the function
Omega ... the mrv set
wsym ... the symbol which is going to be used for w
Returns the rewritten e in terms of w and log(w). See test_rewrite1()
for examples and correct results.
"""
from sympy import ilcm
if not isinstance(Omega, SubsSet):
raise TypeError("Omega should be an instance of SubsSet")
if len(Omega) == 0:
raise ValueError("Length can not be 0")
# all items in Omega must be exponentials
for t in Omega.keys():
if not isinstance(t, exp):
raise ValueError("Value should be exp")
rewrites = Omega.rewrites
Omega = list(Omega.items())
nodes = build_expression_tree(Omega, rewrites)
Omega.sort(key=lambda x: nodes[x[1]].ht(), reverse=True)
# make sure we know the sign of each exp() term; after the loop,
# g is going to be the "w" - the simplest one in the mrv set
for g, _ in Omega:
sig = sign(g.args[0], x)
if sig != 1 and sig != -1:
raise NotImplementedError('Result depends on the sign of %s' % sig)
if sig == 1:
wsym = 1/wsym # if g goes to oo, substitute 1/w
# O2 is a list, which results by rewriting each item in Omega using "w"
O2 = []
denominators = []
for f, var in Omega:
c = limitinf(f.args[0]/g.args[0], x)
if c.is_Rational:
denominators.append(c.q)
arg = f.args[0]
if var in rewrites:
if not isinstance(rewrites[var], exp):
raise ValueError("Value should be exp")
arg = rewrites[var].args[0]
O2.append((var, exp((arg - c*g.args[0]).expand())*wsym**c))
# Remember that Omega contains subexpressions of "e". So now we find
# them in "e" and substitute them for our rewriting, stored in O2
# the following powsimp is necessary to automatically combine exponentials,
# so that the .xreplace() below succeeds:
# TODO this should not be necessary
f = powsimp(e, deep=True, combine='exp')
for a, b in O2:
f = f.xreplace({a: b})
for _, var in Omega:
assert not f.has(var)
# finally compute the logarithm of w (logw).
logw = g.args[0]
if sig == 1:
logw = -logw # log(w)->log(1/w)=-log(w)
# Some parts of sympy have difficulty computing series expansions with
# non-integral exponents. The following heuristic improves the situation:
exponent = reduce(ilcm, denominators, 1)
f = f.subs({wsym: wsym**exponent})
logw /= exponent
return f, logw
def gruntz(e, z, z0, dir="+"):
"""
Compute the limit of e(z) at the point z0 using the Gruntz algorithm.
z0 can be any expression, including oo and -oo.
For dir="+" (default) it calculates the limit from the right
(z->z0+) and for dir="-" the limit from the left (z->z0-). For infinite z0
(oo or -oo), the dir argument doesn't matter.
This algorithm is fully described in the module docstring in the gruntz.py
file. It relies heavily on the series expansion. Most frequently, gruntz()
is only used if the faster limit() function (which uses heuristics) fails.
"""
if not z.is_symbol:
raise NotImplementedError("Second argument must be a Symbol")
# convert all limits to the limit z->oo; sign of z is handled in limitinf
r = None
if z0 == oo:
e0 = e
elif z0 == -oo:
e0 = e.subs(z, -z)
else:
if str(dir) == "-":
e0 = e.subs(z, z0 - 1/z)
elif str(dir) == "+":
e0 = e.subs(z, z0 + 1/z)
else:
raise NotImplementedError("dir must be '+' or '-'")
try:
r = limitinf(e0, z)
except ValueError:
r = limitinf(e0, z, leadsimp=True)
# This is a bit of a heuristic for nice results... we always rewrite
# tractable functions in terms of familiar intractable ones.
# It might be nicer to rewrite the exactly to what they were initially,
# but that would take some work to implement.
return r.rewrite('intractable', deep=True)
|
7fb8f5541de050478d729516884b9bac61c2cf5e21939ec52bb18a2b00bd036e | from sympy.core.basic import Basic
from sympy.core.cache import cacheit
from sympy.core.compatibility import is_sequence, iterable, ordered
from sympy.core.containers import Tuple
from sympy.core.decorators import call_highest_priority
from sympy.core.parameters import global_parameters
from sympy.core.function import AppliedUndef
from sympy.core.mul import Mul
from sympy.core.numbers import Integer
from sympy.core.relational import Eq
from sympy.core.singleton import S, Singleton
from sympy.core.symbol import Dummy, Symbol, Wild
from sympy.core.sympify import sympify
from sympy.polys import lcm, factor
from sympy.sets.sets import Interval, Intersection
from sympy.simplify import simplify
from sympy.tensor.indexed import Idx
from sympy.utilities.iterables import flatten
from sympy import expand
###############################################################################
# SEQUENCES #
###############################################################################
class SeqBase(Basic):
"""Base class for sequences"""
is_commutative = True
_op_priority = 15
@staticmethod
def _start_key(expr):
"""Return start (if possible) else S.Infinity.
adapted from Set._infimum_key
"""
try:
start = expr.start
except (NotImplementedError,
AttributeError, ValueError):
start = S.Infinity
return start
def _intersect_interval(self, other):
"""Returns start and stop.
Takes intersection over the two intervals.
"""
interval = Intersection(self.interval, other.interval)
return interval.inf, interval.sup
@property
def gen(self):
"""Returns the generator for the sequence"""
raise NotImplementedError("(%s).gen" % self)
@property
def interval(self):
"""The interval on which the sequence is defined"""
raise NotImplementedError("(%s).interval" % self)
@property
def start(self):
"""The starting point of the sequence. This point is included"""
raise NotImplementedError("(%s).start" % self)
@property
def stop(self):
"""The ending point of the sequence. This point is included"""
raise NotImplementedError("(%s).stop" % self)
@property
def length(self):
"""Length of the sequence"""
raise NotImplementedError("(%s).length" % self)
@property
def variables(self):
"""Returns a tuple of variables that are bounded"""
return ()
@property
def free_symbols(self):
"""
This method returns the symbols in the object, excluding those
that take on a specific value (i.e. the dummy symbols).
Examples
========
>>> from sympy import SeqFormula
>>> from sympy.abc import n, m
>>> SeqFormula(m*n**2, (n, 0, 5)).free_symbols
{m}
"""
return ({j for i in self.args for j in i.free_symbols
.difference(self.variables)})
@cacheit
def coeff(self, pt):
"""Returns the coefficient at point pt"""
if pt < self.start or pt > self.stop:
raise IndexError("Index %s out of bounds %s" % (pt, self.interval))
return self._eval_coeff(pt)
def _eval_coeff(self, pt):
raise NotImplementedError("The _eval_coeff method should be added to"
"%s to return coefficient so it is available"
"when coeff calls it."
% self.func)
def _ith_point(self, i):
"""Returns the i'th point of a sequence.
If start point is negative infinity, point is returned from the end.
Assumes the first point to be indexed zero.
Examples
========
>>> from sympy import oo
>>> from sympy.series.sequences import SeqPer
bounded
>>> SeqPer((1, 2, 3), (-10, 10))._ith_point(0)
-10
>>> SeqPer((1, 2, 3), (-10, 10))._ith_point(5)
-5
End is at infinity
>>> SeqPer((1, 2, 3), (0, oo))._ith_point(5)
5
Starts at negative infinity
>>> SeqPer((1, 2, 3), (-oo, 0))._ith_point(5)
-5
"""
if self.start is S.NegativeInfinity:
initial = self.stop
step = -1
else:
initial = self.start
step = 1
return initial + i*step
def _add(self, other):
"""
Should only be used internally.
self._add(other) returns a new, term-wise added sequence if self
knows how to add with other, otherwise it returns ``None``.
``other`` should only be a sequence object.
Used within :class:`SeqAdd` class.
"""
return None
def _mul(self, other):
"""
Should only be used internally.
self._mul(other) returns a new, term-wise multiplied sequence if self
knows how to multiply with other, otherwise it returns ``None``.
``other`` should only be a sequence object.
Used within :class:`SeqMul` class.
"""
return None
def coeff_mul(self, other):
"""
Should be used when ``other`` is not a sequence. Should be
defined to define custom behaviour.
Examples
========
>>> from sympy import SeqFormula
>>> from sympy.abc import n
>>> SeqFormula(n**2).coeff_mul(2)
SeqFormula(2*n**2, (n, 0, oo))
Notes
=====
'*' defines multiplication of sequences with sequences only.
"""
return Mul(self, other)
def __add__(self, other):
"""Returns the term-wise addition of 'self' and 'other'.
``other`` should be a sequence.
Examples
========
>>> from sympy import SeqFormula
>>> from sympy.abc import n
>>> SeqFormula(n**2) + SeqFormula(n**3)
SeqFormula(n**3 + n**2, (n, 0, oo))
"""
if not isinstance(other, SeqBase):
raise TypeError('cannot add sequence and %s' % type(other))
return SeqAdd(self, other)
@call_highest_priority('__add__')
def __radd__(self, other):
return self + other
def __sub__(self, other):
"""Returns the term-wise subtraction of 'self' and 'other'.
``other`` should be a sequence.
Examples
========
>>> from sympy import SeqFormula
>>> from sympy.abc import n
>>> SeqFormula(n**2) - (SeqFormula(n))
SeqFormula(n**2 - n, (n, 0, oo))
"""
if not isinstance(other, SeqBase):
raise TypeError('cannot subtract sequence and %s' % type(other))
return SeqAdd(self, -other)
@call_highest_priority('__sub__')
def __rsub__(self, other):
return (-self) + other
def __neg__(self):
"""Negates the sequence.
Examples
========
>>> from sympy import SeqFormula
>>> from sympy.abc import n
>>> -SeqFormula(n**2)
SeqFormula(-n**2, (n, 0, oo))
"""
return self.coeff_mul(-1)
def __mul__(self, other):
"""Returns the term-wise multiplication of 'self' and 'other'.
``other`` should be a sequence. For ``other`` not being a
sequence see :func:`coeff_mul` method.
Examples
========
>>> from sympy import SeqFormula
>>> from sympy.abc import n
>>> SeqFormula(n**2) * (SeqFormula(n))
SeqFormula(n**3, (n, 0, oo))
"""
if not isinstance(other, SeqBase):
raise TypeError('cannot multiply sequence and %s' % type(other))
return SeqMul(self, other)
@call_highest_priority('__mul__')
def __rmul__(self, other):
return self * other
def __iter__(self):
for i in range(self.length):
pt = self._ith_point(i)
yield self.coeff(pt)
def __getitem__(self, index):
if isinstance(index, int):
index = self._ith_point(index)
return self.coeff(index)
elif isinstance(index, slice):
start, stop = index.start, index.stop
if start is None:
start = 0
if stop is None:
stop = self.length
return [self.coeff(self._ith_point(i)) for i in
range(start, stop, index.step or 1)]
def find_linear_recurrence(self, n, d=None, gfvar=None):
r"""
Finds the shortest linear recurrence that satisfies the first n
terms of sequence of order `\leq` n/2 if possible.
If d is specified, find shortest linear recurrence of order
`\leq` min(d, n/2) if possible.
Returns list of coefficients ``[b(1), b(2), ...]`` corresponding to the
recurrence relation ``x(n) = b(1)*x(n-1) + b(2)*x(n-2) + ...``
Returns ``[]`` if no recurrence is found.
If gfvar is specified, also returns ordinary generating function as a
function of gfvar.
Examples
========
>>> from sympy import sequence, sqrt, oo, lucas
>>> from sympy.abc import n, x, y
>>> sequence(n**2).find_linear_recurrence(10, 2)
[]
>>> sequence(n**2).find_linear_recurrence(10)
[3, -3, 1]
>>> sequence(2**n).find_linear_recurrence(10)
[2]
>>> sequence(23*n**4+91*n**2).find_linear_recurrence(10)
[5, -10, 10, -5, 1]
>>> sequence(sqrt(5)*(((1 + sqrt(5))/2)**n - (-(1 + sqrt(5))/2)**(-n))/5).find_linear_recurrence(10)
[1, 1]
>>> sequence(x+y*(-2)**(-n), (n, 0, oo)).find_linear_recurrence(30)
[1/2, 1/2]
>>> sequence(3*5**n + 12).find_linear_recurrence(20,gfvar=x)
([6, -5], 3*(5 - 21*x)/((x - 1)*(5*x - 1)))
>>> sequence(lucas(n)).find_linear_recurrence(15,gfvar=x)
([1, 1], (x - 2)/(x**2 + x - 1))
"""
from sympy.matrices import Matrix
x = [simplify(expand(t)) for t in self[:n]]
lx = len(x)
if d is None:
r = lx//2
else:
r = min(d,lx//2)
coeffs = []
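        # For each candidate order l, build the matrix of consecutive windows
        # of l terms and solve it for the recurrence coefficients; a solution
        # is accepted only if it also reproduces the remaining sampled terms.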
for l in range(1, r+1):
l2 = 2*l
mlist = []
for k in range(l):
mlist.append(x[k:k+l])
m = Matrix(mlist)
if m.det() != 0:
y = simplify(m.LUsolve(Matrix(x[l:l2])))
if lx == l2:
coeffs = flatten(y[::-1])
break
mlist = []
for k in range(l,lx-l):
mlist.append(x[k:k+l])
m = Matrix(mlist)
if m*y == Matrix(x[l2:]):
coeffs = flatten(y[::-1])
break
if gfvar is None:
return coeffs
else:
l = len(coeffs)
if l == 0:
return [], None
else:
n, d = x[l-1]*gfvar**(l-1), 1 - coeffs[l-1]*gfvar**l
for i in range(l-1):
n += x[i]*gfvar**i
for j in range(l-i-1):
n -= coeffs[i]*x[j]*gfvar**(i+j+1)
d -= coeffs[i]*gfvar**(i+1)
return coeffs, simplify(factor(n)/factor(d))
class EmptySequence(SeqBase, metaclass=Singleton):
"""Represents an empty sequence.
The empty sequence is also available as a singleton as
``S.EmptySequence``.
Examples
========
>>> from sympy import EmptySequence, SeqPer
>>> from sympy.abc import x
>>> EmptySequence
EmptySequence
>>> SeqPer((1, 2), (x, 0, 10)) + EmptySequence
SeqPer((1, 2), (x, 0, 10))
>>> SeqPer((1, 2)) * EmptySequence
EmptySequence
>>> EmptySequence.coeff_mul(-1)
EmptySequence
"""
@property
def interval(self):
return S.EmptySet
@property
def length(self):
return S.Zero
def coeff_mul(self, coeff):
"""See docstring of SeqBase.coeff_mul"""
return self
def __iter__(self):
return iter([])
class SeqExpr(SeqBase):
"""Sequence expression class.
Various sequences should inherit from this class.
Examples
========
>>> from sympy.series.sequences import SeqExpr
>>> from sympy.abc import x
>>> s = SeqExpr((1, 2, 3), (x, 0, 10))
>>> s.gen
(1, 2, 3)
>>> s.interval
Interval(0, 10)
>>> s.length
11
See Also
========
sympy.series.sequences.SeqPer
sympy.series.sequences.SeqFormula
"""
@property
def gen(self):
return self.args[0]
@property
def interval(self):
return Interval(self.args[1][1], self.args[1][2])
@property
def start(self):
return self.interval.inf
@property
def stop(self):
return self.interval.sup
@property
def length(self):
return self.stop - self.start + 1
@property
def variables(self):
return (self.args[1][0],)
class SeqPer(SeqExpr):
"""Represents a periodic sequence.
The elements are repeated after a given period.
Examples
========
>>> from sympy import SeqPer, oo
>>> from sympy.abc import k
>>> s = SeqPer((1, 2, 3), (0, 5))
>>> s.periodical
(1, 2, 3)
>>> s.period
3
For value at a particular point
>>> s.coeff(3)
1
supports slicing
>>> s[:]
[1, 2, 3, 1, 2, 3]
iterable
>>> list(s)
[1, 2, 3, 1, 2, 3]
sequence starts from negative infinity
>>> SeqPer((1, 2, 3), (-oo, 0))[0:6]
[1, 2, 3, 1, 2, 3]
Periodic formulas
>>> SeqPer((k, k**2, k**3), (k, 0, oo))[0:6]
[0, 1, 8, 3, 16, 125]
See Also
========
sympy.series.sequences.SeqFormula
"""
def __new__(cls, periodical, limits=None):
periodical = sympify(periodical)
def _find_x(periodical):
free = periodical.free_symbols
            if len(free) == 1:
return free.pop()
else:
return Dummy('k')
x, start, stop = None, None, None
if limits is None:
x, start, stop = _find_x(periodical), 0, S.Infinity
if is_sequence(limits, Tuple):
if len(limits) == 3:
x, start, stop = limits
elif len(limits) == 2:
x = _find_x(periodical)
start, stop = limits
if not isinstance(x, (Symbol, Idx)) or start is None or stop is None:
raise ValueError('Invalid limits given: %s' % str(limits))
if start is S.NegativeInfinity and stop is S.Infinity:
raise ValueError("Both the start and end value"
"cannot be unbounded")
limits = sympify((x, start, stop))
if is_sequence(periodical, Tuple):
periodical = sympify(tuple(flatten(periodical)))
else:
raise ValueError("invalid period %s should be something "
"like e.g (1, 2) " % periodical)
if Interval(limits[1], limits[2]) is S.EmptySet:
return S.EmptySequence
return Basic.__new__(cls, periodical, limits)
@property
def period(self):
return len(self.gen)
@property
def periodical(self):
return self.gen
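    # For sequences starting at -oo the terms are counted backwards from
    # ``stop``, so the periodic index is taken modulo the period from that
    # end; otherwise it is measured forwards from ``start``.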
def _eval_coeff(self, pt):
if self.start is S.NegativeInfinity:
idx = (self.stop - pt) % self.period
else:
idx = (pt - self.start) % self.period
return self.periodical[idx].subs(self.variables[0], pt)
def _add(self, other):
"""See docstring of SeqBase._add"""
if isinstance(other, SeqPer):
per1, lper1 = self.periodical, self.period
per2, lper2 = other.periodical, other.period
per_length = lcm(lper1, lper2)
new_per = []
for x in range(per_length):
ele1 = per1[x % lper1]
ele2 = per2[x % lper2]
new_per.append(ele1 + ele2)
start, stop = self._intersect_interval(other)
return SeqPer(new_per, (self.variables[0], start, stop))
def _mul(self, other):
"""See docstring of SeqBase._mul"""
if isinstance(other, SeqPer):
per1, lper1 = self.periodical, self.period
per2, lper2 = other.periodical, other.period
per_length = lcm(lper1, lper2)
new_per = []
for x in range(per_length):
ele1 = per1[x % lper1]
ele2 = per2[x % lper2]
new_per.append(ele1 * ele2)
start, stop = self._intersect_interval(other)
return SeqPer(new_per, (self.variables[0], start, stop))
def coeff_mul(self, coeff):
"""See docstring of SeqBase.coeff_mul"""
coeff = sympify(coeff)
per = [x * coeff for x in self.periodical]
return SeqPer(per, self.args[1])
class SeqFormula(SeqExpr):
"""Represents sequence based on a formula.
Elements are generated using a formula.
Examples
========
>>> from sympy import SeqFormula, oo, Symbol
>>> n = Symbol('n')
>>> s = SeqFormula(n**2, (n, 0, 5))
>>> s.formula
n**2
For value at a particular point
>>> s.coeff(3)
9
supports slicing
>>> s[:]
[0, 1, 4, 9, 16, 25]
iterable
>>> list(s)
[0, 1, 4, 9, 16, 25]
sequence starts from negative infinity
>>> SeqFormula(n**2, (-oo, 0))[0:6]
[0, 1, 4, 9, 16, 25]
See Also
========
sympy.series.sequences.SeqPer
"""
def __new__(cls, formula, limits=None):
formula = sympify(formula)
def _find_x(formula):
free = formula.free_symbols
if len(free) == 1:
return free.pop()
elif not free:
return Dummy('k')
else:
raise ValueError(
" specify dummy variables for %s. If the formula contains"
" more than one free symbol, a dummy variable should be"
" supplied explicitly e.g., SeqFormula(m*n**2, (n, 0, 5))"
% formula)
x, start, stop = None, None, None
if limits is None:
x, start, stop = _find_x(formula), 0, S.Infinity
if is_sequence(limits, Tuple):
if len(limits) == 3:
x, start, stop = limits
elif len(limits) == 2:
x = _find_x(formula)
start, stop = limits
if not isinstance(x, (Symbol, Idx)) or start is None or stop is None:
raise ValueError('Invalid limits given: %s' % str(limits))
if start is S.NegativeInfinity and stop is S.Infinity:
raise ValueError("Both the start and end value "
"cannot be unbounded")
limits = sympify((x, start, stop))
if Interval(limits[1], limits[2]) is S.EmptySet:
return S.EmptySequence
return Basic.__new__(cls, formula, limits)
@property
def formula(self):
return self.gen
def _eval_coeff(self, pt):
d = self.variables[0]
return self.formula.subs(d, pt)
def _add(self, other):
"""See docstring of SeqBase._add"""
if isinstance(other, SeqFormula):
form1, v1 = self.formula, self.variables[0]
form2, v2 = other.formula, other.variables[0]
formula = form1 + form2.subs(v2, v1)
start, stop = self._intersect_interval(other)
return SeqFormula(formula, (v1, start, stop))
def _mul(self, other):
"""See docstring of SeqBase._mul"""
if isinstance(other, SeqFormula):
form1, v1 = self.formula, self.variables[0]
form2, v2 = other.formula, other.variables[0]
formula = form1 * form2.subs(v2, v1)
start, stop = self._intersect_interval(other)
return SeqFormula(formula, (v1, start, stop))
def coeff_mul(self, coeff):
"""See docstring of SeqBase.coeff_mul"""
coeff = sympify(coeff)
formula = self.formula * coeff
return SeqFormula(formula, self.args[1])
def expand(self, *args, **kwargs):
return SeqFormula(expand(self.formula, *args, **kwargs), self.args[1])
class RecursiveSeq(SeqBase):
"""A finite degree recursive sequence.
That is, a sequence a(n) that depends on a fixed, finite number of its
previous values. The general form is
a(n) = f(a(n - 1), a(n - 2), ..., a(n - d))
for some fixed, positive integer d, where f is some function defined by a
SymPy expression.
Parameters
==========
recurrence : SymPy expression defining recurrence
This is *not* an equality, only the expression that the nth term is
equal to. For example, if :code:`a(n) = f(a(n - 1), ..., a(n - d))`,
then the expression should be :code:`f(a(n - 1), ..., a(n - d))`.
yn : applied undefined function
Represents the nth term of the sequence as e.g. :code:`y(n)` where
:code:`y` is an undefined function and `n` is the sequence index.
n : symbolic argument
The name of the variable that the recurrence is in, e.g., :code:`n` if
the recurrence function is :code:`y(n)`.
initial : iterable with length equal to the degree of the recurrence
The initial values of the recurrence.
start : start value of sequence (inclusive)
Examples
========
>>> from sympy import Function, symbols
>>> from sympy.series.sequences import RecursiveSeq
>>> y = Function("y")
>>> n = symbols("n")
>>> fib = RecursiveSeq(y(n - 1) + y(n - 2), y(n), n, [0, 1])
>>> fib.coeff(3) # Value at a particular point
2
>>> fib[:6] # supports slicing
[0, 1, 1, 2, 3, 5]
>>> fib.recurrence # inspect recurrence
Eq(y(n), y(n - 2) + y(n - 1))
>>> fib.degree # automatically determine degree
2
>>> for x in zip(range(10), fib): # supports iteration
... print(x)
(0, 0)
(1, 1)
(2, 1)
(3, 2)
(4, 3)
(5, 5)
(6, 8)
(7, 13)
(8, 21)
(9, 34)
See Also
========
sympy.series.sequences.SeqFormula
"""
def __new__(cls, recurrence, yn, n, initial=None, start=0):
if not isinstance(yn, AppliedUndef):
raise TypeError("recurrence sequence must be an applied undefined function"
", found `{}`".format(yn))
if not isinstance(n, Basic) or not n.is_symbol:
raise TypeError("recurrence variable must be a symbol"
", found `{}`".format(n))
if yn.args != (n,):
raise TypeError("recurrence sequence does not match symbol")
y = yn.func
k = Wild("k", exclude=(n,))
degree = 0
# Find all applications of y in the recurrence and check that:
# 1. The function y is only being used with a single argument; and
# 2. All arguments are n + k for constant negative integers k.
prev_ys = recurrence.find(y)
for prev_y in prev_ys:
if len(prev_y.args) != 1:
raise TypeError("Recurrence should be in a single variable")
shift = prev_y.args[0].match(n + k)[k]
if not (shift.is_constant() and shift.is_integer and shift < 0):
raise TypeError("Recurrence should have constant,"
" negative, integer shifts"
" (found {})".format(prev_y))
if -shift > degree:
degree = -shift
if not initial:
initial = [Dummy("c_{}".format(k)) for k in range(degree)]
if len(initial) != degree:
raise ValueError("Number of initial terms must equal degree")
degree = Integer(degree)
start = sympify(start)
initial = Tuple(*(sympify(x) for x in initial))
seq = Basic.__new__(cls, recurrence, yn, n, initial, start)
seq.cache = {y(start + k): init for k, init in enumerate(initial)}
seq.degree = degree
return seq
@property
def _recurrence(self):
"""Equation defining recurrence."""
return self.args[0]
@property
def recurrence(self):
"""Equation defining recurrence."""
return Eq(self.yn, self.args[0])
@property
def yn(self):
"""Applied function representing the nth term"""
return self.args[1]
@property
def y(self):
"""Undefined function for the nth term of the sequence"""
return self.yn.func
@property
def n(self):
"""Sequence index symbol"""
return self.args[2]
@property
def initial(self):
"""The initial values of the sequence"""
return self.args[3]
@property
def start(self):
"""The starting point of the sequence. This point is included"""
return self.args[4]
@property
def stop(self):
"""The ending point of the sequence. (oo)"""
return S.Infinity
@property
def interval(self):
"""Interval on which sequence is defined."""
return (self.start, S.Infinity)
    def _eval_coeff(self, index):
        if index - self.start < len(self.cache):
            return self.cache[self.y(index)]
        # Extend the cache one term at a time until the requested index is
        # reached; the offset is measured from ``start`` so that sequences
        # with a nonzero starting point are handled correctly.
        for current in range(len(self.cache), index - self.start + 1):
            # Use xreplace over subs for performance.
            # See issue #10697.
            seq_index = self.start + current
            current_recurrence = self._recurrence.xreplace({self.n: seq_index})
            new_term = current_recurrence.xreplace(self.cache)
            self.cache[self.y(seq_index)] = new_term
        return self.cache[self.y(index)]
def __iter__(self):
index = self.start
while True:
yield self._eval_coeff(index)
index += 1
def sequence(seq, limits=None):
"""Returns appropriate sequence object.
    If ``seq`` is an iterable (e.g. a tuple or list), returns a :class:`SeqPer`
    object; otherwise returns a :class:`SeqFormula` object.
Examples
========
>>> from sympy import sequence
>>> from sympy.abc import n
>>> sequence(n**2, (n, 0, 5))
SeqFormula(n**2, (n, 0, 5))
>>> sequence((1, 2, 3), (n, 0, 5))
SeqPer((1, 2, 3), (n, 0, 5))
See Also
========
sympy.series.sequences.SeqPer
sympy.series.sequences.SeqFormula
"""
seq = sympify(seq)
if is_sequence(seq, Tuple):
return SeqPer(seq, limits)
else:
return SeqFormula(seq, limits)
###############################################################################
# OPERATIONS #
###############################################################################
class SeqExprOp(SeqBase):
"""Base class for operations on sequences.
Examples
========
>>> from sympy.series.sequences import SeqExprOp, sequence
>>> from sympy.abc import n
>>> s1 = sequence(n**2, (n, 0, 10))
>>> s2 = sequence((1, 2, 3), (n, 5, 10))
>>> s = SeqExprOp(s1, s2)
>>> s.gen
(n**2, (1, 2, 3))
>>> s.interval
Interval(5, 10)
>>> s.length
6
See Also
========
sympy.series.sequences.SeqAdd
sympy.series.sequences.SeqMul
"""
@property
def gen(self):
"""Generator for the sequence.
        Returns a tuple of the generators of all the argument sequences.
"""
return tuple(a.gen for a in self.args)
@property
def interval(self):
"""Sequence is defined on the intersection
of all the intervals of respective sequences
"""
return Intersection(*(a.interval for a in self.args))
@property
def start(self):
return self.interval.inf
@property
def stop(self):
return self.interval.sup
@property
def variables(self):
"""Cumulative of all the bound variables"""
return tuple(flatten([a.variables for a in self.args]))
@property
def length(self):
return self.stop - self.start + 1
class SeqAdd(SeqExprOp):
"""Represents term-wise addition of sequences.
Rules:
* The interval on which sequence is defined is the intersection
of respective intervals of sequences.
* Anything + :class:`EmptySequence` remains unchanged.
* Other rules are defined in ``_add`` methods of sequence classes.
Examples
========
>>> from sympy import EmptySequence, oo, SeqAdd, SeqPer, SeqFormula
>>> from sympy.abc import n
>>> SeqAdd(SeqPer((1, 2), (n, 0, oo)), EmptySequence)
SeqPer((1, 2), (n, 0, oo))
>>> SeqAdd(SeqPer((1, 2), (n, 0, 5)), SeqPer((1, 2), (n, 6, 10)))
EmptySequence
>>> SeqAdd(SeqPer((1, 2), (n, 0, oo)), SeqFormula(n**2, (n, 0, oo)))
SeqAdd(SeqFormula(n**2, (n, 0, oo)), SeqPer((1, 2), (n, 0, oo)))
>>> SeqAdd(SeqFormula(n**3), SeqFormula(n**2))
SeqFormula(n**3 + n**2, (n, 0, oo))
See Also
========
sympy.series.sequences.SeqMul
"""
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', global_parameters.evaluate)
# flatten inputs
args = list(args)
# adapted from sympy.sets.sets.Union
def _flatten(arg):
if isinstance(arg, SeqBase):
if isinstance(arg, SeqAdd):
return sum(map(_flatten, arg.args), [])
else:
return [arg]
if iterable(arg):
return sum(map(_flatten, arg), [])
raise TypeError("Input must be Sequences or "
" iterables of Sequences")
args = _flatten(args)
args = [a for a in args if a is not S.EmptySequence]
# Addition of no sequences is EmptySequence
if not args:
return S.EmptySequence
if Intersection(*(a.interval for a in args)) is S.EmptySet:
return S.EmptySequence
# reduce using known rules
if evaluate:
return SeqAdd.reduce(args)
args = list(ordered(args, SeqBase._start_key))
return Basic.__new__(cls, *args)
@staticmethod
def reduce(args):
"""Simplify :class:`SeqAdd` using known rules.
        Iterates through all pairs and asks the constituent
        sequences if they can simplify themselves with any other constituent.
Notes
=====
adapted from ``Union.reduce``
"""
new_args = True
while new_args:
for id1, s in enumerate(args):
new_args = False
for id2, t in enumerate(args):
if id1 == id2:
continue
new_seq = s._add(t)
# This returns None if s does not know how to add
# with t. Returns the newly added sequence otherwise
if new_seq is not None:
new_args = [a for a in args if a not in (s, t)]
new_args.append(new_seq)
break
if new_args:
args = new_args
break
if len(args) == 1:
return args.pop()
else:
return SeqAdd(args, evaluate=False)
def _eval_coeff(self, pt):
"""adds up the coefficients of all the sequences at point pt"""
return sum(a.coeff(pt) for a in self.args)
class SeqMul(SeqExprOp):
r"""Represents term-wise multiplication of sequences.
Handles multiplication of sequences only. For multiplication
with other objects see :func:`SeqBase.coeff_mul`.
Rules:
* The interval on which sequence is defined is the intersection
of respective intervals of sequences.
* Anything \* :class:`EmptySequence` returns :class:`EmptySequence`.
* Other rules are defined in ``_mul`` methods of sequence classes.
Examples
========
>>> from sympy import EmptySequence, oo, SeqMul, SeqPer, SeqFormula
>>> from sympy.abc import n
>>> SeqMul(SeqPer((1, 2), (n, 0, oo)), EmptySequence)
EmptySequence
>>> SeqMul(SeqPer((1, 2), (n, 0, 5)), SeqPer((1, 2), (n, 6, 10)))
EmptySequence
>>> SeqMul(SeqPer((1, 2), (n, 0, oo)), SeqFormula(n**2))
SeqMul(SeqFormula(n**2, (n, 0, oo)), SeqPer((1, 2), (n, 0, oo)))
>>> SeqMul(SeqFormula(n**3), SeqFormula(n**2))
SeqFormula(n**5, (n, 0, oo))
See Also
========
sympy.series.sequences.SeqAdd
"""
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', global_parameters.evaluate)
# flatten inputs
args = list(args)
# adapted from sympy.sets.sets.Union
def _flatten(arg):
if isinstance(arg, SeqBase):
if isinstance(arg, SeqMul):
return sum(map(_flatten, arg.args), [])
else:
return [arg]
elif iterable(arg):
return sum(map(_flatten, arg), [])
raise TypeError("Input must be Sequences or "
" iterables of Sequences")
args = _flatten(args)
# Multiplication of no sequences is EmptySequence
if not args:
return S.EmptySequence
if Intersection(*(a.interval for a in args)) is S.EmptySet:
return S.EmptySequence
# reduce using known rules
if evaluate:
return SeqMul.reduce(args)
args = list(ordered(args, SeqBase._start_key))
return Basic.__new__(cls, *args)
@staticmethod
def reduce(args):
"""Simplify a :class:`SeqMul` using known rules.
        Iterates through all pairs and asks the constituent
        sequences if they can simplify themselves with any other constituent.
Notes
=====
adapted from ``Union.reduce``
"""
new_args = True
while new_args:
for id1, s in enumerate(args):
new_args = False
for id2, t in enumerate(args):
if id1 == id2:
continue
new_seq = s._mul(t)
# This returns None if s does not know how to multiply
# with t. Returns the newly multiplied sequence otherwise
if new_seq is not None:
new_args = [a for a in args if a not in (s, t)]
new_args.append(new_seq)
break
if new_args:
args = new_args
break
if len(args) == 1:
return args.pop()
else:
return SeqMul(args, evaluate=False)
def _eval_coeff(self, pt):
"""multiplies the coefficients of all the sequences at point pt"""
val = 1
for a in self.args:
val *= a.coeff(pt)
return val
|
53ec675f12ed86d53c3ce4201d982e01f27e6b10e33a0ad9d3df5319bcd926b3 | from sympy.core import S, Symbol, Add, sympify, Expr, PoleError, Mul
from sympy.core.exprtools import factor_terms
from sympy.core.symbol import Dummy
from sympy.functions.combinatorial.factorials import factorial
from sympy.functions.special.gamma_functions import gamma
from sympy.polys import PolynomialError, factor
from sympy.series.order import Order
from sympy.simplify.ratsimp import ratsimp
from sympy.simplify.simplify import together
from .gruntz import gruntz
def limit(e, z, z0, dir="+"):
"""Computes the limit of ``e(z)`` at the point ``z0``.
Parameters
==========
e : expression, the limit of which is to be taken
z : symbol representing the variable in the limit.
Other symbols are treated as constants. Multivariate limits
are not supported.
z0 : the value toward which ``z`` tends. Can be any expression,
including ``oo`` and ``-oo``.
dir : string, optional (default: "+")
The limit is bi-directional if ``dir="+-"``, from the right
(z->z0+) if ``dir="+"``, and from the left (z->z0-) if
``dir="-"``. For infinite ``z0`` (``oo`` or ``-oo``), the ``dir``
argument is determined from the direction of the infinity
(i.e., ``dir="-"`` for ``oo``).
Examples
========
>>> from sympy import limit, sin, oo
>>> from sympy.abc import x
>>> limit(sin(x)/x, x, 0)
1
>>> limit(1/x, x, 0) # default dir='+'
oo
>>> limit(1/x, x, 0, dir="-")
-oo
>>> limit(1/x, x, 0, dir='+-')
zoo
>>> limit(1/x, x, oo)
0
Notes
=====
First we try some heuristics for easy and frequent cases like "x", "1/x",
"x**2" and similar, so that it's fast. For all other cases, we use the
Gruntz algorithm (see the gruntz() function).
See Also
========
limit_seq : returns the limit of a sequence.
"""
return Limit(e, z, z0, dir).doit(deep=False)
def heuristics(e, z, z0, dir):
"""Computes the limit of an expression term-wise.
Parameters are the same as for the ``limit`` function.
Works with the arguments of expression ``e`` one by one, computing
the limit of each and then combining the results. This approach
works only for simple limits, but it is fast.
"""
from sympy.calculus.util import AccumBounds
rv = None
if abs(z0) is S.Infinity:
rv = limit(e.subs(z, 1/z), z, S.Zero, "+" if z0 is S.Infinity else "-")
if isinstance(rv, Limit):
return
elif e.is_Mul or e.is_Add or e.is_Pow or e.is_Function:
r = []
for a in e.args:
l = limit(a, z, z0, dir)
if l.has(S.Infinity) and l.is_finite is None:
if isinstance(e, Add):
m = factor_terms(e)
if not isinstance(m, Mul): # try together
m = together(m)
if not isinstance(m, Mul): # try factor if the previous methods failed
m = factor(e)
if isinstance(m, Mul):
return heuristics(m, z, z0, dir)
return
return
elif isinstance(l, Limit):
return
elif l is S.NaN:
return
else:
r.append(l)
if r:
rv = e.func(*r)
if rv is S.NaN and e.is_Mul and any(isinstance(rr, AccumBounds) for rr in r):
r2 = []
e2 = []
for ii in range(len(r)):
if isinstance(r[ii], AccumBounds):
r2.append(r[ii])
else:
e2.append(e.args[ii])
if len(e2) > 0:
e3 = Mul(*e2).simplify()
l = limit(e3, z, z0, dir)
rv = l * Mul(*r2)
if rv is S.NaN:
try:
rat_e = ratsimp(e)
except PolynomialError:
return
if rat_e is S.NaN or rat_e == e:
return
return limit(rat_e, z, z0, dir)
return rv
class Limit(Expr):
"""Represents an unevaluated limit.
Examples
========
>>> from sympy import Limit, sin
>>> from sympy.abc import x
>>> Limit(sin(x)/x, x, 0)
Limit(sin(x)/x, x, 0)
>>> Limit(1/x, x, 0, dir="-")
Limit(1/x, x, 0, dir='-')
"""
def __new__(cls, e, z, z0, dir="+"):
e = sympify(e)
z = sympify(z)
z0 = sympify(z0)
if z0 is S.Infinity:
dir = "-"
elif z0 is S.NegativeInfinity:
dir = "+"
if isinstance(dir, str):
dir = Symbol(dir)
elif not isinstance(dir, Symbol):
raise TypeError("direction must be of type basestring or "
"Symbol, not %s" % type(dir))
if str(dir) not in ('+', '-', '+-'):
raise ValueError("direction must be one of '+', '-' "
"or '+-', not %s" % dir)
obj = Expr.__new__(cls)
obj._args = (e, z, z0, dir)
return obj
@property
def free_symbols(self):
e = self.args[0]
isyms = e.free_symbols
isyms.difference_update(self.args[1].free_symbols)
isyms.update(self.args[2].free_symbols)
return isyms
def doit(self, **hints):
"""Evaluates the limit.
Parameters
==========
deep : bool, optional (default: True)
Invoke the ``doit`` method of the expressions involved before
taking the limit.
hints : optional keyword arguments
To be passed to ``doit`` methods; only used if deep is True.
"""
from sympy import Abs, exp, log, sign
from sympy.calculus.util import AccumBounds
e, z, z0, dir = self.args
if z0 is S.ComplexInfinity:
raise NotImplementedError("Limits at complex "
"infinity are not implemented")
if hints.get('deep', True):
e = e.doit(**hints)
z = z.doit(**hints)
z0 = z0.doit(**hints)
if e == z:
return z0
if not e.has(z):
return e
cdir = 0
if str(dir) == "+":
cdir = 1
elif str(dir) == "-":
cdir = -1
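        # Strip Abs() from subexpressions whose argument has a definite sign
        # in the limit, so that the sign information is visible to the
        # algorithms below.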
def remove_abs(expr):
if not expr.args:
return expr
newargs = tuple(remove_abs(arg) for arg in expr.args)
if newargs != expr.args:
expr = expr.func(*newargs)
if isinstance(expr, Abs):
sig = limit(expr.args[0], z, z0, dir)
if sig.is_zero:
sig = limit(1/expr.args[0], z, z0, dir)
if sig.is_extended_real:
if (sig < 0) == True:
return -expr.args[0]
elif (sig > 0) == True:
return expr.args[0]
return expr
e = remove_abs(e)
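        # When e is meromorphic at z0 the limit can be read off from the
        # leading term of its expansion there (z0 is first shifted to the
        # origin; an infinite z0 is mapped to 0 via z -> -1/z).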
if e.is_meromorphic(z, z0):
if abs(z0) is S.Infinity:
newe = e.subs(z, -1/z)
else:
newe = e.subs(z, z + z0)
try:
coeff, ex = newe.leadterm(z, cdir)
except (ValueError, NotImplementedError):
pass
else:
if ex > 0:
return S.Zero
elif ex == 0:
return coeff
if str(dir) == "+" or not(int(ex) & 1):
return S.Infinity*sign(coeff)
elif str(dir) == "-":
return S.NegativeInfinity*sign(coeff)
else:
return S.ComplexInfinity
# gruntz fails on factorials but works with the gamma function
# If no factorial term is present, e should remain unchanged.
# factorial is defined to be zero for negative inputs (which
# differs from gamma) so only rewrite for positive z0.
if z0.is_extended_positive:
e = e.rewrite(factorial, gamma)
if e.is_Mul and abs(z0) is S.Infinity:
e = factor_terms(e)
u = Dummy('u', positive=True)
if z0 is S.NegativeInfinity:
inve = e.subs(z, -1/u)
else:
inve = e.subs(z, 1/u)
try:
f = inve.as_leading_term(u).gammasimp()
if f.is_meromorphic(u, S.Zero):
r = limit(f, u, S.Zero, "+")
if isinstance(r, Limit):
return self
else:
return r
except (ValueError, NotImplementedError, PoleError):
pass
if e.is_Order:
return Order(limit(e.expr, z, z0), *e.args[1:])
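        # Powers are handled via the exponential form b**e = exp(e*log(b))
        # where possible, with special handling of indeterminate forms such
        # as 1**oo and 0**0.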
if e.is_Pow:
if e.has(S.Infinity, S.NegativeInfinity, S.ComplexInfinity, S.NaN):
return self
b1, e1 = e.base, e.exp
f1 = e1*log(b1)
if f1.is_meromorphic(z, z0):
res = limit(f1, z, z0)
return exp(res)
ex_lim = limit(e1, z, z0)
base_lim = limit(b1, z, z0)
if base_lim is S.One:
if ex_lim in (S.Infinity, S.NegativeInfinity):
res = limit(e1*(b1 - 1), z, z0)
return exp(res)
elif ex_lim.is_real:
return S.One
if base_lim in (S.Zero, S.Infinity, S.NegativeInfinity) and ex_lim is S.Zero:
res = limit(f1, z, z0)
return exp(res)
if base_lim is S.NegativeInfinity:
if ex_lim is S.NegativeInfinity:
return S.Zero
if ex_lim is S.Infinity:
return S.ComplexInfinity
if not isinstance(base_lim, AccumBounds) and not isinstance(ex_lim, AccumBounds):
res = base_lim**ex_lim
if res is not S.ComplexInfinity and not res.is_Pow:
return res
l = None
try:
if str(dir) == '+-':
r = gruntz(e, z, z0, '+')
l = gruntz(e, z, z0, '-')
if l != r:
raise ValueError("The limit does not exist since "
"left hand limit = %s and right hand limit = %s"
% (l, r))
else:
r = gruntz(e, z, z0, dir)
if r is S.NaN or l is S.NaN:
raise PoleError()
except (PoleError, ValueError):
if l is not None:
raise
r = heuristics(e, z, z0, dir)
if r is None:
return self
return r
|
468436b1499e66d2ade09cff6a77a7eff94c065ce9cb7f3db10be053d58dfb09 | """Limits of sequences"""
from sympy.core.add import Add
from sympy.core.function import PoleError
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.core.symbol import Dummy
from sympy.core.sympify import sympify
from sympy.functions.combinatorial.numbers import fibonacci
from sympy.functions.combinatorial.factorials import factorial, subfactorial
from sympy.functions.special.gamma_functions import gamma
from sympy.functions.elementary.complexes import Abs
from sympy.functions.elementary.miscellaneous import Max, Min
from sympy.functions.elementary.trigonometric import cos, sin
from sympy.series.limits import Limit
def difference_delta(expr, n=None, step=1):
"""Difference Operator.
Discrete analog of differential operator. Given a sequence x[n],
returns the sequence x[n + step] - x[n].
Examples
========
>>> from sympy import difference_delta as dd
>>> from sympy.abc import n
>>> dd(n*(n + 1), n)
2*n + 2
>>> dd(n*(n + 1), n, 2)
4*n + 6
References
==========
.. [1] https://reference.wolfram.com/language/ref/DifferenceDelta.html
"""
expr = sympify(expr)
if n is None:
f = expr.free_symbols
if len(f) == 1:
n = f.pop()
elif len(f) == 0:
return S.Zero
else:
raise ValueError("Since there is more than one variable in the"
" expression, a variable must be supplied to"
" take the difference of %s" % expr)
step = sympify(step)
if step.is_number is False or step.is_finite is False:
raise ValueError("Step should be a finite number.")
if hasattr(expr, '_eval_difference_delta'):
result = expr._eval_difference_delta(n, step)
if result:
return result
return expr.subs(n, n + step) - expr
def dominant(expr, n):
"""Finds the dominant term in a sum, that is a term that dominates
every other term.
If limit(a/b, n, oo) is oo then a dominates b.
If limit(a/b, n, oo) is 0 then b dominates a.
Otherwise, a and b are comparable.
If there is no unique dominant term, then returns ``None``.
Examples
========
>>> from sympy import Sum
>>> from sympy.series.limitseq import dominant
>>> from sympy.abc import n, k
>>> dominant(5*n**3 + 4*n**2 + n + 1, n)
5*n**3
>>> dominant(2**n + Sum(k, (k, 0, n)), n)
2**n
See Also
========
    sympy.series.limitseq.limit_seq
"""
terms = Add.make_args(expr.expand(func=True))
term0 = terms[-1]
comp = [term0] # comparable terms
for t in terms[:-1]:
e = (term0 / t).gammasimp()
l = limit_seq(e, n)
if l is None:
return None
elif l.is_zero:
term0 = t
comp = [term0]
elif l not in [S.Infinity, S.NegativeInfinity]:
comp.append(t)
if len(comp) > 1:
return None
return term0
def _limit_inf(expr, n):
try:
return Limit(expr, n, S.Infinity).doit(deep=False)
except (NotImplementedError, PoleError):
return None
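# Core iteration used by limit_seq: repeatedly apply ``difference_delta`` to
# the numerator and the denominator (a discrete analogue of l'Hopital's rule,
# cf. the Stolz-Cesaro theorem), keeping only the dominant terms, until the
# limit at infinity can be computed directly.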
def _limit_seq(expr, n, trials):
from sympy.concrete.summations import Sum
for i in range(trials):
if not expr.has(Sum):
result = _limit_inf(expr, n)
if result is not None:
return result
num, den = expr.as_numer_denom()
if not den.has(n) or not num.has(n):
result = _limit_inf(expr.doit(), n)
if result is not None:
return result
return None
num, den = (difference_delta(t.expand(), n) for t in [num, den])
expr = (num / den).gammasimp()
if not expr.has(Sum):
result = _limit_inf(expr, n)
if result is not None:
return result
num, den = expr.as_numer_denom()
num = dominant(num, n)
if num is None:
return None
den = dominant(den, n)
if den is None:
return None
expr = (num / den).gammasimp()
def limit_seq(expr, n=None, trials=5):
"""Finds the limit of a sequence as index n tends to infinity.
Parameters
==========
expr : Expr
SymPy expression for the n-th term of the sequence
n : Symbol, optional
The index of the sequence, an integer that tends to positive
infinity. If None, inferred from the expression unless it has
multiple symbols.
trials: int, optional
The algorithm is highly recursive. ``trials`` is a safeguard from
infinite recursion in case the limit is not easily computed by the
algorithm. Try increasing ``trials`` if the algorithm returns ``None``.
Admissible Terms
================
The algorithm is designed for sequences built from rational functions,
indefinite sums, and indefinite products over an indeterminate n. Terms of
alternating sign are also allowed, but more complex oscillatory behavior is
not supported.
Examples
========
>>> from sympy import limit_seq, Sum, binomial
>>> from sympy.abc import n, k, m
>>> limit_seq((5*n**3 + 3*n**2 + 4) / (3*n**3 + 4*n - 5), n)
5/3
>>> limit_seq(binomial(2*n, n) / Sum(binomial(2*k, k), (k, 1, n)), n)
3/4
>>> limit_seq(Sum(k**2 * Sum(2**m/m, (m, 1, k)), (k, 1, n)) / (2**n*n), n)
4
See Also
========
sympy.series.limitseq.dominant
References
==========
.. [1] Computing Limits of Sequences - Manuel Kauers
"""
from sympy.concrete.summations import Sum
from sympy.calculus.util import AccumulationBounds
if n is None:
free = expr.free_symbols
if len(free) == 1:
n = free.pop()
elif not free:
return expr
else:
raise ValueError("Expression has more than one variable. "
"Please specify a variable.")
elif n not in expr.free_symbols:
return expr
expr = expr.rewrite(fibonacci, S.GoldenRatio)
expr = expr.rewrite(factorial, subfactorial, gamma)
n_ = Dummy("n", integer=True, positive=True)
n1 = Dummy("n", odd=True, positive=True)
n2 = Dummy("n", even=True, positive=True)
# If there is a negative term raised to a power involving n, or a
# trigonometric function, then consider even and odd n separately.
powers = (p.as_base_exp() for p in expr.atoms(Pow))
if (any(b.is_negative and e.has(n) for b, e in powers) or
expr.has(cos, sin)):
L1 = _limit_seq(expr.xreplace({n: n1}), n1, trials)
if L1 is not None:
L2 = _limit_seq(expr.xreplace({n: n2}), n2, trials)
if L1 != L2:
if L1.is_comparable and L2.is_comparable:
return AccumulationBounds(Min(L1, L2), Max(L1, L2))
else:
return None
else:
L1 = _limit_seq(expr.xreplace({n: n_}), n_, trials)
if L1 is not None:
return L1
else:
if expr.is_Add:
limits = [limit_seq(term, n, trials) for term in expr.args]
if any(result is None for result in limits):
return None
else:
return Add(*limits)
# Maybe the absolute value is easier to deal with (though not if
# it has a Sum). If it tends to 0, the limit is 0.
elif not expr.has(Sum):
lim = _limit_seq(Abs(expr.xreplace({n: n_})), n_, trials)
if lim is not None and lim.is_zero:
return S.Zero
|
8844cdb1f5e648976b6fbc504dc3502f1267e18e232d1f199091c7d0f3366078 | """Fourier Series"""
from sympy import pi, oo, Wild
from sympy.core.expr import Expr
from sympy.core.add import Add
from sympy.core.compatibility import is_sequence
from sympy.core.containers import Tuple
from sympy.core.singleton import S
from sympy.core.symbol import Dummy, Symbol
from sympy.core.sympify import sympify
from sympy.functions.elementary.trigonometric import sin, cos, sinc
from sympy.series.series_class import SeriesBase
from sympy.series.sequences import SeqFormula
from sympy.sets.sets import Interval
from sympy.simplify.fu import TR2, TR1, TR10, sincos_to_sum
def fourier_cos_seq(func, limits, n):
"""Returns the cos sequence in a Fourier series"""
from sympy.integrals import integrate
x, L = limits[0], limits[2] - limits[1]
cos_term = cos(2*n*pi*x / L)
formula = 2 * cos_term * integrate(func * cos_term, limits) / L
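    # The constant term is half of the n = 0 coefficient, i.e. the mean value
    # of ``func`` over the period.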
a0 = formula.subs(n, S.Zero) / 2
return a0, SeqFormula(2 * cos_term * integrate(func * cos_term, limits)
/ L, (n, 1, oo))
def fourier_sin_seq(func, limits, n):
"""Returns the sin sequence in a Fourier series"""
from sympy.integrals import integrate
x, L = limits[0], limits[2] - limits[1]
sin_term = sin(2*n*pi*x / L)
return SeqFormula(2 * sin_term * integrate(func * sin_term, limits)
/ L, (n, 1, oo))
def _process_limits(func, limits):
"""
Limits should be of the form (x, start, stop).
x should be a symbol. Both start and stop should be bounded.
* If x is not given, x is determined from func.
    * If limits is None, a limit of the form (x, -pi, pi) is returned.
Examples
========
>>> from sympy.series.fourier import _process_limits as pari
>>> from sympy.abc import x
>>> pari(x**2, (x, -2, 2))
(x, -2, 2)
>>> pari(x**2, (-2, 2))
(x, -2, 2)
>>> pari(x**2, None)
(x, -pi, pi)
"""
def _find_x(func):
free = func.free_symbols
if len(free) == 1:
return free.pop()
elif not free:
return Dummy('k')
else:
raise ValueError(
" specify dummy variables for %s. If the function contains"
" more than one free symbol, a dummy variable should be"
" supplied explicitly e.g. FourierSeries(m*n**2, (n, -pi, pi))"
% func)
x, start, stop = None, None, None
if limits is None:
x, start, stop = _find_x(func), -pi, pi
if is_sequence(limits, Tuple):
if len(limits) == 3:
x, start, stop = limits
elif len(limits) == 2:
x = _find_x(func)
start, stop = limits
if not isinstance(x, Symbol) or start is None or stop is None:
raise ValueError('Invalid limits given: %s' % str(limits))
unbounded = [S.NegativeInfinity, S.Infinity]
if start in unbounded or stop in unbounded:
raise ValueError("Both the start and end value should be bounded")
return sympify((x, start, stop))
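# finite_check(f, x, L) decides whether ``f`` is already a finite linear
# combination of constants and sin/cos terms whose arguments have the form
# a*pi*x/L + b with nonzero integer a and x-free b.  It returns
# ``(True, rewritten_f)`` in that case and ``(False, f)`` otherwise; e.g.
# (illustrative) ``finite_check(sin(x), x, pi)`` should give ``(True, sin(x))``.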
def finite_check(f, x, L):
def check_fx(exprs, x):
return x not in exprs.free_symbols
def check_sincos(_expr, x, L):
if isinstance(_expr, (sin, cos)):
sincos_args = _expr.args[0]
if sincos_args.match(a*(pi/L)*x + b) is not None:
return True
else:
return False
_expr = sincos_to_sum(TR2(TR1(f)))
add_coeff = _expr.as_coeff_add()
a = Wild('a', properties=[lambda k: k.is_Integer, lambda k: k != S.Zero, ])
b = Wild('b', properties=[lambda k: x not in k.free_symbols, ])
for s in add_coeff[1]:
mul_coeffs = s.as_coeff_mul()[1]
for t in mul_coeffs:
if not (check_fx(t, x) or check_sincos(t, x, L)):
return False, f
return True, _expr
class FourierSeries(SeriesBase):
r"""Represents Fourier sine/cosine series.
    This class only represents a Fourier series.
No computation is performed.
For how to compute Fourier series, see the :func:`fourier_series`
docstring.
See Also
========
sympy.series.fourier.fourier_series
"""
def __new__(cls, *args):
args = map(sympify, args)
return Expr.__new__(cls, *args)
@property
def function(self):
return self.args[0]
@property
def x(self):
return self.args[1][0]
@property
def period(self):
return (self.args[1][1], self.args[1][2])
@property
def a0(self):
return self.args[2][0]
@property
def an(self):
return self.args[2][1]
@property
def bn(self):
return self.args[2][2]
@property
def interval(self):
return Interval(0, oo)
@property
def start(self):
return self.interval.inf
@property
def stop(self):
return self.interval.sup
@property
def length(self):
return oo
@property
def L(self):
return abs(self.period[1] - self.period[0]) / 2
def _eval_subs(self, old, new):
x = self.x
if old.has(x):
return self
def truncate(self, n=3):
"""
Return the first n nonzero terms of the series.
If n is None return an iterator.
Parameters
==========
n : int or None
            Number of non-zero terms in the approximation, or None.
Returns
=======
Expr or iterator
Approximation of function expanded into Fourier series.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x, (x, -pi, pi))
>>> s.truncate(4)
2*sin(x) - sin(2*x) + 2*sin(3*x)/3 - sin(4*x)/2
See Also
========
sympy.series.fourier.FourierSeries.sigma_approximation
"""
if n is None:
return iter(self)
terms = []
for t in self:
if len(terms) == n:
break
if t is not S.Zero:
terms.append(t)
return Add(*terms)
def sigma_approximation(self, n=3):
r"""
Return :math:`\sigma`-approximation of Fourier series with respect
to order n.
Sigma approximation adjusts a Fourier summation to eliminate the Gibbs
phenomenon which would otherwise occur at discontinuities.
A sigma-approximated summation for a Fourier series of a T-periodical
function can be written as
.. math::
s(\theta) = \frac{1}{2} a_0 + \sum _{k=1}^{m-1}
\operatorname{sinc} \Bigl( \frac{k}{m} \Bigr) \cdot
\left[ a_k \cos \Bigl( \frac{2\pi k}{T} \theta \Bigr)
+ b_k \sin \Bigl( \frac{2\pi k}{T} \theta \Bigr) \right],
where :math:`a_0, a_k, b_k, k=1,\ldots,{m-1}` are standard Fourier
series coefficients and
:math:`\operatorname{sinc} \Bigl( \frac{k}{m} \Bigr)` is a Lanczos
:math:`\sigma` factor (expressed in terms of normalized
:math:`\operatorname{sinc}` function).
Parameters
==========
n : int
Highest order of the terms taken into account in approximation.
Returns
=======
Expr
Sigma approximation of function expanded into Fourier series.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x, (x, -pi, pi))
>>> s.sigma_approximation(4)
2*sin(x)*sinc(pi/4) - 2*sin(2*x)/pi + 2*sin(3*x)*sinc(3*pi/4)/3
See Also
========
sympy.series.fourier.FourierSeries.truncate
Notes
=====
The behaviour of
:meth:`~sympy.series.fourier.FourierSeries.sigma_approximation`
is different from :meth:`~sympy.series.fourier.FourierSeries.truncate`
- it takes all nonzero terms of degree smaller than n, rather than
first n nonzero ones.
References
==========
.. [1] https://en.wikipedia.org/wiki/Gibbs_phenomenon
.. [2] https://en.wikipedia.org/wiki/Sigma_approximation
"""
terms = [sinc(pi * i / n) * t for i, t in enumerate(self[:n])
if t is not S.Zero]
return Add(*terms)
def shift(self, s):
"""Shift the function by a term independent of x.
f(x) -> f(x) + s
        This is fast if the Fourier series of f(x) is already
        computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.shift(1).truncate()
-4*cos(x) + cos(2*x) + 1 + pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
a0 = self.a0 + s
sfunc = self.function + s
return self.func(sfunc, self.args[1], (a0, self.an, self.bn))
def shiftx(self, s):
"""Shift x by a term independent of x.
f(x) -> f(x + s)
        This is fast if the Fourier series of f(x) is already
        computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.shiftx(1).truncate()
-4*cos(x + 1) + cos(2*x + 2) + pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
an = self.an.subs(x, x + s)
bn = self.bn.subs(x, x + s)
sfunc = self.function.subs(x, x + s)
return self.func(sfunc, self.args[1], (self.a0, an, bn))
def scale(self, s):
"""Scale the function by a term independent of x.
f(x) -> s * f(x)
        This is fast if the Fourier series of f(x) is already
        computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.scale(2).truncate()
-8*cos(x) + 2*cos(2*x) + 2*pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
an = self.an.coeff_mul(s)
bn = self.bn.coeff_mul(s)
a0 = self.a0 * s
sfunc = self.args[0] * s
return self.func(sfunc, self.args[1], (a0, an, bn))
def scalex(self, s):
"""Scale x by a term independent of x.
f(x) -> f(s*x)
        This is fast if the Fourier series of f(x) is already
        computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.scalex(2).truncate()
-4*cos(2*x) + cos(4*x) + pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
an = self.an.subs(x, x * s)
bn = self.bn.subs(x, x * s)
sfunc = self.function.subs(x, x * s)
return self.func(sfunc, self.args[1], (self.a0, an, bn))
def _eval_as_leading_term(self, x, cdir=0):
for t in self:
if t is not S.Zero:
return t
def _eval_term(self, pt):
if pt == 0:
return self.a0
return self.an.coeff(pt) + self.bn.coeff(pt)
def __neg__(self):
return self.scale(-1)
def __add__(self, other):
if isinstance(other, FourierSeries):
if self.period != other.period:
raise ValueError("Both the series should have same periods")
x, y = self.x, other.x
function = self.function + other.function.subs(y, x)
if self.x not in function.free_symbols:
return function
an = self.an + other.an
bn = self.bn + other.bn
a0 = self.a0 + other.a0
return self.func(function, self.args[1], (a0, an, bn))
return Add(self, other)
def __sub__(self, other):
return self.__add__(-other)
class FiniteFourierSeries(FourierSeries):
r"""Represents Finite Fourier sine/cosine series.
For how to compute Fourier series, see the :func:`fourier_series`
docstring.
Parameters
==========
f : Expr
Expression for finding fourier_series
    limits : (x, start, stop)
        x is the independent variable for the expression f
        (start, stop) is the period of the Fourier series
    exprs: (a0, an, bn) or Expr
        a0 is the constant term a0 of the Fourier series
an is a dictionary of coefficients of cos terms
an[k] = coefficient of cos(pi*(k/L)*x)
bn is a dictionary of coefficients of sin terms
bn[k] = coefficient of sin(pi*(k/L)*x)
or exprs can be an expression to be converted to fourier form
Methods
=======
This class is an extension of FourierSeries class.
Please refer to sympy.series.fourier.FourierSeries for
further information.
See Also
========
sympy.series.fourier.FourierSeries
sympy.series.fourier.fourier_series
"""
def __new__(cls, f, limits, exprs):
f = sympify(f)
limits = sympify(limits)
exprs = sympify(exprs)
if not (type(exprs) == Tuple and len(exprs) == 3): # exprs is not of form (a0, an, bn)
# Converts the expression to fourier form
c, e = exprs.as_coeff_add()
rexpr = c + Add(*[TR10(i) for i in e])
a0, exp_ls = rexpr.expand(trig=False, power_base=False, power_exp=False, log=False).as_coeff_add()
x = limits[0]
L = abs(limits[2] - limits[1]) / 2
a = Wild('a', properties=[lambda k: k.is_Integer, lambda k: k is not S.Zero, ])
b = Wild('b', properties=[lambda k: x not in k.free_symbols, ])
an = dict()
bn = dict()
# separates the coefficients of sin and cos terms in dictionaries an, and bn
for p in exp_ls:
t = p.match(b * cos(a * (pi / L) * x))
q = p.match(b * sin(a * (pi / L) * x))
if t:
an[t[a]] = t[b] + an.get(t[a], S.Zero)
elif q:
bn[q[a]] = q[b] + bn.get(q[a], S.Zero)
else:
a0 += p
exprs = Tuple(a0, an, bn)
return Expr.__new__(cls, f, limits, exprs)
@property
def interval(self):
_length = 1 if self.a0 else 0
_length += max(set(self.an.keys()).union(set(self.bn.keys()))) + 1
return Interval(0, _length)
@property
def length(self):
return self.stop - self.start
def shiftx(self, s):
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
_expr = self.truncate().subs(x, x + s)
sfunc = self.function.subs(x, x + s)
return self.func(sfunc, self.args[1], _expr)
def scale(self, s):
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
_expr = self.truncate() * s
sfunc = self.function * s
return self.func(sfunc, self.args[1], _expr)
def scalex(self, s):
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
_expr = self.truncate().subs(x, x * s)
sfunc = self.function.subs(x, x * s)
return self.func(sfunc, self.args[1], _expr)
def _eval_term(self, pt):
if pt == 0:
return self.a0
_term = self.an.get(pt, S.Zero) * cos(pt * (pi / self.L) * self.x) \
+ self.bn.get(pt, S.Zero) * sin(pt * (pi / self.L) * self.x)
return _term
def __add__(self, other):
if isinstance(other, FourierSeries):
return other.__add__(fourier_series(self.function, self.args[1],\
finite=False))
elif isinstance(other, FiniteFourierSeries):
if self.period != other.period:
raise ValueError("Both the series should have same periods")
x, y = self.x, other.x
function = self.function + other.function.subs(y, x)
if self.x not in function.free_symbols:
return function
return fourier_series(function, limits=self.args[1])
def fourier_series(f, limits=None, finite=True):
r"""Computes the Fourier trigonometric series expansion.
Explanation
===========
Fourier trigonometric series of $f(x)$ over the interval $(a, b)$
is defined as:
.. math::
\frac{a_0}{2} + \sum_{n=1}^{\infty}
(a_n \cos(\frac{2n \pi x}{L}) + b_n \sin(\frac{2n \pi x}{L}))
where the coefficients are:
.. math::
L = b - a
.. math::
a_0 = \frac{2}{L} \int_{a}^{b}{f(x) dx}
.. math::
a_n = \frac{2}{L} \int_{a}^{b}{f(x) \cos(\frac{2n \pi x}{L}) dx}
.. math::
b_n = \frac{2}{L} \int_{a}^{b}{f(x) \sin(\frac{2n \pi x}{L}) dx}
    Requiring the given function $f(x)$ to be periodic is stronger than
    necessary: it is sufficient for the series to converge to $f(x)$ on the
    given interval, not on the whole real line.
    This also eases the computation, because you do not have to make $f(x)$
    artificially periodic by wrapping it with piecewise or modulo operations;
    you only need to shape the function to match the desired periodic
    function on the interval $(a, b)$, and the computed series will
    automatically be the series of the periodic version of $f(x)$.
    This property is illustrated in the examples section below.
Parameters
==========
limits : (sym, start, end), optional
*sym* denotes the symbol the series is computed with respect to.
        *start* and *end* denote the start and the end of the interval
        where the Fourier series converges to the given function.
        The default range is $-\pi$ to $\pi$.
Returns
=======
FourierSeries
A symbolic object representing the Fourier trigonometric series.
Examples
========
Computing the Fourier series of $f(x) = x^2$:
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> f = x**2
>>> s = fourier_series(f, (x, -pi, pi))
>>> s1 = s.truncate(n=3)
>>> s1
-4*cos(x) + cos(2*x) + pi**2/3
Shifting of the Fourier series:
>>> s.shift(1).truncate()
-4*cos(x) + cos(2*x) + 1 + pi**2/3
>>> s.shiftx(1).truncate()
-4*cos(x + 1) + cos(2*x + 2) + pi**2/3
Scaling of the Fourier series:
>>> s.scale(2).truncate()
-8*cos(x) + 2*cos(2*x) + 2*pi**2/3
>>> s.scalex(2).truncate()
-4*cos(2*x) + cos(4*x) + pi**2/3
Computing the Fourier series of $f(x) = x$:
This illustrates how truncating to the higher order gives better
convergence.
.. plot::
:context: reset
:format: doctest
:include-source: True
>>> from sympy import fourier_series, pi, plot
>>> from sympy.abc import x
>>> f = x
>>> s = fourier_series(f, (x, -pi, pi))
>>> s1 = s.truncate(n = 3)
>>> s2 = s.truncate(n = 5)
>>> s3 = s.truncate(n = 7)
>>> p = plot(f, s1, s2, s3, (x, -pi, pi), show=False, legend=True)
>>> p[0].line_color = (0, 0, 0)
>>> p[0].label = 'x'
>>> p[1].line_color = (0.7, 0.7, 0.7)
>>> p[1].label = 'n=3'
>>> p[2].line_color = (0.5, 0.5, 0.5)
>>> p[2].label = 'n=5'
>>> p[3].line_color = (0.3, 0.3, 0.3)
>>> p[3].label = 'n=7'
>>> p.show()
This illustrates how the series converges to different sawtooth
waves if the different ranges are specified.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> s1 = fourier_series(x, (x, -1, 1)).truncate(10)
>>> s2 = fourier_series(x, (x, -pi, pi)).truncate(10)
>>> s3 = fourier_series(x, (x, 0, 1)).truncate(10)
>>> p = plot(x, s1, s2, s3, (x, -5, 5), show=False, legend=True)
>>> p[0].line_color = (0, 0, 0)
>>> p[0].label = 'x'
>>> p[1].line_color = (0.7, 0.7, 0.7)
>>> p[1].label = '[-1, 1]'
>>> p[2].line_color = (0.5, 0.5, 0.5)
>>> p[2].label = '[-pi, pi]'
>>> p[3].line_color = (0.3, 0.3, 0.3)
>>> p[3].label = '[0, 1]'
>>> p.show()
Notes
=====
    Computing a Fourier series can be slow
    due to the integration required to compute
    an, bn.
    It is faster to compute the Fourier series of a function
    by shifting and scaling an already
    computed Fourier series rather than computing it
    again.
    For example, if the Fourier series of ``x**2`` is known,
    the Fourier series of ``x**2 - 1`` can be found by shifting by ``-1``.
See Also
========
sympy.series.fourier.FourierSeries
References
==========
.. [1] https://mathworld.wolfram.com/FourierSeries.html
"""
f = sympify(f)
limits = _process_limits(f, limits)
x = limits[0]
if x not in f.free_symbols:
return f
if finite:
L = abs(limits[2] - limits[1]) / 2
is_finite, res_f = finite_check(f, x, L)
if is_finite:
return FiniteFourierSeries(f, limits, res_f)
n = Dummy('n')
center = (limits[1] + limits[2]) / 2
if center.is_zero:
neg_f = f.subs(x, -x)
if f == neg_f:
a0, an = fourier_cos_seq(f, limits, n)
bn = SeqFormula(0, (1, oo))
return FourierSeries(f, limits, (a0, an, bn))
elif f == -neg_f:
a0 = S.Zero
an = SeqFormula(0, (1, oo))
bn = fourier_sin_seq(f, limits, n)
return FourierSeries(f, limits, (a0, an, bn))
a0, an = fourier_cos_seq(f, limits, n)
bn = fourier_sin_seq(f, limits, n)
return FourierSeries(f, limits, (a0, an, bn))
|
5844a3acac836e1030ec18ed03e3b63e1ca71213790653f69a4f9c62cfffe584 | """
This module implements the Residue function and related tools for working
with residues.
"""
from sympy import sympify
from sympy.utilities.timeutils import timethis
@timethis('residue')
def residue(expr, x, x0):
"""
Finds the residue of ``expr`` at the point x=x0.
The residue is defined as the coefficient of 1/(x-x0) in the power series
expansion about x=x0.
Examples
========
>>> from sympy import Symbol, residue, sin
>>> x = Symbol("x")
>>> residue(1/x, x, 0)
1
>>> residue(1/x**2, x, 0)
0
>>> residue(2/sin(x), x, 0)
2
This function is essential for the Residue Theorem [1].
References
==========
.. [1] https://en.wikipedia.org/wiki/Residue_theorem
"""
# The current implementation uses series expansion to
# calculate it. A more general implementation is explained in
# the section 5.6 of the Bronstein's book {M. Bronstein:
# Symbolic Integration I, Springer Verlag (2005)}. For purely
# rational functions, the algorithm is much easier. See
# sections 2.4, 2.5, and 2.7 (this section actually gives an
# algorithm for computing any Laurent series coefficient for
# a rational function). The theory in section 2.4 will help to
# understand why the resultant works in the general algorithm.
# For the definition of a resultant, see section 1.4 (and any
# previous sections for more review).
from sympy import collect, Mul, Order, S
expr = sympify(expr)
if x0 != 0:
expr = expr.subs(x, x + x0)
for n in [0, 1, 2, 4, 8, 16, 32]:
s = expr.nseries(x, n=n)
if not s.has(Order) or s.getn() >= 0:
break
s = collect(s.removeO(), x)
if s.is_Add:
args = s.args
else:
args = [s]
res = S.Zero
for arg in args:
c, m = arg.as_coeff_mul(x)
m = Mul(*m)
if not (m == 1 or m == x or (m.is_Pow and m.exp.is_Integer)):
raise NotImplementedError('term of unexpected form: %s' % m)
if m == 1/x:
res += c
return res
|
2f39430a9d0224e848a62b0f493977273a4cc94f377d5a4fd9d626f0172e0b5a | """Formal Power Series"""
from collections import defaultdict
from sympy import oo, zoo, nan
from sympy.core.add import Add
from sympy.core.compatibility import iterable
from sympy.core.expr import Expr
from sympy.core.function import Derivative, Function, expand
from sympy.core.mul import Mul
from sympy.core.numbers import Rational
from sympy.core.relational import Eq
from sympy.sets.sets import Interval
from sympy.core.singleton import S
from sympy.core.symbol import Wild, Dummy, symbols, Symbol
from sympy.core.sympify import sympify
from sympy.discrete.convolutions import convolution
from sympy.functions.combinatorial.factorials import binomial, factorial, rf
from sympy.functions.combinatorial.numbers import bell
from sympy.functions.elementary.integers import floor, frac, ceiling
from sympy.functions.elementary.miscellaneous import Min, Max
from sympy.functions.elementary.piecewise import Piecewise
from sympy.series.limits import Limit
from sympy.series.order import Order
from sympy.simplify.powsimp import powsimp
from sympy.series.sequences import sequence
from sympy.series.series_class import SeriesBase
def rational_algorithm(f, x, k, order=4, full=False):
"""
    Rational algorithm for computing
    the formula of the coefficients of the Formal Power Series
    of a function.
Applicable when f(x) or some derivative of f(x)
is a rational function in x.
:func:`rational_algorithm` uses :func:`~.apart` function for partial fraction
decomposition. :func:`~.apart` by default uses 'undetermined coefficients
method'. By setting ``full=True``, 'Bronstein's algorithm' can be used
instead.
    Looks for a derivative of the function up to 4th order (by default).
    This can be overridden using the ``order`` option.
Returns
=======
formula : Expr
ind : Expr
Independent terms.
order : int
Examples
========
>>> from sympy import log, atan
>>> from sympy.series.formal import rational_algorithm as ra
>>> from sympy.abc import x, k
>>> ra(1 / (1 - x), x, k)
(1, 0, 0)
>>> ra(log(1 + x), x, k)
(-(-1)**(-k)/k, 0, 1)
>>> ra(atan(x), x, k, full=True)
((-I*(-I)**(-k)/2 + I*I**(-k)/2)/k, 0, 1)
Notes
=====
By setting ``full=True``, range of admissible functions to be solved using
``rational_algorithm`` can be increased. This option should be used
carefully as it can significantly slow down the computation as ``doit`` is
performed on the :class:`~.RootSum` object returned by the :func:`~.apart`
function. Use ``full=False`` whenever possible.
See Also
========
sympy.polys.partfrac.apart
References
==========
.. [1] Formal Power Series - Dominik Gruntz, Wolfram Koepf
.. [2] Power Series in Computer Algebra - Wolfram Koepf
"""
from sympy.polys import RootSum, apart
from sympy.integrals import integrate
diff = f
ds = [] # list of diff
for i in range(order + 1):
if i:
diff = diff.diff(x)
if diff.is_rational_function(x):
coeff, sep = S.Zero, S.Zero
terms = apart(diff, x, full=full)
if terms.has(RootSum):
terms = terms.doit()
for t in Add.make_args(terms):
num, den = t.as_numer_denom()
if not den.has(x):
sep += t
else:
if isinstance(den, Mul):
# m*(n*x - a)**j -> (n*x - a)**j
ind = den.as_independent(x)
den = ind[1]
num /= ind[0]
# (n*x - a)**j -> (x - b)
den, j = den.as_base_exp()
a, xterm = den.as_coeff_add(x)
# term -> m/x**n
if not a:
sep += t
continue
xc = xterm[0].coeff(x)
a /= -xc
num /= xc**j
ak = ((-1)**j * num *
binomial(j + k - 1, k).rewrite(factorial) /
a**(j + k))
coeff += ak
# Hacky, better way?
if coeff.is_zero:
return None
if (coeff.has(x) or coeff.has(zoo) or coeff.has(oo) or
coeff.has(nan)):
return None
for j in range(i):
coeff = (coeff / (k + j + 1))
sep = integrate(sep, x)
sep += (ds.pop() - sep).limit(x, 0) # constant of integration
return (coeff.subs(k, k - i), sep, i)
else:
ds.append(diff)
return None
def rational_independent(terms, x):
"""Returns a list of all the rationally independent terms.
Examples
========
>>> from sympy import sin, cos
>>> from sympy.series.formal import rational_independent
>>> from sympy.abc import x
>>> rational_independent([cos(x), sin(x)], x)
[cos(x), sin(x)]
>>> rational_independent([x**2, sin(x), x*sin(x), x**3], x)
[x**3 + x**2, x*sin(x) + sin(x)]
"""
if not terms:
return []
ind = terms[0:1]
for t in terms[1:]:
n = t.as_independent(x)[1]
for i, term in enumerate(ind):
d = term.as_independent(x)[1]
q = (n / d).cancel()
if q.is_rational_function(x):
ind[i] += t
break
else:
ind.append(t)
return ind
def simpleDE(f, x, g, order=4):
r"""Generates simple DE.
DE is of the form
.. math::
f^k(x) + \sum\limits_{j=0}^{k-1} A_j f^j(x) = 0
where :math:`A_j` should be rational function in x.
Generates DE's upto order 4 (default). DE's can also have free parameters.
By increasing order, higher order DE's can be found.
Yields a tuple of (DE, order).
"""
from sympy.solvers.solveset import linsolve
a = symbols('a:%d' % (order))
def _makeDE(k):
eq = f.diff(x, k) + Add(*[a[i]*f.diff(x, i) for i in range(0, k)])
DE = g(x).diff(x, k) + Add(*[a[i]*g(x).diff(x, i) for i in range(0, k)])
return eq, DE
found = False
for k in range(1, order + 1):
eq, DE = _makeDE(k)
eq = eq.expand()
terms = eq.as_ordered_terms()
ind = rational_independent(terms, x)
if found or len(ind) == k:
sol = dict(zip(a, (i for s in linsolve(ind, a[:k]) for i in s)))
if sol:
found = True
DE = DE.subs(sol)
DE = DE.as_numer_denom()[0]
DE = DE.factor().as_coeff_mul(Derivative)[1][0]
yield DE.collect(Derivative(g(x))), k
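# Usage sketch for ``simpleDE`` (illustrative only; the exact printed form of
# the yielded DE may differ between SymPy versions):
#
#     >>> from sympy import Function, exp
#     >>> from sympy.series.formal import simpleDE
#     >>> from sympy.abc import x
#     >>> g = Function('g')
#     >>> next(simpleDE(exp(x), x, g))
#     (-g(x) + Derivative(g(x), x), 1)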
def exp_re(DE, r, k):
"""Converts a DE with constant coefficients (explike) into a RE.
Performs the substitution:
.. math::
f^j(x) \\to r(k + j)
Normalises the terms so that lowest order of a term is always r(k).
Examples
========
>>> from sympy import Function, Derivative
>>> from sympy.series.formal import exp_re
>>> from sympy.abc import x, k
>>> f, r = Function('f'), Function('r')
>>> exp_re(-f(x) + Derivative(f(x)), r, k)
-r(k) + r(k + 1)
>>> exp_re(Derivative(f(x), x) + Derivative(f(x), (x, 2)), r, k)
r(k) + r(k + 1)
See Also
========
sympy.series.formal.hyper_re
"""
RE = S.Zero
g = DE.atoms(Function).pop()
mini = None
for t in Add.make_args(DE):
coeff, d = t.as_independent(g)
if isinstance(d, Derivative):
j = d.derivative_count
else:
j = 0
if mini is None or j < mini:
mini = j
RE += coeff * r(k + j)
if mini:
RE = RE.subs(k, k - mini)
return RE
def hyper_re(DE, r, k):
"""Converts a DE into a RE.
Performs the substitution:
.. math::
x^l f^j(x) \\to (k + 1 - l)_j . a_{k + j - l}
Normalises the terms so that lowest order of a term is always r(k).
Examples
========
>>> from sympy import Function, Derivative
>>> from sympy.series.formal import hyper_re
>>> from sympy.abc import x, k
>>> f, r = Function('f'), Function('r')
>>> hyper_re(-f(x) + Derivative(f(x)), r, k)
(k + 1)*r(k + 1) - r(k)
>>> hyper_re(-x*f(x) + Derivative(f(x), (x, 2)), r, k)
(k + 2)*(k + 3)*r(k + 3) - r(k)
See Also
========
sympy.series.formal.exp_re
"""
RE = S.Zero
g = DE.atoms(Function).pop()
x = g.atoms(Symbol).pop()
mini = None
for t in Add.make_args(DE.expand()):
coeff, d = t.as_independent(g)
c, v = coeff.as_independent(x)
l = v.as_coeff_exponent(x)[1]
if isinstance(d, Derivative):
j = d.derivative_count
else:
j = 0
RE += c * rf(k + 1 - l, j) * r(k + j - l)
if mini is None or j - l < mini:
mini = j - l
RE = RE.subs(k, k - mini)
m = Wild('m')
return RE.collect(r(k + m))
def _transformation_a(f, x, P, Q, k, m, shift):
f *= x**(-shift)
P = P.subs(k, k + shift)
Q = Q.subs(k, k + shift)
return f, P, Q, m
def _transformation_c(f, x, P, Q, k, m, scale):
f = f.subs(x, x**scale)
P = P.subs(k, k / scale)
Q = Q.subs(k, k / scale)
m *= scale
return f, P, Q, m
def _transformation_e(f, x, P, Q, k, m):
f = f.diff(x)
P = P.subs(k, k + 1) * (k + m + 1)
Q = Q.subs(k, k + 1) * (k + 1)
return f, P, Q, m
def _apply_shift(sol, shift):
return [(res, cond + shift) for res, cond in sol]
def _apply_scale(sol, scale):
return [(res, cond / scale) for res, cond in sol]
def _apply_integrate(sol, x, k):
return [(res / ((cond + 1)*(cond.as_coeff_Add()[1].coeff(k))), cond + 1)
for res, cond in sol]
def _compute_formula(f, x, P, Q, k, m, k_max):
"""Computes the formula for f."""
from sympy.polys import roots
sol = []
for i in range(k_max + 1, k_max + m + 1):
if (i < 0) == True:
continue
r = f.diff(x, i).limit(x, 0) / factorial(i)
if r.is_zero:
continue
kterm = m*k + i
res = r
p = P.subs(k, kterm)
q = Q.subs(k, kterm)
c1 = p.subs(k, 1/k).leadterm(k)[0]
c2 = q.subs(k, 1/k).leadterm(k)[0]
res *= (-c1 / c2)**k
for r, mul in roots(p, k).items():
res *= rf(-r, k)**mul
for r, mul in roots(q, k).items():
res /= rf(-r, k)**mul
sol.append((res, kterm))
return sol
def _rsolve_hypergeometric(f, x, P, Q, k, m):
"""Recursive wrapper to rsolve_hypergeometric.
Returns a Tuple of (formula, series independent terms,
maximum power of x in independent terms) if successful
otherwise ``None``.
See :func:`rsolve_hypergeometric` for details.
"""
from sympy.polys import lcm, roots
from sympy.integrals import integrate
# transformation - c
proots, qroots = roots(P, k), roots(Q, k)
all_roots = dict(proots)
all_roots.update(qroots)
scale = lcm([r.as_numer_denom()[1] for r, t in all_roots.items()
if r.is_rational])
f, P, Q, m = _transformation_c(f, x, P, Q, k, m, scale)
# transformation - a
qroots = roots(Q, k)
if qroots:
k_min = Min(*qroots.keys())
else:
k_min = S.Zero
shift = k_min + m
f, P, Q, m = _transformation_a(f, x, P, Q, k, m, shift)
l = (x*f).limit(x, 0)
if not isinstance(l, Limit) and l != 0: # Ideally should only be l != 0
return None
qroots = roots(Q, k)
if qroots:
k_max = Max(*qroots.keys())
else:
k_max = S.Zero
ind, mp = S.Zero, -oo
for i in range(k_max + m + 1):
r = f.diff(x, i).limit(x, 0) / factorial(i)
if r.is_finite is False:
old_f = f
f, P, Q, m = _transformation_a(f, x, P, Q, k, m, i)
f, P, Q, m = _transformation_e(f, x, P, Q, k, m)
sol, ind, mp = _rsolve_hypergeometric(f, x, P, Q, k, m)
sol = _apply_integrate(sol, x, k)
sol = _apply_shift(sol, i)
ind = integrate(ind, x)
ind += (old_f - ind).limit(x, 0) # constant of integration
mp += 1
return sol, ind, mp
elif r:
ind += r*x**(i + shift)
pow_x = Rational((i + shift), scale)
if pow_x > mp:
mp = pow_x # maximum power of x
ind = ind.subs(x, x**(1/scale))
sol = _compute_formula(f, x, P, Q, k, m, k_max)
sol = _apply_shift(sol, shift)
sol = _apply_scale(sol, scale)
return sol, ind, mp
def rsolve_hypergeometric(f, x, P, Q, k, m):
"""Solves RE of hypergeometric type.
Attempts to solve RE of the form
Q(k)*a(k + m) - P(k)*a(k)
Transformations that preserve Hypergeometric type:
a. x**n*f(x): b(k + m) = R(k - n)*b(k)
b. f(A*x): b(k + m) = A**m*R(k)*b(k)
c. f(x**n): b(k + n*m) = R(k/n)*b(k)
d. f(x**(1/m)): b(k + 1) = R(k*m)*b(k)
e. f'(x): b(k + m) = ((k + m + 1)/(k + 1))*R(k + 1)*b(k)
Some of these transformations have been used to solve the RE.
Returns
=======
formula : Expr
ind : Expr
Independent terms.
order : int
Examples
========
>>> from sympy import exp, ln, S
>>> from sympy.series.formal import rsolve_hypergeometric as rh
>>> from sympy.abc import x, k
>>> rh(exp(x), x, -S.One, (k + 1), k, 1)
(Piecewise((1/factorial(k), Eq(Mod(k, 1), 0)), (0, True)), 1, 1)
>>> rh(ln(1 + x), x, k**2, k*(k + 1), k, 1)
(Piecewise(((-1)**(k - 1)*factorial(k - 1)/RisingFactorial(2, k - 1),
Eq(Mod(k, 1), 0)), (0, True)), x, 2)
References
==========
.. [1] Formal Power Series - Dominik Gruntz, Wolfram Koepf
.. [2] Power Series in Computer Algebra - Wolfram Koepf
"""
result = _rsolve_hypergeometric(f, x, P, Q, k, m)
if result is None:
return None
sol_list, ind, mp = result
sol_dict = defaultdict(lambda: S.Zero)
for res, cond in sol_list:
j, mk = cond.as_coeff_Add()
c = mk.coeff(k)
if j.is_integer is False:
res *= x**frac(j)
j = floor(j)
res = res.subs(k, (k - j) / c)
cond = Eq(k % c, j % c)
sol_dict[cond] += res # Group together formula for same conditions
sol = []
for cond, res in sol_dict.items():
sol.append((res, cond))
sol.append((S.Zero, True))
sol = Piecewise(*sol)
if mp is -oo:
s = S.Zero
elif mp.is_integer is False:
s = ceiling(mp)
else:
s = mp + 1
# save all the terms of
# form 1/x**k in ind
if s < 0:
ind += sum(sequence(sol * x**k, (k, s, -1)))
s = S.Zero
return (sol, ind, s)
def _solve_hyper_RE(f, x, RE, g, k):
"""See docstring of :func:`rsolve_hypergeometric` for details."""
terms = Add.make_args(RE)
if len(terms) == 2:
gs = list(RE.atoms(Function))
P, Q = map(RE.coeff, gs)
m = gs[1].args[0] - gs[0].args[0]
if m < 0:
P, Q = Q, P
m = abs(m)
return rsolve_hypergeometric(f, x, P, Q, k, m)
def _solve_explike_DE(f, x, DE, g, k):
"""Solves DE with constant coefficients."""
from sympy.solvers import rsolve
for t in Add.make_args(DE):
coeff, d = t.as_independent(g)
if coeff.free_symbols:
return
RE = exp_re(DE, g, k)
init = {}
for i in range(len(Add.make_args(RE))):
if i:
f = f.diff(x)
init[g(k).subs(k, i)] = f.limit(x, 0)
sol = rsolve(RE, g(k), init)
if sol:
return (sol / factorial(k), S.Zero, S.Zero)
def _solve_simple(f, x, DE, g, k):
"""Converts DE into RE and solves using :func:`rsolve`."""
from sympy.solvers import rsolve
RE = hyper_re(DE, g, k)
init = {}
for i in range(len(Add.make_args(RE))):
if i:
f = f.diff(x)
init[g(k).subs(k, i)] = f.limit(x, 0) / factorial(i)
sol = rsolve(RE, g(k), init)
if sol:
return (sol, S.Zero, S.Zero)
def _transform_explike_DE(DE, g, x, order, syms):
"""Converts DE with free parameters into DE with constant coefficients."""
from sympy.solvers.solveset import linsolve
eq = []
highest_coeff = DE.coeff(Derivative(g(x), x, order))
for i in range(order):
coeff = DE.coeff(Derivative(g(x), x, i))
coeff = (coeff / highest_coeff).expand().collect(x)
for t in Add.make_args(coeff):
eq.append(t)
temp = []
for e in eq:
if e.has(x):
break
elif e.has(Symbol):
temp.append(e)
else:
eq = temp
if eq:
sol = dict(zip(syms, (i for s in linsolve(eq, list(syms)) for i in s)))
if sol:
DE = DE.subs(sol)
DE = DE.factor().as_coeff_mul(Derivative)[1][0]
DE = DE.collect(Derivative(g(x)))
return DE
def _transform_DE_RE(DE, g, k, order, syms):
"""Converts DE with free parameters into RE of hypergeometric type."""
from sympy.solvers.solveset import linsolve
RE = hyper_re(DE, g, k)
eq = []
for i in range(1, order):
coeff = RE.coeff(g(k + i))
eq.append(coeff)
sol = dict(zip(syms, (i for s in linsolve(eq, list(syms)) for i in s)))
if sol:
m = Wild('m')
RE = RE.subs(sol)
RE = RE.factor().as_numer_denom()[0].collect(g(k + m))
RE = RE.as_coeff_mul(g)[1][0]
for i in range(order): # smallest order should be g(k)
if RE.coeff(g(k + i)) and i:
RE = RE.subs(k, k - i)
break
return RE
def solve_de(f, x, DE, order, g, k):
"""Solves the DE.
Tries to solve DE by either converting into a RE containing two terms or
converting into a DE having constant coefficients.
Returns
=======
formula : Expr
ind : Expr
Independent terms.
order : int
Examples
========
>>> from sympy import Derivative as D, Function
>>> from sympy import exp, ln
>>> from sympy.series.formal import solve_de
>>> from sympy.abc import x, k
>>> f = Function('f')
>>> solve_de(exp(x), x, D(f(x), x) - f(x), 1, f, k)
(Piecewise((1/factorial(k), Eq(Mod(k, 1), 0)), (0, True)), 1, 1)
>>> solve_de(ln(1 + x), x, (x + 1)*D(f(x), x, 2) + D(f(x)), 2, f, k)
(Piecewise(((-1)**(k - 1)*factorial(k - 1)/RisingFactorial(2, k - 1),
Eq(Mod(k, 1), 0)), (0, True)), x, 2)
"""
sol = None
syms = DE.free_symbols.difference({g, x})
if syms:
RE = _transform_DE_RE(DE, g, k, order, syms)
else:
RE = hyper_re(DE, g, k)
if not RE.free_symbols.difference({k}):
sol = _solve_hyper_RE(f, x, RE, g, k)
if sol:
return sol
if syms:
DE = _transform_explike_DE(DE, g, x, order, syms)
if not DE.free_symbols.difference({x}):
sol = _solve_explike_DE(f, x, DE, g, k)
if sol:
return sol
def hyper_algorithm(f, x, k, order=4):
"""Hypergeometric algorithm for computing Formal Power Series.
Steps:
* Generates DE
* Convert the DE into RE
* Solves the RE
Examples
========
>>> from sympy import exp, ln
>>> from sympy.series.formal import hyper_algorithm
>>> from sympy.abc import x, k
>>> hyper_algorithm(exp(x), x, k)
(Piecewise((1/factorial(k), Eq(Mod(k, 1), 0)), (0, True)), 1, 1)
>>> hyper_algorithm(ln(1 + x), x, k)
(Piecewise(((-1)**(k - 1)*factorial(k - 1)/RisingFactorial(2, k - 1),
Eq(Mod(k, 1), 0)), (0, True)), x, 2)
See Also
========
sympy.series.formal.simpleDE
sympy.series.formal.solve_de
"""
g = Function('g')
des = [] # list of DE's
sol = None
for DE, i in simpleDE(f, x, g, order):
if DE is not None:
sol = solve_de(f, x, DE, i, g, k)
if sol:
return sol
if not DE.free_symbols.difference({x}):
des.append(DE)
# If nothing works
# Try plain rsolve
for DE in des:
sol = _solve_simple(f, x, DE, g, k)
if sol:
return sol
def _compute_fps(f, x, x0, dir, hyper, order, rational, full):
"""Recursive wrapper to compute fps.
See :func:`compute_fps` for details.
"""
if x0 in [S.Infinity, S.NegativeInfinity]:
dir = S.One if x0 is S.Infinity else -S.One
temp = f.subs(x, 1/x)
result = _compute_fps(temp, x, 0, dir, hyper, order, rational, full)
if result is None:
return None
return (result[0], result[1].subs(x, 1/x), result[2].subs(x, 1/x))
elif x0 or dir == -S.One:
if dir == -S.One:
rep = -x + x0
rep2 = -x
rep2b = x0
else:
rep = x + x0
rep2 = x
rep2b = -x0
temp = f.subs(x, rep)
result = _compute_fps(temp, x, 0, S.One, hyper, order, rational, full)
if result is None:
return None
return (result[0], result[1].subs(x, rep2 + rep2b),
result[2].subs(x, rep2 + rep2b))
if f.is_polynomial(x):
k = Dummy('k')
ak = sequence(Coeff(f, x, k), (k, 1, oo))
xk = sequence(x**k, (k, 0, oo))
ind = f.coeff(x, 0)
return ak, xk, ind
# Break instances of Add:
# this allows application of different algorithms on different terms,
# increasing the range of admissible functions.
if isinstance(f, Add):
result = False
ak = sequence(S.Zero, (0, oo))
ind, xk = S.Zero, None
for t in Add.make_args(f):
res = _compute_fps(t, x, 0, S.One, hyper, order, rational, full)
if res:
if not result:
result = True
xk = res[1]
if res[0].start > ak.start:
seq = ak
s, f = ak.start, res[0].start
else:
seq = res[0]
s, f = res[0].start, ak.start
save = Add(*[z[0]*z[1] for z in zip(seq[0:(f - s)], xk[s:f])])
ak += res[0]
ind += res[2] + save
else:
ind += t
if result:
return ak, xk, ind
return None
# The symbolic term, symb, if present, is separated from the function;
# otherwise symb is set to S.One.
syms = f.free_symbols.difference({x})
(f, symb) = expand(f).as_independent(*syms)
if symb.is_zero:
symb = S.One
symb = powsimp(symb)
result = None
# from here on it's x0=0 and dir=1 handling
k = Dummy('k')
if rational:
result = rational_algorithm(f, x, k, order, full)
if result is None and hyper:
result = hyper_algorithm(f, x, k, order)
if result is None:
return None
ak = sequence(result[0], (k, result[2], oo))
xk_formula = powsimp(x**k * symb)
xk = sequence(xk_formula, (k, 0, oo))
ind = powsimp(result[1] * symb)
return ak, xk, ind
def compute_fps(f, x, x0=0, dir=1, hyper=True, order=4, rational=True,
full=False):
"""Computes the formula for Formal Power Series of a function.
Tries to compute the formula by applying the following techniques
(in order):
* rational_algorithm
* Hypergeometric algorithm
Parameters
==========
x : Symbol
x0 : number, optional
Point to perform series expansion about. Default is 0.
dir : {1, -1, '+', '-'}, optional
If dir is 1 or '+' the series is calculated from the right and
for -1 or '-' the series is calculated from the left. For smooth
functions this flag will not alter the results. Default is 1.
hyper : {True, False}, optional
Set hyper to False to skip the hypergeometric algorithm.
By default it is set to True.
order : int, optional
Order of the derivative of ``f``. Default is 4.
rational : {True, False}, optional
Set rational to False to skip rational algorithm. By default it is set
to True.
full : {True, False}, optional
Set full to True to increase the range of rational algorithm.
See :func:`rational_algorithm` for details. By default it is set to
False.
Returns
=======
ak : sequence
Sequence of coefficients.
xk : sequence
Sequence of powers of x.
ind : Expr
Independent terms.
mul : Pow
Common terms.
See Also
========
sympy.series.formal.rational_algorithm
sympy.series.formal.hyper_algorithm
"""
f = sympify(f)
x = sympify(x)
if not f.has(x):
return None
x0 = sympify(x0)
if dir == '+':
dir = S.One
elif dir == '-':
dir = -S.One
elif dir not in [S.One, -S.One]:
raise ValueError("Dir must be '+' or '-'")
else:
dir = sympify(dir)
return _compute_fps(f, x, x0, dir, hyper, order, rational, full)
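# Rough sketch of what ``compute_fps`` returns (illustrative only): for
# ``compute_fps(1/(1 - x), x)`` the rational algorithm applies, and the result
# is a triple ``(ak, xk, ind)`` where ``ak`` is the constant coefficient
# sequence 1 (over a dummy index k >= 0), ``xk`` is the sequence of powers
# ``x**k`` and ``ind`` is 0.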
class Coeff(Function):
"""
Coeff(p, x, n) represents the nth coefficient of the polynomial p in x
"""
@classmethod
def eval(cls, p, x, n):
if p.is_polynomial(x) and n.is_integer:
return p.coeff(x, n)
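# Example for ``Coeff`` (a small sketch): with an explicit integer index the
# coefficient is extracted immediately, otherwise the object stays
# unevaluated until the index becomes an integer.
#
#     >>> from sympy.series.formal import Coeff
#     >>> from sympy.abc import x
#     >>> Coeff(x**2 + 3*x + 1, x, 1)
#     3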
class FormalPowerSeries(SeriesBase):
"""Represents Formal Power Series of a function.
No computation is performed. This class should only be used to represent
a series. No checks are performed.
For computing a series use :func:`fps`.
See Also
========
sympy.series.formal.fps
"""
def __new__(cls, *args):
args = map(sympify, args)
return Expr.__new__(cls, *args)
def __init__(self, *args):
ak = args[4][0]
k = ak.variables[0]
self.ak_seq = sequence(ak.formula, (k, 1, oo))
self.fact_seq = sequence(factorial(k), (k, 1, oo))
self.bell_coeff_seq = self.ak_seq * self.fact_seq
self.sign_seq = sequence((-1, 1), (k, 1, oo))
@property
def function(self):
return self.args[0]
@property
def x(self):
return self.args[1]
@property
def x0(self):
return self.args[2]
@property
def dir(self):
return self.args[3]
@property
def ak(self):
return self.args[4][0]
@property
def xk(self):
return self.args[4][1]
@property
def ind(self):
return self.args[4][2]
@property
def interval(self):
return Interval(0, oo)
@property
def start(self):
return self.interval.inf
@property
def stop(self):
return self.interval.sup
@property
def length(self):
return oo
@property
def infinite(self):
"""Returns an infinite representation of the series"""
from sympy.concrete import Sum
ak, xk = self.ak, self.xk
k = ak.variables[0]
inf_sum = Sum(ak.formula * xk.formula, (k, ak.start, ak.stop))
return self.ind + inf_sum
def _get_pow_x(self, term):
"""Returns the power of x in a term."""
xterm, pow_x = term.as_independent(self.x)[1].as_base_exp()
if not xterm.has(self.x):
return S.Zero
return pow_x
def polynomial(self, n=6):
"""Truncated series as polynomial.
Returns the series expansion of ``f`` up to order ``O(x**n)``
as a polynomial (without the ``O`` term).
"""
terms = []
sym = self.free_symbols
for i, t in enumerate(self):
xp = self._get_pow_x(t)
if xp.has(*sym):
xp = xp.as_coeff_add(*sym)[0]
if xp >= n:
break
elif xp.is_integer is True and i == n + 1:
break
elif t is not S.Zero:
terms.append(t)
return Add(*terms)
def truncate(self, n=6):
"""Truncated series.
Returns the truncated series expansion of ``f`` up to
order ``O(x**n)``.
If n is ``None``, returns an infinite iterator.
"""
if n is None:
return iter(self)
x, x0 = self.x, self.x0
pt_xk = self.xk.coeff(n)
if x0 is S.NegativeInfinity:
x0 = S.Infinity
return self.polynomial(n) + Order(pt_xk, (x, x0))
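# Usage sketch for ``truncate`` (illustrative; see also the doctests of
# :func:`fps` further below):
#
#     >>> from sympy import fps, sin
#     >>> from sympy.abc import x
#     >>> fps(sin(x)).truncate()
#     x - x**3/6 + x**5/120 + O(x**6)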
def zero_coeff(self):
return self._eval_term(0)
def _eval_term(self, pt):
try:
pt_xk = self.xk.coeff(pt)
pt_ak = self.ak.coeff(pt).simplify() # Simplify the coefficients
except IndexError:
term = S.Zero
else:
term = (pt_ak * pt_xk)
if self.ind:
ind = S.Zero
sym = self.free_symbols
for t in Add.make_args(self.ind):
pow_x = self._get_pow_x(t)
if pow_x.has(*sym):
pow_x = pow_x.as_coeff_add(*sym)[0]
if pt == 0 and pow_x < 1:
ind += t
elif pow_x >= pt and pow_x < pt + 1:
ind += t
term += ind
return term.collect(self.x)
def _eval_subs(self, old, new):
x = self.x
if old.has(x):
return self
def _eval_as_leading_term(self, x, cdir=0):
for t in self:
if t is not S.Zero:
return t
def _eval_derivative(self, x):
f = self.function.diff(x)
ind = self.ind.diff(x)
pow_xk = self._get_pow_x(self.xk.formula)
ak = self.ak
k = ak.variables[0]
if ak.formula.has(x):
form = []
for e, c in ak.formula.args:
temp = S.Zero
for t in Add.make_args(e):
pow_x = self._get_pow_x(t)
temp += t * (pow_xk + pow_x)
form.append((temp, c))
form = Piecewise(*form)
ak = sequence(form.subs(k, k + 1), (k, ak.start - 1, ak.stop))
else:
ak = sequence((ak.formula * pow_xk).subs(k, k + 1),
(k, ak.start - 1, ak.stop))
return self.func(f, self.x, self.x0, self.dir, (ak, self.xk, ind))
def integrate(self, x=None, **kwargs):
"""Integrate Formal Power Series.
Examples
========
>>> from sympy import fps, sin, integrate
>>> from sympy.abc import x
>>> f = fps(sin(x))
>>> f.integrate(x).truncate()
-1 + x**2/2 - x**4/24 + O(x**6)
>>> integrate(f, (x, 0, 1))
1 - cos(1)
"""
from sympy.integrals import integrate
if x is None:
x = self.x
elif iterable(x):
return integrate(self.function, x)
f = integrate(self.function, x)
ind = integrate(self.ind, x)
ind += (f - ind).limit(x, 0) # constant of integration
pow_xk = self._get_pow_x(self.xk.formula)
ak = self.ak
k = ak.variables[0]
if ak.formula.has(x):
form = []
for e, c in ak.formula.args:
temp = S.Zero
for t in Add.make_args(e):
pow_x = self._get_pow_x(t)
temp += t / (pow_xk + pow_x + 1)
form.append((temp, c))
form = Piecewise(*form)
ak = sequence(form.subs(k, k - 1), (k, ak.start + 1, ak.stop))
else:
ak = sequence((ak.formula / (pow_xk + 1)).subs(k, k - 1),
(k, ak.start + 1, ak.stop))
return self.func(f, self.x, self.x0, self.dir, (ak, self.xk, ind))
def product(self, other, x=None, n=6):
"""Multiplies two Formal Power Series, using discrete convolution and
return the truncated terms upto specified order.
Parameters
==========
n : Number, optional
Specifies the order of the term up to which the polynomial should
be truncated.
Examples
========
>>> from sympy import fps, sin, exp
>>> from sympy.abc import x
>>> f1 = fps(sin(x))
>>> f2 = fps(exp(x))
>>> f1.product(f2, x).truncate(4)
x + x**2 + x**3/3 + O(x**4)
See Also
========
sympy.discrete.convolutions
sympy.series.formal.FormalPowerSeriesProduct
"""
if x is None:
x = self.x
if n is None:
return iter(self)
other = sympify(other)
if not isinstance(other, FormalPowerSeries):
raise ValueError("Both series should be an instance of FormalPowerSeries"
" class.")
if self.dir != other.dir:
raise ValueError("Both series should be calculated from the"
" same direction.")
elif self.x0 != other.x0:
raise ValueError("Both series should be calculated about the"
" same point.")
elif self.x != other.x:
raise ValueError("Both series should have the same symbol.")
return FormalPowerSeriesProduct(self, other)
def coeff_bell(self, n):
r"""
self.coeff_bell(n) returns a sequence of Bell polynomials of the second kind.
Note that ``n`` should be an integer.
Bell polynomials of the second kind (sometimes called "partial" Bell
polynomials or incomplete Bell polynomials) are defined as
.. math::
B_{n,k}(x_1, x_2,\dotsc, x_{n-k+1}) =
\sum_{j_1+j_2+j_3+\dotsb=k \atop j_1+2j_2+3j_3+\dotsb=n}
\frac{n!}{j_1!j_2!\dotsb j_{n-k+1}!}
\left(\frac{x_1}{1!} \right)^{j_1}
\left(\frac{x_2}{2!} \right)^{j_2} \dotsb
\left(\frac{x_{n-k+1}}{(n-k+1)!} \right)^{j_{n-k+1}}.
* ``bell(n, k, (x1, x2, ...))`` gives Bell polynomials of the second kind,
`B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1})`.
See Also
========
sympy.functions.combinatorial.numbers.bell
"""
inner_coeffs = [bell(n, j, tuple(self.bell_coeff_seq[:n-j+1])) for j in range(1, n+1)]
k = Dummy('k')
return sequence(tuple(inner_coeffs), (k, 1, oo))
def compose(self, other, x=None, n=6):
r"""
Returns the truncated terms of the formal power series of the composed function,
up to the specified `n`.
If `f` and `g` are two formal power series of two different functions,
then the coefficient sequence ``ak`` of the composed formal power series `fp`
will be as follows.
.. math::
\sum\limits_{k=0}^{n} b_k B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1})
Parameters
==========
n : Number, optional
Specifies the order of the term up to which the polynomial should
be truncated.
Examples
========
>>> from sympy import fps, sin, exp
>>> from sympy.abc import x
>>> f1 = fps(exp(x))
>>> f2 = fps(sin(x))
>>> f1.compose(f2, x).truncate()
1 + x + x**2/2 - x**4/8 - x**5/15 + O(x**6)
>>> f1.compose(f2, x).truncate(8)
1 + x + x**2/2 - x**4/8 - x**5/15 - x**6/240 + x**7/90 + O(x**8)
See Also
========
sympy.functions.combinatorial.numbers.bell
sympy.series.formal.FormalPowerSeriesCompose
References
==========
.. [1] Comtet, Louis: Advanced combinatorics; the art of finite and infinite expansions. Reidel, 1974.
"""
if x is None:
x = self.x
if n is None:
return iter(self)
other = sympify(other)
if not isinstance(other, FormalPowerSeries):
raise ValueError("Both series should be an instance of FormalPowerSeries"
" class.")
if self.dir != other.dir:
raise ValueError("Both series should be calculated from the"
" same direction.")
elif self.x0 != other.x0:
raise ValueError("Both series should be calculated about the"
" same point.")
elif self.x != other.x:
raise ValueError("Both series should have the same symbol.")
if other._eval_term(0).as_coeff_mul(other.x)[0] is not S.Zero:
raise ValueError("The formal power series of the inner function should not have any "
"constant coefficient term.")
return FormalPowerSeriesCompose(self, other)
def inverse(self, x=None, n=6):
r"""
Returns the truncated terms of the inverse of the formal power series,
up to the specified `n`.
If `f` is a formal power series with a nonzero constant term, then the
coefficient sequence ``ak`` of the formal power series of `1/f`
will be as follows.
.. math::
\sum\limits_{k=0}^{n} (-1)^{k} x_0^{-k-1} B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1})
Parameters
==========
n : Number, optional
Specifies the order of the term up to which the polynomial should
be truncated.
Examples
========
>>> from sympy import fps, exp, cos
>>> from sympy.abc import x
>>> f1 = fps(exp(x))
>>> f2 = fps(cos(x))
>>> f1.inverse(x).truncate()
1 - x + x**2/2 - x**3/6 + x**4/24 - x**5/120 + O(x**6)
>>> f2.inverse(x).truncate(8)
1 + x**2/2 + 5*x**4/24 + 61*x**6/720 + O(x**8)
See Also
========
sympy.functions.combinatorial.numbers.bell
sympy.series.formal.FormalPowerSeriesInverse
References
==========
.. [1] Comtet, Louis: Advanced combinatorics; the art of finite and infinite expansions. Reidel, 1974.
"""
if x is None:
x = self.x
if n is None:
return iter(self)
if self._eval_term(0).is_zero:
raise ValueError("Constant coefficient should exist for an inverse of a formal"
" power series to exist.")
return FormalPowerSeriesInverse(self)
def __add__(self, other):
other = sympify(other)
if isinstance(other, FormalPowerSeries):
if self.dir != other.dir:
raise ValueError("Both series should be calculated from the"
" same direction.")
elif self.x0 != other.x0:
raise ValueError("Both series should be calculated about the"
" same point.")
x, y = self.x, other.x
f = self.function + other.function.subs(y, x)
if self.x not in f.free_symbols:
return f
ak = self.ak + other.ak
if self.ak.start > other.ak.start:
seq = other.ak
s, e = other.ak.start, self.ak.start
else:
seq = self.ak
s, e = self.ak.start, other.ak.start
save = Add(*[z[0]*z[1] for z in zip(seq[0:(e - s)], self.xk[s:e])])
ind = self.ind + other.ind + save
return self.func(f, x, self.x0, self.dir, (ak, self.xk, ind))
elif not other.has(self.x):
f = self.function + other
ind = self.ind + other
return self.func(f, self.x, self.x0, self.dir,
(self.ak, self.xk, ind))
return Add(self, other)
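# Addition sketch (illustrative only; term ordering in the printed output may
# vary): adding two FormalPowerSeries combines their coefficient sequences,
# e.g.
#
#     >>> from sympy import fps, sin, cos
#     >>> from sympy.abc import x
#     >>> (fps(sin(x)) + fps(cos(x))).truncate()
#     1 + x - x**2/2 - x**3/6 + x**4/24 + x**5/120 + O(x**6)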
def __radd__(self, other):
return self.__add__(other)
def __neg__(self):
return self.func(-self.function, self.x, self.x0, self.dir,
(-self.ak, self.xk, -self.ind))
def __sub__(self, other):
return self.__add__(-other)
def __rsub__(self, other):
return (-self).__add__(other)
def __mul__(self, other):
other = sympify(other)
if other.has(self.x):
return Mul(self, other)
f = self.function * other
ak = self.ak.coeff_mul(other)
ind = self.ind * other
return self.func(f, self.x, self.x0, self.dir, (ak, self.xk, ind))
def __rmul__(self, other):
return self.__mul__(other)
class FiniteFormalPowerSeries(FormalPowerSeries):
"""Base Class for Product, Compose and Inverse classes"""
def __init__(self, *args):
pass
@property
def ffps(self):
return self.args[0]
@property
def gfps(self):
return self.args[1]
@property
def f(self):
return self.ffps.function
@property
def g(self):
return self.gfps.function
@property
def infinite(self):
raise NotImplementedError("No infinite version for an object of"
" FiniteFormalPowerSeries class.")
def _eval_terms(self, n):
raise NotImplementedError("(%s)._eval_terms()" % self)
def _eval_term(self, pt):
raise NotImplementedError("By the current logic, one can get terms"
"upto a certain order, instead of getting term by term.")
def polynomial(self, n):
return self._eval_terms(n)
def truncate(self, n=6):
ffps = self.ffps
pt_xk = ffps.xk.coeff(n)
x, x0 = ffps.x, ffps.x0
return self.polynomial(n) + Order(pt_xk, (x, x0))
def _eval_derivative(self, x):
raise NotImplementedError
def integrate(self, x):
raise NotImplementedError
class FormalPowerSeriesProduct(FiniteFormalPowerSeries):
"""Represents the product of two formal power series of two functions.
No computation is performed. Terms are calculated using a term by term logic,
instead of a point by point logic.
There are two differences between a :obj:`FormalPowerSeries` object and a
:obj:`FormalPowerSeriesProduct` object. The first argument contains the two
functions involved in the product. Also, the coefficient sequence contains
the coefficient sequences of the formal power series of both involved functions.
See Also
========
sympy.series.formal.FormalPowerSeries
sympy.series.formal.FiniteFormalPowerSeries
"""
def __init__(self, *args):
ffps, gfps = self.ffps, self.gfps
k = ffps.ak.variables[0]
self.coeff1 = sequence(ffps.ak.formula, (k, 0, oo))
k = gfps.ak.variables[0]
self.coeff2 = sequence(gfps.ak.formula, (k, 0, oo))
@property
def function(self):
"""Function of the product of two formal power series."""
return self.f * self.g
def _eval_terms(self, n):
"""
Returns the first `n` terms of the product formal power series.
Term by term logic is implemented here.
Examples
========
>>> from sympy import fps, sin, exp
>>> from sympy.abc import x
>>> f1 = fps(sin(x))
>>> f2 = fps(exp(x))
>>> fprod = f1.product(f2, x)
>>> fprod._eval_terms(4)
x**3/3 + x**2 + x
See Also
========
sympy.series.formal.FormalPowerSeries.product
"""
coeff1, coeff2 = self.coeff1, self.coeff2
aks = convolution(coeff1[:n], coeff2[:n])
terms = []
for i in range(0, n):
terms.append(aks[i] * self.ffps.xk.coeff(i))
return Add(*terms)
class FormalPowerSeriesCompose(FiniteFormalPowerSeries):
"""Represents the composed formal power series of two functions.
No computation is performed. Terms are calculated using a term by term logic,
instead of a point by point logic.
There are two differences between a :obj:`FormalPowerSeries` object and a
:obj:`FormalPowerSeriesCompose` object. The first argument contains the outer
function and the inner function involved in the composition. Also, the
coefficient sequence contains the generic sequence which is to be multiplied
by a custom ``bell_seq`` finite sequence. The finite terms will then be added up to
get the final terms.
See Also
========
sympy.series.formal.FormalPowerSeries
sympy.series.formal.FiniteFormalPowerSeries
"""
@property
def function(self):
"""Function for the composed formal power series."""
f, g, x = self.f, self.g, self.ffps.x
return f.subs(x, g)
def _eval_terms(self, n):
"""
Returns the first `n` terms of the composed formal power series.
Term by term logic is implemented here.
The coefficient sequence of the :obj:`FormalPowerSeriesCompose` object is the generic sequence.
It is multiplied by ``bell_seq`` to get a sequence, whose terms are added up to get
the final terms for the polynomial.
Examples
========
>>> from sympy import fps, sin, exp
>>> from sympy.abc import x
>>> f1 = fps(exp(x))
>>> f2 = fps(sin(x))
>>> fcomp = f1.compose(f2, x)
>>> fcomp._eval_terms(6)
-x**5/15 - x**4/8 + x**2/2 + x + 1
>>> fcomp._eval_terms(8)
x**7/90 - x**6/240 - x**5/15 - x**4/8 + x**2/2 + x + 1
See Also
========
sympy.series.formal.FormalPowerSeries.compose
sympy.series.formal.FormalPowerSeries.coeff_bell
"""
ffps, gfps = self.ffps, self.gfps
terms = [ffps.zero_coeff()]
for i in range(1, n):
bell_seq = gfps.coeff_bell(i)
seq = (ffps.bell_coeff_seq * bell_seq)
terms.append(Add(*(seq[:i])) / ffps.fact_seq[i-1] * ffps.xk.coeff(i))
return Add(*terms)
class FormalPowerSeriesInverse(FiniteFormalPowerSeries):
"""Represents the Inverse of a formal power series.
No computation is performed. Terms are calculated using a term by term logic,
instead of a point by point logic.
There is a single difference between a :obj:`FormalPowerSeries` object and a
:obj:`FormalPowerSeriesInverse` object. The coefficient sequence contains the
generic sequence which is to be multiplied by a custom ``bell_seq`` finite sequence.
The finite terms will then be added up to get the final terms.
See Also
========
sympy.series.formal.FormalPowerSeries
sympy.series.formal.FiniteFormalPowerSeries
"""
def __init__(self, *args):
ffps = self.ffps
k = ffps.xk.variables[0]
inv = ffps.zero_coeff()
inv_seq = sequence(inv ** (-(k + 1)), (k, 1, oo))
self.aux_seq = ffps.sign_seq * ffps.fact_seq * inv_seq
@property
def function(self):
"""Function for the inverse of a formal power series."""
f = self.f
return 1 / f
@property
def g(self):
raise ValueError("Only one function is considered while performing"
"inverse of a formal power series.")
@property
def gfps(self):
raise ValueError("Only one function is considered while performing"
"inverse of a formal power series.")
def _eval_terms(self, n):
"""
Returns the first `n` terms of the inverse of the formal power series.
Term by term logic is implemented here.
The coefficient sequence of the `FormalPowerSeriesInverse` object is the generic sequence.
It is multiplied by ``bell_seq`` to get a sequence, whose terms are added up to get
the final terms for the polynomial.
Examples
========
>>> from sympy import fps, exp, cos
>>> from sympy.abc import x
>>> f1 = fps(exp(x))
>>> f2 = fps(cos(x))
>>> finv1, finv2 = f1.inverse(), f2.inverse()
>>> finv1._eval_terms(6)
-x**5/120 + x**4/24 - x**3/6 + x**2/2 - x + 1
>>> finv2._eval_terms(8)
61*x**6/720 + 5*x**4/24 + x**2/2 + 1
See Also
========
sympy.series.formal.FormalPowerSeries.inverse
sympy.series.formal.FormalPowerSeries.coeff_bell
"""
ffps = self.ffps
terms = [ffps.zero_coeff()]
for i in range(1, n):
bell_seq = ffps.coeff_bell(i)
seq = (self.aux_seq * bell_seq)
terms.append(Add(*(seq[:i])) / ffps.fact_seq[i-1] * ffps.xk.coeff(i))
return Add(*terms)
def fps(f, x=None, x0=0, dir=1, hyper=True, order=4, rational=True, full=False):
"""Generates Formal Power Series of f.
Returns the formal series expansion of ``f`` around ``x = x0``
with respect to ``x`` in the form of a ``FormalPowerSeries`` object.
Formal Power Series is represented using an explicit formula
computed using different algorithms.
See :func:`compute_fps` for more details regarding the computation
of the formula.
Parameters
==========
x : Symbol, optional
If x is None and ``f`` is univariate, the univariate symbol will be
supplied; otherwise an error will be raised.
x0 : number, optional
Point to perform series expansion about. Default is 0.
dir : {1, -1, '+', '-'}, optional
If dir is 1 or '+' the series is calculated from the right and
for -1 or '-' the series is calculated from the left. For smooth
functions this flag will not alter the results. Default is 1.
hyper : {True, False}, optional
Set hyper to False to skip the hypergeometric algorithm.
By default it is set to True.
order : int, optional
Order of the derivative of ``f``. Default is 4.
rational : {True, False}, optional
Set rational to False to skip rational algorithm. By default it is set
to True.
full : {True, False}, optional
Set full to True to increase the range of rational algorithm.
See :func:`rational_algorithm` for details. By default it is set to
False.
Examples
========
>>> from sympy import fps, ln, atan, sin
>>> from sympy.abc import x, n
Rational Functions
>>> fps(ln(1 + x)).truncate()
x - x**2/2 + x**3/3 - x**4/4 + x**5/5 + O(x**6)
>>> fps(atan(x), full=True).truncate()
x - x**3/3 + x**5/5 + O(x**6)
Symbolic Functions
>>> fps(x**n*sin(x**2), x).truncate(8)
-x**(n + 6)/6 + x**(n + 2) + O(x**(n + 8))
See Also
========
sympy.series.formal.FormalPowerSeries
sympy.series.formal.compute_fps
"""
f = sympify(f)
if x is None:
free = f.free_symbols
if len(free) == 1:
x = free.pop()
elif not free:
return f
else:
raise NotImplementedError("multivariate formal power series")
result = compute_fps(f, x, x0, dir, hyper, order, rational, full)
if result is None:
return f
return FormalPowerSeries(f, x, x0, dir, result)
|
ea177139aa1de3f6c399e78491604b67c106794bccb7b6a951acdfb9df5812c5 | from sympy.core import S, sympify, Expr, Rational, Dummy
from sympy.core import Add, Mul, expand_power_base, expand_log
from sympy.core.cache import cacheit
from sympy.core.compatibility import default_sort_key, is_sequence
from sympy.core.containers import Tuple
from sympy.sets.sets import Complement
from sympy.utilities.iterables import uniq
class Order(Expr):
r""" Represents the limiting behavior of some function
The order of a function characterizes the function based on the limiting
behavior of the function as it goes to some limit. Only taking the limit
point to be a number is currently supported. This is expressed in
big O notation [1]_.
The formal definition for the order of a function `g(x)` about a point `a`
is such that `g(x) = O(f(x))` as `x \rightarrow a` if and only if for any
`\delta > 0` there exists a `M > 0` such that `|g(x)| \leq M|f(x)|` for
`|x-a| < \delta`. This is equivalent to `\lim_{x \rightarrow a}
\sup |g(x)/f(x)| < \infty`.
Let's illustrate it on the following example by taking the expansion of
`\sin(x)` about 0:
.. math ::
\sin(x) = x - x^3/3! + O(x^5)
where in this case `O(x^5) = x^5/5! - x^7/7! + \cdots`. By the definition
of `O`, for any `\delta > 0` there is an `M` such that:
.. math ::
\left|x^5/5! - x^7/7! + \cdots\right| \leq M|x^5| \text{ for } |x| < \delta
or by the alternate definition:
.. math ::
\lim_{x \rightarrow 0} \left| \left(x^5/5! - x^7/7! + \cdots\right) / x^5 \right| < \infty
which surely is true, because
.. math ::
\lim_{x \rightarrow 0} \left| \left(x^5/5! - x^7/7! + \cdots\right) / x^5 \right| = 1/5!
As it is usually used, the order of a function can be intuitively thought
of as representing all terms of powers greater than the one specified. For
example, `O(x^3)` corresponds to any terms proportional to `x^3,
x^4,\ldots` and any higher power. For a polynomial, this leaves terms
proportional to `x^2`, `x` and constants.
Examples
========
>>> from sympy import O, oo, cos, pi
>>> from sympy.abc import x, y
>>> O(x + x**2)
O(x)
>>> O(x + x**2, (x, 0))
O(x)
>>> O(x + x**2, (x, oo))
O(x**2, (x, oo))
>>> O(1 + x*y)
O(1, x, y)
>>> O(1 + x*y, (x, 0), (y, 0))
O(1, x, y)
>>> O(1 + x*y, (x, oo), (y, oo))
O(x*y, (x, oo), (y, oo))
>>> O(1) in O(1, x)
True
>>> O(1, x) in O(1)
False
>>> O(x) in O(1, x)
True
>>> O(x**2) in O(x)
True
>>> O(x)*x
O(x**2)
>>> O(x) - O(x)
O(x)
>>> O(cos(x))
O(1)
>>> O(cos(x), (x, pi/2))
O(x - pi/2, (x, pi/2))
References
==========
.. [1] `Big O notation <https://en.wikipedia.org/wiki/Big_O_notation>`_
Notes
=====
In ``O(f(x), x)`` the expression ``f(x)`` is assumed to have a leading
term. ``O(f(x), x)`` is automatically transformed to
``O(f(x).as_leading_term(x),x)``.
``O(expr*f(x), x)`` is ``O(f(x), x)``
``O(expr, x)`` is ``O(1)``
``O(0, x)`` is 0.
Multivariate O is also supported:
``O(f(x, y), x, y)`` is transformed to
``O(f(x, y).as_leading_term(x,y).as_leading_term(y), x, y)``
In the multivariate case, it is assumed the limits w.r.t. the various
symbols commute.
If no symbols are passed then all symbols in the expression are used
and the limit point is assumed to be zero.
"""
is_Order = True
__slots__ = ()
@cacheit
def __new__(cls, expr, *args, **kwargs):
expr = sympify(expr)
if not args:
if expr.is_Order:
variables = expr.variables
point = expr.point
else:
variables = list(expr.free_symbols)
point = [S.Zero]*len(variables)
else:
args = list(args if is_sequence(args) else [args])
variables, point = [], []
if is_sequence(args[0]):
for a in args:
v, p = list(map(sympify, a))
variables.append(v)
point.append(p)
else:
variables = list(map(sympify, args))
point = [S.Zero]*len(variables)
if not all(v.is_symbol for v in variables):
raise TypeError('Variables are not symbols, got %s' % variables)
if len(list(uniq(variables))) != len(variables):
raise ValueError('Variables are supposed to be unique symbols, got %s' % variables)
if expr.is_Order:
expr_vp = dict(expr.args[1:])
new_vp = dict(expr_vp)
vp = dict(zip(variables, point))
for v, p in vp.items():
if v in new_vp.keys():
if p != new_vp[v]:
raise NotImplementedError(
"Mixing Order at different points is not supported.")
else:
new_vp[v] = p
if set(expr_vp.keys()) == set(new_vp.keys()):
return expr
else:
variables = list(new_vp.keys())
point = [new_vp[v] for v in variables]
if expr is S.NaN:
return S.NaN
if any(x in p.free_symbols for x in variables for p in point):
raise ValueError('Got %s as a point.' % point)
if variables:
if any(p != point[0] for p in point):
raise NotImplementedError(
"Multivariable orders at different points are not supported.")
if point[0] is S.Infinity:
s = {k: 1/Dummy() for k in variables}
rs = {1/v: 1/k for k, v in s.items()}
elif point[0] is S.NegativeInfinity:
s = {k: -1/Dummy() for k in variables}
rs = {-1/v: -1/k for k, v in s.items()}
elif point[0] is not S.Zero:
s = {k: Dummy() + point[0] for k in variables}
rs = {v - point[0]: k - point[0] for k, v in s.items()}
else:
s = ()
rs = ()
expr = expr.subs(s)
if expr.is_Add:
expr = expr.factor()
if s:
args = tuple([r[0] for r in rs.items()])
else:
args = tuple(variables)
if len(variables) > 1:
# XXX: better way? We need this expand() to
# workaround e.g: expr = x*(x + y).
# (x*(x + y)).as_leading_term(x, y) currently returns
# x*y (wrong order term!). That's why we want to deal with
# expand()'ed expr (handled in "if expr.is_Add" branch below).
expr = expr.expand()
old_expr = None
while old_expr != expr:
old_expr = expr
if expr.is_Add:
lst = expr.extract_leading_order(args)
expr = Add(*[f.expr for (e, f) in lst])
elif expr:
expr = expr.as_leading_term(*args)
expr = expr.as_independent(*args, as_Add=False)[1]
expr = expand_power_base(expr)
expr = expand_log(expr)
if len(args) == 1:
# The definition of O(f(x)) symbol explicitly stated that
# the argument of f(x) is irrelevant. That's why we can
# combine some power exponents (only "on top" of the
# expression tree for f(x)), e.g.:
# x**p * (-x)**q -> x**(p+q) for real p, q.
x = args[0]
margs = list(Mul.make_args(
expr.as_independent(x, as_Add=False)[1]))
for i, t in enumerate(margs):
if t.is_Pow:
b, q = t.args
if b in (x, -x) and q.is_real and not q.has(x):
margs[i] = x**q
elif b.is_Pow and not b.exp.has(x):
b, r = b.args
if b in (x, -x) and r.is_real:
margs[i] = x**(r*q)
elif b.is_Mul and b.args[0] is S.NegativeOne:
b = -b
if b.is_Pow and not b.exp.has(x):
b, r = b.args
if b in (x, -x) and r.is_real:
margs[i] = x**(r*q)
expr = Mul(*margs)
expr = expr.subs(rs)
if expr.is_Order:
expr = expr.expr
if not expr.has(*variables) and not expr.is_zero:
expr = S.One
# create Order instance:
vp = dict(zip(variables, point))
variables.sort(key=default_sort_key)
point = [vp[v] for v in variables]
args = (expr,) + Tuple(*zip(variables, point))
obj = Expr.__new__(cls, *args)
return obj
def _eval_nseries(self, x, n, logx, cdir=0):
return self
@property
def expr(self):
return self.args[0]
@property
def variables(self):
if self.args[1:]:
return tuple(x[0] for x in self.args[1:])
else:
return ()
@property
def point(self):
if self.args[1:]:
return tuple(x[1] for x in self.args[1:])
else:
return ()
@property
def free_symbols(self):
return self.expr.free_symbols | set(self.variables)
def _eval_power(b, e):
if e.is_Number and e.is_nonnegative:
return b.func(b.expr ** e, *b.args[1:])
if e == O(1):
return b
return
def as_expr_variables(self, order_symbols):
if order_symbols is None:
order_symbols = self.args[1:]
else:
if (not all(o[1] == order_symbols[0][1] for o in order_symbols) and
not all(p == self.point[0] for p in self.point)): # pragma: no cover
raise NotImplementedError('Order at points other than 0 '
'or oo not supported, got %s as a point.' % self.point)
if order_symbols and order_symbols[0][1] != self.point[0]:
raise NotImplementedError(
"Multiplying Order at different points is not supported.")
order_symbols = dict(order_symbols)
for s, p in dict(self.args[1:]).items():
if s not in order_symbols.keys():
order_symbols[s] = p
order_symbols = sorted(order_symbols.items(), key=lambda x: default_sort_key(x[0]))
return self.expr, tuple(order_symbols)
def removeO(self):
return S.Zero
def getO(self):
return self
@cacheit
def contains(self, expr):
r"""
Return True if expr belongs to Order(self.expr, \*self.variables).
Return False if self belongs to expr.
Return None if the inclusion relation cannot be determined
(e.g. when self and expr have different symbols).
"""
from sympy import powsimp
if expr.is_zero:
return True
if expr is S.NaN:
return False
point = self.point[0] if self.point else S.Zero
if expr.is_Order:
if (any(p != point for p in expr.point) or
any(p != point for p in self.point)):
return None
if expr.expr == self.expr:
# O(1) + O(1), O(1) + O(1, x), etc.
return all([x in self.args[1:] for x in expr.args[1:]])
if expr.expr.is_Add:
return all([self.contains(x) for x in expr.expr.args])
if self.expr.is_Add and point.is_zero:
return any([self.func(x, *self.args[1:]).contains(expr)
for x in self.expr.args])
if self.variables and expr.variables:
common_symbols = tuple(
[s for s in self.variables if s in expr.variables])
elif self.variables:
common_symbols = self.variables
else:
common_symbols = expr.variables
if not common_symbols:
return None
if (self.expr.is_Pow and len(self.variables) == 1
and self.variables == expr.variables):
symbol = self.variables[0]
other = expr.expr.as_independent(symbol, as_Add=False)[1]
if (other.is_Pow and other.base == symbol and
self.expr.base == symbol):
if point.is_zero:
rv = (self.expr.exp - other.exp).is_nonpositive
if point.is_infinite:
rv = (self.expr.exp - other.exp).is_nonnegative
if rv is not None:
return rv
r = None
ratio = self.expr/expr.expr
ratio = powsimp(ratio, deep=True, combine='exp')
for s in common_symbols:
from sympy.series.limits import Limit
l = Limit(ratio, s, point).doit(heuristics=False)
if not isinstance(l, Limit):
l = l != 0
else:
l = None
if r is None:
r = l
else:
if r != l:
return
return r
if self.expr.is_Pow and len(self.variables) == 1:
symbol = self.variables[0]
other = expr.as_independent(symbol, as_Add=False)[1]
if (other.is_Pow and other.base == symbol and
self.expr.base == symbol):
if point.is_zero:
rv = (self.expr.exp - other.exp).is_nonpositive
if point.is_infinite:
rv = (self.expr.exp - other.exp).is_nonnegative
if rv is not None:
return rv
obj = self.func(expr, *self.args[1:])
return self.contains(obj)
def __contains__(self, other):
result = self.contains(other)
if result is None:
raise TypeError('contains did not evaluate to a bool')
return result
def _eval_subs(self, old, new):
if old in self.variables:
newexpr = self.expr.subs(old, new)
i = self.variables.index(old)
newvars = list(self.variables)
newpt = list(self.point)
if new.is_symbol:
newvars[i] = new
else:
syms = new.free_symbols
if len(syms) == 1 or old in syms:
if old in syms:
var = self.variables[i]
else:
var = syms.pop()
# First, try to substitute self.point in the "new"
# expr to see if this is a fixed point.
# E.g. O(y).subs(y, sin(x))
point = new.subs(var, self.point[i])
if point != self.point[i]:
from sympy.solvers.solveset import solveset
d = Dummy()
sol = solveset(old - new.subs(var, d), d)
if isinstance(sol, Complement):
e1 = sol.args[0]
e2 = sol.args[1]
sol = set(e1) - set(e2)
res = [dict(zip((d, ), sol))]
point = d.subs(res[0]).limit(old, self.point[i])
newvars[i] = var
newpt[i] = point
elif old not in syms:
del newvars[i], newpt[i]
if not syms and new == self.point[i]:
newvars.extend(syms)
newpt.extend([S.Zero]*len(syms))
else:
return
return Order(newexpr, *zip(newvars, newpt))
def _eval_conjugate(self):
expr = self.expr._eval_conjugate()
if expr is not None:
return self.func(expr, *self.args[1:])
def _eval_derivative(self, x):
return self.func(self.expr.diff(x), *self.args[1:]) or self
def _eval_transpose(self):
expr = self.expr._eval_transpose()
if expr is not None:
return self.func(expr, *self.args[1:])
def _sage_(self):
#XXX: SAGE doesn't have Order yet. Let's return 0 instead.
return Rational(0)._sage_()
def __neg__(self):
return self
O = Order
|
3f30f43004cc3632e33e2f465b919018be94a9cd944feb8fc27ce18bb4e4247f | def finite_diff(expression, variable, increment=1):
"""
Takes as input a polynomial expression and the variable used to construct
it, and returns the difference between the function's value when the input is
incremented by 1 and the original function value. If you want an increment
other than one, supply it as a third argument.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.series.kauers import finite_diff
>>> finite_diff(x**2, x)
2*x + 1
>>> finite_diff(y**3 + 2*y**2 + 3*y + 4, y)
3*y**2 + 7*y + 6
>>> finite_diff(x**2 + 3*x + 8, x, 2)
4*x + 10
>>> finite_diff(z**3 + 8*z, z, 3)
9*z**2 + 27*z + 51
"""
expression = expression.expand()
expression2 = expression.subs(variable, variable + increment)
expression2 = expression2.expand()
return expression2 - expression
def finite_diff_kauers(sum):
"""
Takes as input a Sum instance and returns the difference between the sum
with the upper index incremented by 1 and the original sum. For example,
if S(n) is a sum, then finite_diff_kauers will return S(n + 1) - S(n).
Examples
========
>>> from sympy.series.kauers import finite_diff_kauers
>>> from sympy import Sum
>>> from sympy.abc import x, y, m, n, k
>>> finite_diff_kauers(Sum(k, (k, 1, n)))
n + 1
>>> finite_diff_kauers(Sum(1/k, (k, 1, n)))
1/(n + 1)
>>> finite_diff_kauers(Sum((x*y**2), (x, 1, n), (y, 1, m)))
(m + 1)**2*(n + 1)
>>> finite_diff_kauers(Sum((x*y), (x, 1, m), (y, 1, n)))
(m + 1)*(n + 1)
"""
function = sum.function
for l in sum.limits:
function = function.subs(l[0], l[- 1] + 1)
return function
|
0607859c45509e32175013678990c98684543b1f679756914d2de9fad97e066a | """
Expand Hypergeometric (and Meijer G) functions into named
special functions.
The algorithm for doing this uses a collection of lookup tables of
hypergeometric functions, and various of their properties, to expand
many hypergeometric functions in terms of special functions.
It is based on the following paper:
Kelly B. Roach. Meijer G Function Representations.
In: Proceedings of the 1997 International Symposium on Symbolic and
Algebraic Computation, pages 205-211, New York, 1997. ACM.
It is described in great(er) detail in the Sphinx documentation.
"""
# SUMMARY OF EXTENSIONS FOR MEIJER G FUNCTIONS
#
# o z**rho G(ap, bq; z) = G(ap + rho, bq + rho; z)
#
# o denote z*d/dz by D
#
# o It is helpful to keep in mind that ap and bq play essentially symmetric
# roles: G(1/z) has slightly altered parameters, with ap and bq interchanged.
#
# o There are four shift operators:
# A_J = b_J - D, J = 1, ..., n
# B_J = 1 - a_j + D, J = 1, ..., m
# C_J = -b_J + D, J = m+1, ..., q
# D_J = a_J - 1 - D, J = n+1, ..., p
#
# A_J, C_J increment b_J
# B_J, D_J decrement a_J
#
# o The corresponding four inverse-shift operators are defined if there
# is no cancellation. Thus e.g. an index a_J (upper or lower) can be
# incremented if a_J != b_i for i = 1, ..., q.
#
# o Order reduction: if b_j - a_i is a non-negative integer, where
# j <= m and i > n, the corresponding quotient of gamma functions reduces
# to a polynomial. Hence the G function can be expressed using a G-function
# of lower order.
# Similarly if j > m and i <= n.
#
# Secondly, there are paired index theorems [Adamchik, The evaluation of
# integrals of Bessel functions via G-function identities]. Suppose there
# are three parameters a, b, c, where a is an a_i, i <= n, b is a b_j,
# j <= m and c is a denominator parameter (i.e. a_i, i > n or b_j, j > m).
# Suppose further all three differ by integers.
# Then the order can be reduced.
# TODO work this out in detail.
#
# o An index quadruple is called suitable if its order cannot be reduced.
# If there exists a sequence of shift operators transforming one index
# quadruple into another, we say one is reachable from the other.
#
# o Deciding if one index quadruple is reachable from another is tricky. For
# this reason, we use hand-built routines to match and instantiate formulas.
#
from collections import defaultdict
from itertools import product
from sympy import SYMPY_DEBUG
from sympy.core import (S, Dummy, symbols, sympify, Tuple, expand, I, pi, Mul,
EulerGamma, oo, zoo, expand_func, Add, nan, Expr, Rational)
from sympy.core.compatibility import default_sort_key, reduce
from sympy.core.mod import Mod
from sympy.functions import (exp, sqrt, root, log, lowergamma, cos,
besseli, gamma, uppergamma, expint, erf, sin, besselj, Ei, Ci, Si, Shi,
sinh, cosh, Chi, fresnels, fresnelc, polar_lift, exp_polar, floor, ceiling,
rf, factorial, lerchphi, Piecewise, re, elliptic_k, elliptic_e)
from sympy.functions.elementary.complexes import polarify, unpolarify
from sympy.functions.special.hyper import (hyper, HyperRep_atanh,
HyperRep_power1, HyperRep_power2, HyperRep_log1, HyperRep_asin1,
HyperRep_asin2, HyperRep_sqrts1, HyperRep_sqrts2, HyperRep_log2,
HyperRep_cosasin, HyperRep_sinasin, meijerg)
from sympy.polys import poly, Poly
from sympy.series import residue
from sympy.simplify import simplify # type: ignore
from sympy.simplify.powsimp import powdenest
from sympy.utilities.iterables import sift
# function to define "buckets"
def _mod1(x):
# TODO see if this can work as Mod(x, 1); this will require
# different handling of the "buckets" since these need to
# be sorted and that fails when there is a mixture of
# integers and expressions with parameters. With the current
# Mod behavior, Mod(k, 1) == Mod(1, 1) == 0 if k is an integer.
# Although the sorting can be done with Basic.compare, this may
# still require different handling of the sorted buckets.
if x.is_Number:
return Mod(x, 1)
c, x = x.as_coeff_Add()
return Mod(c, 1) + x
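# Behaviour sketch for ``_mod1`` (illustrative): numbers are reduced mod 1,
# while any symbolic part is carried through unchanged, e.g.
#
#     >>> from sympy import Rational
#     >>> from sympy.abc import a
#     >>> _mod1(Rational(7, 3))
#     1/3
#     >>> _mod1(a + Rational(5, 2))
#     a + 1/2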
# leave add formulae at the top for easy reference
def add_formulae(formulae):
""" Create our knowledge base. """
from sympy.matrices import Matrix
a, b, c, z = symbols('a b c, z', cls=Dummy)
def add(ap, bq, res):
func = Hyper_Function(ap, bq)
formulae.append(Formula(func, z, res, (a, b, c)))
def addb(ap, bq, B, C, M):
func = Hyper_Function(ap, bq)
formulae.append(Formula(func, z, None, (a, b, c), B, C, M))
# Luke, Y. L. (1969), The Special Functions and Their Approximations,
# Volume 1, section 6.2
# 0F0
add((), (), exp(z))
# 1F0
add((a, ), (), HyperRep_power1(-a, z))
# 2F1
addb((a, a - S.Half), (2*a, ),
Matrix([HyperRep_power2(a, z),
HyperRep_power2(a + S.Half, z)/2]),
Matrix([[1, 0]]),
Matrix([[(a - S.Half)*z/(1 - z), (S.Half - a)*z/(1 - z)],
[a/(1 - z), a*(z - 2)/(1 - z)]]))
addb((1, 1), (2, ),
Matrix([HyperRep_log1(z), 1]), Matrix([[-1/z, 0]]),
Matrix([[0, z/(z - 1)], [0, 0]]))
addb((S.Half, 1), (S('3/2'), ),
Matrix([HyperRep_atanh(z), 1]),
Matrix([[1, 0]]),
Matrix([[Rational(-1, 2), 1/(1 - z)/2], [0, 0]]))
addb((S.Half, S.Half), (S('3/2'), ),
Matrix([HyperRep_asin1(z), HyperRep_power1(Rational(-1, 2), z)]),
Matrix([[1, 0]]),
Matrix([[Rational(-1, 2), S.Half], [0, z/(1 - z)/2]]))
addb((a, S.Half + a), (S.Half, ),
Matrix([HyperRep_sqrts1(-a, z), -HyperRep_sqrts2(-a - S.Half, z)]),
Matrix([[1, 0]]),
Matrix([[0, -a],
[z*(-2*a - 1)/2/(1 - z), S.Half - z*(-2*a - 1)/(1 - z)]]))
# A. P. Prudnikov, Yu. A. Brychkov and O. I. Marichev (1990).
# Integrals and Series: More Special Functions, Vol. 3.
# Gordon and Breach Science Publisher
addb([a, -a], [S.Half],
Matrix([HyperRep_cosasin(a, z), HyperRep_sinasin(a, z)]),
Matrix([[1, 0]]),
Matrix([[0, -a], [a*z/(1 - z), 1/(1 - z)/2]]))
addb([1, 1], [3*S.Half],
Matrix([HyperRep_asin2(z), 1]), Matrix([[1, 0]]),
Matrix([[(z - S.Half)/(1 - z), 1/(1 - z)/2], [0, 0]]))
# Complete elliptic integrals K(z) and E(z), both a 2F1 function
addb([S.Half, S.Half], [S.One],
Matrix([elliptic_k(z), elliptic_e(z)]),
Matrix([[2/pi, 0]]),
Matrix([[Rational(-1, 2), -1/(2*z-2)],
[Rational(-1, 2), S.Half]]))
addb([Rational(-1, 2), S.Half], [S.One],
Matrix([elliptic_k(z), elliptic_e(z)]),
Matrix([[0, 2/pi]]),
Matrix([[Rational(-1, 2), -1/(2*z-2)],
[Rational(-1, 2), S.Half]]))
# 3F2
addb([Rational(-1, 2), 1, 1], [S.Half, 2],
Matrix([z*HyperRep_atanh(z), HyperRep_log1(z), 1]),
Matrix([[Rational(-2, 3), -S.One/(3*z), Rational(2, 3)]]),
Matrix([[S.Half, 0, z/(1 - z)/2],
[0, 0, z/(z - 1)],
[0, 0, 0]]))
# actually the formula for 3/2 is much nicer ...
addb([Rational(-1, 2), 1, 1], [2, 2],
Matrix([HyperRep_power1(S.Half, z), HyperRep_log2(z), 1]),
Matrix([[Rational(4, 9) - 16/(9*z), 4/(3*z), 16/(9*z)]]),
Matrix([[z/2/(z - 1), 0, 0], [1/(2*(z - 1)), 0, S.Half], [0, 0, 0]]))
# 1F1
addb([1], [b], Matrix([z**(1 - b) * exp(z) * lowergamma(b - 1, z), 1]),
Matrix([[b - 1, 0]]), Matrix([[1 - b + z, 1], [0, 0]]))
addb([a], [2*a],
Matrix([z**(S.Half - a)*exp(z/2)*besseli(a - S.Half, z/2)
* gamma(a + S.Half)/4**(S.Half - a),
z**(S.Half - a)*exp(z/2)*besseli(a + S.Half, z/2)
* gamma(a + S.Half)/4**(S.Half - a)]),
Matrix([[1, 0]]),
Matrix([[z/2, z/2], [z/2, (z/2 - 2*a)]]))
mz = polar_lift(-1)*z
addb([a], [a + 1],
Matrix([mz**(-a)*a*lowergamma(a, mz), a*exp(z)]),
Matrix([[1, 0]]),
Matrix([[-a, 1], [0, z]]))
# This one is redundant.
add([Rational(-1, 2)], [S.Half], exp(z) - sqrt(pi*z)*(-I)*erf(I*sqrt(z)))
# Added to get nice results for Laplace transform of Fresnel functions
# http://functions.wolfram.com/07.22.03.6437.01
# Basic rule
#add([1], [Rational(3, 4), Rational(5, 4)],
# sqrt(pi) * (cos(2*sqrt(polar_lift(-1)*z))*fresnelc(2*root(polar_lift(-1)*z,4)/sqrt(pi)) +
# sin(2*sqrt(polar_lift(-1)*z))*fresnels(2*root(polar_lift(-1)*z,4)/sqrt(pi)))
# / (2*root(polar_lift(-1)*z,4)))
# Manually tuned rule
addb([1], [Rational(3, 4), Rational(5, 4)],
Matrix([ sqrt(pi)*(I*sinh(2*sqrt(z))*fresnels(2*root(z, 4)*exp(I*pi/4)/sqrt(pi))
+ cosh(2*sqrt(z))*fresnelc(2*root(z, 4)*exp(I*pi/4)/sqrt(pi)))
* exp(-I*pi/4)/(2*root(z, 4)),
sqrt(pi)*root(z, 4)*(sinh(2*sqrt(z))*fresnelc(2*root(z, 4)*exp(I*pi/4)/sqrt(pi))
+ I*cosh(2*sqrt(z))*fresnels(2*root(z, 4)*exp(I*pi/4)/sqrt(pi)))
*exp(-I*pi/4)/2,
1 ]),
Matrix([[1, 0, 0]]),
Matrix([[Rational(-1, 4), 1, Rational(1, 4)],
[ z, Rational(1, 4), 0],
[ 0, 0, 0]]))
# 2F2
addb([S.Half, a], [Rational(3, 2), a + 1],
Matrix([a/(2*a - 1)*(-I)*sqrt(pi/z)*erf(I*sqrt(z)),
a/(2*a - 1)*(polar_lift(-1)*z)**(-a)*
lowergamma(a, polar_lift(-1)*z),
a/(2*a - 1)*exp(z)]),
Matrix([[1, -1, 0]]),
Matrix([[Rational(-1, 2), 0, 1], [0, -a, 1], [0, 0, z]]))
# We make a "basis" of four functions instead of three, and give EulerGamma
# an extra slot (it could just be a coefficient to 1). The advantage is
# that this way Polys will not see multivariate polynomials (it treats
# EulerGamma as an indeterminate), which is *way* faster.
addb([1, 1], [2, 2],
Matrix([Ei(z) - log(z), exp(z), 1, EulerGamma]),
Matrix([[1/z, 0, 0, -1/z]]),
Matrix([[0, 1, -1, 0], [0, z, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]))
# 0F1
add((), (S.Half, ), cosh(2*sqrt(z)))
addb([], [b],
Matrix([gamma(b)*z**((1 - b)/2)*besseli(b - 1, 2*sqrt(z)),
gamma(b)*z**(1 - b/2)*besseli(b, 2*sqrt(z))]),
Matrix([[1, 0]]), Matrix([[0, 1], [z, (1 - b)]]))
# 0F3
x = 4*z**Rational(1, 4)
def fp(a, z):
return besseli(a, x) + besselj(a, x)
def fm(a, z):
return besseli(a, x) - besselj(a, x)
# TODO branching
addb([], [S.Half, a, a + S.Half],
Matrix([fp(2*a - 1, z), fm(2*a, z)*z**Rational(1, 4),
fm(2*a - 1, z)*sqrt(z), fp(2*a, z)*z**Rational(3, 4)])
* 2**(-2*a)*gamma(2*a)*z**((1 - 2*a)/4),
Matrix([[1, 0, 0, 0]]),
Matrix([[0, 1, 0, 0],
[0, S.Half - a, 1, 0],
[0, 0, S.Half, 1],
[z, 0, 0, 1 - a]]))
x = 2*(4*z)**Rational(1, 4)*exp_polar(I*pi/4)
addb([], [a, a + S.Half, 2*a],
(2*sqrt(polar_lift(-1)*z))**(1 - 2*a)*gamma(2*a)**2 *
Matrix([besselj(2*a - 1, x)*besseli(2*a - 1, x),
x*(besseli(2*a, x)*besselj(2*a - 1, x)
- besseli(2*a - 1, x)*besselj(2*a, x)),
x**2*besseli(2*a, x)*besselj(2*a, x),
x**3*(besseli(2*a, x)*besselj(2*a - 1, x)
+ besseli(2*a - 1, x)*besselj(2*a, x))]),
Matrix([[1, 0, 0, 0]]),
Matrix([[0, Rational(1, 4), 0, 0],
[0, (1 - 2*a)/2, Rational(-1, 2), 0],
[0, 0, 1 - 2*a, Rational(1, 4)],
[-32*z, 0, 0, 1 - a]]))
# 1F2
addb([a], [a - S.Half, 2*a],
Matrix([z**(S.Half - a)*besseli(a - S.Half, sqrt(z))**2,
z**(1 - a)*besseli(a - S.Half, sqrt(z))
*besseli(a - Rational(3, 2), sqrt(z)),
z**(Rational(3, 2) - a)*besseli(a - Rational(3, 2), sqrt(z))**2]),
Matrix([[-gamma(a + S.Half)**2/4**(S.Half - a),
2*gamma(a - S.Half)*gamma(a + S.Half)/4**(1 - a),
0]]),
Matrix([[1 - 2*a, 1, 0], [z/2, S.Half - a, S.Half], [0, z, 0]]))
addb([S.Half], [b, 2 - b],
pi*(1 - b)/sin(pi*b)*
Matrix([besseli(1 - b, sqrt(z))*besseli(b - 1, sqrt(z)),
sqrt(z)*(besseli(-b, sqrt(z))*besseli(b - 1, sqrt(z))
+ besseli(1 - b, sqrt(z))*besseli(b, sqrt(z))),
besseli(-b, sqrt(z))*besseli(b, sqrt(z))]),
Matrix([[1, 0, 0]]),
Matrix([[b - 1, S.Half, 0],
[z, 0, z],
[0, S.Half, -b]]))
addb([S.Half], [Rational(3, 2), Rational(3, 2)],
Matrix([Shi(2*sqrt(z))/2/sqrt(z), sinh(2*sqrt(z))/2/sqrt(z),
cosh(2*sqrt(z))]),
Matrix([[1, 0, 0]]),
Matrix([[Rational(-1, 2), S.Half, 0], [0, Rational(-1, 2), S.Half], [0, 2*z, 0]]))
# FresnelS
# Basic rule
#add([Rational(3, 4)], [Rational(3, 2),Rational(7, 4)], 6*fresnels( exp(pi*I/4)*root(z,4)*2/sqrt(pi) ) / ( pi * (exp(pi*I/4)*root(z,4)*2/sqrt(pi))**3 ) )
# Manually tuned rule
    addb([Rational(3, 4)], [Rational(3, 2), Rational(7, 4)],
         Matrix([fresnels(exp(pi*I/4)*root(z, 4)*2/sqrt(pi))
                 / (pi*(exp(pi*I/4)*root(z, 4)*2/sqrt(pi))**3),
                 sinh(2*sqrt(z))/sqrt(z),
                 cosh(2*sqrt(z))]),
         Matrix([[6, 0, 0]]),
         Matrix([[Rational(-3, 4), Rational(1, 16), 0],
                 [0, Rational(-1, 2), 1],
                 [0, z, 0]]))
# FresnelC
# Basic rule
#add([Rational(1, 4)], [S.Half,Rational(5, 4)], fresnelc( exp(pi*I/4)*root(z,4)*2/sqrt(pi) ) / ( exp(pi*I/4)*root(z,4)*2/sqrt(pi) ) )
# Manually tuned rule
    addb([Rational(1, 4)], [S.Half, Rational(5, 4)],
         Matrix([sqrt(pi)*exp(-I*pi/4)*fresnelc(2*root(z, 4)*exp(I*pi/4)/sqrt(pi))
                 / (2*root(z, 4)),
                 cosh(2*sqrt(z)),
                 sinh(2*sqrt(z))*sqrt(z)]),
         Matrix([[1, 0, 0]]),
         Matrix([[Rational(-1, 4), Rational(1, 4), 0],
                 [0, 0, 1],
                 [0, z, S.Half]]))
# 2F3
    # XXX this five-parameter formula is pretty slow with the current
    # Formula.find_instantiations (it creates 2!*3!*3**(2+3) ~ 3000
    # instantiations), but it's not too bad.
addb([a, a + S.Half], [2*a, b, 2*a - b + 1],
gamma(b)*gamma(2*a - b + 1) * (sqrt(z)/2)**(1 - 2*a) *
Matrix([besseli(b - 1, sqrt(z))*besseli(2*a - b, sqrt(z)),
sqrt(z)*besseli(b, sqrt(z))*besseli(2*a - b, sqrt(z)),
sqrt(z)*besseli(b - 1, sqrt(z))*besseli(2*a - b + 1, sqrt(z)),
besseli(b, sqrt(z))*besseli(2*a - b + 1, sqrt(z))]),
Matrix([[1, 0, 0, 0]]),
Matrix([[0, S.Half, S.Half, 0],
[z/2, 1 - b, 0, z/2],
[z/2, 0, b - 2*a, z/2],
[0, S.Half, S.Half, -2*a]]))
# (C/f above comment about eulergamma in the basis).
addb([1, 1], [2, 2, Rational(3, 2)],
Matrix([Chi(2*sqrt(z)) - log(2*sqrt(z)),
cosh(2*sqrt(z)), sqrt(z)*sinh(2*sqrt(z)), 1, EulerGamma]),
Matrix([[1/z, 0, 0, 0, -1/z]]),
Matrix([[0, S.Half, 0, Rational(-1, 2), 0],
[0, 0, 1, 0, 0],
[0, z, S.Half, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]))
# 3F3
# This is rule: http://functions.wolfram.com/07.31.03.0134.01
# Initial reason to add it was a nice solution for
# integrate(erf(a*z)/z**2, z) and same for erfc and erfi.
# Basic rule
# add([1, 1, a], [2, 2, a+1], (a/(z*(a-1)**2)) *
# (1 - (-z)**(1-a) * (gamma(a) - uppergamma(a,-z))
# - (a-1) * (EulerGamma + uppergamma(0,-z) + log(-z))
# - exp(z)))
# Manually tuned rule
addb([1, 1, a], [2, 2, a+1],
Matrix([a*(log(-z) + expint(1, -z) + EulerGamma)/(z*(a**2 - 2*a + 1)),
a*(-z)**(-a)*(gamma(a) - uppergamma(a, -z))/(a - 1)**2,
a*exp(z)/(a**2 - 2*a + 1),
a/(z*(a**2 - 2*a + 1))]),
Matrix([[1-a, 1, -1/z, 1]]),
Matrix([[-1,0,-1/z,1],
[0,-a,1,0],
[0,0,z,0],
[0,0,0,-1]]))
def add_meijerg_formulae(formulae):
from sympy.matrices import Matrix
a, b, c, z = list(map(Dummy, 'abcz'))
rho = Dummy('rho')
def add(an, ap, bm, bq, B, C, M, matcher):
formulae.append(MeijerFormula(an, ap, bm, bq, z, [a, b, c, rho],
B, C, M, matcher))
def detect_uppergamma(func):
x = func.an[0]
y, z = func.bm
swapped = False
if not _mod1((x - y).simplify()):
swapped = True
(y, z) = (z, y)
if _mod1((x - z).simplify()) or x - z > 0:
return None
l = [y, x]
if swapped:
l = [x, y]
return {rho: y, a: x - y}, G_Function([x], [], l, [])
add([a + rho], [], [rho, a + rho], [],
Matrix([gamma(1 - a)*z**rho*exp(z)*uppergamma(a, z),
gamma(1 - a)*z**(a + rho)]),
Matrix([[1, 0]]),
Matrix([[rho + z, -1], [0, a + rho]]),
detect_uppergamma)
def detect_3113(func):
"""http://functions.wolfram.com/07.34.03.0984.01"""
x = func.an[0]
u, v, w = func.bm
if _mod1((u - v).simplify()) == 0:
if _mod1((v - w).simplify()) == 0:
return
sig = (S.Half, S.Half, S.Zero)
x1, x2, y = u, v, w
else:
if _mod1((x - u).simplify()) == 0:
sig = (S.Half, S.Zero, S.Half)
x1, y, x2 = u, v, w
else:
sig = (S.Zero, S.Half, S.Half)
y, x1, x2 = u, v, w
if (_mod1((x - x1).simplify()) != 0 or
_mod1((x - x2).simplify()) != 0 or
_mod1((x - y).simplify()) != S.Half or
x - x1 > 0 or x - x2 > 0):
return
return {a: x}, G_Function([x], [], [x - S.Half + t for t in sig], [])
s = sin(2*sqrt(z))
c_ = cos(2*sqrt(z))
S_ = Si(2*sqrt(z)) - pi/2
C = Ci(2*sqrt(z))
add([a], [], [a, a, a - S.Half], [],
Matrix([sqrt(pi)*z**(a - S.Half)*(c_*S_ - s*C),
sqrt(pi)*z**a*(s*S_ + c_*C),
sqrt(pi)*z**a]),
Matrix([[-2, 0, 0]]),
Matrix([[a - S.Half, -1, 0], [z, a, S.Half], [0, 0, a]]),
detect_3113)
def make_simp(z):
""" Create a function that simplifies rational functions in ``z``. """
def simp(expr):
""" Efficiently simplify the rational function ``expr``. """
numer, denom = expr.as_numer_denom()
numer = numer.expand()
# denom = denom.expand() # is this needed?
c, numer, denom = poly(numer, z).cancel(poly(denom, z))
return c * numer.as_expr() / denom.as_expr()
return simp
def debug(*args):
if SYMPY_DEBUG:
for a in args:
print(a, end="")
print()
class Hyper_Function(Expr):
""" A generalized hypergeometric function. """
def __new__(cls, ap, bq):
obj = super().__new__(cls)
obj.ap = Tuple(*list(map(expand, ap)))
obj.bq = Tuple(*list(map(expand, bq)))
return obj
@property
def args(self):
return (self.ap, self.bq)
@property
def sizes(self):
return (len(self.ap), len(self.bq))
@property
def gamma(self):
"""
Number of upper parameters that are negative integers
This is a transformation invariant.
"""
return sum(bool(x.is_integer and x.is_negative) for x in self.ap)
def _hashable_content(self):
return super()._hashable_content() + (self.ap,
self.bq)
def __call__(self, arg):
return hyper(self.ap, self.bq, arg)
def build_invariants(self):
"""
Compute the invariant vector.
Explanation
===========
The invariant vector is:
(gamma, ((s1, n1), ..., (sk, nk)), ((t1, m1), ..., (tr, mr)))
where gamma is the number of integer a < 0,
s1 < ... < sk
nl is the number of parameters a_i congruent to sl mod 1
t1 < ... < tr
ml is the number of parameters b_i congruent to tl mod 1
If the index pair contains parameters, then this is not truly an
invariant, since the parameters cannot be sorted uniquely mod1.
Examples
========
>>> from sympy.simplify.hyperexpand import Hyper_Function
>>> from sympy import S
>>> ap = (S.Half, S.One/3, S(-1)/2, -2)
>>> bq = (1, 2)
Here gamma = 1,
k = 3, s1 = 0, s2 = 1/3, s3 = 1/2
        n1 = 1, n2 = 1, n3 = 2
r = 1, t1 = 0
m1 = 2:
>>> Hyper_Function(ap, bq).build_invariants()
(1, ((0, 1), (1/3, 1), (1/2, 2)), ((0, 2),))
"""
abuckets, bbuckets = sift(self.ap, _mod1), sift(self.bq, _mod1)
def tr(bucket):
bucket = list(bucket.items())
if not any(isinstance(x[0], Mod) for x in bucket):
bucket.sort(key=lambda x: default_sort_key(x[0]))
bucket = tuple([(mod, len(values)) for mod, values in bucket if
values])
return bucket
return (self.gamma, tr(abuckets), tr(bbuckets))
def difficulty(self, func):
""" Estimate how many steps it takes to reach ``func`` from self.
Return -1 if impossible. """
if self.gamma != func.gamma:
return -1
oabuckets, obbuckets, abuckets, bbuckets = [sift(params, _mod1) for
params in (self.ap, self.bq, func.ap, func.bq)]
diff = 0
for bucket, obucket in [(abuckets, oabuckets), (bbuckets, obbuckets)]:
for mod in set(list(bucket.keys()) + list(obucket.keys())):
if (not mod in bucket) or (not mod in obucket) \
or len(bucket[mod]) != len(obucket[mod]):
return -1
l1 = list(bucket[mod])
l2 = list(obucket[mod])
l1.sort()
l2.sort()
for i, j in zip(l1, l2):
diff += abs(i - j)
return diff
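    # Illustrative example (not a doctest): for
    #     self = Hyper_Function((1, 2), (3,)), func = Hyper_Function((2, 3), (4,))
    # all parameters fall in the 0 bucket and the sorted pairwise distances are
    # |1 - 2| + |2 - 3| + |3 - 4|, so difficulty returns 3.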
def _is_suitable_origin(self):
"""
Decide if ``self`` is a suitable origin.
Explanation
===========
A function is a suitable origin iff:
* none of the ai equals bj + n, with n a non-negative integer
* none of the ai is zero
* none of the bj is a non-positive integer
Note that this gives meaningful results only when none of the indices
are symbolic.
"""
for a in self.ap:
for b in self.bq:
if (a - b).is_integer and (a - b).is_negative is False:
return False
for a in self.ap:
if a == 0:
return False
for b in self.bq:
if b.is_integer and b.is_nonpositive:
return False
return True
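    # For instance (illustrative, not a doctest):
    #     Hyper_Function((S.Half, 1), (2,))._is_suitable_origin()  -> True
    #     Hyper_Function((2,), (1,))._is_suitable_origin()         -> False
    # since in the second case a - b = 1 is a non-negative integer.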
class G_Function(Expr):
""" A Meijer G-function. """
def __new__(cls, an, ap, bm, bq):
obj = super().__new__(cls)
obj.an = Tuple(*list(map(expand, an)))
obj.ap = Tuple(*list(map(expand, ap)))
obj.bm = Tuple(*list(map(expand, bm)))
obj.bq = Tuple(*list(map(expand, bq)))
return obj
@property
def args(self):
return (self.an, self.ap, self.bm, self.bq)
def _hashable_content(self):
return super()._hashable_content() + self.args
def __call__(self, z):
return meijerg(self.an, self.ap, self.bm, self.bq, z)
def compute_buckets(self):
"""
        Compute buckets for the four sets of parameters.
Explanation
===========
We guarantee that any two equal Mod objects returned are actually the
same, and that the buckets are sorted by real part (an and bq
        descending, bm and ap ascending).
Examples
========
>>> from sympy.simplify.hyperexpand import G_Function
>>> from sympy.abc import y
>>> from sympy import S
>>> a, b = [1, 3, 2, S(3)/2], [1 + y, y, 2, y + 3]
>>> G_Function(a, b, [2], [y]).compute_buckets()
({0: [3, 2, 1], 1/2: [3/2]},
{0: [2], y: [y, y + 1, y + 3]}, {0: [2]}, {y: [y]})
"""
dicts = pan, pap, pbm, pbq = [defaultdict(list) for i in range(4)]
for dic, lis in zip(dicts, (self.an, self.ap, self.bm, self.bq)):
for x in lis:
dic[_mod1(x)].append(x)
for dic, flip in zip(dicts, (True, False, False, True)):
for m, items in dic.items():
x0 = items[0]
items.sort(key=lambda x: x - x0, reverse=flip)
dic[m] = items
return tuple([dict(w) for w in dicts])
@property
def signature(self):
return (len(self.an), len(self.ap), len(self.bm), len(self.bq))
# Dummy variable.
_x = Dummy('x')
class Formula:
"""
This class represents hypergeometric formulae.
Explanation
===========
Its data members are:
- z, the argument
- closed_form, the closed form expression
- symbols, the free symbols (parameters) in the formula
- func, the function
- B, C, M (see _compute_basis)
Examples
========
>>> from sympy.abc import a, b, z
>>> from sympy.simplify.hyperexpand import Formula, Hyper_Function
>>> func = Hyper_Function((a/2, a/3 + b, (1+a)/2), (a, b, (a+b)/7))
>>> f = Formula(func, z, None, [a, b])
"""
def _compute_basis(self, closed_form):
"""
Compute a set of functions B=(f1, ..., fn), a nxn matrix M
and a 1xn matrix C such that:
closed_form = C B
z d/dz B = M B.
"""
from sympy.matrices import Matrix, eye, zeros
afactors = [_x + a for a in self.func.ap]
bfactors = [_x + b - 1 for b in self.func.bq]
expr = _x*Mul(*bfactors) - self.z*Mul(*afactors)
poly = Poly(expr, _x)
n = poly.degree() - 1
b = [closed_form]
for _ in range(n):
b.append(self.z*b[-1].diff(self.z))
self.B = Matrix(b)
self.C = Matrix([[1] + [0]*n])
m = eye(n)
m = m.col_insert(0, zeros(n, 1))
l = poly.all_coeffs()[1:]
l.reverse()
self.M = m.row_insert(n, -Matrix([l])/poly.all_coeffs()[0])
def __init__(self, func, z, res, symbols, B=None, C=None, M=None):
z = sympify(z)
res = sympify(res)
symbols = [x for x in sympify(symbols) if func.has(x)]
self.z = z
self.symbols = symbols
self.B = B
self.C = C
self.M = M
self.func = func
# TODO with symbolic parameters, it could be advantageous
# (for prettier answers) to compute a basis only *after*
# instantiation
if res is not None:
self._compute_basis(res)
@property
def closed_form(self):
return reduce(lambda s,m: s+m[0]*m[1], zip(self.C, self.B), S.Zero)
def find_instantiations(self, func):
"""
Find substitutions of the free symbols that match ``func``.
Return the substitution dictionaries as a list. Note that the returned
instantiations need not actually match, or be valid!
"""
from sympy.solvers import solve
ap = func.ap
bq = func.bq
if len(ap) != len(self.func.ap) or len(bq) != len(self.func.bq):
raise TypeError('Cannot instantiate other number of parameters')
symbol_values = []
for a in self.symbols:
if a in self.func.ap.args:
symbol_values.append(ap)
elif a in self.func.bq.args:
symbol_values.append(bq)
else:
raise ValueError("At least one of the parameters of the "
"formula must be equal to %s" % (a,))
base_repl = [dict(list(zip(self.symbols, values)))
for values in product(*symbol_values)]
abuckets, bbuckets = [sift(params, _mod1) for params in [ap, bq]]
a_inv, b_inv = [{a: len(vals) for a, vals in bucket.items()}
for bucket in [abuckets, bbuckets]]
critical_values = [[0] for _ in self.symbols]
result = []
_n = Dummy()
for repl in base_repl:
symb_a, symb_b = [sift(params, lambda x: _mod1(x.xreplace(repl)))
for params in [self.func.ap, self.func.bq]]
for bucket, obucket in [(abuckets, symb_a), (bbuckets, symb_b)]:
for mod in set(list(bucket.keys()) + list(obucket.keys())):
if (not mod in bucket) or (not mod in obucket) \
or len(bucket[mod]) != len(obucket[mod]):
break
for a, vals in zip(self.symbols, critical_values):
if repl[a].free_symbols:
continue
exprs = [expr for expr in obucket[mod] if expr.has(a)]
repl0 = repl.copy()
repl0[a] += _n
for expr in exprs:
for target in bucket[mod]:
n0, = solve(expr.xreplace(repl0) - target, _n)
if n0.free_symbols:
raise ValueError("Value should not be true")
vals.append(n0)
else:
values = []
for a, vals in zip(self.symbols, critical_values):
a0 = repl[a]
min_ = floor(min(vals))
max_ = ceiling(max(vals))
values.append([a0 + n for n in range(min_, max_ + 1)])
result.extend(dict(list(zip(self.symbols, l))) for l in product(*values))
return result
class FormulaCollection:
""" A collection of formulae to use as origins. """
def __init__(self):
""" Doing this globally at module init time is a pain ... """
self.symbolic_formulae = {}
self.concrete_formulae = {}
self.formulae = []
add_formulae(self.formulae)
# Now process the formulae into a helpful form.
# These dicts are indexed by (p, q).
for f in self.formulae:
sizes = f.func.sizes
if len(f.symbols) > 0:
self.symbolic_formulae.setdefault(sizes, []).append(f)
else:
inv = f.func.build_invariants()
self.concrete_formulae.setdefault(sizes, {})[inv] = f
def lookup_origin(self, func):
"""
Given the suitable target ``func``, try to find an origin in our
knowledge base.
Examples
========
>>> from sympy.simplify.hyperexpand import (FormulaCollection,
... Hyper_Function)
>>> f = FormulaCollection()
>>> f.lookup_origin(Hyper_Function((), ())).closed_form
exp(_z)
>>> f.lookup_origin(Hyper_Function([1], ())).closed_form
HyperRep_power1(-1, _z)
>>> from sympy import S
>>> i = Hyper_Function([S('1/4'), S('3/4 + 4')], [S.Half])
>>> f.lookup_origin(i).closed_form
HyperRep_sqrts1(-1/4, _z)
"""
inv = func.build_invariants()
sizes = func.sizes
if sizes in self.concrete_formulae and \
inv in self.concrete_formulae[sizes]:
return self.concrete_formulae[sizes][inv]
# We don't have a concrete formula. Try to instantiate.
if not sizes in self.symbolic_formulae:
return None # Too bad...
possible = []
for f in self.symbolic_formulae[sizes]:
repls = f.find_instantiations(func)
for repl in repls:
func2 = f.func.xreplace(repl)
if not func2._is_suitable_origin():
continue
diff = func2.difficulty(func)
if diff == -1:
continue
possible.append((diff, repl, f, func2))
# find the nearest origin
possible.sort(key=lambda x: x[0])
for _, repl, f, func2 in possible:
f2 = Formula(func2, f.z, None, [], f.B.subs(repl),
f.C.subs(repl), f.M.subs(repl))
if not any(e.has(S.NaN, oo, -oo, zoo) for e in [f2.B, f2.M, f2.C]):
return f2
return None
class MeijerFormula:
"""
This class represents a Meijer G-function formula.
Its data members are:
- z, the argument
- symbols, the free symbols (parameters) in the formula
- func, the function
- B, C, M (c/f ordinary Formula)
"""
def __init__(self, an, ap, bm, bq, z, symbols, B, C, M, matcher):
an, ap, bm, bq = [Tuple(*list(map(expand, w))) for w in [an, ap, bm, bq]]
self.func = G_Function(an, ap, bm, bq)
self.z = z
self.symbols = symbols
self._matcher = matcher
self.B = B
self.C = C
self.M = M
@property
def closed_form(self):
return reduce(lambda s,m: s+m[0]*m[1], zip(self.C, self.B), S.Zero)
def try_instantiate(self, func):
"""
Try to instantiate the current formula to (almost) match func.
This uses the _matcher passed on init.
"""
if func.signature != self.func.signature:
return None
res = self._matcher(func)
if res is not None:
subs, newfunc = res
return MeijerFormula(newfunc.an, newfunc.ap, newfunc.bm, newfunc.bq,
self.z, [],
self.B.subs(subs), self.C.subs(subs),
self.M.subs(subs), None)
class MeijerFormulaCollection:
"""
This class holds a collection of meijer g formulae.
"""
def __init__(self):
formulae = []
add_meijerg_formulae(formulae)
self.formulae = defaultdict(list)
for formula in formulae:
self.formulae[formula.func.signature].append(formula)
self.formulae = dict(self.formulae)
def lookup_origin(self, func):
""" Try to find a formula that matches func. """
if not func.signature in self.formulae:
return None
for formula in self.formulae[func.signature]:
res = formula.try_instantiate(func)
if res is not None:
return res
class Operator:
"""
Base class for operators to be applied to our functions.
Explanation
===========
These operators are differential operators. They are by convention
expressed in the variable D = z*d/dz (although this base class does
not actually care).
Note that when the operator is applied to an object, we typically do
*not* blindly differentiate but instead use a different representation
of the z*d/dz operator (see make_derivative_operator).
To subclass from this, define a __init__ method that initializes a
self._poly variable. This variable stores a polynomial. By convention
the generator is z*d/dz, and acts to the right of all coefficients.
Thus this poly
x**2 + 2*z*x + 1
represents the differential operator
    (z*d/dz)**2 + 2*z**2*d/dz + 1.
This class is used only in the implementation of the hypergeometric
function expansion algorithm.
"""
def apply(self, obj, op):
"""
Apply ``self`` to the object ``obj``, where the generator is ``op``.
Examples
========
>>> from sympy.simplify.hyperexpand import Operator
>>> from sympy.polys.polytools import Poly
>>> from sympy.abc import x, y, z
>>> op = Operator()
>>> op._poly = Poly(x**2 + z*x + y, x)
>>> op.apply(z**7, lambda f: f.diff(z))
y*z**7 + 7*z**7 + 42*z**5
"""
coeffs = self._poly.all_coeffs()
coeffs.reverse()
diffs = [obj]
for c in coeffs[1:]:
diffs.append(op(diffs[-1]))
r = coeffs[0]*diffs[0]
for c, d in zip(coeffs[1:], diffs[1:]):
r += c*d
return r
class MultOperator(Operator):
""" Simply multiply by a "constant" """
def __init__(self, p):
self._poly = Poly(p, _x)
class ShiftA(Operator):
""" Increment an upper index. """
def __init__(self, ai):
ai = sympify(ai)
if ai == 0:
raise ValueError('Cannot increment zero upper index.')
self._poly = Poly(_x/ai + 1, _x)
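    # Rationale (the standard contiguous relation, stated here for orientation):
    # with D = z*d/dz one has (D + ai)*hyper([..., ai, ...], [...], z)
    # = ai*hyper([..., ai + 1, ...], [...], z), so D/ai + 1 realizes ai -> ai + 1.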
def __str__(self):
return '<Increment upper %s.>' % (1/self._poly.all_coeffs()[0])
class ShiftB(Operator):
""" Decrement a lower index. """
def __init__(self, bi):
bi = sympify(bi)
if bi == 1:
raise ValueError('Cannot decrement unit lower index.')
self._poly = Poly(_x/(bi - 1) + 1, _x)
def __str__(self):
return '<Decrement lower %s.>' % (1/self._poly.all_coeffs()[0] + 1)
class UnShiftA(Operator):
""" Decrement an upper index. """
def __init__(self, ap, bq, i, z):
""" Note: i counts from zero! """
ap, bq, i = list(map(sympify, [ap, bq, i]))
self._ap = ap
self._bq = bq
self._i = i
ap = list(ap)
bq = list(bq)
ai = ap.pop(i) - 1
if ai == 0:
raise ValueError('Cannot decrement unit upper index.')
m = Poly(z*ai, _x)
for a in ap:
m *= Poly(_x + a, _x)
A = Dummy('A')
n = D = Poly(ai*A - ai, A)
for b in bq:
n *= D + (b - 1).as_poly(A)
b0 = -n.nth(0)
if b0 == 0:
raise ValueError('Cannot decrement upper index: '
'cancels with lower')
n = Poly(Poly(n.all_coeffs()[:-1], A).as_expr().subs(A, _x/ai + 1), _x)
self._poly = Poly((n - m)/b0, _x)
def __str__(self):
return '<Decrement upper index #%s of %s, %s.>' % (self._i,
self._ap, self._bq)
class UnShiftB(Operator):
""" Increment a lower index. """
def __init__(self, ap, bq, i, z):
""" Note: i counts from zero! """
ap, bq, i = list(map(sympify, [ap, bq, i]))
self._ap = ap
self._bq = bq
self._i = i
ap = list(ap)
bq = list(bq)
bi = bq.pop(i) + 1
if bi == 0:
raise ValueError('Cannot increment -1 lower index.')
m = Poly(_x*(bi - 1), _x)
for b in bq:
m *= Poly(_x + b - 1, _x)
B = Dummy('B')
D = Poly((bi - 1)*B - bi + 1, B)
n = Poly(z, B)
for a in ap:
n *= (D + a.as_poly(B))
b0 = n.nth(0)
if b0 == 0:
raise ValueError('Cannot increment index: cancels with upper')
n = Poly(Poly(n.all_coeffs()[:-1], B).as_expr().subs(
B, _x/(bi - 1) + 1), _x)
self._poly = Poly((m - n)/b0, _x)
def __str__(self):
return '<Increment lower index #%s of %s, %s.>' % (self._i,
self._ap, self._bq)
class MeijerShiftA(Operator):
""" Increment an upper b index. """
def __init__(self, bi):
bi = sympify(bi)
self._poly = Poly(bi - _x, _x)
def __str__(self):
return '<Increment upper b=%s.>' % (self._poly.all_coeffs()[1])
class MeijerShiftB(Operator):
""" Decrement an upper a index. """
def __init__(self, bi):
bi = sympify(bi)
self._poly = Poly(1 - bi + _x, _x)
def __str__(self):
return '<Decrement upper a=%s.>' % (1 - self._poly.all_coeffs()[1])
class MeijerShiftC(Operator):
""" Increment a lower b index. """
def __init__(self, bi):
bi = sympify(bi)
self._poly = Poly(-bi + _x, _x)
def __str__(self):
return '<Increment lower b=%s.>' % (-self._poly.all_coeffs()[1])
class MeijerShiftD(Operator):
""" Decrement a lower a index. """
def __init__(self, bi):
bi = sympify(bi)
self._poly = Poly(bi - 1 - _x, _x)
def __str__(self):
return '<Decrement lower a=%s.>' % (self._poly.all_coeffs()[1] + 1)
class MeijerUnShiftA(Operator):
""" Decrement an upper b index. """
def __init__(self, an, ap, bm, bq, i, z):
""" Note: i counts from zero! """
an, ap, bm, bq, i = list(map(sympify, [an, ap, bm, bq, i]))
self._an = an
self._ap = ap
self._bm = bm
self._bq = bq
self._i = i
an = list(an)
ap = list(ap)
bm = list(bm)
bq = list(bq)
bi = bm.pop(i) - 1
m = Poly(1, _x)
for b in bm:
m *= Poly(b - _x, _x)
for b in bq:
m *= Poly(_x - b, _x)
A = Dummy('A')
D = Poly(bi - A, A)
n = Poly(z, A)
for a in an:
n *= (D + 1 - a)
for a in ap:
n *= (-D + a - 1)
b0 = n.nth(0)
if b0 == 0:
raise ValueError('Cannot decrement upper b index (cancels)')
n = Poly(Poly(n.all_coeffs()[:-1], A).as_expr().subs(A, bi - _x), _x)
self._poly = Poly((m - n)/b0, _x)
def __str__(self):
return '<Decrement upper b index #%s of %s, %s, %s, %s.>' % (self._i,
self._an, self._ap, self._bm, self._bq)
class MeijerUnShiftB(Operator):
""" Increment an upper a index. """
def __init__(self, an, ap, bm, bq, i, z):
""" Note: i counts from zero! """
an, ap, bm, bq, i = list(map(sympify, [an, ap, bm, bq, i]))
self._an = an
self._ap = ap
self._bm = bm
self._bq = bq
self._i = i
an = list(an)
ap = list(ap)
bm = list(bm)
bq = list(bq)
ai = an.pop(i) + 1
m = Poly(z, _x)
for a in an:
m *= Poly(1 - a + _x, _x)
for a in ap:
m *= Poly(a - 1 - _x, _x)
B = Dummy('B')
D = Poly(B + ai - 1, B)
n = Poly(1, B)
for b in bm:
n *= (-D + b)
for b in bq:
n *= (D - b)
b0 = n.nth(0)
if b0 == 0:
raise ValueError('Cannot increment upper a index (cancels)')
n = Poly(Poly(n.all_coeffs()[:-1], B).as_expr().subs(
B, 1 - ai + _x), _x)
self._poly = Poly((m - n)/b0, _x)
def __str__(self):
return '<Increment upper a index #%s of %s, %s, %s, %s.>' % (self._i,
self._an, self._ap, self._bm, self._bq)
class MeijerUnShiftC(Operator):
""" Decrement a lower b index. """
# XXX this is "essentially" the same as MeijerUnShiftA. This "essentially"
# can be made rigorous using the functional equation G(1/z) = G'(z),
# where G' denotes a G function of slightly altered parameters.
# However, sorting out the details seems harder than just coding it
# again.
def __init__(self, an, ap, bm, bq, i, z):
""" Note: i counts from zero! """
an, ap, bm, bq, i = list(map(sympify, [an, ap, bm, bq, i]))
self._an = an
self._ap = ap
self._bm = bm
self._bq = bq
self._i = i
an = list(an)
ap = list(ap)
bm = list(bm)
bq = list(bq)
bi = bq.pop(i) - 1
m = Poly(1, _x)
for b in bm:
m *= Poly(b - _x, _x)
for b in bq:
m *= Poly(_x - b, _x)
C = Dummy('C')
D = Poly(bi + C, C)
n = Poly(z, C)
for a in an:
n *= (D + 1 - a)
for a in ap:
n *= (-D + a - 1)
b0 = n.nth(0)
if b0 == 0:
raise ValueError('Cannot decrement lower b index (cancels)')
n = Poly(Poly(n.all_coeffs()[:-1], C).as_expr().subs(C, _x - bi), _x)
self._poly = Poly((m - n)/b0, _x)
def __str__(self):
return '<Decrement lower b index #%s of %s, %s, %s, %s.>' % (self._i,
self._an, self._ap, self._bm, self._bq)
class MeijerUnShiftD(Operator):
""" Increment a lower a index. """
# XXX This is essentially the same as MeijerUnShiftA.
# See comment at MeijerUnShiftC.
def __init__(self, an, ap, bm, bq, i, z):
""" Note: i counts from zero! """
an, ap, bm, bq, i = list(map(sympify, [an, ap, bm, bq, i]))
self._an = an
self._ap = ap
self._bm = bm
self._bq = bq
self._i = i
an = list(an)
ap = list(ap)
bm = list(bm)
bq = list(bq)
ai = ap.pop(i) + 1
m = Poly(z, _x)
for a in an:
m *= Poly(1 - a + _x, _x)
for a in ap:
m *= Poly(a - 1 - _x, _x)
B = Dummy('B') # - this is the shift operator `D_I`
D = Poly(ai - 1 - B, B)
n = Poly(1, B)
for b in bm:
n *= (-D + b)
for b in bq:
n *= (D - b)
b0 = n.nth(0)
if b0 == 0:
raise ValueError('Cannot increment lower a index (cancels)')
n = Poly(Poly(n.all_coeffs()[:-1], B).as_expr().subs(
B, ai - 1 - _x), _x)
self._poly = Poly((m - n)/b0, _x)
def __str__(self):
return '<Increment lower a index #%s of %s, %s, %s, %s.>' % (self._i,
self._an, self._ap, self._bm, self._bq)
class ReduceOrder(Operator):
""" Reduce Order by cancelling an upper and a lower index. """
def __new__(cls, ai, bj):
""" For convenience if reduction is not possible, return None. """
ai = sympify(ai)
bj = sympify(bj)
n = ai - bj
if not n.is_Integer or n < 0:
return None
if bj.is_integer and bj.is_nonpositive:
return None
expr = Operator.__new__(cls)
p = S.One
for k in range(n):
p *= (_x + bj + k)/(bj + k)
expr._poly = Poly(p, _x)
expr._a = ai
expr._b = bj
return expr
@classmethod
def _meijer(cls, b, a, sign):
""" Cancel b + sign*s and a + sign*s
This is for meijer G functions. """
b = sympify(b)
a = sympify(a)
n = b - a
if n.is_negative or not n.is_Integer:
return None
expr = Operator.__new__(cls)
p = S.One
for k in range(n):
p *= (sign*_x + a + k)
expr._poly = Poly(p, _x)
if sign == -1:
expr._a = b
expr._b = a
else:
expr._b = Add(1, a - 1, evaluate=False)
expr._a = Add(1, b - 1, evaluate=False)
return expr
@classmethod
def meijer_minus(cls, b, a):
return cls._meijer(b, a, -1)
@classmethod
def meijer_plus(cls, a, b):
return cls._meijer(1 - a, 1 - b, 1)
def __str__(self):
return '<Reduce order by cancelling upper %s with lower %s.>' % \
(self._a, self._b)
def _reduce_order(ap, bq, gen, key):
""" Order reduction algorithm used in Hypergeometric and Meijer G """
ap = list(ap)
bq = list(bq)
ap.sort(key=key)
bq.sort(key=key)
nap = []
# we will edit bq in place
operators = []
for a in ap:
op = None
for i in range(len(bq)):
op = gen(a, bq[i])
if op is not None:
bq.pop(i)
break
if op is None:
nap.append(a)
else:
operators.append(op)
return nap, bq, operators
def reduce_order(func):
"""
Given the hypergeometric function ``func``, find a sequence of operators to
    reduce order as much as possible.
Explanation
===========
Return (newfunc, [operators]), where applying the operators to the
hypergeometric function newfunc yields func.
Examples
========
>>> from sympy.simplify.hyperexpand import reduce_order, Hyper_Function
>>> reduce_order(Hyper_Function((1, 2), (3, 4)))
(Hyper_Function((1, 2), (3, 4)), [])
>>> reduce_order(Hyper_Function((1,), (1,)))
(Hyper_Function((), ()), [<Reduce order by cancelling upper 1 with lower 1.>])
>>> reduce_order(Hyper_Function((2, 4), (3, 3)))
(Hyper_Function((2,), (3,)), [<Reduce order by cancelling
upper 4 with lower 3.>])
"""
nap, nbq, operators = _reduce_order(func.ap, func.bq, ReduceOrder, default_sort_key)
return Hyper_Function(Tuple(*nap), Tuple(*nbq)), operators
def reduce_order_meijer(func):
"""
Given the Meijer G function parameters, ``func``, find a sequence of
operators that reduces order as much as possible.
Return newfunc, [operators].
Examples
========
>>> from sympy.simplify.hyperexpand import (reduce_order_meijer,
... G_Function)
>>> reduce_order_meijer(G_Function([3, 4], [5, 6], [3, 4], [1, 2]))[0]
G_Function((4, 3), (5, 6), (3, 4), (2, 1))
>>> reduce_order_meijer(G_Function([3, 4], [5, 6], [3, 4], [1, 8]))[0]
G_Function((3,), (5, 6), (3, 4), (1,))
>>> reduce_order_meijer(G_Function([3, 4], [5, 6], [7, 5], [1, 5]))[0]
G_Function((3,), (), (), (1,))
>>> reduce_order_meijer(G_Function([3, 4], [5, 6], [7, 5], [5, 3]))[0]
G_Function((), (), (), ())
"""
nan, nbq, ops1 = _reduce_order(func.an, func.bq, ReduceOrder.meijer_plus,
lambda x: default_sort_key(-x))
nbm, nap, ops2 = _reduce_order(func.bm, func.ap, ReduceOrder.meijer_minus,
default_sort_key)
return G_Function(nan, nap, nbm, nbq), ops1 + ops2
def make_derivative_operator(M, z):
""" Create a derivative operator, to be passed to Operator.apply. """
def doit(C):
r = z*C.diff(z) + C*M
r = r.applyfunc(make_simp(z))
return r
return doit
def apply_operators(obj, ops, op):
"""
Apply the list of operators ``ops`` to object ``obj``, substituting
``op`` for the generator.
"""
res = obj
for o in reversed(ops):
res = o.apply(res, op)
return res
def devise_plan(target, origin, z):
"""
Devise a plan (consisting of shift and un-shift operators) to be applied
to the hypergeometric function ``target`` to yield ``origin``.
Returns a list of operators.
Examples
========
>>> from sympy.simplify.hyperexpand import devise_plan, Hyper_Function
>>> from sympy.abc import z
Nothing to do:
>>> devise_plan(Hyper_Function((1, 2), ()), Hyper_Function((1, 2), ()), z)
[]
>>> devise_plan(Hyper_Function((), (1, 2)), Hyper_Function((), (1, 2)), z)
[]
Very simple plans:
>>> devise_plan(Hyper_Function((2,), ()), Hyper_Function((1,), ()), z)
[<Increment upper 1.>]
>>> devise_plan(Hyper_Function((), (2,)), Hyper_Function((), (1,)), z)
[<Increment lower index #0 of [], [1].>]
Several buckets:
>>> from sympy import S
>>> devise_plan(Hyper_Function((1, S.Half), ()),
... Hyper_Function((2, S('3/2')), ()), z) #doctest: +NORMALIZE_WHITESPACE
[<Decrement upper index #0 of [3/2, 1], [].>,
<Decrement upper index #0 of [2, 3/2], [].>]
A slightly more complicated plan:
>>> devise_plan(Hyper_Function((1, 3), ()), Hyper_Function((2, 2), ()), z)
[<Increment upper 2.>, <Decrement upper index #0 of [2, 2], [].>]
Another more complicated plan: (note that the ap have to be shifted first!)
>>> devise_plan(Hyper_Function((1, -1), (2,)), Hyper_Function((3, -2), (4,)), z)
[<Decrement lower 3.>, <Decrement lower 4.>,
<Decrement upper index #1 of [-1, 2], [4].>,
<Decrement upper index #1 of [-1, 3], [4].>, <Increment upper -2.>]
"""
abuckets, bbuckets, nabuckets, nbbuckets = [sift(params, _mod1) for
params in (target.ap, target.bq, origin.ap, origin.bq)]
if len(list(abuckets.keys())) != len(list(nabuckets.keys())) or \
len(list(bbuckets.keys())) != len(list(nbbuckets.keys())):
raise ValueError('%s not reachable from %s' % (target, origin))
ops = []
def do_shifts(fro, to, inc, dec):
ops = []
for i in range(len(fro)):
if to[i] - fro[i] > 0:
sh = inc
ch = 1
else:
sh = dec
ch = -1
while to[i] != fro[i]:
ops += [sh(fro, i)]
fro[i] += ch
return ops
def do_shifts_a(nal, nbk, al, aother, bother):
""" Shift us from (nal, nbk) to (al, nbk). """
return do_shifts(nal, al, lambda p, i: ShiftA(p[i]),
lambda p, i: UnShiftA(p + aother, nbk + bother, i, z))
def do_shifts_b(nal, nbk, bk, aother, bother):
""" Shift us from (nal, nbk) to (nal, bk). """
return do_shifts(nbk, bk,
lambda p, i: UnShiftB(nal + aother, p + bother, i, z),
lambda p, i: ShiftB(p[i]))
for r in sorted(list(abuckets.keys()) + list(bbuckets.keys()), key=default_sort_key):
al = ()
nal = ()
bk = ()
nbk = ()
if r in abuckets:
al = abuckets[r]
nal = nabuckets[r]
if r in bbuckets:
bk = bbuckets[r]
nbk = nbbuckets[r]
if len(al) != len(nal) or len(bk) != len(nbk):
raise ValueError('%s not reachable from %s' % (target, origin))
al, nal, bk, nbk = [sorted(list(w), key=default_sort_key)
for w in [al, nal, bk, nbk]]
def others(dic, key):
l = []
for k, value in dic.items():
if k != key:
l += list(dic[k])
return l
aother = others(nabuckets, r)
bother = others(nbbuckets, r)
if len(al) == 0:
# there can be no complications, just shift the bs as we please
ops += do_shifts_b([], nbk, bk, aother, bother)
elif len(bk) == 0:
# there can be no complications, just shift the as as we please
ops += do_shifts_a(nal, [], al, aother, bother)
else:
namax = nal[-1]
amax = al[-1]
if nbk[0] - namax <= 0 or bk[0] - amax <= 0:
raise ValueError('Non-suitable parameters.')
if namax - amax > 0:
# we are going to shift down - first do the as, then the bs
ops += do_shifts_a(nal, nbk, al, aother, bother)
ops += do_shifts_b(al, nbk, bk, aother, bother)
else:
# we are going to shift up - first do the bs, then the as
ops += do_shifts_b(nal, nbk, bk, aother, bother)
ops += do_shifts_a(nal, bk, al, aother, bother)
nabuckets[r] = al
nbbuckets[r] = bk
ops.reverse()
return ops
def try_shifted_sum(func, z):
""" Try to recognise a hypergeometric sum that starts from k > 0. """
abuckets, bbuckets = sift(func.ap, _mod1), sift(func.bq, _mod1)
if len(abuckets[S.Zero]) != 1:
return None
r = abuckets[S.Zero][0]
if r <= 0:
return None
if not S.Zero in bbuckets:
return None
l = list(bbuckets[S.Zero])
l.sort()
k = l[0]
if k <= 0:
return None
nap = list(func.ap)
nap.remove(r)
nbq = list(func.bq)
nbq.remove(k)
k -= 1
nap = [x - k for x in nap]
nbq = [x - k for x in nbq]
ops = []
for n in range(r - 1):
ops.append(ShiftA(n + 1))
ops.reverse()
fac = factorial(k)/z**k
for a in nap:
fac /= rf(a, k)
for b in nbq:
fac *= rf(b, k)
ops += [MultOperator(fac)]
p = 0
for n in range(k):
m = z**n/factorial(n)
for a in nap:
m *= rf(a, n)
for b in nbq:
m /= rf(b, n)
p += m
return Hyper_Function(nap, nbq), ops, -p
def try_polynomial(func, z):
""" Recognise polynomial cases. Returns None if not such a case.
Requires order to be fully reduced. """
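    # Illustrative example (not a doctest): for func = Hyper_Function((-2, 1), (3,))
    # the series terminates after the n = 2 term and this routine returns
    # 1 - 2*z/3 + z**2/6, the polynomial value of 2F1(-2, 1; 3; z).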
abuckets, bbuckets = sift(func.ap, _mod1), sift(func.bq, _mod1)
a0 = abuckets[S.Zero]
b0 = bbuckets[S.Zero]
a0.sort()
b0.sort()
al0 = [x for x in a0 if x <= 0]
bl0 = [x for x in b0 if x <= 0]
if bl0 and all(a < bl0[-1] for a in al0):
return oo
if not al0:
return None
a = al0[-1]
fac = 1
res = S.One
for n in Tuple(*list(range(-a))):
fac *= z
fac /= n + 1
for a in func.ap:
fac *= a + n
for b in func.bq:
fac /= b + n
res += fac
return res
def try_lerchphi(func):
"""
Try to find an expression for Hyper_Function ``func`` in terms of Lerch
Transcendents.
Return None if no such expression can be found.
"""
# This is actually quite simple, and is described in Roach's paper,
# section 18.
# We don't need to implement the reduction to polylog here, this
# is handled by expand_func.
from sympy.matrices import Matrix, zeros
from sympy.polys import apart
# First we need to figure out if the summation coefficient is a rational
# function of the summation index, and construct that rational function.
abuckets, bbuckets = sift(func.ap, _mod1), sift(func.bq, _mod1)
paired = {}
for key, value in abuckets.items():
if key != 0 and not key in bbuckets:
return None
bvalue = bbuckets[key]
paired[key] = (list(value), list(bvalue))
bbuckets.pop(key, None)
if bbuckets != {}:
return None
if not S.Zero in abuckets:
return None
aints, bints = paired[S.Zero]
# Account for the additional n! in denominator
paired[S.Zero] = (aints, bints + [1])
t = Dummy('t')
numer = S.One
denom = S.One
for key, (avalue, bvalue) in paired.items():
if len(avalue) != len(bvalue):
return None
# Note that since order has been reduced fully, all the b are
# bigger than all the a they differ from by an integer. In particular
# if there are any negative b left, this function is not well-defined.
for a, b in zip(avalue, bvalue):
if (a - b).is_positive:
k = a - b
numer *= rf(b + t, k)
denom *= rf(b, k)
else:
k = b - a
numer *= rf(a, k)
denom *= rf(a + t, k)
# Now do a partial fraction decomposition.
    # We assemble two structures: a list ``monomials`` of pairs (a, b)
    # representing a*t**b (b a non-negative integer), and a dict ``terms``,
    # where terms[a] = [(b, c)] means that there is a term b/(t + a)**c
    # (this is the convention matching lerchphi(z, c, a) below).
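    # E.g. (illustrative, assuming the partial fractions come out in this
    # form): part = 3*t + 1/(t + 2)**2 gives monomials = [(3, 1)] and
    # terms = {2: [(1, 2)]}.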
part = apart(numer/denom, t)
args = Add.make_args(part)
monomials = []
terms = {}
for arg in args:
numer, denom = arg.as_numer_denom()
if not denom.has(t):
p = Poly(numer, t)
if not p.is_monomial:
raise TypeError("p should be monomial")
((b, ), a) = p.LT()
monomials += [(a/denom, b)]
continue
if numer.has(t):
raise NotImplementedError('Need partial fraction decomposition'
' with linear denominators')
indep, [dep] = denom.as_coeff_mul(t)
n = 1
if dep.is_Pow:
n = dep.exp
dep = dep.base
if dep == t:
            a = S.Zero  # denominator is plain t, i.e. the shift a is zero
elif dep.is_Add:
a, tmp = dep.as_independent(t)
b = 1
if tmp != t:
b, _ = tmp.as_independent(t)
if dep != b*t + a:
raise NotImplementedError('unrecognised form %s' % dep)
a /= b
indep *= b**n
else:
raise NotImplementedError('unrecognised form of partial fraction')
terms.setdefault(a, []).append((numer/indep, n))
# Now that we have this information, assemble our formula. All the
# monomials yield rational functions and go into one basis element.
# The terms[a] are related by differentiation. If the largest exponent is
# n, we need lerchphi(z, k, a) for k = 1, 2, ..., n.
# deriv maps a basis to its derivative, expressed as a C(z)-linear
# combination of other basis elements.
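    # The recurrences encoded in ``deriv`` below follow from
    # lerchphi(z, s, a) = Sum(z**n/(n + a)**s, (n, 0, oo)):
    #     z*d/dz lerchphi(z, k, a) = lerchphi(z, k - 1, a) - a*lerchphi(z, k, a)
    #     z*d/dz lerchphi(z, 1, a) = 1/(1 - z) - a*lerchphi(z, 1, a)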
deriv = {}
coeffs = {}
z = Dummy('z')
monomials.sort(key=lambda x: x[1])
mon = {0: 1/(1 - z)}
if monomials:
for k in range(monomials[-1][1]):
mon[k + 1] = z*mon[k].diff(z)
for a, n in monomials:
coeffs.setdefault(S.One, []).append(a*mon[n])
for a, l in terms.items():
for c, k in l:
coeffs.setdefault(lerchphi(z, k, a), []).append(c)
l.sort(key=lambda x: x[1])
for k in range(2, l[-1][1] + 1):
deriv[lerchphi(z, k, a)] = [(-a, lerchphi(z, k, a)),
(1, lerchphi(z, k - 1, a))]
deriv[lerchphi(z, 1, a)] = [(-a, lerchphi(z, 1, a)),
(1/(1 - z), S.One)]
trans = {}
for n, b in enumerate([S.One] + list(deriv.keys())):
trans[b] = n
basis = [expand_func(b) for (b, _) in sorted(list(trans.items()),
key=lambda x:x[1])]
B = Matrix(basis)
C = Matrix([[0]*len(B)])
for b, c in coeffs.items():
C[trans[b]] = Add(*c)
M = zeros(len(B))
for b, l in deriv.items():
for c, b2 in l:
M[trans[b], trans[b2]] = c
return Formula(func, z, None, [], B, C, M)
def build_hypergeometric_formula(func):
"""
Create a formula object representing the hypergeometric function ``func``.
"""
# We know that no `ap` are negative integers, otherwise "detect poly"
# would have kicked in. However, `ap` could be empty. In this case we can
# use a different basis.
# I'm not aware of a basis that works in all cases.
from sympy import zeros, Matrix, eye
z = Dummy('z')
if func.ap:
afactors = [_x + a for a in func.ap]
bfactors = [_x + b - 1 for b in func.bq]
expr = _x*Mul(*bfactors) - z*Mul(*afactors)
poly = Poly(expr, _x)
n = poly.degree()
basis = []
M = zeros(n)
for k in range(n):
a = func.ap[0] + k
basis += [hyper([a] + list(func.ap[1:]), func.bq, z)]
if k < n - 1:
M[k, k] = -a
M[k, k + 1] = a
B = Matrix(basis)
C = Matrix([[1] + [0]*(n - 1)])
derivs = [eye(n)]
for k in range(n):
derivs.append(M*derivs[k])
l = poly.all_coeffs()
l.reverse()
res = [0]*n
for k, c in enumerate(l):
for r, d in enumerate(C*derivs[k]):
res[r] += c*d
for k, c in enumerate(res):
M[n - 1, k] = -c/derivs[n - 1][0, n - 1]/poly.all_coeffs()[0]
return Formula(func, z, None, [], B, C, M)
else:
# Since there are no `ap`, none of the `bq` can be non-positive
# integers.
basis = []
bq = list(func.bq[:])
for i in range(len(bq)):
basis += [hyper([], bq, z)]
bq[i] += 1
basis += [hyper([], bq, z)]
B = Matrix(basis)
n = len(B)
C = Matrix([[1] + [0]*(n - 1)])
M = zeros(n)
M[0, n - 1] = z/Mul(*func.bq)
for k in range(1, n):
M[k, k - 1] = func.bq[k - 1]
M[k, k] = -func.bq[k - 1]
return Formula(func, z, None, [], B, C, M)
def hyperexpand_special(ap, bq, z):
"""
Try to find a closed-form expression for hyper(ap, bq, z), where ``z``
is supposed to be a "special" value, e.g. 1.
This function tries various of the classical summation formulae
(Gauss, Saalschuetz, etc).
"""
# This code is very ad-hoc. There are many clever algorithms
# (notably Zeilberger's) related to this problem.
# For now we just want a few simple cases to work.
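    # Quick sanity check of the Gauss branch below (illustrative, not a
    # doctest): hyperexpand_special((1, 2), (4,), 1) should give
    # gamma(1)*gamma(4)/(gamma(3)*gamma(2)) = 3.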
p, q = len(ap), len(bq)
z_ = z
z = unpolarify(z)
if z == 0:
return S.One
if p == 2 and q == 1:
# 2F1
a, b, c = ap + bq
if z == 1:
# Gauss
return gamma(c - a - b)*gamma(c)/gamma(c - a)/gamma(c - b)
if z == -1 and simplify(b - a + c) == 1:
b, a = a, b
if z == -1 and simplify(a - b + c) == 1:
# Kummer
if b.is_integer and b.is_negative:
return 2*cos(pi*b/2)*gamma(-b)*gamma(b - a + 1) \
/gamma(-b/2)/gamma(b/2 - a + 1)
else:
return gamma(b/2 + 1)*gamma(b - a + 1) \
/gamma(b + 1)/gamma(b/2 - a + 1)
# TODO tons of more formulae
# investigate what algorithms exist
return hyper(ap, bq, z_)
_collection = None
def _hyperexpand(func, z, ops0=[], z0=Dummy('z0'), premult=1, prem=0,
rewrite='default'):
"""
Try to find an expression for the hypergeometric function ``func``.
Explanation
===========
The result is expressed in terms of a dummy variable ``z0``. Then it
is multiplied by ``premult``. Then ``ops0`` is applied.
``premult`` must be a*z**prem for some a independent of ``z``.
"""
if z.is_zero:
return S.One
z = polarify(z, subs=False)
if rewrite == 'default':
rewrite = 'nonrepsmall'
def carryout_plan(f, ops):
C = apply_operators(f.C.subs(f.z, z0), ops,
make_derivative_operator(f.M.subs(f.z, z0), z0))
from sympy import eye
C = apply_operators(C, ops0,
make_derivative_operator(f.M.subs(f.z, z0)
+ prem*eye(f.M.shape[0]), z0))
if premult == 1:
C = C.applyfunc(make_simp(z0))
r = reduce(lambda s,m: s+m[0]*m[1], zip(C, f.B.subs(f.z, z0)), S.Zero)*premult
res = r.subs(z0, z)
if rewrite:
res = res.rewrite(rewrite)
return res
# TODO
# The following would be possible:
# *) PFD Duplication (see Kelly Roach's paper)
# *) In a similar spirit, try_lerchphi() can be generalised considerably.
global _collection
if _collection is None:
_collection = FormulaCollection()
debug('Trying to expand hypergeometric function ', func)
# First reduce order as much as possible.
func, ops = reduce_order(func)
if ops:
debug(' Reduced order to ', func)
else:
debug(' Could not reduce order.')
# Now try polynomial cases
res = try_polynomial(func, z0)
if res is not None:
debug(' Recognised polynomial.')
p = apply_operators(res, ops, lambda f: z0*f.diff(z0))
p = apply_operators(p*premult, ops0, lambda f: z0*f.diff(z0))
return unpolarify(simplify(p).subs(z0, z))
# Try to recognise a shifted sum.
p = S.Zero
res = try_shifted_sum(func, z0)
if res is not None:
func, nops, p = res
debug(' Recognised shifted sum, reduced order to ', func)
ops += nops
# apply the plan for poly
p = apply_operators(p, ops, lambda f: z0*f.diff(z0))
p = apply_operators(p*premult, ops0, lambda f: z0*f.diff(z0))
p = simplify(p).subs(z0, z)
# Try special expansions early.
if unpolarify(z) in [1, -1] and (len(func.ap), len(func.bq)) == (2, 1):
f = build_hypergeometric_formula(func)
r = carryout_plan(f, ops).replace(hyper, hyperexpand_special)
if not r.has(hyper):
return r + p
# Try to find a formula in our collection
formula = _collection.lookup_origin(func)
# Now try a lerch phi formula
if formula is None:
formula = try_lerchphi(func)
if formula is None:
debug(' Could not find an origin. ',
'Will return answer in terms of '
'simpler hypergeometric functions.')
formula = build_hypergeometric_formula(func)
debug(' Found an origin: ', formula.closed_form, ' ', formula.func)
# We need to find the operators that convert formula into func.
ops += devise_plan(func, formula.func, z0)
# Now carry out the plan.
r = carryout_plan(formula, ops) + p
return powdenest(r, polar=True).replace(hyper, hyperexpand_special)
def devise_plan_meijer(fro, to, z):
"""
Find operators to convert G-function ``fro`` into G-function ``to``.
Explanation
===========
It is assumed that ``fro`` and ``to`` have the same signatures, and that in fact
any corresponding pair of parameters differs by integers, and a direct path
is possible. I.e. if there are parameters a1 b1 c1 and a2 b2 c2 it is
assumed that a1 can be shifted to a2, etc. The only thing this routine
determines is the order of shifts to apply, nothing clever will be tried.
It is also assumed that ``fro`` is suitable.
Examples
========
>>> from sympy.simplify.hyperexpand import (devise_plan_meijer,
... G_Function)
>>> from sympy.abc import z
Empty plan:
>>> devise_plan_meijer(G_Function([1], [2], [3], [4]),
... G_Function([1], [2], [3], [4]), z)
[]
Very simple plans:
>>> devise_plan_meijer(G_Function([0], [], [], []),
... G_Function([1], [], [], []), z)
[<Increment upper a index #0 of [0], [], [], [].>]
>>> devise_plan_meijer(G_Function([0], [], [], []),
... G_Function([-1], [], [], []), z)
[<Decrement upper a=0.>]
>>> devise_plan_meijer(G_Function([], [1], [], []),
... G_Function([], [2], [], []), z)
[<Increment lower a index #0 of [], [1], [], [].>]
Slightly more complicated plans:
>>> devise_plan_meijer(G_Function([0], [], [], []),
... G_Function([2], [], [], []), z)
[<Increment upper a index #0 of [1], [], [], [].>,
<Increment upper a index #0 of [0], [], [], [].>]
>>> devise_plan_meijer(G_Function([0], [], [0], []),
... G_Function([-1], [], [1], []), z)
[<Increment upper b=0.>, <Decrement upper a=0.>]
Order matters:
>>> devise_plan_meijer(G_Function([0], [], [0], []),
... G_Function([1], [], [1], []), z)
[<Increment upper a index #0 of [0], [], [1], [].>, <Increment upper b=0.>]
"""
# TODO for now, we use the following simple heuristic: inverse-shift
# when possible, shift otherwise. Give up if we cannot make progress.
def try_shift(f, t, shifter, diff, counter):
""" Try to apply ``shifter`` in order to bring some element in ``f``
nearer to its counterpart in ``to``. ``diff`` is +/- 1 and
determines the effect of ``shifter``. Counter is a list of elements
blocking the shift.
Return an operator if change was possible, else None.
"""
for idx, (a, b) in enumerate(zip(f, t)):
if (
(a - b).is_integer and (b - a)/diff > 0 and
all(a != x for x in counter)):
sh = shifter(idx)
f[idx] += diff
return sh
fan = list(fro.an)
fap = list(fro.ap)
fbm = list(fro.bm)
fbq = list(fro.bq)
ops = []
change = True
while change:
change = False
op = try_shift(fan, to.an,
lambda i: MeijerUnShiftB(fan, fap, fbm, fbq, i, z),
1, fbm + fbq)
if op is not None:
ops += [op]
change = True
continue
op = try_shift(fap, to.ap,
lambda i: MeijerUnShiftD(fan, fap, fbm, fbq, i, z),
1, fbm + fbq)
if op is not None:
ops += [op]
change = True
continue
op = try_shift(fbm, to.bm,
lambda i: MeijerUnShiftA(fan, fap, fbm, fbq, i, z),
-1, fan + fap)
if op is not None:
ops += [op]
change = True
continue
op = try_shift(fbq, to.bq,
lambda i: MeijerUnShiftC(fan, fap, fbm, fbq, i, z),
-1, fan + fap)
if op is not None:
ops += [op]
change = True
continue
op = try_shift(fan, to.an, lambda i: MeijerShiftB(fan[i]), -1, [])
if op is not None:
ops += [op]
change = True
continue
op = try_shift(fap, to.ap, lambda i: MeijerShiftD(fap[i]), -1, [])
if op is not None:
ops += [op]
change = True
continue
op = try_shift(fbm, to.bm, lambda i: MeijerShiftA(fbm[i]), 1, [])
if op is not None:
ops += [op]
change = True
continue
op = try_shift(fbq, to.bq, lambda i: MeijerShiftC(fbq[i]), 1, [])
if op is not None:
ops += [op]
change = True
continue
if fan != list(to.an) or fap != list(to.ap) or fbm != list(to.bm) or \
fbq != list(to.bq):
raise NotImplementedError('Could not devise plan.')
ops.reverse()
return ops
_meijercollection = None
def _meijergexpand(func, z0, allow_hyper=False, rewrite='default',
place=None):
"""
Try to find an expression for the Meijer G function specified
by the G_Function ``func``. If ``allow_hyper`` is True, then returning
an expression in terms of hypergeometric functions is allowed.
Currently this just does Slater's theorem.
If expansions exist both at zero and at infinity, ``place``
can be set to ``0`` or ``zoo`` for the preferred choice.
"""
global _meijercollection
if _meijercollection is None:
_meijercollection = MeijerFormulaCollection()
if rewrite == 'default':
rewrite = None
func0 = func
debug('Try to expand Meijer G function corresponding to ', func)
# We will play games with analytic continuation - rather use a fresh symbol
z = Dummy('z')
func, ops = reduce_order_meijer(func)
if ops:
debug(' Reduced order to ', func)
else:
debug(' Could not reduce order.')
# Try to find a direct formula
f = _meijercollection.lookup_origin(func)
if f is not None:
debug(' Found a Meijer G formula: ', f.func)
ops += devise_plan_meijer(f.func, func, z)
# Now carry out the plan.
C = apply_operators(f.C.subs(f.z, z), ops,
make_derivative_operator(f.M.subs(f.z, z), z))
C = C.applyfunc(make_simp(z))
r = C*f.B.subs(f.z, z)
r = r[0].subs(z, z0)
return powdenest(r, polar=True)
debug(" Could not find a direct formula. Trying Slater's theorem.")
# TODO the following would be possible:
# *) Paired Index Theorems
# *) PFD Duplication
# (See Kelly Roach's paper for details on either.)
#
# TODO Also, we tend to create combinations of gamma functions that can be
# simplified.
def can_do(pbm, pap):
""" Test if slater applies. """
for i in pbm:
if len(pbm[i]) > 1:
l = 0
if i in pap:
l = len(pap[i])
if l + 1 < len(pbm[i]):
return False
return True
def do_slater(an, bm, ap, bq, z, zfinal):
# zfinal is the value that will eventually be substituted for z.
# We pass it to _hyperexpand to improve performance.
func = G_Function(an, bm, ap, bq)
_, pbm, pap, _ = func.compute_buckets()
if not can_do(pbm, pap):
return S.Zero, False
cond = len(an) + len(ap) < len(bm) + len(bq)
if len(an) + len(ap) == len(bm) + len(bq):
cond = abs(z) < 1
if cond is False:
return S.Zero, False
res = S.Zero
for m in pbm:
if len(pbm[m]) == 1:
bh = pbm[m][0]
fac = 1
bo = list(bm)
bo.remove(bh)
for bj in bo:
fac *= gamma(bj - bh)
for aj in an:
fac *= gamma(1 + bh - aj)
for bj in bq:
fac /= gamma(1 + bh - bj)
for aj in ap:
fac /= gamma(aj - bh)
nap = [1 + bh - a for a in list(an) + list(ap)]
nbq = [1 + bh - b for b in list(bo) + list(bq)]
k = polar_lift(S.NegativeOne**(len(ap) - len(bm)))
harg = k*zfinal
# NOTE even though k "is" +-1, this has to be t/k instead of
# t*k ... we are using polar numbers for consistency!
premult = (t/k)**bh
hyp = _hyperexpand(Hyper_Function(nap, nbq), harg, ops,
t, premult, bh, rewrite=None)
res += fac * hyp
else:
b_ = pbm[m][0]
ki = [bi - b_ for bi in pbm[m][1:]]
u = len(ki)
li = [ai - b_ for ai in pap[m][:u + 1]]
bo = list(bm)
for b in pbm[m]:
bo.remove(b)
ao = list(ap)
for a in pap[m][:u]:
ao.remove(a)
lu = li[-1]
di = [l - k for (l, k) in zip(li, ki)]
# We first work out the integrand:
s = Dummy('s')
integrand = z**s
for b in bm:
if not Mod(b, 1) and b.is_Number:
b = int(round(b))
integrand *= gamma(b - s)
for a in an:
integrand *= gamma(1 - a + s)
for b in bq:
integrand /= gamma(1 - b + s)
for a in ap:
integrand /= gamma(a - s)
# Now sum the finitely many residues:
# XXX This speeds up some cases - is it a good idea?
integrand = expand_func(integrand)
for r in range(int(round(lu))):
resid = residue(integrand, s, b_ + r)
resid = apply_operators(resid, ops, lambda f: z*f.diff(z))
res -= resid
# Now the hypergeometric term.
au = b_ + lu
k = polar_lift(S.NegativeOne**(len(ao) + len(bo) + 1))
harg = k*zfinal
premult = (t/k)**au
nap = [1 + au - a for a in list(an) + list(ap)] + [1]
nbq = [1 + au - b for b in list(bm) + list(bq)]
hyp = _hyperexpand(Hyper_Function(nap, nbq), harg, ops,
t, premult, au, rewrite=None)
C = S.NegativeOne**(lu)/factorial(lu)
for i in range(u):
C *= S.NegativeOne**di[i]/rf(lu - li[i] + 1, di[i])
for a in an:
C *= gamma(1 - a + au)
for b in bo:
C *= gamma(b - au)
for a in ao:
C /= gamma(a - au)
for b in bq:
C /= gamma(1 - b + au)
res += C*hyp
return res, cond
t = Dummy('t')
slater1, cond1 = do_slater(func.an, func.bm, func.ap, func.bq, z, z0)
def tr(l):
return [1 - x for x in l]
for op in ops:
op._poly = Poly(op._poly.subs({z: 1/t, _x: -_x}), _x)
slater2, cond2 = do_slater(tr(func.bm), tr(func.an), tr(func.bq), tr(func.ap),
t, 1/z0)
slater1 = powdenest(slater1.subs(z, z0), polar=True)
slater2 = powdenest(slater2.subs(t, 1/z0), polar=True)
if not isinstance(cond2, bool):
cond2 = cond2.subs(t, 1/z)
m = func(z)
if m.delta > 0 or \
(m.delta == 0 and len(m.ap) == len(m.bq) and
(re(m.nu) < -1) is not False and polar_lift(z0) == polar_lift(1)):
# The condition delta > 0 means that the convergence region is
# connected. Any expression we find can be continued analytically
# to the entire convergence region.
# The conditions delta==0, p==q, re(nu) < -1 imply that G is continuous
# on the positive reals, so the values at z=1 agree.
if cond1 is not False:
cond1 = True
if cond2 is not False:
cond2 = True
if cond1 is True:
slater1 = slater1.rewrite(rewrite or 'nonrep')
else:
slater1 = slater1.rewrite(rewrite or 'nonrepsmall')
if cond2 is True:
slater2 = slater2.rewrite(rewrite or 'nonrep')
else:
slater2 = slater2.rewrite(rewrite or 'nonrepsmall')
if cond1 is not False and cond2 is not False:
# If one condition is False, there is no choice.
if place == 0:
cond2 = False
if place == zoo:
cond1 = False
if not isinstance(cond1, bool):
cond1 = cond1.subs(z, z0)
if not isinstance(cond2, bool):
cond2 = cond2.subs(z, z0)
def weight(expr, cond):
if cond is True:
c0 = 0
elif cond is False:
c0 = 1
else:
c0 = 2
if expr.has(oo, zoo, -oo, nan):
# XXX this actually should not happen, but consider
# S('meijerg(((0, -1/2, 0, -1/2, 1/2), ()), ((0,),
# (-1/2, -1/2, -1/2, -1)), exp_polar(I*pi))/4')
c0 = 3
return (c0, expr.count(hyper), expr.count_ops())
w1 = weight(slater1, cond1)
w2 = weight(slater2, cond2)
if min(w1, w2) <= (0, 1, oo):
if w1 < w2:
return slater1
else:
return slater2
if max(w1[0], w2[0]) <= 1 and max(w1[1], w2[1]) <= 1:
return Piecewise((slater1, cond1), (slater2, cond2), (func0(z0), True))
# We couldn't find an expression without hypergeometric functions.
# TODO it would be helpful to give conditions under which the integral
# is known to diverge.
r = Piecewise((slater1, cond1), (slater2, cond2), (func0(z0), True))
if r.has(hyper) and not allow_hyper:
debug(' Could express using hypergeometric functions, '
'but not allowed.')
if not r.has(hyper) or allow_hyper:
return r
return func0(z0)
def hyperexpand(f, allow_hyper=False, rewrite='default', place=None):
"""
Expand hypergeometric functions. If allow_hyper is True, allow partial
simplification (that is a result different from input,
but still containing hypergeometric functions).
If a G-function has expansions both at zero and at infinity,
``place`` can be set to ``0`` or ``zoo`` to indicate the
preferred choice.
Examples
========
>>> from sympy.simplify.hyperexpand import hyperexpand
>>> from sympy.functions import hyper
>>> from sympy.abc import z
>>> hyperexpand(hyper([], [], z))
exp(z)
    Non-hypergeometric parts of the expression and hypergeometric expressions
that are not recognised are left unchanged:
>>> hyperexpand(1 + hyper([1, 1, 1], [], z))
hyper((1, 1, 1), (), z) + 1
"""
f = sympify(f)
def do_replace(ap, bq, z):
r = _hyperexpand(Hyper_Function(ap, bq), z, rewrite=rewrite)
if r is None:
return hyper(ap, bq, z)
else:
return r
def do_meijer(ap, bq, z):
r = _meijergexpand(G_Function(ap[0], ap[1], bq[0], bq[1]), z,
allow_hyper, rewrite=rewrite, place=place)
if not r.has(nan, zoo, oo, -oo):
return r
return f.replace(hyper, do_replace).replace(meijerg, do_meijer)
|
9757d440686b51570176973fdb0aaf5b5fcbc34bd1bdc319c324164204422a18 | """ Optimizations of the expression tree representation for better CSE
opportunities.
"""
from sympy.core import Add, Basic, Mul
from sympy.core.basic import preorder_traversal
from sympy.core.singleton import S
from sympy.utilities.iterables import default_sort_key
def sub_pre(e):
""" Replace y - x with -(x - y) if -1 can be extracted from y - x.
"""
    # replace each Add, A, from which -1 can be extracted, with -1*(-A)
adds = [a for a in e.atoms(Add) if a.could_extract_minus_sign()]
reps = {}
ignore = set()
for a in adds:
na = -a
if na.is_Mul: # e.g. MatExpr
ignore.add(a)
continue
reps[a] = Mul._from_args([S.NegativeOne, na])
e = e.xreplace(reps)
# repeat again for persisting Adds but mark these with a leading 1, -1
# e.g. y - x -> 1*-1*(x - y)
if isinstance(e, Basic):
negs = {}
for a in sorted(e.atoms(Add), key=default_sort_key):
if a in ignore:
continue
if a in reps:
negs[a] = reps[a]
elif a.could_extract_minus_sign():
negs[a] = Mul._from_args([S.One, S.NegativeOne, -a])
e = e.xreplace(negs)
return e
def sub_post(e):
""" Replace 1*-1*x with -x.
"""
replacements = []
for node in preorder_traversal(e):
if isinstance(node, Mul) and \
node.args[0] is S.One and node.args[1] is S.NegativeOne:
replacements.append((node, -Mul._from_args(node.args[2:])))
for node, replacement in replacements:
e = e.xreplace({node: replacement})
return e
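# A minimal illustrative sketch of the explicit 1*-1 marker that sub_pre
# leaves on sign-extractable Adds and that sub_post collapses back into a
# plain sign:
#
#     >>> from sympy import Mul
#     >>> from sympy.abc import x
#     >>> from sympy.simplify.cse_opts import sub_post
#     >>> sub_post(Mul(1, -1, x, evaluate=False))
#     -x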
|
ea1de60622e3707d7965d962f692b4e2346663147cb860dce0bb5f00c6b9e52d | from collections import defaultdict
from sympy.core import (Basic, S, Add, Mul, Pow, Symbol, sympify,
expand_func, Function, Dummy, Expr, factor_terms,
expand_power_exp, Eq)
from sympy.core.compatibility import iterable, ordered, as_int
from sympy.core.parameters import global_parameters
from sympy.core.function import (expand_log, count_ops, _mexpand, _coeff_isneg,
nfloat, expand_mul)
from sympy.core.numbers import Float, I, pi, Rational, Integer
from sympy.core.relational import Relational
from sympy.core.rules import Transform
from sympy.core.sympify import _sympify
from sympy.functions import gamma, exp, sqrt, log, exp_polar, re
from sympy.functions.combinatorial.factorials import CombinatorialFunction
from sympy.functions.elementary.complexes import unpolarify, Abs
from sympy.functions.elementary.exponential import ExpBase
from sympy.functions.elementary.hyperbolic import HyperbolicFunction
from sympy.functions.elementary.integers import ceiling
from sympy.functions.elementary.piecewise import Piecewise, piecewise_fold
from sympy.functions.elementary.trigonometric import TrigonometricFunction
from sympy.functions.special.bessel import besselj, besseli, besselk, jn, bessely
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.polys import together, cancel, factor
from sympy.simplify.combsimp import combsimp
from sympy.simplify.cse_opts import sub_pre, sub_post
from sympy.simplify.powsimp import powsimp
from sympy.simplify.radsimp import radsimp, fraction, collect_abs
from sympy.simplify.sqrtdenest import sqrtdenest
from sympy.simplify.trigsimp import trigsimp, exptrigsimp
from sympy.utilities.iterables import has_variety, sift
import mpmath
def separatevars(expr, symbols=[], dict=False, force=False):
"""
Separates variables in an expression, if possible. By
default, it separates with respect to all symbols in an
expression and collects constant coefficients that are
independent of symbols.
Explanation
===========
If ``dict=True`` then the separated terms will be returned
in a dictionary keyed to their corresponding symbols.
By default, all symbols in the expression will appear as
keys; if symbols are provided, then all those symbols will
be used as keys, and any terms in the expression containing
other symbols or non-symbols will be returned keyed to the
string 'coeff'. (Passing None for symbols will return the
expression in a dictionary keyed to 'coeff'.)
If ``force=True``, then bases of powers will be separated regardless
of assumptions on the symbols involved.
Notes
=====
The order of the factors is determined by Mul, so that the
separated expressions may not necessarily be grouped together.
Although factoring is necessary to separate variables in some
expressions, it is not necessary in all cases, so one should not
count on the returned factors being factored.
Examples
========
>>> from sympy.abc import x, y, z, alpha
>>> from sympy import separatevars, sin
>>> separatevars((x*y)**y)
(x*y)**y
>>> separatevars((x*y)**y, force=True)
x**y*y**y
>>> e = 2*x**2*z*sin(y)+2*z*x**2
>>> separatevars(e)
2*x**2*z*(sin(y) + 1)
>>> separatevars(e, symbols=(x, y), dict=True)
{'coeff': 2*z, x: x**2, y: sin(y) + 1}
>>> separatevars(e, [x, y, alpha], dict=True)
{'coeff': 2*z, alpha: 1, x: x**2, y: sin(y) + 1}
If the expression is not really separable, or is only partially
separable, separatevars will do the best it can to separate it
by using factoring.
>>> separatevars(x + x*y - 3*x**2)
-x*(3*x - y - 1)
If the expression is not separable then expr is returned unchanged
or (if dict=True) then None is returned.
>>> eq = 2*x + y*sin(x)
>>> separatevars(eq) == eq
True
>>> separatevars(2*x + y*sin(x), symbols=(x, y), dict=True) is None
True
"""
expr = sympify(expr)
if dict:
return _separatevars_dict(_separatevars(expr, force), symbols)
else:
return _separatevars(expr, force)
def _separatevars(expr, force):
if isinstance(expr, Abs):
arg = expr.args[0]
if arg.is_Mul and not arg.is_number:
s = separatevars(arg, dict=True, force=force)
if s is not None:
return Mul(*map(expr.func, s.values()))
else:
return expr
if len(expr.free_symbols) < 2:
return expr
# don't destroy a Mul since much of the work may already be done
if expr.is_Mul:
args = list(expr.args)
changed = False
for i, a in enumerate(args):
args[i] = separatevars(a, force)
changed = changed or args[i] != a
if changed:
expr = expr.func(*args)
return expr
# get a Pow ready for expansion
if expr.is_Pow:
expr = Pow(separatevars(expr.base, force=force), expr.exp)
# First try other expansion methods
expr = expr.expand(mul=False, multinomial=False, force=force)
_expr, reps = posify(expr) if force else (expr, {})
expr = factor(_expr).subs(reps)
if not expr.is_Add:
return expr
# Find any common coefficients to pull out
args = list(expr.args)
commonc = args[0].args_cnc(cset=True, warn=False)[0]
for i in args[1:]:
commonc &= i.args_cnc(cset=True, warn=False)[0]
commonc = Mul(*commonc)
commonc = commonc.as_coeff_Mul()[1] # ignore constants
commonc_set = commonc.args_cnc(cset=True, warn=False)[0]
# remove them
for i, a in enumerate(args):
c, nc = a.args_cnc(cset=True, warn=False)
c = c - commonc_set
args[i] = Mul(*c)*Mul(*nc)
nonsepar = Add(*args)
if len(nonsepar.free_symbols) > 1:
_expr = nonsepar
_expr, reps = posify(_expr) if force else (_expr, {})
_expr = (factor(_expr)).subs(reps)
if not _expr.is_Add:
nonsepar = _expr
return commonc*nonsepar
def _separatevars_dict(expr, symbols):
if symbols:
if not all(t.is_Atom for t in symbols):
raise ValueError("symbols must be Atoms.")
symbols = list(symbols)
elif symbols is None:
return {'coeff': expr}
else:
symbols = list(expr.free_symbols)
if not symbols:
return None
ret = {i: [] for i in symbols + ['coeff']}
for i in Mul.make_args(expr):
expsym = i.free_symbols
intersection = set(symbols).intersection(expsym)
if len(intersection) > 1:
return None
if len(intersection) == 0:
# There are no symbols, so it is part of the coefficient
ret['coeff'].append(i)
else:
ret[intersection.pop()].append(i)
# rebuild
for k, v in ret.items():
ret[k] = Mul(*v)
return ret
def _is_sum_surds(p):
args = p.args if p.is_Add else [p]
for y in args:
if not ((y**2).is_Rational and y.is_extended_real):
return False
return True
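# A minimal illustrative sketch: a "sum of surds" is one whose terms all
# square to rationals.
#
#     >>> from sympy import sqrt
#     >>> from sympy.abc import x
#     >>> from sympy.simplify.simplify import _is_sum_surds
#     >>> _is_sum_surds(1 + sqrt(2) + sqrt(3))
#     True
#     >>> _is_sum_surds(sqrt(2) + x)
#     False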
def posify(eq):
"""Return ``eq`` (with generic symbols made positive) and a
dictionary containing the mapping between the old and new
symbols.
Explanation
===========
Any symbol that has positive=None will be replaced with a positive dummy
symbol having the same name. This replacement will allow more symbolic
processing of expressions, especially those involving powers and
logarithms.
A dictionary that can be sent to subs to restore ``eq`` to its original
symbols is also returned.
>>> from sympy import posify, Symbol, log, solve
>>> from sympy.abc import x
>>> posify(x + Symbol('p', positive=True) + Symbol('n', negative=True))
(_x + n + p, {_x: x})
>>> eq = 1/x
>>> log(eq).expand()
log(1/x)
>>> log(posify(eq)[0]).expand()
-log(_x)
>>> p, rep = posify(eq)
>>> log(p).expand().subs(rep)
-log(x)
It is possible to apply the same transformations to an iterable
of expressions:
>>> eq = x**2 - 4
>>> solve(eq, x)
[-2, 2]
>>> eq_x, reps = posify([eq, x]); eq_x
[_x**2 - 4, _x]
>>> solve(*eq_x)
[2]
"""
eq = sympify(eq)
if iterable(eq):
f = type(eq)
eq = list(eq)
syms = set()
for e in eq:
syms = syms.union(e.atoms(Symbol))
reps = {}
for s in syms:
reps.update({v: k for k, v in posify(s)[1].items()})
for i, e in enumerate(eq):
eq[i] = e.subs(reps)
return f(eq), {r: s for s, r in reps.items()}
reps = {s: Dummy(s.name, positive=True, **s.assumptions0)
for s in eq.free_symbols if s.is_positive is None}
eq = eq.subs(reps)
return eq, {r: s for s, r in reps.items()}
def hypersimp(f, k):
"""Given combinatorial term f(k) simplify its consecutive term ratio
i.e. f(k+1)/f(k). The input term can be composed of functions and
integer sequences which have equivalent representation in terms
of gamma special function.
Explanation
===========
The algorithm performs three basic steps:
1. Rewrite all functions in terms of gamma, if possible.
2. Rewrite all occurrences of gamma in terms of products
of gamma and rising factorial with integer, absolute
constant exponent.
3. Perform simplification of nested fractions, powers
and if the resulting expression is a quotient of
polynomials, reduce their total degree.
If f(k) is hypergeometric then as result we arrive with a
quotient of polynomials of minimal degree. Otherwise None
is returned.
For more information on the implemented algorithm refer to:
1. W. Koepf, Algorithms for m-fold Hypergeometric Summation,
Journal of Symbolic Computation (1995) 20, 399-417
"""
f = sympify(f)
g = f.subs(k, k + 1) / f
g = g.rewrite(gamma)
if g.has(Piecewise):
g = piecewise_fold(g)
g = g.args[-1][0]
g = expand_func(g)
g = powsimp(g, deep=True, combine='exp')
if g.is_rational_function(k):
return simplify(g, ratio=S.Infinity)
else:
return None
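# A minimal illustrative sketch: for a hypergeometric term the consecutive
# term ratio reduces to a rational function of k.
#
#     >>> from sympy import factorial, symbols, hypersimp
#     >>> k = symbols('k')
#     >>> hypersimp(factorial(k), k)   # (k + 1)!/k! simplifies to k + 1
#     k + 1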
def hypersimilar(f, g, k):
"""
Returns True if ``f`` and ``g`` are hyper-similar.
Explanation
===========
Similarity in hypergeometric sense means that a quotient of
f(k) and g(k) is a rational function in ``k``. This procedure
is useful in solving recurrence relations.
For more information see hypersimp().
"""
f, g = list(map(sympify, (f, g)))
h = (f/g).rewrite(gamma)
h = h.expand(func=True, basic=False)
return h.is_rational_function(k)
def signsimp(expr, evaluate=None):
"""Make all Add sub-expressions canonical wrt sign.
Explanation
===========
If an Add subexpression, ``a``, can have a sign extracted,
as determined by could_extract_minus_sign, it is replaced
with Mul(-1, a, evaluate=False). This allows signs to be
extracted from powers and products.
Examples
========
>>> from sympy import signsimp, exp, symbols
>>> from sympy.abc import x, y
>>> i = symbols('i', odd=True)
>>> n = -1 + 1/x
>>> n/x/(-n)**2 - 1/n/x
(-1 + 1/x)/(x*(1 - 1/x)**2) - 1/(x*(-1 + 1/x))
>>> signsimp(_)
0
>>> x*n + x*-n
x*(-1 + 1/x) + x*(1 - 1/x)
>>> signsimp(_)
0
Since powers automatically handle leading signs
>>> (-2)**i
-2**i
signsimp can be used to put the base of a power with an integer
exponent into canonical form:
>>> n**i
(-1 + 1/x)**i
By default, signsimp doesn't leave behind any hollow simplification:
if making an Add canonical wrt sign didn't change the expression, the
original Add is restored. If this is not desired then the keyword
``evaluate`` can be set to False:
>>> e = exp(y - x)
>>> signsimp(e) == e
True
>>> signsimp(e, evaluate=False)
exp(-(x - y))
"""
if evaluate is None:
evaluate = global_parameters.evaluate
expr = sympify(expr)
if not isinstance(expr, (Expr, Relational)) or expr.is_Atom:
return expr
e = sub_post(sub_pre(expr))
if not isinstance(e, (Expr, Relational)) or e.is_Atom:
return e
if e.is_Add:
return e.func(*[signsimp(a, evaluate) for a in e.args])
if evaluate:
e = e.xreplace({m: -(-m) for m in e.atoms(Mul) if -(-m) != m})
return e
def simplify(expr, ratio=1.7, measure=count_ops, rational=False, inverse=False, doit=True, **kwargs):
"""Simplifies the given expression.
Explanation
===========
Simplification is not a well defined term and the exact strategies
this function tries can change in the future versions of SymPy. If
your algorithm relies on "simplification" (whatever it is), try to
determine what you need exactly - is it powsimp()?, radsimp()?,
together()?, logcombine()?, or something else? And use this particular
function directly, because those are well defined and thus your algorithm
will be robust.
Nonetheless, especially for interactive use, or when you don't know
anything about the structure of the expression, simplify() tries to apply
intelligent heuristics to make the input expression "simpler". For
example:
>>> from sympy import simplify, cos, sin
>>> from sympy.abc import x, y
>>> a = (x + x**2)/(x*sin(y)**2 + x*cos(y)**2)
>>> a
(x**2 + x)/(x*sin(y)**2 + x*cos(y)**2)
>>> simplify(a)
x + 1
Note that we could have obtained the same result by using specific
simplification functions:
>>> from sympy import trigsimp, cancel
>>> trigsimp(a)
(x**2 + x)/x
>>> cancel(_)
x + 1
In some cases, applying :func:`simplify` may actually result in some more
complicated expression. The default ``ratio=1.7`` prevents more extreme
cases: if (result length)/(input length) > ratio, then input is returned
unmodified. The ``measure`` parameter lets you specify the function used
to determine how complex an expression is. The function should take a
single argument as an expression and return a number such that if
expression ``a`` is more complex than expression ``b``, then
``measure(a) > measure(b)``. The default measure function is
:func:`~.count_ops`, which returns the total number of operations in the
expression.
For example, if ``ratio=1``, ``simplify`` output can't be longer
than input.
::
>>> from sympy import sqrt, simplify, count_ops, oo
>>> root = 1/(sqrt(2)+3)
Since ``simplify(root)`` would result in a slightly longer expression,
root is returned unchanged instead::
>>> simplify(root, ratio=1) == root
True
If ``ratio=oo``, simplify will be applied anyway::
>>> count_ops(simplify(root, ratio=oo)) > count_ops(root)
True
    Note that the shortest expression is not necessarily the simplest, so
setting ``ratio`` to 1 may not be a good idea.
Heuristically, the default value ``ratio=1.7`` seems like a reasonable
choice.
You can easily define your own measure function based on what you feel
should represent the "size" or "complexity" of the input expression. Note
that some choices, such as ``lambda expr: len(str(expr))`` may appear to be
good metrics, but have other problems (in this case, the measure function
may slow down simplify too much for very large expressions). If you don't
know what a good metric would be, the default, ``count_ops``, is a good
one.
For example:
>>> from sympy import symbols, log
>>> a, b = symbols('a b', positive=True)
>>> g = log(a) + log(b) + log(a)*log(1/b)
>>> h = simplify(g)
>>> h
log(a*b**(1 - log(a)))
>>> count_ops(g)
8
>>> count_ops(h)
5
So you can see that ``h`` is simpler than ``g`` using the count_ops metric.
However, we may not like how ``simplify`` (in this case, using
``logcombine``) has created the ``b**(log(1/a) + 1)`` term. A simple way
to reduce this would be to give more weight to powers as operations in
``count_ops``. We can do this by using the ``visual=True`` option:
>>> print(count_ops(g, visual=True))
2*ADD + DIV + 4*LOG + MUL
>>> print(count_ops(h, visual=True))
2*LOG + MUL + POW + SUB
>>> from sympy import Symbol, S
>>> def my_measure(expr):
... POW = Symbol('POW')
... # Discourage powers by giving POW a weight of 10
... count = count_ops(expr, visual=True).subs(POW, 10)
... # Every other operation gets a weight of 1 (the default)
... count = count.replace(Symbol, type(S.One))
... return count
>>> my_measure(g)
8
>>> my_measure(h)
14
>>> 15./8 > 1.7 # 1.7 is the default ratio
True
>>> simplify(g, measure=my_measure)
-log(a)*log(b) + log(a) + log(b)
Note that because ``simplify()`` internally tries many different
simplification strategies and then compares them using the measure
function, we get a completely different result that is still different
from the input expression by doing this.
If ``rational=True``, Floats will be recast as Rationals before simplification.
If ``rational=None``, Floats will be recast as Rationals but the result will
    be recast as Floats. If ``rational=False`` (default) then nothing will be done
to the Floats.
If ``inverse=True``, it will be assumed that a composition of inverse
functions, such as sin and asin, can be cancelled in any order.
For example, ``asin(sin(x))`` will yield ``x`` without checking whether
x belongs to the set where this relation is true. The default is
False.
Note that ``simplify()`` automatically calls ``doit()`` on the final
expression. You can avoid this behavior by passing ``doit=False`` as
an argument.
"""
def shorter(*choices):
"""
Return the choice that has the fewest ops. In case of a tie,
the expression listed first is selected.
"""
if not has_variety(choices):
return choices[0]
return min(choices, key=measure)
def done(e):
rv = e.doit() if doit else e
return shorter(rv, collect_abs(rv))
expr = sympify(expr)
kwargs = dict(
ratio=kwargs.get('ratio', ratio),
measure=kwargs.get('measure', measure),
rational=kwargs.get('rational', rational),
inverse=kwargs.get('inverse', inverse),
doit=kwargs.get('doit', doit))
# no routine for Expr needs to check for is_zero
if isinstance(expr, Expr) and expr.is_zero:
return S.Zero
_eval_simplify = getattr(expr, '_eval_simplify', None)
if _eval_simplify is not None:
return _eval_simplify(**kwargs)
original_expr = expr = collect_abs(signsimp(expr))
if not isinstance(expr, Basic) or not expr.args: # XXX: temporary hack
return expr
if inverse and expr.has(Function):
expr = inversecombine(expr)
if not expr.args: # simplified to atomic
return expr
# do deep simplification
handled = Add, Mul, Pow, ExpBase
expr = expr.replace(
# here, checking for x.args is not enough because Basic has
# args but Basic does not always play well with replace, e.g.
# when simultaneous is True found expressions will be masked
# off with a Dummy but not all Basic objects in an expression
# can be replaced with a Dummy
lambda x: isinstance(x, Expr) and x.args and not isinstance(
x, handled),
lambda x: x.func(*[simplify(i, **kwargs) for i in x.args]),
simultaneous=False)
if not isinstance(expr, handled):
return done(expr)
if not expr.is_commutative:
expr = nc_simplify(expr)
# TODO: Apply different strategies, considering expression pattern:
# is it a purely rational function? Is there any trigonometric function?...
# See also https://github.com/sympy/sympy/pull/185.
# rationalize Floats
floats = False
if rational is not False and expr.has(Float):
floats = True
expr = nsimplify(expr, rational=True)
expr = bottom_up(expr, lambda w: getattr(w, 'normal', lambda: w)())
expr = Mul(*powsimp(expr).as_content_primitive())
_e = cancel(expr)
expr1 = shorter(_e, _mexpand(_e).cancel()) # issue 6829
expr2 = shorter(together(expr, deep=True), together(expr1, deep=True))
if ratio is S.Infinity:
expr = expr2
else:
expr = shorter(expr2, expr1, expr)
if not isinstance(expr, Basic): # XXX: temporary hack
return expr
expr = factor_terms(expr, sign=False)
from sympy.simplify.hyperexpand import hyperexpand
from sympy.functions.special.bessel import BesselBase
from sympy import Sum, Product, Integral
from sympy.functions.elementary.complexes import sign
# must come before `Piecewise` since this introduces more `Piecewise` terms
if expr.has(sign):
expr = expr.rewrite(Abs)
# Deal with Piecewise separately to avoid recursive growth of expressions
if expr.has(Piecewise):
# Fold into a single Piecewise
expr = piecewise_fold(expr)
# Apply doit, if doit=True
expr = done(expr)
# Still a Piecewise?
if expr.has(Piecewise):
# Fold into a single Piecewise, in case doit lead to some
# expressions being Piecewise
expr = piecewise_fold(expr)
# kroneckersimp also affects Piecewise
if expr.has(KroneckerDelta):
expr = kroneckersimp(expr)
# Still a Piecewise?
if expr.has(Piecewise):
from sympy.functions.elementary.piecewise import piecewise_simplify
# Do not apply doit on the segments as it has already
# been done above, but simplify
expr = piecewise_simplify(expr, deep=True, doit=False)
# Still a Piecewise?
if expr.has(Piecewise):
# Try factor common terms
expr = shorter(expr, factor_terms(expr))
# As all expressions have been simplified above with the
# complete simplify, nothing more needs to be done here
return expr
# hyperexpand automatically only works on hypergeometric terms
# Do this after the Piecewise part to avoid recursive expansion
expr = hyperexpand(expr)
if expr.has(KroneckerDelta):
expr = kroneckersimp(expr)
if expr.has(BesselBase):
expr = besselsimp(expr)
if expr.has(TrigonometricFunction, HyperbolicFunction):
expr = trigsimp(expr, deep=True)
if expr.has(log):
expr = shorter(expand_log(expr, deep=True), logcombine(expr))
if expr.has(CombinatorialFunction, gamma):
# expression with gamma functions or non-integer arguments is
# automatically passed to gammasimp
expr = combsimp(expr)
if expr.has(Sum):
expr = sum_simplify(expr, **kwargs)
if expr.has(Integral):
expr = expr.xreplace({
i: factor_terms(i) for i in expr.atoms(Integral)})
if expr.has(Product):
expr = product_simplify(expr)
from sympy.physics.units import Quantity
from sympy.physics.units.util import quantity_simplify
if expr.has(Quantity):
expr = quantity_simplify(expr)
short = shorter(powsimp(expr, combine='exp', deep=True), powsimp(expr), expr)
short = shorter(short, cancel(short))
short = shorter(short, factor_terms(short), expand_power_exp(expand_mul(short)))
if short.has(TrigonometricFunction, HyperbolicFunction, ExpBase):
short = exptrigsimp(short)
# get rid of hollow 2-arg Mul factorization
hollow_mul = Transform(
lambda x: Mul(*x.args),
lambda x:
x.is_Mul and
len(x.args) == 2 and
x.args[0].is_Number and
x.args[1].is_Add and
x.is_commutative)
expr = short.xreplace(hollow_mul)
numer, denom = expr.as_numer_denom()
if denom.is_Add:
n, d = fraction(radsimp(1/denom, symbolic=False, max_terms=1))
if n is not S.One:
expr = (numer*n).expand()/d
if expr.could_extract_minus_sign():
n, d = fraction(expr)
if d != 0:
expr = signsimp(-n/(-d))
if measure(expr) > ratio*measure(original_expr):
expr = original_expr
# restore floats
if floats and rational is None:
expr = nfloat(expr, exponent=False)
return done(expr)
def sum_simplify(s, **kwargs):
"""Main function for Sum simplification"""
from sympy.concrete.summations import Sum
from sympy.core.function import expand
if not isinstance(s, Add):
s = s.xreplace({a: sum_simplify(a, **kwargs)
for a in s.atoms(Add) if a.has(Sum)})
s = expand(s)
if not isinstance(s, Add):
return s
terms = s.args
s_t = [] # Sum Terms
o_t = [] # Other Terms
for term in terms:
sum_terms, other = sift(Mul.make_args(term),
lambda i: isinstance(i, Sum), binary=True)
if not sum_terms:
o_t.append(term)
continue
other = [Mul(*other)]
s_t.append(Mul(*(other + [s._eval_simplify(**kwargs) for s in sum_terms])))
result = Add(sum_combine(s_t), *o_t)
return result
def sum_combine(s_t):
"""Helper function for Sum simplification
    Attempts to simplify a list of sums by combining their limits and summands;
    returns the simplified sum.
"""
from sympy.concrete.summations import Sum
used = [False] * len(s_t)
for method in range(2):
for i, s_term1 in enumerate(s_t):
if not used[i]:
for j, s_term2 in enumerate(s_t):
if not used[j] and i != j:
temp = sum_add(s_term1, s_term2, method)
if isinstance(temp, Sum) or isinstance(temp, Mul):
s_t[i] = temp
s_term1 = s_t[i]
used[j] = True
result = S.Zero
for i, s_term in enumerate(s_t):
if not used[i]:
result = Add(result, s_term)
return result
def factor_sum(self, limits=None, radical=False, clear=False, fraction=False, sign=True):
"""Return Sum with constant factors extracted.
If ``limits`` is specified then ``self`` is the summand; the other
keywords are passed to ``factor_terms``.
Examples
========
>>> from sympy import Sum
>>> from sympy.abc import x, y
>>> from sympy.simplify.simplify import factor_sum
>>> s = Sum(x*y, (x, 1, 3))
>>> factor_sum(s)
y*Sum(x, (x, 1, 3))
>>> factor_sum(s.function, s.limits)
y*Sum(x, (x, 1, 3))
"""
# XXX deprecate in favor of direct call to factor_terms
from sympy.concrete.summations import Sum
kwargs = dict(radical=radical, clear=clear,
fraction=fraction, sign=sign)
expr = Sum(self, *limits) if limits else self
return factor_terms(expr, **kwargs)
def sum_add(self, other, method=0):
"""Helper function for Sum simplification"""
from sympy.concrete.summations import Sum
from sympy import Mul
    # we know this is something in terms of a constant * a sum
    # so we temporarily put the constants inside for simplification
    # then simplify the result
def __refactor(val):
args = Mul.make_args(val)
sumv = next(x for x in args if isinstance(x, Sum))
constant = Mul(*[x for x in args if x != sumv])
return Sum(constant * sumv.function, *sumv.limits)
if isinstance(self, Mul):
rself = __refactor(self)
else:
rself = self
if isinstance(other, Mul):
rother = __refactor(other)
else:
rother = other
if type(rself) == type(rother):
if method == 0:
if rself.limits == rother.limits:
return factor_sum(Sum(rself.function + rother.function, *rself.limits))
elif method == 1:
if simplify(rself.function - rother.function) == 0:
if len(rself.limits) == len(rother.limits) == 1:
i = rself.limits[0][0]
x1 = rself.limits[0][1]
y1 = rself.limits[0][2]
j = rother.limits[0][0]
x2 = rother.limits[0][1]
y2 = rother.limits[0][2]
if i == j:
if x2 == y1 + 1:
return factor_sum(Sum(rself.function, (i, x1, y2)))
elif x1 == y2 + 1:
return factor_sum(Sum(rself.function, (i, x2, y1)))
return Add(self, other)
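# A minimal illustrative sketch: with method=1 two Sums of the same summand
# over adjacent ranges are merged into a single Sum.
#
#     >>> from sympy import Sum
#     >>> from sympy.abc import x
#     >>> from sympy.simplify.simplify import sum_add
#     >>> sum_add(Sum(x, (x, 1, 3)), Sum(x, (x, 4, 6)), method=1)
#     Sum(x, (x, 1, 6))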
def product_simplify(s):
"""Main function for Product simplification"""
from sympy.concrete.products import Product
terms = Mul.make_args(s)
p_t = [] # Product Terms
o_t = [] # Other Terms
for term in terms:
if isinstance(term, Product):
p_t.append(term)
else:
o_t.append(term)
used = [False] * len(p_t)
for method in range(2):
for i, p_term1 in enumerate(p_t):
if not used[i]:
for j, p_term2 in enumerate(p_t):
if not used[j] and i != j:
if isinstance(product_mul(p_term1, p_term2, method), Product):
p_t[i] = product_mul(p_term1, p_term2, method)
used[j] = True
result = Mul(*o_t)
for i, p_term in enumerate(p_t):
if not used[i]:
result = Mul(result, p_term)
return result
def product_mul(self, other, method=0):
"""Helper function for Product simplification"""
from sympy.concrete.products import Product
if type(self) == type(other):
if method == 0:
if self.limits == other.limits:
return Product(self.function * other.function, *self.limits)
elif method == 1:
if simplify(self.function - other.function) == 0:
if len(self.limits) == len(other.limits) == 1:
i = self.limits[0][0]
x1 = self.limits[0][1]
y1 = self.limits[0][2]
j = other.limits[0][0]
x2 = other.limits[0][1]
y2 = other.limits[0][2]
if i == j:
if x2 == y1 + 1:
return Product(self.function, (i, x1, y2))
elif x1 == y2 + 1:
return Product(self.function, (i, x2, y1))
return Mul(self, other)
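# A minimal illustrative sketch: with method=1 two Products of the same factor
# over adjacent ranges are merged into a single Product.
#
#     >>> from sympy import Product
#     >>> from sympy.abc import x
#     >>> from sympy.simplify.simplify import product_mul
#     >>> product_mul(Product(x, (x, 1, 3)), Product(x, (x, 4, 6)), method=1)
#     Product(x, (x, 1, 6))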
def _nthroot_solve(p, n, prec):
"""
helper function for ``nthroot``
It denests ``p**Rational(1, n)`` using its minimal polynomial
"""
from sympy.polys.numberfields import _minimal_polynomial_sq
from sympy.solvers import solve
while n % 2 == 0:
p = sqrtdenest(sqrt(p))
n = n // 2
if n == 1:
return p
pn = p**Rational(1, n)
x = Symbol('x')
f = _minimal_polynomial_sq(p, n, x)
if f is None:
return None
sols = solve(f, x)
for sol in sols:
if abs(sol - pn).n() < 1./10**prec:
sol = sqrtdenest(sol)
if _mexpand(sol**n) == p:
return sol
def logcombine(expr, force=False):
"""
Takes logarithms and combines them using the following rules:
- log(x) + log(y) == log(x*y) if both are positive
- a*log(x) == log(x**a) if x is positive and a is real
If ``force`` is ``True`` then the assumptions above will be assumed to hold if
there is no assumption already in place on a quantity. For example, if
``a`` is imaginary or the argument negative, force will not perform a
combination but if ``a`` is a symbol with no assumptions the change will
take place.
Examples
========
>>> from sympy import Symbol, symbols, log, logcombine, I
>>> from sympy.abc import a, x, y, z
>>> logcombine(a*log(x) + log(y) - log(z))
a*log(x) + log(y) - log(z)
>>> logcombine(a*log(x) + log(y) - log(z), force=True)
log(x**a*y/z)
>>> x,y,z = symbols('x,y,z', positive=True)
>>> a = Symbol('a', real=True)
>>> logcombine(a*log(x) + log(y) - log(z))
log(x**a*y/z)
The transformation is limited to factors and/or terms that
contain logs, so the result depends on the initial state of
expansion:
>>> eq = (2 + 3*I)*log(x)
>>> logcombine(eq, force=True) == eq
True
>>> logcombine(eq.expand(), force=True)
log(x**2) + I*log(x**3)
See Also
========
posify: replace all symbols with symbols having positive assumptions
sympy.core.function.expand_log: expand the logarithms of products
and powers; the opposite of logcombine
"""
def f(rv):
if not (rv.is_Add or rv.is_Mul):
return rv
def gooda(a):
# bool to tell whether the leading ``a`` in ``a*log(x)``
# could appear as log(x**a)
return (a is not S.NegativeOne and # -1 *could* go, but we disallow
(a.is_extended_real or force and a.is_extended_real is not False))
def goodlog(l):
# bool to tell whether log ``l``'s argument can combine with others
a = l.args[0]
return a.is_positive or force and a.is_nonpositive is not False
other = []
logs = []
log1 = defaultdict(list)
for a in Add.make_args(rv):
if isinstance(a, log) and goodlog(a):
log1[()].append(([], a))
elif not a.is_Mul:
other.append(a)
else:
ot = []
co = []
lo = []
for ai in a.args:
if ai.is_Rational and ai < 0:
ot.append(S.NegativeOne)
co.append(-ai)
elif isinstance(ai, log) and goodlog(ai):
lo.append(ai)
elif gooda(ai):
co.append(ai)
else:
ot.append(ai)
if len(lo) > 1:
logs.append((ot, co, lo))
elif lo:
log1[tuple(ot)].append((co, lo[0]))
else:
other.append(a)
# if there is only one log in other, put it with the
# good logs
if len(other) == 1 and isinstance(other[0], log):
log1[()].append(([], other.pop()))
# if there is only one log at each coefficient and none have
# an exponent to place inside the log then there is nothing to do
if not logs and all(len(log1[k]) == 1 and log1[k][0] == [] for k in log1):
return rv
# collapse multi-logs as far as possible in a canonical way
# TODO: see if x*log(a)+x*log(a)*log(b) -> x*log(a)*(1+log(b))?
        # -- in this case, it's unambiguous, but if there were a log(c) in
# each term then it's arbitrary whether they are grouped by log(a) or
# by log(c). So for now, just leave this alone; it's probably better to
# let the user decide
for o, e, l in logs:
l = list(ordered(l))
e = log(l.pop(0).args[0]**Mul(*e))
while l:
li = l.pop(0)
e = log(li.args[0]**e)
c, l = Mul(*o), e
if isinstance(l, log): # it should be, but check to be sure
log1[(c,)].append(([], l))
else:
other.append(c*l)
# logs that have the same coefficient can multiply
for k in list(log1.keys()):
log1[Mul(*k)] = log(logcombine(Mul(*[
l.args[0]**Mul(*c) for c, l in log1.pop(k)]),
force=force), evaluate=False)
# logs that have oppositely signed coefficients can divide
for k in ordered(list(log1.keys())):
if not k in log1: # already popped as -k
continue
if -k in log1:
# figure out which has the minus sign; the one with
# more op counts should be the one
num, den = k, -k
if num.count_ops() > den.count_ops():
num, den = den, num
other.append(
num*log(log1.pop(num).args[0]/log1.pop(den).args[0],
evaluate=False))
else:
other.append(k*log1.pop(k))
return Add(*other)
return bottom_up(expr, f)
def inversecombine(expr):
"""Simplify the composition of a function and its inverse.
Explanation
===========
No attention is paid to whether the inverse is a left inverse or a
right inverse; thus, the result will in general not be equivalent
to the original expression.
Examples
========
>>> from sympy.simplify.simplify import inversecombine
>>> from sympy import asin, sin, log, exp
>>> from sympy.abc import x
>>> inversecombine(asin(sin(x)))
x
>>> inversecombine(2*log(exp(3*x)))
6*x
"""
def f(rv):
if rv.is_Function and hasattr(rv, "inverse"):
if (len(rv.args) == 1 and len(rv.args[0].args) == 1 and
isinstance(rv.args[0], rv.inverse(argindex=1))):
rv = rv.args[0].args[0]
return rv
return bottom_up(expr, f)
def walk(e, *target):
"""Iterate through the args that are the given types (target) and
return a list of the args that were traversed; arguments
that are not of the specified types are not traversed.
Examples
========
>>> from sympy.simplify.simplify import walk
>>> from sympy import Min, Max
>>> from sympy.abc import x, y, z
>>> list(walk(Min(x, Max(y, Min(1, z))), Min))
[Min(x, Max(y, Min(1, z)))]
>>> list(walk(Min(x, Max(y, Min(1, z))), Min, Max))
[Min(x, Max(y, Min(1, z))), Max(y, Min(1, z)), Min(1, z)]
See Also
========
bottom_up
"""
if isinstance(e, target):
yield e
for i in e.args:
yield from walk(i, *target)
def bottom_up(rv, F, atoms=False, nonbasic=False):
"""Apply ``F`` to all expressions in an expression tree from the
bottom up. If ``atoms`` is True, apply ``F`` even if there are no args;
if ``nonbasic`` is True, try to apply ``F`` to non-Basic objects.
"""
args = getattr(rv, 'args', None)
if args is not None:
if args:
args = tuple([bottom_up(a, F, atoms, nonbasic) for a in args])
if args != rv.args:
rv = rv.func(*args)
rv = F(rv)
elif atoms:
rv = F(rv)
else:
if nonbasic:
try:
rv = F(rv)
except TypeError:
pass
return rv
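# A minimal illustrative sketch: F is applied to the rebuilt children first
# and then to the node itself; with atoms=True it is applied to leaves too.
#
#     >>> from sympy.abc import x
#     >>> from sympy.simplify.simplify import bottom_up
#     >>> bottom_up(x + 1, lambda w: w**2)
#     (x + 1)**2
#     >>> bottom_up(x + 1, lambda w: w**2, atoms=True)
#     (x**2 + 1)**2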
def kroneckersimp(expr):
"""
Simplify expressions with KroneckerDelta.
The only simplification currently attempted is to identify multiplicative cancellation:
Examples
========
>>> from sympy import KroneckerDelta, kroneckersimp
>>> from sympy.abc import i
>>> kroneckersimp(1 + KroneckerDelta(0, i) * KroneckerDelta(1, i))
1
"""
def args_cancel(args1, args2):
for i1 in range(2):
for i2 in range(2):
a1 = args1[i1]
a2 = args2[i2]
a3 = args1[(i1 + 1) % 2]
a4 = args2[(i2 + 1) % 2]
if Eq(a1, a2) is S.true and Eq(a3, a4) is S.false:
return True
return False
def cancel_kronecker_mul(m):
from sympy.utilities.iterables import subsets
args = m.args
deltas = [a for a in args if isinstance(a, KroneckerDelta)]
for delta1, delta2 in subsets(deltas, 2):
args1 = delta1.args
args2 = delta2.args
if args_cancel(args1, args2):
return 0*m
return m
if not expr.has(KroneckerDelta):
return expr
if expr.has(Piecewise):
expr = expr.rewrite(KroneckerDelta)
newexpr = expr
expr = None
while newexpr != expr:
expr = newexpr
newexpr = expr.replace(lambda e: isinstance(e, Mul), cancel_kronecker_mul)
return expr
def besselsimp(expr):
"""
Simplify bessel-type functions.
Explanation
===========
This routine tries to simplify bessel-type functions. Currently it only
works on the Bessel J and I functions, however. It works by looking at all
such functions in turn, and eliminating factors of "I" and "-1" (actually
their polar equivalents) in front of the argument. Then, functions of
    half-integer order are rewritten using trigonometric functions and
functions of integer order (> 1) are rewritten using functions
of low order. Finally, if the expression was changed, compute
factorization of the result with factor().
>>> from sympy import besselj, besseli, besselsimp, polar_lift, I, S
>>> from sympy.abc import z, nu
>>> besselsimp(besselj(nu, z*polar_lift(-1)))
exp(I*pi*nu)*besselj(nu, z)
>>> besselsimp(besseli(nu, z*polar_lift(-I)))
exp(-I*pi*nu/2)*besselj(nu, z)
>>> besselsimp(besseli(S(-1)/2, z))
sqrt(2)*cosh(z)/(sqrt(pi)*sqrt(z))
>>> besselsimp(z*besseli(0, z) + z*(besseli(2, z))/2 + besseli(1, z))
3*z*besseli(0, z)/2
"""
# TODO
# - better algorithm?
# - simplify (cos(pi*b)*besselj(b,z) - besselj(-b,z))/sin(pi*b) ...
# - use contiguity relations?
def replacer(fro, to, factors):
factors = set(factors)
def repl(nu, z):
if factors.intersection(Mul.make_args(z)):
return to(nu, z)
return fro(nu, z)
return repl
def torewrite(fro, to):
def tofunc(nu, z):
return fro(nu, z).rewrite(to)
return tofunc
def tominus(fro):
def tofunc(nu, z):
return exp(I*pi*nu)*fro(nu, exp_polar(-I*pi)*z)
return tofunc
orig_expr = expr
ifactors = [I, exp_polar(I*pi/2), exp_polar(-I*pi/2)]
expr = expr.replace(
besselj, replacer(besselj,
torewrite(besselj, besseli), ifactors))
expr = expr.replace(
besseli, replacer(besseli,
torewrite(besseli, besselj), ifactors))
minusfactors = [-1, exp_polar(I*pi)]
expr = expr.replace(
besselj, replacer(besselj, tominus(besselj), minusfactors))
expr = expr.replace(
besseli, replacer(besseli, tominus(besseli), minusfactors))
z0 = Dummy('z')
def expander(fro):
def repl(nu, z):
if (nu % 1) == S.Half:
return simplify(trigsimp(unpolarify(
fro(nu, z0).rewrite(besselj).rewrite(jn).expand(
func=True)).subs(z0, z)))
elif nu.is_Integer and nu > 1:
return fro(nu, z).expand(func=True)
return fro(nu, z)
return repl
expr = expr.replace(besselj, expander(besselj))
expr = expr.replace(bessely, expander(bessely))
expr = expr.replace(besseli, expander(besseli))
expr = expr.replace(besselk, expander(besselk))
def _bessel_simp_recursion(expr):
def _use_recursion(bessel, expr):
while True:
bessels = expr.find(lambda x: isinstance(x, bessel))
try:
for ba in sorted(bessels, key=lambda x: re(x.args[0])):
a, x = ba.args
bap1 = bessel(a+1, x)
bap2 = bessel(a+2, x)
if expr.has(bap1) and expr.has(bap2):
expr = expr.subs(ba, 2*(a+1)/x*bap1 - bap2)
break
else:
return expr
except (ValueError, TypeError):
return expr
if expr.has(besselj):
expr = _use_recursion(besselj, expr)
if expr.has(bessely):
expr = _use_recursion(bessely, expr)
return expr
expr = _bessel_simp_recursion(expr)
if expr != orig_expr:
expr = expr.factor()
return expr
def nthroot(expr, n, max_len=4, prec=15):
"""
Compute a real nth-root of a sum of surds.
Parameters
==========
expr : sum of surds
n : integer
max_len : maximum number of surds passed as constants to ``nsimplify``
Algorithm
=========
First ``nsimplify`` is used to get a candidate root; if it is not a
root the minimal polynomial is computed; the answer is one of its
roots.
Examples
========
>>> from sympy.simplify.simplify import nthroot
>>> from sympy import sqrt
>>> nthroot(90 + 34*sqrt(7), 3)
sqrt(7) + 3
"""
expr = sympify(expr)
n = sympify(n)
p = expr**Rational(1, n)
if not n.is_integer:
return p
if not _is_sum_surds(expr):
return p
surds = []
coeff_muls = [x.as_coeff_Mul() for x in expr.args]
for x, y in coeff_muls:
if not x.is_rational:
return p
if y is S.One:
continue
if not (y.is_Pow and y.exp == S.Half and y.base.is_integer):
return p
surds.append(y)
surds.sort()
surds = surds[:max_len]
if expr < 0 and n % 2 == 1:
p = (-expr)**Rational(1, n)
a = nsimplify(p, constants=surds)
res = a if _mexpand(a**n) == _mexpand(-expr) else p
return -res
a = nsimplify(p, constants=surds)
if _mexpand(a) is not _mexpand(p) and _mexpand(a**n) == _mexpand(expr):
return _mexpand(a)
expr = _nthroot_solve(expr, n, prec)
if expr is None:
return p
return expr
def nsimplify(expr, constants=(), tolerance=None, full=False, rational=None,
rational_conversion='base10'):
"""
Find a simple representation for a number or, if there are free symbols or
if ``rational=True``, then replace Floats with their Rational equivalents. If
no change is made and rational is not False then Floats will at least be
converted to Rationals.
Explanation
===========
For numerical expressions, a simple formula that numerically matches the
given numerical expression is sought (and the input should be possible
to evalf to a precision of at least 30 digits).
Optionally, a list of (rationally independent) constants to
include in the formula may be given.
A lower tolerance may be set to find less exact matches. If no tolerance
is given then the least precise value will set the tolerance (e.g. Floats
default to 15 digits of precision, so would be tolerance=10**-15).
With ``full=True``, a more extensive search is performed
(this is useful to find simpler numbers when the tolerance
is set low).
When converting to rational, if rational_conversion='base10' (the default), then
convert floats to rationals using their base-10 (string) representation.
When rational_conversion='exact' it uses the exact, base-2 representation.
Examples
========
>>> from sympy import nsimplify, sqrt, GoldenRatio, exp, I, pi
>>> nsimplify(4/(1+sqrt(5)), [GoldenRatio])
-2 + 2*GoldenRatio
>>> nsimplify((1/(exp(3*pi*I/5)+1)))
1/2 - I*sqrt(sqrt(5)/10 + 1/4)
>>> nsimplify(I**I, [pi])
exp(-pi/2)
>>> nsimplify(pi, tolerance=0.01)
22/7
>>> nsimplify(0.333333333333333, rational=True, rational_conversion='exact')
6004799503160655/18014398509481984
>>> nsimplify(0.333333333333333, rational=True)
1/3
See Also
========
sympy.core.function.nfloat
"""
try:
return sympify(as_int(expr))
except (TypeError, ValueError):
pass
expr = sympify(expr).xreplace({
Float('inf'): S.Infinity,
Float('-inf'): S.NegativeInfinity,
})
if expr is S.Infinity or expr is S.NegativeInfinity:
return expr
if rational or expr.free_symbols:
return _real_to_rational(expr, tolerance, rational_conversion)
# SymPy's default tolerance for Rationals is 15; other numbers may have
# lower tolerances set, so use them to pick the largest tolerance if None
# was given
if tolerance is None:
tolerance = 10**-min([15] +
[mpmath.libmp.libmpf.prec_to_dps(n._prec)
for n in expr.atoms(Float)])
# XXX should prec be set independent of tolerance or should it be computed
# from tolerance?
prec = 30
bprec = int(prec*3.33)
constants_dict = {}
for constant in constants:
constant = sympify(constant)
v = constant.evalf(prec)
if not v.is_Float:
raise ValueError("constants must be real-valued")
constants_dict[str(constant)] = v._to_mpmath(bprec)
exprval = expr.evalf(prec, chop=True)
re, im = exprval.as_real_imag()
# safety check to make sure that this evaluated to a number
if not (re.is_Number and im.is_Number):
return expr
def nsimplify_real(x):
orig = mpmath.mp.dps
xv = x._to_mpmath(bprec)
try:
# We'll be happy with low precision if a simple fraction
if not (tolerance or full):
mpmath.mp.dps = 15
rat = mpmath.pslq([xv, 1])
if rat is not None:
return Rational(-int(rat[1]), int(rat[0]))
mpmath.mp.dps = prec
newexpr = mpmath.identify(xv, constants=constants_dict,
tol=tolerance, full=full)
if not newexpr:
raise ValueError
if full:
newexpr = newexpr[0]
expr = sympify(newexpr)
if x and not expr: # don't let x become 0
raise ValueError
if expr.is_finite is False and not xv in [mpmath.inf, mpmath.ninf]:
raise ValueError
return expr
finally:
# even though there are returns above, this is executed
# before leaving
mpmath.mp.dps = orig
try:
if re:
re = nsimplify_real(re)
if im:
im = nsimplify_real(im)
except ValueError:
if rational is None:
return _real_to_rational(expr, rational_conversion=rational_conversion)
return expr
rv = re + im*S.ImaginaryUnit
# if there was a change or rational is explicitly not wanted
# return the value, else return the Rational representation
if rv != expr or rational is False:
return rv
return _real_to_rational(expr, rational_conversion=rational_conversion)
def _real_to_rational(expr, tolerance=None, rational_conversion='base10'):
"""
Replace all reals in expr with rationals.
Examples
========
>>> from sympy.simplify.simplify import _real_to_rational
>>> from sympy.abc import x
>>> _real_to_rational(.76 + .1*x**.5)
sqrt(x)/10 + 19/25
If rational_conversion='base10', this uses the base-10 string. If
rational_conversion='exact', the exact, base-2 representation is used.
>>> _real_to_rational(0.333333333333333, rational_conversion='exact')
6004799503160655/18014398509481984
>>> _real_to_rational(0.333333333333333)
1/3
"""
expr = _sympify(expr)
inf = Float('inf')
p = expr
reps = {}
reduce_num = None
if tolerance is not None and tolerance < 1:
reduce_num = ceiling(1/tolerance)
for fl in p.atoms(Float):
key = fl
if reduce_num is not None:
r = Rational(fl).limit_denominator(reduce_num)
elif (tolerance is not None and tolerance >= 1 and
fl.is_Integer is False):
r = Rational(tolerance*round(fl/tolerance)
).limit_denominator(int(tolerance))
else:
if rational_conversion == 'exact':
r = Rational(fl)
reps[key] = r
continue
elif rational_conversion != 'base10':
raise ValueError("rational_conversion must be 'base10' or 'exact'")
r = nsimplify(fl, rational=False)
# e.g. log(3).n() -> log(3) instead of a Rational
if fl and not r:
r = Rational(fl)
elif not r.is_Rational:
if fl == inf or fl == -inf:
r = S.ComplexInfinity
elif fl < 0:
fl = -fl
d = Pow(10, int(mpmath.log(fl)/mpmath.log(10)))
r = -Rational(str(fl/d))*d
elif fl > 0:
d = Pow(10, int(mpmath.log(fl)/mpmath.log(10)))
r = Rational(str(fl/d))*d
else:
r = Integer(0)
reps[key] = r
return p.subs(reps, simultaneous=True)
def clear_coefficients(expr, rhs=S.Zero):
"""Return `p, r` where `p` is the expression obtained when Rational
additive and multiplicative coefficients of `expr` have been stripped
away in a naive fashion (i.e. without simplification). The operations
needed to remove the coefficients will be applied to `rhs` and returned
as `r`.
Examples
========
>>> from sympy.simplify.simplify import clear_coefficients
>>> from sympy.abc import x, y
>>> from sympy import Dummy
>>> expr = 4*y*(6*x + 3)
>>> clear_coefficients(expr - 2)
(y*(2*x + 1), 1/6)
When solving 2 or more expressions like `expr = a`,
`expr = b`, etc..., it is advantageous to provide a Dummy symbol
for `rhs` and simply replace it with `a`, `b`, etc... in `r`.
>>> rhs = Dummy('rhs')
>>> clear_coefficients(expr, rhs)
(y*(2*x + 1), _rhs/12)
>>> _[1].subs(rhs, 2)
1/6
"""
was = None
free = expr.free_symbols
if expr.is_Rational:
return (S.Zero, rhs - expr)
while expr and was != expr:
was = expr
m, expr = (
expr.as_content_primitive()
if free else
factor_terms(expr).as_coeff_Mul(rational=True))
rhs /= m
c, expr = expr.as_coeff_Add(rational=True)
rhs -= c
expr = signsimp(expr, evaluate = False)
if _coeff_isneg(expr):
expr = -expr
rhs = -rhs
return expr, rhs
def nc_simplify(expr, deep=True):
'''
Simplify a non-commutative expression composed of multiplication
and raising to a power by grouping repeated subterms into one power.
Priority is given to simplifications that give the fewest number
of arguments in the end (for example, in a*b*a*b*c*a*b*c simplifying
to (a*b)**2*c*a*b*c gives 5 arguments while a*b*(a*b*c)**2 has 3).
If ``expr`` is a sum of such terms, the sum of the simplified terms
is returned.
Keyword argument ``deep`` controls whether or not subexpressions
nested deeper inside the main expression are simplified. See examples
below. Setting `deep` to `False` can save time on nested expressions
that don't need simplifying on all levels.
Examples
========
>>> from sympy import symbols
>>> from sympy.simplify.simplify import nc_simplify
>>> a, b, c = symbols("a b c", commutative=False)
>>> nc_simplify(a*b*a*b*c*a*b*c)
a*b*(a*b*c)**2
>>> expr = a**2*b*a**4*b*a**4
>>> nc_simplify(expr)
a**2*(b*a**4)**2
>>> nc_simplify(a*b*a*b*c**2*(a*b)**2*c**2)
((a*b)**2*c**2)**2
>>> nc_simplify(a*b*a*b + 2*a*c*a**2*c*a**2*c*a)
(a*b)**2 + 2*(a*c*a)**3
>>> nc_simplify(b**-1*a**-1*(a*b)**2)
a*b
>>> nc_simplify(a**-1*b**-1*c*a)
(b*a)**(-1)*c*a
>>> expr = (a*b*a*b)**2*a*c*a*c
>>> nc_simplify(expr)
(a*b)**4*(a*c)**2
>>> nc_simplify(expr, deep=False)
(a*b*a*b)**2*(a*c)**2
'''
from sympy.matrices.expressions import (MatrixExpr, MatAdd, MatMul,
MatPow, MatrixSymbol)
from sympy.core.exprtools import factor_nc
if isinstance(expr, MatrixExpr):
expr = expr.doit(inv_expand=False)
_Add, _Mul, _Pow, _Symbol = MatAdd, MatMul, MatPow, MatrixSymbol
else:
_Add, _Mul, _Pow, _Symbol = Add, Mul, Pow, Symbol
# =========== Auxiliary functions ========================
def _overlaps(args):
# Calculate a list of lists m such that m[i][j] contains the lengths
# of all possible overlaps between args[:i+1] and args[i+1+j:].
# An overlap is a suffix of the prefix that matches a prefix
# of the suffix.
# For example, let expr=c*a*b*a*b*a*b*a*b. Then m[3][0] contains
# the lengths of overlaps of c*a*b*a*b with a*b*a*b. The overlaps
# are a*b*a*b, a*b and the empty word so that m[3][0]=[4,2,0].
# All overlaps rather than only the longest one are recorded
# because this information helps calculate other overlap lengths.
m = [[([1, 0] if a == args[0] else [0]) for a in args[1:]]]
for i in range(1, len(args)):
overlaps = []
j = 0
for j in range(len(args) - i - 1):
overlap = []
for v in m[i-1][j+1]:
if j + i + 1 + v < len(args) and args[i] == args[j+i+1+v]:
overlap.append(v + 1)
overlap += [0]
overlaps.append(overlap)
m.append(overlaps)
return m
def _reduce_inverses(_args):
# replace consecutive negative powers by an inverse
# of a product of positive powers, e.g. a**-1*b**-1*c
        # will simplify to (b*a)**-1*c;
# return that new args list and the number of negative
# powers in it (inv_tot)
inv_tot = 0 # total number of inverses
inverses = []
args = []
for arg in _args:
if isinstance(arg, _Pow) and arg.args[1] < 0:
inverses = [arg**-1] + inverses
inv_tot += 1
else:
if len(inverses) == 1:
args.append(inverses[0]**-1)
elif len(inverses) > 1:
args.append(_Pow(_Mul(*inverses), -1))
inv_tot -= len(inverses) - 1
inverses = []
args.append(arg)
if inverses:
args.append(_Pow(_Mul(*inverses), -1))
inv_tot -= len(inverses) - 1
return inv_tot, tuple(args)
def get_score(s):
# compute the number of arguments of s
# (including in nested expressions) overall
# but ignore exponents
if isinstance(s, _Pow):
return get_score(s.args[0])
elif isinstance(s, (_Add, _Mul)):
return sum([get_score(a) for a in s.args])
return 1
def compare(s, alt_s):
# compare two possible simplifications and return a
# "better" one
if s != alt_s and get_score(alt_s) < get_score(s):
return alt_s
return s
# ========================================================
if not isinstance(expr, (_Add, _Mul, _Pow)) or expr.is_commutative:
return expr
args = expr.args[:]
if isinstance(expr, _Pow):
if deep:
return _Pow(nc_simplify(args[0]), args[1]).doit()
else:
return expr
elif isinstance(expr, _Add):
return _Add(*[nc_simplify(a, deep=deep) for a in args]).doit()
else:
# get the non-commutative part
c_args, args = expr.args_cnc()
com_coeff = Mul(*c_args)
if com_coeff != 1:
return com_coeff*nc_simplify(expr/com_coeff, deep=deep)
inv_tot, args = _reduce_inverses(args)
# if most arguments are negative, work with the inverse
# of the expression, e.g. a**-1*b*a**-1*c**-1 will become
# (c*a*b**-1*a)**-1 at the end so can work with c*a*b**-1*a
invert = False
if inv_tot > len(args)/2:
invert = True
args = [a**-1 for a in args[::-1]]
if deep:
args = tuple(nc_simplify(a) for a in args)
m = _overlaps(args)
# simps will be {subterm: end} where `end` is the ending
# index of a sequence of repetitions of subterm;
# this is for not wasting time with subterms that are part
# of longer, already considered sequences
simps = {}
post = 1
pre = 1
# the simplification coefficient is the number of
# arguments by which contracting a given sequence
# would reduce the word; e.g. in a*b*a*b*c*a*b*c,
# contracting a*b*a*b to (a*b)**2 removes 3 arguments
# while a*b*c*a*b*c to (a*b*c)**2 removes 6. It's
# better to contract the latter so simplification
# with a maximum simplification coefficient will be chosen
max_simp_coeff = 0
simp = None # information about future simplification
for i in range(1, len(args)):
simp_coeff = 0
l = 0 # length of a subterm
p = 0 # the power of a subterm
if i < len(args) - 1:
rep = m[i][0]
start = i # starting index of the repeated sequence
end = i+1 # ending index of the repeated sequence
if i == len(args)-1 or rep == [0]:
# no subterm is repeated at this stage, at least as
# far as the arguments are concerned - there may be
# a repetition if powers are taken into account
if (isinstance(args[i], _Pow) and
not isinstance(args[i].args[0], _Symbol)):
subterm = args[i].args[0].args
l = len(subterm)
if args[i-l:i] == subterm:
# e.g. a*b in a*b*(a*b)**2 is not repeated
# in args (= [a, b, (a*b)**2]) but it
# can be matched here
p += 1
start -= l
if args[i+1:i+1+l] == subterm:
# e.g. a*b in (a*b)**2*a*b
p += 1
end += l
if p:
p += args[i].args[1]
else:
continue
else:
l = rep[0] # length of the longest repeated subterm at this point
start -= l - 1
subterm = args[start:end]
p = 2
end += l
if subterm in simps and simps[subterm] >= start:
# the subterm is part of a sequence that
# has already been considered
continue
# count how many times it's repeated
while end < len(args):
if l in m[end-1][0]:
p += 1
end += l
elif isinstance(args[end], _Pow) and args[end].args[0].args == subterm:
# for cases like a*b*a*b*(a*b)**2*a*b
p += args[end].args[1]
end += 1
else:
break
# see if another match can be made, e.g.
# for b*a**2 in b*a**2*b*a**3 or a*b in
# a**2*b*a*b
pre_exp = 0
pre_arg = 1
if start - l >= 0 and args[start-l+1:start] == subterm[1:]:
if isinstance(subterm[0], _Pow):
pre_arg = subterm[0].args[0]
exp = subterm[0].args[1]
else:
pre_arg = subterm[0]
exp = 1
if isinstance(args[start-l], _Pow) and args[start-l].args[0] == pre_arg:
pre_exp = args[start-l].args[1] - exp
start -= l
p += 1
elif args[start-l] == pre_arg:
pre_exp = 1 - exp
start -= l
p += 1
post_exp = 0
post_arg = 1
if end + l - 1 < len(args) and args[end:end+l-1] == subterm[:-1]:
if isinstance(subterm[-1], _Pow):
post_arg = subterm[-1].args[0]
exp = subterm[-1].args[1]
else:
post_arg = subterm[-1]
exp = 1
if isinstance(args[end+l-1], _Pow) and args[end+l-1].args[0] == post_arg:
post_exp = args[end+l-1].args[1] - exp
end += l
p += 1
elif args[end+l-1] == post_arg:
post_exp = 1 - exp
end += l
p += 1
# Consider a*b*a**2*b*a**2*b*a:
# b*a**2 is explicitly repeated, but note
# that in this case a*b*a is also repeated
# so there are two possible simplifications:
# a*(b*a**2)**3*a**-1 or (a*b*a)**3
# The latter is obviously simpler.
# But in a*b*a**2*b**2*a**2 the simplifications are
# a*(b*a**2)**2 and (a*b*a)**3*a in which case
# it's better to stick with the shorter subterm
if post_exp and exp % 2 == 0 and start > 0:
exp = exp/2
_pre_exp = 1
_post_exp = 1
if isinstance(args[start-1], _Pow) and args[start-1].args[0] == post_arg:
_post_exp = post_exp + exp
_pre_exp = args[start-1].args[1] - exp
elif args[start-1] == post_arg:
_post_exp = post_exp + exp
_pre_exp = 1 - exp
if _pre_exp == 0 or _post_exp == 0:
if not pre_exp:
start -= 1
post_exp = _post_exp
pre_exp = _pre_exp
pre_arg = post_arg
subterm = (post_arg**exp,) + subterm[:-1] + (post_arg**exp,)
simp_coeff += end-start
if post_exp:
simp_coeff -= 1
if pre_exp:
simp_coeff -= 1
simps[subterm] = end
if simp_coeff > max_simp_coeff:
max_simp_coeff = simp_coeff
simp = (start, _Mul(*subterm), p, end, l)
pre = pre_arg**pre_exp
post = post_arg**post_exp
if simp:
subterm = _Pow(nc_simplify(simp[1], deep=deep), simp[2])
pre = nc_simplify(_Mul(*args[:simp[0]])*pre, deep=deep)
post = post*nc_simplify(_Mul(*args[simp[3]:]), deep=deep)
simp = pre*subterm*post
if pre != 1 or post != 1:
# new simplifications may be possible but no need
# to recurse over arguments
simp = nc_simplify(simp, deep=False)
else:
simp = _Mul(*args)
if invert:
simp = _Pow(simp, -1)
# see if factor_nc(expr) is simplified better
if not isinstance(expr, MatrixExpr):
f_expr = factor_nc(expr)
if f_expr != expr:
alt_simp = nc_simplify(f_expr, deep=deep)
simp = compare(simp, alt_simp)
else:
simp = simp.doit(inv_expand=False)
return simp
def dotprodsimp(expr, withsimp=False):
"""Simplification for a sum of products targeted at the kind of blowup that
occurs during summation of products. Intended to reduce expression blowup
during matrix multiplication or other similar operations. Only works with
    algebraic expressions and does not recurse into non-algebraic args.
Parameters
==========
withsimp : bool, optional
Specifies whether a flag should be returned along with the expression
to indicate roughly whether simplification was successful. It is used
in ``MatrixArithmetic._eval_pow_by_recursion`` to avoid attempting to
simplify an expression repetitively which does not simplify.
"""
def count_ops_alg(expr):
"""Optimized count algebraic operations with no recursion into
non-algebraic args that ``core.function.count_ops`` does. Also returns
whether rational functions may be present according to negative
exponents of powers or non-number fractions.
Returns
=======
ops, ratfunc : int, bool
``ops`` is the number of algebraic operations starting at the top
level expression (not recursing into non-alg children). ``ratfunc``
specifies whether the expression MAY contain rational functions
which ``cancel`` MIGHT optimize.
"""
ops = 0
args = [expr]
ratfunc = False
while args:
a = args.pop()
if not isinstance(a, Basic):
continue
if a.is_Rational:
if a is not S.One: # -1/3 = NEG + DIV
                    ops += bool(a.p < 0) + bool(a.q != 1)
elif a.is_Mul:
if _coeff_isneg(a):
ops += 1
if a.args[0] is S.NegativeOne:
a = a.as_two_terms()[1]
else:
a = -a
n, d = fraction(a)
if n.is_Integer:
ops += 1 + bool (n < 0)
args.append(d) # won't be -Mul but could be Add
elif d is not S.One:
if not d.is_Integer:
args.append(d)
ratfunc=True
ops += 1
args.append(n) # could be -Mul
else:
ops += len(a.args) - 1
args.extend(a.args)
elif a.is_Add:
laargs = len(a.args)
negs = 0
for ai in a.args:
if _coeff_isneg(ai):
negs += 1
ai = -ai
args.append(ai)
ops += laargs - (negs != laargs) # -x - y = NEG + SUB
elif a.is_Pow:
ops += 1
args.append(a.base)
if not ratfunc:
ratfunc = a.exp.is_negative is not False
return ops, ratfunc
def nonalg_subs_dummies(expr, dummies):
"""Substitute dummy variables for non-algebraic expressions to avoid
evaluation of non-algebraic terms that ``polys.polytools.cancel`` does.
"""
if not expr.args:
return expr
if expr.is_Add or expr.is_Mul or expr.is_Pow:
args = None
for i, a in enumerate(expr.args):
c = nonalg_subs_dummies(a, dummies)
if c is a:
continue
if args is None:
args = list(expr.args)
args[i] = c
if args is None:
return expr
return expr.func(*args)
return dummies.setdefault(expr, Dummy())
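    # Illustrative sketch of the helper above (names hypothetical): for
    # ``sin(x)/(sin(x) + 1)`` it would return ``_d/(_d + 1)`` with ``sin(x)``
    # mapped to the Dummy ``_d`` in ``dummies``, so that ``cancel`` below only
    # ever sees a purely algebraic expression.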
simplified = False # doesn't really mean simplified, rather "can simplify again"
if isinstance(expr, Basic) and (expr.is_Add or expr.is_Mul or expr.is_Pow):
expr2 = expr.expand(deep=True, modulus=None, power_base=False,
power_exp=False, mul=True, log=False, multinomial=True, basic=False)
if expr2 != expr:
expr = expr2
simplified = True
exprops, ratfunc = count_ops_alg(expr)
if exprops >= 6: # empirically tested cutoff for expensive simplification
if ratfunc:
dummies = {}
expr2 = nonalg_subs_dummies(expr, dummies)
if expr2 is expr or count_ops_alg(expr2)[0] >= 6: # check again after substitution
expr3 = cancel(expr2)
if expr3 != expr2:
expr = expr3.subs([(d, e) for e, d in dummies.items()])
simplified = True
# very special case: x/(x-1) - 1/(x-1) -> 1
        elif (exprops == 5 and expr.is_Add and expr.args[0].is_Mul and
                expr.args[1].is_Mul and expr.args[0].args[-1].is_Pow and
                expr.args[1].args[-1].is_Pow and
                expr.args[0].args[-1].exp is S.NegativeOne and
                expr.args[1].args[-1].exp is S.NegativeOne):
            expr2 = together(expr)
expr2ops = count_ops_alg(expr2)[0]
if expr2ops < exprops:
expr = expr2
simplified = True
else:
simplified = True
return (expr, simplified) if withsimp else expr
|
87332005ca78b461da90cbc74b0568b8a67818df30d6e7ffdb97e41f6294405f | """Tools for applying functions to specified parts of expressions. """
from sympy.core import sympify
def use(expr, func, level=0, args=(), kwargs={}):
"""
Use ``func`` to transform ``expr`` at the given level.
Examples
========
>>> from sympy import use, expand
>>> from sympy.abc import x, y
>>> f = (x + y)**2*x + 1
>>> use(f, expand, level=2)
x*(x**2 + 2*x*y + y**2) + 1
>>> expand(f)
x**3 + 2*x**2*y + x*y**2 + 1
"""
def _use(expr, level):
if not level:
return func(expr, *args, **kwargs)
else:
if expr.is_Atom:
return expr
else:
level -= 1
_args = []
for arg in expr.args:
_args.append(_use(arg, level))
return expr.__class__(*_args)
return _use(sympify(expr), level)
|
6c1b4a8bf7bd8ff6aa680c23c9ba36060aaba78928c502ea4ac98624b79c1988 | from collections import defaultdict
from sympy import SYMPY_DEBUG
from sympy.core import expand_power_base, sympify, Add, S, Mul, Derivative, Pow, symbols, expand_mul
from sympy.core.add import _unevaluated_Add
from sympy.core.compatibility import iterable, ordered, default_sort_key
from sympy.core.parameters import global_parameters
from sympy.core.exprtools import Factors, gcd_terms
from sympy.core.function import _mexpand
from sympy.core.mul import _keep_coeff, _unevaluated_Mul
from sympy.core.numbers import Rational
from sympy.functions import exp, sqrt, log
from sympy.functions.elementary.complexes import Abs
from sympy.polys import gcd
from sympy.simplify.sqrtdenest import sqrtdenest
def collect(expr, syms, func=None, evaluate=None, exact=False, distribute_order_term=True):
"""
Collect additive terms of an expression.
Explanation
===========
This function collects additive terms of an expression with respect
to a list of expression up to powers with rational exponents. By the
term symbol here are meant arbitrary expressions, which can contain
powers, products, sums etc. In other words symbol is a pattern which
will be searched for in the expression's terms.
    The input expression is not expanded by :func:`collect`, so the user is
    expected to provide an expression in an appropriate form. This makes
:func:`collect` more predictable as there is no magic happening behind the
scenes. However, it is important to note, that powers of products are
converted to products of powers using the :func:`~.expand_power_base`
function.
    There are two possible types of output. If the ``evaluate`` flag is
    set, this function will return an expression with collected terms;
    otherwise it will return a dictionary with expressions up to rational
    powers as keys and collected coefficients as values.
Examples
========
>>> from sympy import S, collect, expand, factor, Wild
>>> from sympy.abc import a, b, c, x, y
This function can collect symbolic coefficients in polynomials or
rational expressions. It will manage to find all integer or rational
powers of collection variable::
>>> collect(a*x**2 + b*x**2 + a*x - b*x + c, x)
c + x**2*(a + b) + x*(a - b)
The same result can be achieved in dictionary form::
>>> d = collect(a*x**2 + b*x**2 + a*x - b*x + c, x, evaluate=False)
>>> d[x**2]
a + b
>>> d[x]
a - b
>>> d[S.One]
c
You can also work with multivariate polynomials. However, remember that
    this function is greedy so it will care only about a single symbol at a time,
in specification order::
>>> collect(x**2 + y*x**2 + x*y + y + a*y, [x, y])
x**2*(y + 1) + x*y + y*(a + 1)
Also more complicated expressions can be used as patterns::
>>> from sympy import sin, log
>>> collect(a*sin(2*x) + b*sin(2*x), sin(2*x))
(a + b)*sin(2*x)
>>> collect(a*x*log(x) + b*(x*log(x)), x*log(x))
x*(a + b)*log(x)
You can use wildcards in the pattern::
>>> w = Wild('w1')
>>> collect(a*x**y - b*x**y, w**y)
x**y*(a - b)
It is also possible to work with symbolic powers, although it has more
complicated behavior, because in this case power's base and symbolic part
of the exponent are treated as a single symbol::
>>> collect(a*x**c + b*x**c, x)
a*x**c + b*x**c
>>> collect(a*x**c + b*x**c, x**c)
x**c*(a + b)
    However, if you incorporate rationals into the exponents, then you will get
    the well-known behavior::
>>> collect(a*x**(2*c) + b*x**(2*c), x**c)
x**(2*c)*(a + b)
Note also that all previously stated facts about :func:`collect` function
apply to the exponential function, so you can get::
>>> from sympy import exp
>>> collect(a*exp(2*x) + b*exp(2*x), exp(x))
(a + b)*exp(2*x)
If you are interested only in collecting specific powers of some symbols
then set ``exact`` flag in arguments::
>>> collect(a*x**7 + b*x**7, x, exact=True)
a*x**7 + b*x**7
>>> collect(a*x**7 + b*x**7, x**7, exact=True)
x**7*(a + b)
You can also apply this function to differential equations, where
derivatives of arbitrary order can be collected. Note that if you
collect with respect to a function or a derivative of a function, all
derivatives of that function will also be collected. Use
``exact=True`` to prevent this from happening::
>>> from sympy import Derivative as D, collect, Function
>>> f = Function('f') (x)
>>> collect(a*D(f,x) + b*D(f,x), D(f,x))
(a + b)*Derivative(f(x), x)
>>> collect(a*D(D(f,x),x) + b*D(D(f,x),x), f)
(a + b)*Derivative(f(x), (x, 2))
>>> collect(a*D(D(f,x),x) + b*D(D(f,x),x), D(f,x), exact=True)
a*Derivative(f(x), (x, 2)) + b*Derivative(f(x), (x, 2))
>>> collect(a*D(f,x) + b*D(f,x) + a*f + b*f, f)
(a + b)*f(x) + (a + b)*Derivative(f(x), x)
Or you can even match both derivative order and exponent at the same time::
>>> collect(a*D(D(f,x),x)**2 + b*D(D(f,x),x)**2, D(f,x))
(a + b)*Derivative(f(x), (x, 2))**2
Finally, you can apply a function to each of the collected coefficients.
For example you can factorize symbolic coefficients of polynomial::
>>> f = expand((x + a + 1)**3)
>>> collect(f, x, factor)
x**3 + 3*x**2*(a + 1) + 3*x*(a + 1)**2 + (a + 1)**3
.. note:: Arguments are expected to be in expanded form, so you might have
to call :func:`~.expand` prior to calling this function.
See Also
========
collect_const, collect_sqrt, rcollect
"""
from sympy.core.assumptions import assumptions
from sympy.utilities.iterables import sift
from sympy.core.symbol import Dummy, Wild
expr = sympify(expr)
syms = [sympify(i) for i in (syms if iterable(syms) else [syms])]
# replace syms[i] if it is not x, -x or has Wild symbols
cond = lambda x: x.is_Symbol or (-x).is_Symbol or bool(
x.atoms(Wild))
_, nonsyms = sift(syms, cond, binary=True)
if nonsyms:
reps = dict(zip(nonsyms, [Dummy(**assumptions(i)) for i in nonsyms]))
syms = [reps.get(s, s) for s in syms]
rv = collect(expr.subs(reps), syms,
func=func, evaluate=evaluate, exact=exact,
distribute_order_term=distribute_order_term)
urep = {v: k for k, v in reps.items()}
if not isinstance(rv, dict):
return rv.xreplace(urep)
else:
return {urep.get(k, k).xreplace(urep): v.xreplace(urep)
for k, v in rv.items()}
if evaluate is None:
evaluate = global_parameters.evaluate
def make_expression(terms):
product = []
for term, rat, sym, deriv in terms:
if deriv is not None:
var, order = deriv
while order > 0:
term, order = Derivative(term, var), order - 1
if sym is None:
if rat is S.One:
product.append(term)
else:
product.append(Pow(term, rat))
else:
product.append(Pow(term, rat*sym))
return Mul(*product)
def parse_derivative(deriv):
# scan derivatives tower in the input expression and return
# underlying function and maximal differentiation order
expr, sym, order = deriv.expr, deriv.variables[0], 1
for s in deriv.variables[1:]:
if s == sym:
order += 1
else:
raise NotImplementedError(
'Improve MV Derivative support in collect')
while isinstance(expr, Derivative):
s0 = expr.variables[0]
for s in expr.variables:
if s != s0:
raise NotImplementedError(
'Improve MV Derivative support in collect')
if s0 == sym:
expr, order = expr.expr, order + len(expr.variables)
else:
break
return expr, (sym, Rational(order))
def parse_term(expr):
"""Parses expression expr and outputs tuple (sexpr, rat_expo,
sym_expo, deriv)
where:
- sexpr is the base expression
- rat_expo is the rational exponent that sexpr is raised to
- sym_expo is the symbolic exponent that sexpr is raised to
        - deriv contains the derivatives of the expression
For example, the output of x would be (x, 1, None, None)
the output of 2**x would be (2, 1, x, None).
"""
rat_expo, sym_expo = S.One, None
sexpr, deriv = expr, None
if expr.is_Pow:
if isinstance(expr.base, Derivative):
sexpr, deriv = parse_derivative(expr.base)
else:
sexpr = expr.base
if expr.exp.is_Number:
rat_expo = expr.exp
else:
coeff, tail = expr.exp.as_coeff_Mul()
if coeff.is_Number:
rat_expo, sym_expo = coeff, tail
else:
sym_expo = expr.exp
elif isinstance(expr, exp):
arg = expr.args[0]
if arg.is_Rational:
sexpr, rat_expo = S.Exp1, arg
elif arg.is_Mul:
coeff, tail = arg.as_coeff_Mul(rational=True)
sexpr, rat_expo = exp(tail), coeff
elif isinstance(expr, Derivative):
sexpr, deriv = parse_derivative(expr)
return sexpr, rat_expo, sym_expo, deriv
def parse_expression(terms, pattern):
"""Parse terms searching for a pattern.
Terms is a list of tuples as returned by parse_terms;
Pattern is an expression treated as a product of factors.
"""
pattern = Mul.make_args(pattern)
if len(terms) < len(pattern):
# pattern is longer than matched product
# so no chance for positive parsing result
return None
else:
pattern = [parse_term(elem) for elem in pattern]
terms = terms[:] # need a copy
elems, common_expo, has_deriv = [], None, False
for elem, e_rat, e_sym, e_ord in pattern:
if elem.is_Number and e_rat == 1 and e_sym is None:
# a constant is a match for everything
continue
for j in range(len(terms)):
if terms[j] is None:
continue
term, t_rat, t_sym, t_ord = terms[j]
# keeping track of whether one of the terms had
# a derivative or not as this will require rebuilding
# the expression later
if t_ord is not None:
has_deriv = True
if (term.match(elem) is not None and
(t_sym == e_sym or t_sym is not None and
e_sym is not None and
t_sym.match(e_sym) is not None)):
if exact is False:
# we don't have to be exact so find common exponent
# for both expression's term and pattern's element
expo = t_rat / e_rat
if common_expo is None:
# first time
common_expo = expo
else:
# common exponent was negotiated before so
# there is no chance for a pattern match unless
# common and current exponents are equal
if common_expo != expo:
common_expo = 1
else:
# we ought to be exact so all fields of
                            # interest must match in every detail
if e_rat != t_rat or e_ord != t_ord:
continue
# found common term so remove it from the expression
# and try to match next element in the pattern
elems.append(terms[j])
terms[j] = None
break
else:
# pattern element not found
return None
return [_f for _f in terms if _f], elems, common_expo, has_deriv
if evaluate:
if expr.is_Add:
o = expr.getO() or 0
expr = expr.func(*[
collect(a, syms, func, True, exact, distribute_order_term)
for a in expr.args if a != o]) + o
elif expr.is_Mul:
return expr.func(*[
collect(term, syms, func, True, exact, distribute_order_term)
for term in expr.args])
elif expr.is_Pow:
b = collect(
expr.base, syms, func, True, exact, distribute_order_term)
return Pow(b, expr.exp)
syms = [expand_power_base(i, deep=False) for i in syms]
order_term = None
if distribute_order_term:
order_term = expr.getO()
if order_term is not None:
if order_term.has(*syms):
order_term = None
else:
expr = expr.removeO()
summa = [expand_power_base(i, deep=False) for i in Add.make_args(expr)]
collected, disliked = defaultdict(list), S.Zero
for product in summa:
c, nc = product.args_cnc(split_1=False)
args = list(ordered(c)) + nc
terms = [parse_term(i) for i in args]
small_first = True
for symbol in syms:
if SYMPY_DEBUG:
print("DEBUG: parsing of expression %s with symbol %s " % (
str(terms), str(symbol))
)
if isinstance(symbol, Derivative) and small_first:
terms = list(reversed(terms))
small_first = not small_first
result = parse_expression(terms, symbol)
if SYMPY_DEBUG:
print("DEBUG: returned %s" % str(result))
if result is not None:
if not symbol.is_commutative:
raise AttributeError("Can not collect noncommutative symbol")
terms, elems, common_expo, has_deriv = result
                # when there was a derivative in the current pattern we
# will need to rebuild its expression from scratch
if not has_deriv:
margs = []
for elem in elems:
if elem[2] is None:
e = elem[1]
else:
e = elem[1]*elem[2]
margs.append(Pow(elem[0], e))
index = Mul(*margs)
else:
index = make_expression(elems)
terms = expand_power_base(make_expression(terms), deep=False)
index = expand_power_base(index, deep=False)
collected[index].append(terms)
break
else:
# none of the patterns matched
disliked += product
# add terms now for each key
collected = {k: Add(*v) for k, v in collected.items()}
if disliked is not S.Zero:
collected[S.One] = disliked
if order_term is not None:
for key, val in collected.items():
collected[key] = val + order_term
if func is not None:
collected = {
key: func(val) for key, val in collected.items()}
if evaluate:
return Add(*[key*val for key, val in collected.items()])
else:
return collected
def rcollect(expr, *vars):
"""
Recursively collect sums in an expression.
Examples
========
>>> from sympy.simplify import rcollect
>>> from sympy.abc import x, y
>>> expr = (x**2*y + x*y + x + y)/(x + y)
>>> rcollect(expr, y)
(x + y*(x**2 + x + 1))/(x + y)
See Also
========
collect, collect_const, collect_sqrt
"""
if expr.is_Atom or not expr.has(*vars):
return expr
else:
expr = expr.__class__(*[rcollect(arg, *vars) for arg in expr.args])
if expr.is_Add:
return collect(expr, vars)
else:
return expr
def collect_sqrt(expr, evaluate=None):
"""Return expr with terms having common square roots collected together.
If ``evaluate`` is False a count indicating the number of sqrt-containing
terms will be returned and, if non-zero, the terms of the Add will be
returned, else the expression itself will be returned as a single term.
If ``evaluate`` is True, the expression with any collected terms will be
returned.
Note: since I = sqrt(-1), it is collected, too.
Examples
========
>>> from sympy import sqrt
>>> from sympy.simplify.radsimp import collect_sqrt
>>> from sympy.abc import a, b
>>> r2, r3, r5 = [sqrt(i) for i in [2, 3, 5]]
>>> collect_sqrt(a*r2 + b*r2)
sqrt(2)*(a + b)
>>> collect_sqrt(a*r2 + b*r2 + a*r3 + b*r3)
sqrt(2)*(a + b) + sqrt(3)*(a + b)
>>> collect_sqrt(a*r2 + b*r2 + a*r3 + b*r5)
sqrt(3)*a + sqrt(5)*b + sqrt(2)*(a + b)
If evaluate is False then the arguments will be sorted and
returned as a list and a count of the number of sqrt-containing
terms will be returned:
>>> collect_sqrt(a*r2 + b*r2 + a*r3 + b*r5, evaluate=False)
((sqrt(3)*a, sqrt(5)*b, sqrt(2)*(a + b)), 3)
>>> collect_sqrt(a*sqrt(2) + b, evaluate=False)
((b, sqrt(2)*a), 1)
>>> collect_sqrt(a + b, evaluate=False)
((a + b,), 0)
See Also
========
collect, collect_const, rcollect
"""
if evaluate is None:
evaluate = global_parameters.evaluate
# this step will help to standardize any complex arguments
# of sqrts
coeff, expr = expr.as_content_primitive()
vars = set()
for a in Add.make_args(expr):
for m in a.args_cnc()[0]:
if m.is_number and (
m.is_Pow and m.exp.is_Rational and m.exp.q == 2 or
m is S.ImaginaryUnit):
vars.add(m)
# we only want radicals, so exclude Number handling; in this case
# d will be evaluated
d = collect_const(expr, *vars, Numbers=False)
hit = expr != d
if not evaluate:
nrad = 0
# make the evaluated args canonical
args = list(ordered(Add.make_args(d)))
for i, m in enumerate(args):
c, nc = m.args_cnc()
for ci in c:
# XXX should this be restricted to ci.is_number as above?
if ci.is_Pow and ci.exp.is_Rational and ci.exp.q == 2 or \
ci is S.ImaginaryUnit:
nrad += 1
break
args[i] *= coeff
if not (hit or nrad):
args = [Add(*args)]
return tuple(args), nrad
return coeff*d
def collect_abs(expr):
"""Return ``expr`` with arguments of multiple Abs in a term collected
under a single instance.
Examples
========
>>> from sympy.simplify.radsimp import collect_abs
>>> from sympy.abc import x
>>> collect_abs(abs(x + 1)/abs(x**2 - 1))
Abs((x + 1)/(x**2 - 1))
>>> collect_abs(abs(1/x))
Abs(1/x)
"""
def _abs(mul):
from sympy.core.mul import _mulsort
c, nc = mul.args_cnc()
a = []
o = []
for i in c:
if isinstance(i, Abs):
a.append(i.args[0])
elif isinstance(i, Pow) and isinstance(i.base, Abs) and i.exp.is_real:
a.append(i.base.args[0]**i.exp)
else:
o.append(i)
if len(a) < 2 and not any(i.exp.is_negative for i in a if isinstance(i, Pow)):
return mul
absarg = Mul(*a)
A = Abs(absarg)
args = [A]
args.extend(o)
if not A.has(Abs):
args.extend(nc)
return Mul(*args)
if not isinstance(A, Abs):
# reevaluate and make it unevaluated
A = Abs(absarg, evaluate=False)
args[0] = A
_mulsort(args)
args.extend(nc) # nc always go last
return Mul._from_args(args, is_commutative=not nc)
return expr.replace(
lambda x: isinstance(x, Mul),
lambda x: _abs(x)).replace(
lambda x: isinstance(x, Pow),
lambda x: _abs(x))
def collect_const(expr, *vars, Numbers=True):
"""A non-greedy collection of terms with similar number coefficients in
an Add expr. If ``vars`` is given then only those constants will be
targeted. Although any Number can also be targeted, if this is not
desired set ``Numbers=False`` and no Float or Rational will be collected.
Parameters
==========
expr : sympy expression
        This parameter defines the expression from which
terms with similar coefficients are to be collected. A non-Add
expression is returned as it is.
vars : variable length collection of Numbers, optional
Specifies the constants to target for collection. Can be multiple in
number.
Numbers : bool
        Specifies whether to target all instances of the
        :class:`sympy.core.numbers.Number` class. If ``Numbers=False``, then
no Float or Rational will be collected.
Returns
=======
expr : Expr
Returns an expression with similar coefficient terms collected.
Examples
========
>>> from sympy import sqrt
>>> from sympy.abc import s, x, y, z
>>> from sympy.simplify.radsimp import collect_const
>>> collect_const(sqrt(3) + sqrt(3)*(1 + sqrt(2)))
sqrt(3)*(sqrt(2) + 2)
>>> collect_const(sqrt(3)*s + sqrt(7)*s + sqrt(3) + sqrt(7))
(sqrt(3) + sqrt(7))*(s + 1)
>>> s = sqrt(2) + 2
>>> collect_const(sqrt(3)*s + sqrt(3) + sqrt(7)*s + sqrt(7))
(sqrt(2) + 3)*(sqrt(3) + sqrt(7))
>>> collect_const(sqrt(3)*s + sqrt(3) + sqrt(7)*s + sqrt(7), sqrt(3))
sqrt(7) + sqrt(3)*(sqrt(2) + 3) + sqrt(7)*(sqrt(2) + 2)
The collection is sign-sensitive, giving higher precedence to the
unsigned values:
>>> collect_const(x - y - z)
x - (y + z)
>>> collect_const(-y - z)
-(y + z)
>>> collect_const(2*x - 2*y - 2*z, 2)
2*(x - y - z)
>>> collect_const(2*x - 2*y - 2*z, -2)
2*x - 2*(y + z)
See Also
========
collect, collect_sqrt, rcollect
"""
if not expr.is_Add:
return expr
recurse = False
if not vars:
recurse = True
vars = set()
for a in expr.args:
for m in Mul.make_args(a):
if m.is_number:
vars.add(m)
else:
vars = sympify(vars)
if not Numbers:
vars = [v for v in vars if not v.is_Number]
vars = list(ordered(vars))
for v in vars:
terms = defaultdict(list)
Fv = Factors(v)
for m in Add.make_args(expr):
f = Factors(m)
q, r = f.div(Fv)
if r.is_one:
# only accept this as a true factor if
# it didn't change an exponent from an Integer
# to a non-Integer, e.g. 2/sqrt(2) -> sqrt(2)
# -- we aren't looking for this sort of change
fwas = f.factors.copy()
fnow = q.factors
if not any(k in fwas and fwas[k].is_Integer and not
fnow[k].is_Integer for k in fnow):
terms[v].append(q.as_expr())
continue
terms[S.One].append(m)
args = []
hit = False
uneval = False
for k in ordered(terms):
v = terms[k]
if k is S.One:
args.extend(v)
continue
if len(v) > 1:
v = Add(*v)
hit = True
if recurse and v != expr:
vars.append(v)
else:
v = v[0]
# be careful not to let uneval become True unless
# it must be because it's going to be more expensive
# to rebuild the expression as an unevaluated one
if Numbers and k.is_Number and v.is_Add:
args.append(_keep_coeff(k, v, sign=True))
uneval = True
else:
args.append(k*v)
if hit:
if uneval:
expr = _unevaluated_Add(*args)
else:
expr = Add(*args)
if not expr.is_Add:
break
return expr
def radsimp(expr, symbolic=True, max_terms=4):
r"""
Rationalize the denominator by removing square roots.
Explanation
===========
The expression returned from radsimp must be used with caution
since if the denominator contains symbols, it will be possible to make
substitutions that violate the assumptions of the simplification process:
that for a denominator matching a + b*sqrt(c), a != +/-b*sqrt(c). (If
    there are no symbols, this assumption is made valid by collecting terms
of sqrt(c) so the match variable ``a`` does not contain ``sqrt(c)``.) If
you do not want the simplification to occur for symbolic denominators, set
``symbolic`` to False.
If there are more than ``max_terms`` radical terms then the expression is
returned unchanged.
Examples
========
>>> from sympy import radsimp, sqrt, Symbol, pprint
>>> from sympy import factor_terms, fraction, signsimp
>>> from sympy.simplify.radsimp import collect_sqrt
>>> from sympy.abc import a, b, c
>>> radsimp(1/(2 + sqrt(2)))
(2 - sqrt(2))/2
>>> x,y = map(Symbol, 'xy')
>>> e = ((2 + 2*sqrt(2))*x + (2 + sqrt(8))*y)/(2 + sqrt(2))
>>> radsimp(e)
sqrt(2)*(x + y)
No simplification beyond removal of the gcd is done. One might
want to polish the result a little, however, by collecting
square root terms:
>>> r2 = sqrt(2)
>>> r5 = sqrt(5)
>>> ans = radsimp(1/(y*r2 + x*r2 + a*r5 + b*r5)); pprint(ans)
___ ___ ___ ___
\/ 5 *a + \/ 5 *b - \/ 2 *x - \/ 2 *y
------------------------------------------
2 2 2 2
5*a + 10*a*b + 5*b - 2*x - 4*x*y - 2*y
>>> n, d = fraction(ans)
>>> pprint(factor_terms(signsimp(collect_sqrt(n))/d, radical=True))
___ ___
\/ 5 *(a + b) - \/ 2 *(x + y)
------------------------------------------
2 2 2 2
5*a + 10*a*b + 5*b - 2*x - 4*x*y - 2*y
If radicals in the denominator cannot be removed or there is no denominator,
the original expression will be returned.
>>> radsimp(sqrt(2)*x + sqrt(2))
sqrt(2)*x + sqrt(2)
Results with symbols will not always be valid for all substitutions:
>>> eq = 1/(a + b*sqrt(c))
>>> eq.subs(a, b*sqrt(c))
1/(2*b*sqrt(c))
>>> radsimp(eq).subs(a, b*sqrt(c))
nan
If ``symbolic=False``, symbolic denominators will not be transformed (but
numeric denominators will still be processed):
>>> radsimp(eq, symbolic=False)
1/(a + b*sqrt(c))
"""
from sympy.simplify.simplify import signsimp
syms = symbols("a:d A:D")
def _num(rterms):
# return the multiplier that will simplify the expression described
# by rterms [(sqrt arg, coeff), ... ]
a, b, c, d, A, B, C, D = syms
if len(rterms) == 2:
reps = dict(list(zip([A, a, B, b], [j for i in rterms for j in i])))
return (
sqrt(A)*a - sqrt(B)*b).xreplace(reps)
if len(rterms) == 3:
reps = dict(list(zip([A, a, B, b, C, c], [j for i in rterms for j in i])))
return (
(sqrt(A)*a + sqrt(B)*b - sqrt(C)*c)*(2*sqrt(A)*sqrt(B)*a*b - A*a**2 -
B*b**2 + C*c**2)).xreplace(reps)
elif len(rterms) == 4:
reps = dict(list(zip([A, a, B, b, C, c, D, d], [j for i in rterms for j in i])))
return ((sqrt(A)*a + sqrt(B)*b - sqrt(C)*c - sqrt(D)*d)*(2*sqrt(A)*sqrt(B)*a*b
- A*a**2 - B*b**2 - 2*sqrt(C)*sqrt(D)*c*d + C*c**2 +
D*d**2)*(-8*sqrt(A)*sqrt(B)*sqrt(C)*sqrt(D)*a*b*c*d + A**2*a**4 -
2*A*B*a**2*b**2 - 2*A*C*a**2*c**2 - 2*A*D*a**2*d**2 + B**2*b**4 -
2*B*C*b**2*c**2 - 2*B*D*b**2*d**2 + C**2*c**4 - 2*C*D*c**2*d**2 +
D**2*d**4)).xreplace(reps)
elif len(rterms) == 1:
return sqrt(rterms[0][0])
else:
raise NotImplementedError
def ispow2(d, log2=False):
if not d.is_Pow:
return False
e = d.exp
if e.is_Rational and e.q == 2 or symbolic and denom(e) == 2:
return True
if log2:
q = 1
if e.is_Rational:
q = e.q
elif symbolic:
d = denom(e)
if d.is_Integer:
q = d
if q != 1 and log(q, 2).is_Integer:
return True
return False
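    # Rough illustration of ispow2 above (hand-checked, not from the original
    # source): sqrt(x) and x**(S(3)/2) qualify, x**2 does not, and with
    # log2=True a power like x**(S(1)/4) is also accepted because the
    # denominator of its exponent is a power of 2.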
def handle(expr):
# Handle first reduces to the case
# expr = 1/d, where d is an add, or d is base**p/2.
# We do this by recursively calling handle on each piece.
from sympy.simplify.simplify import nsimplify
n, d = fraction(expr)
if expr.is_Atom or (d.is_Atom and n.is_Atom):
return expr
elif not n.is_Atom:
n = n.func(*[handle(a) for a in n.args])
return _unevaluated_Mul(n, handle(1/d))
elif n is not S.One:
return _unevaluated_Mul(n, handle(1/d))
elif d.is_Mul:
return _unevaluated_Mul(*[handle(1/d) for d in d.args])
# By this step, expr is 1/d, and d is not a mul.
if not symbolic and d.free_symbols:
return expr
if ispow2(d):
d2 = sqrtdenest(sqrt(d.base))**numer(d.exp)
if d2 != d:
return handle(1/d2)
elif d.is_Pow and (d.exp.is_integer or d.base.is_positive):
# (1/d**i) = (1/d)**i
return handle(1/d.base)**d.exp
if not (d.is_Add or ispow2(d)):
return 1/d.func(*[handle(a) for a in d.args])
# handle 1/d treating d as an Add (though it may not be)
keep = True # keep changes that are made
# flatten it and collect radicals after checking for special
# conditions
d = _mexpand(d)
# did it change?
if d.is_Atom:
return 1/d
# is it a number that might be handled easily?
if d.is_number:
_d = nsimplify(d)
if _d.is_Number and _d.equals(d):
return 1/_d
while True:
# collect similar terms
collected = defaultdict(list)
for m in Add.make_args(d): # d might have become non-Add
p2 = []
other = []
for i in Mul.make_args(m):
if ispow2(i, log2=True):
p2.append(i.base if i.exp is S.Half else i.base**(2*i.exp))
elif i is S.ImaginaryUnit:
p2.append(S.NegativeOne)
else:
other.append(i)
collected[tuple(ordered(p2))].append(Mul(*other))
rterms = list(ordered(list(collected.items())))
rterms = [(Mul(*i), Add(*j)) for i, j in rterms]
nrad = len(rterms) - (1 if rterms[0][0] is S.One else 0)
if nrad < 1:
break
elif nrad > max_terms:
# there may have been invalid operations leading to this point
# so don't keep changes, e.g. this expression is troublesome
# in collecting terms so as not to raise the issue of 2834:
# r = sqrt(sqrt(5) + 5)
# eq = 1/(sqrt(5)*r + 2*sqrt(5)*sqrt(-sqrt(5) + 5) + 5*r)
keep = False
break
if len(rterms) > 4:
# in general, only 4 terms can be removed with repeated squaring
# but other considerations can guide selection of radical terms
# so that radicals are removed
if all([x.is_Integer and (y**2).is_Rational for x, y in rterms]):
nd, d = rad_rationalize(S.One, Add._from_args(
[sqrt(x)*y for x, y in rterms]))
n *= nd
else:
# is there anything else that might be attempted?
keep = False
break
from sympy.simplify.powsimp import powsimp, powdenest
num = powsimp(_num(rterms))
n *= num
d *= num
d = powdenest(_mexpand(d), force=symbolic)
if d.is_Atom:
break
if not keep:
return expr
return _unevaluated_Mul(n, 1/d)
coeff, expr = expr.as_coeff_Add()
expr = expr.normal()
old = fraction(expr)
n, d = fraction(handle(expr))
if old != (n, d):
if not d.is_Atom:
was = (n, d)
n = signsimp(n, evaluate=False)
d = signsimp(d, evaluate=False)
u = Factors(_unevaluated_Mul(n, 1/d))
u = _unevaluated_Mul(*[k**v for k, v in u.factors.items()])
n, d = fraction(u)
if old == (n, d):
n, d = was
n = expand_mul(n)
if d.is_Number or d.is_Add:
n2, d2 = fraction(gcd_terms(_unevaluated_Mul(n, 1/d)))
if d2.is_Number or (d2.count_ops() <= d.count_ops()):
n, d = [signsimp(i) for i in (n2, d2)]
if n.is_Mul and n.args[0].is_Number:
n = n.func(*n.args)
return coeff + _unevaluated_Mul(n, 1/d)
def rad_rationalize(num, den):
"""
Rationalize ``num/den`` by removing square roots in the denominator;
num and den are sum of terms whose squares are positive rationals.
Examples
========
>>> from sympy import sqrt
>>> from sympy.simplify.radsimp import rad_rationalize
>>> rad_rationalize(sqrt(3), 1 + sqrt(2)/3)
(-sqrt(3) + sqrt(6)/3, -7/9)
"""
if not den.is_Add:
return num, den
g, a, b = split_surds(den)
a = a*sqrt(g)
num = _mexpand((a - b)*num)
den = _mexpand(a**2 - b**2)
return rad_rationalize(num, den)
def fraction(expr, exact=False):
"""Returns a pair with expression's numerator and denominator.
If the given expression is not a fraction then this function
will return the tuple (expr, 1).
This function will not make any attempt to simplify nested
fractions or to do any term rewriting at all.
If only one of the numerator/denominator pair is needed then
use numer(expr) or denom(expr) functions respectively.
>>> from sympy import fraction, Rational, Symbol
>>> from sympy.abc import x, y
>>> fraction(x/y)
(x, y)
>>> fraction(x)
(x, 1)
>>> fraction(1/y**2)
(1, y**2)
>>> fraction(x*y/2)
(x*y, 2)
>>> fraction(Rational(1, 2))
(1, 2)
This function will also work fine with assumptions:
>>> k = Symbol('k', negative=True)
>>> fraction(x * y**k)
(x, y**(-k))
    If we know nothing about the sign of some exponent and the ``exact``
    flag is unset, then the structure of this exponent will
    be analyzed and a pretty fraction will be returned:
>>> from sympy import exp, Mul
>>> fraction(2*x**(-y))
(2, x**y)
>>> fraction(exp(-x))
(1, exp(x))
>>> fraction(exp(-x), exact=True)
(exp(-x), 1)
The ``exact`` flag will also keep any unevaluated Muls from
being evaluated:
>>> u = Mul(2, x + 1, evaluate=False)
>>> fraction(u)
(2*x + 2, 1)
>>> fraction(u, exact=True)
(2*(x + 1), 1)
"""
expr = sympify(expr)
numer, denom = [], []
for term in Mul.make_args(expr):
if term.is_commutative and (term.is_Pow or isinstance(term, exp)):
b, ex = term.as_base_exp()
if ex.is_negative:
if ex is S.NegativeOne:
denom.append(b)
elif exact:
if ex.is_constant():
denom.append(Pow(b, -ex))
else:
numer.append(term)
else:
denom.append(Pow(b, -ex))
elif ex.is_positive:
numer.append(term)
elif not exact and ex.is_Mul:
n, d = term.as_numer_denom()
if n != 1:
numer.append(n)
denom.append(d)
else:
numer.append(term)
elif term.is_Rational and not term.is_Integer:
if term.p != 1:
numer.append(term.p)
denom.append(term.q)
else:
numer.append(term)
return Mul(*numer, evaluate=not exact), Mul(*denom, evaluate=not exact)
def numer(expr):
return fraction(expr)[0]
def denom(expr):
return fraction(expr)[1]
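# Illustrative usage of the two thin wrappers above (assuming x, y are plain
# Symbols): ``numer(x/y)`` gives ``x`` and ``denom(x/y)`` gives ``y``, while
# ``denom(x)`` gives ``1``, mirroring the ``fraction`` examples.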
def fraction_expand(expr, **hints):
return expr.expand(frac=True, **hints)
def numer_expand(expr, **hints):
a, b = fraction(expr)
return a.expand(numer=True, **hints) / b
def denom_expand(expr, **hints):
a, b = fraction(expr)
return a / b.expand(denom=True, **hints)
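# Illustrative sketch for the expand helpers above (assuming x, y, z are plain
# Symbols): ``numer_expand((x + y)**2/z)`` would give
# ``(x**2 + 2*x*y + y**2)/z``, while ``denom_expand`` leaves the numerator
# alone and expands only the denominator.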
expand_numer = numer_expand
expand_denom = denom_expand
expand_fraction = fraction_expand
def split_surds(expr):
"""
Split an expression with terms whose squares are positive rationals
into a sum of terms whose surds squared have gcd equal to g
    and a sum of terms whose surds squared are coprime to g.
Examples
========
>>> from sympy import sqrt
>>> from sympy.simplify.radsimp import split_surds
>>> split_surds(3*sqrt(3) + sqrt(5)/7 + sqrt(6) + sqrt(10) + sqrt(15))
(3, sqrt(2) + sqrt(5) + 3, sqrt(5)/7 + sqrt(10))
"""
args = sorted(expr.args, key=default_sort_key)
coeff_muls = [x.as_coeff_Mul() for x in args]
surds = [x[1]**2 for x in coeff_muls if x[1].is_Pow]
surds.sort(key=default_sort_key)
g, b1, b2 = _split_gcd(*surds)
g2 = g
if not b2 and len(b1) >= 2:
b1n = [x/g for x in b1]
b1n = [x for x in b1n if x != 1]
# only a common factor has been factored; split again
g1, b1n, b2 = _split_gcd(*b1n)
g2 = g*g1
a1v, a2v = [], []
for c, s in coeff_muls:
if s.is_Pow and s.exp == S.Half:
s1 = s.base
if s1 in b1:
a1v.append(c*sqrt(s1/g2))
else:
a2v.append(c*s)
else:
a2v.append(c*s)
a = Add(*a1v)
b = Add(*a2v)
return g2, a, b
def _split_gcd(*a):
"""
Split the list of integers ``a`` into a list of integers, ``a1`` having
``g = gcd(a1)``, and a list ``a2`` whose elements are not divisible by
``g``. Returns ``g, a1, a2``.
Examples
========
>>> from sympy.simplify.radsimp import _split_gcd
>>> _split_gcd(55, 35, 22, 14, 77, 10)
(5, [55, 35, 10], [22, 14, 77])
"""
g = a[0]
b1 = [g]
b2 = []
for x in a[1:]:
g1 = gcd(g, x)
if g1 == 1:
b2.append(x)
else:
g = g1
b1.append(x)
return g, b1, b2
|
35102055e5581845da44d59a64f8df8ca3602e598978cfa2698213b7e62a5d2b | """Tools for manipulation of expressions using paths. """
from sympy.core import Basic
class EPath:
r"""
Manipulate expressions using paths.
EPath grammar in EBNF notation::
literal ::= /[A-Za-z_][A-Za-z_0-9]*/
number ::= /-?\d+/
type ::= literal
attribute ::= literal "?"
all ::= "*"
slice ::= "[" number? (":" number? (":" number?)?)? "]"
range ::= all | slice
query ::= (type | attribute) ("|" (type | attribute))*
selector ::= range | query range?
path ::= "/" selector ("/" selector)*
See the docstring of the epath() function.
"""
__slots__ = ("_path", "_epath")
def __new__(cls, path):
"""Construct new EPath. """
if isinstance(path, EPath):
return path
if not path:
raise ValueError("empty EPath")
_path = path
if path[0] == '/':
path = path[1:]
else:
raise NotImplementedError("non-root EPath")
epath = []
for selector in path.split('/'):
selector = selector.strip()
if not selector:
raise ValueError("empty selector")
index = 0
for c in selector:
if c.isalnum() or c == '_' or c == '|' or c == '?':
index += 1
else:
break
attrs = []
types = []
if index:
elements = selector[:index]
selector = selector[index:]
for element in elements.split('|'):
element = element.strip()
if not element:
raise ValueError("empty element")
if element.endswith('?'):
attrs.append(element[:-1])
else:
types.append(element)
span = None
if selector == '*':
pass
else:
if selector.startswith('['):
try:
i = selector.index(']')
except ValueError:
raise ValueError("expected ']', got EOL")
_span, span = selector[1:i], []
if ':' not in _span:
span = int(_span)
else:
for elt in _span.split(':', 3):
if not elt:
span.append(None)
else:
span.append(int(elt))
span = slice(*span)
selector = selector[i + 1:]
if selector:
raise ValueError("trailing characters in selector")
epath.append((attrs, types, span))
obj = object.__new__(cls)
obj._path = _path
obj._epath = epath
return obj
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._path)
def _get_ordered_args(self, expr):
"""Sort ``expr.args`` using printing order. """
if expr.is_Add:
return expr.as_ordered_terms()
elif expr.is_Mul:
return expr.as_ordered_factors()
else:
return expr.args
def _hasattrs(self, expr, attrs):
"""Check if ``expr`` has any of ``attrs``. """
for attr in attrs:
if not hasattr(expr, attr):
return False
return True
def _hastypes(self, expr, types):
"""Check if ``expr`` is any of ``types``. """
_types = [ cls.__name__ for cls in expr.__class__.mro() ]
return bool(set(_types).intersection(types))
def _has(self, expr, attrs, types):
"""Apply ``_hasattrs`` and ``_hastypes`` to ``expr``. """
if not (attrs or types):
return True
if attrs and self._hasattrs(expr, attrs):
return True
if types and self._hastypes(expr, types):
return True
return False
def apply(self, expr, func, args=None, kwargs=None):
"""
Modify parts of an expression selected by a path.
Examples
========
>>> from sympy.simplify.epathtools import EPath
>>> from sympy import sin, cos, E
>>> from sympy.abc import x, y, z, t
>>> path = EPath("/*/[0]/Symbol")
>>> expr = [((x, 1), 2), ((3, y), z)]
>>> path.apply(expr, lambda expr: expr**2)
[((x**2, 1), 2), ((3, y**2), z)]
>>> path = EPath("/*/*/Symbol")
>>> expr = t + sin(x + 1) + cos(x + y + E)
>>> path.apply(expr, lambda expr: 2*expr)
t + sin(2*x + 1) + cos(2*x + 2*y + E)
"""
def _apply(path, expr, func):
if not path:
return func(expr)
else:
selector, path = path[0], path[1:]
attrs, types, span = selector
if isinstance(expr, Basic):
if not expr.is_Atom:
args, basic = self._get_ordered_args(expr), True
else:
return expr
elif hasattr(expr, '__iter__'):
args, basic = expr, False
else:
return expr
args = list(args)
if span is not None:
if type(span) == slice:
indices = range(*span.indices(len(args)))
else:
indices = [span]
else:
indices = range(len(args))
for i in indices:
try:
arg = args[i]
except IndexError:
continue
if self._has(arg, attrs, types):
args[i] = _apply(path, arg, func)
if basic:
return expr.func(*args)
else:
return expr.__class__(args)
_args, _kwargs = args or (), kwargs or {}
_func = lambda expr: func(expr, *_args, **_kwargs)
return _apply(self._epath, expr, _func)
def select(self, expr):
"""
Retrieve parts of an expression selected by a path.
Examples
========
>>> from sympy.simplify.epathtools import EPath
>>> from sympy import sin, cos, E
>>> from sympy.abc import x, y, z, t
>>> path = EPath("/*/[0]/Symbol")
>>> expr = [((x, 1), 2), ((3, y), z)]
>>> path.select(expr)
[x, y]
>>> path = EPath("/*/*/Symbol")
>>> expr = t + sin(x + 1) + cos(x + y + E)
>>> path.select(expr)
[x, x, y]
"""
result = []
def _select(path, expr):
if not path:
result.append(expr)
else:
selector, path = path[0], path[1:]
attrs, types, span = selector
if isinstance(expr, Basic):
args = self._get_ordered_args(expr)
elif hasattr(expr, '__iter__'):
args = expr
else:
return
if span is not None:
if type(span) == slice:
args = args[span]
else:
try:
args = [args[span]]
except IndexError:
return
for arg in args:
if self._has(arg, attrs, types):
_select(path, arg)
_select(self._epath, expr)
return result
def epath(path, expr=None, func=None, args=None, kwargs=None):
r"""
Manipulate parts of an expression selected by a path.
Explanation
===========
    This function allows manipulating large nested expressions in a single
    line of code, utilizing techniques analogous to those applied in XML
    processing standards (e.g. XPath).
If ``func`` is ``None``, :func:`epath` retrieves elements selected by
the ``path``. Otherwise it applies ``func`` to each matching element.
Note that it is more efficient to create an EPath object and use the select
and apply methods of that object, since this will compile the path string
only once. This function should only be used as a convenient shortcut for
interactive use.
This is the supported syntax:
* select all: ``/*``
Equivalent of ``for arg in args:``.
* select slice: ``/[0]`` or ``/[1:5]`` or ``/[1:5:2]``
Supports standard Python's slice syntax.
* select by type: ``/list`` or ``/list|tuple``
Emulates ``isinstance()``.
* select by attribute: ``/__iter__?``
Emulates ``hasattr()``.
Parameters
==========
path : str | EPath
A path as a string or a compiled EPath.
expr : Basic | iterable
An expression or a container of expressions.
func : callable (optional)
A callable that will be applied to matching parts.
args : tuple (optional)
Additional positional arguments to ``func``.
kwargs : dict (optional)
Additional keyword arguments to ``func``.
Examples
========
>>> from sympy.simplify.epathtools import epath
>>> from sympy import sin, cos, E
>>> from sympy.abc import x, y, z, t
>>> path = "/*/[0]/Symbol"
>>> expr = [((x, 1), 2), ((3, y), z)]
>>> epath(path, expr)
[x, y]
>>> epath(path, expr, lambda expr: expr**2)
[((x**2, 1), 2), ((3, y**2), z)]
>>> path = "/*/*/Symbol"
>>> expr = t + sin(x + 1) + cos(x + y + E)
>>> epath(path, expr)
[x, x, y]
>>> epath(path, expr, lambda expr: 2*expr)
t + sin(2*x + 1) + cos(2*x + 2*y + E)
"""
_epath = EPath(path)
if expr is None:
return _epath
if func is None:
return _epath.select(expr)
else:
return _epath.apply(expr, func, args, kwargs)
|
a8029e2ba01f3f74c6531cab6bd69f7a122ed75d8ecd345234e2d91eddb775f4 | from sympy.core import Mul
from sympy.core.basic import preorder_traversal
from sympy.core.function import count_ops
from sympy.functions.combinatorial.factorials import binomial, factorial
from sympy.functions import gamma
from sympy.simplify.gammasimp import gammasimp, _gammasimp
from sympy.utilities.timeutils import timethis
@timethis('combsimp')
def combsimp(expr):
r"""
Simplify combinatorial expressions.
Explanation
===========
This function takes as input an expression containing factorials,
binomials, Pochhammer symbol and other "combinatorial" functions,
and tries to minimize the number of those functions and reduce
the size of their arguments.
    The algorithm works by rewriting all combinatorial functions as
    gamma functions and applying gammasimp() except for simplification
    steps that may make an integer argument non-integer. See the docstring
    of gammasimp for more information.
    It then rewrites the expression in terms of factorials and binomials by
    rewriting gammas as factorials and converting (a+b)!/a!b! into
    binomials.
    If the expression has gamma functions or combinatorial functions
    with a non-integer argument, it is automatically passed to gammasimp.
Examples
========
>>> from sympy.simplify import combsimp
>>> from sympy import factorial, binomial, symbols
>>> n, k = symbols('n k', integer = True)
>>> combsimp(factorial(n)/factorial(n - 3))
n*(n - 2)*(n - 1)
>>> combsimp(binomial(n+1, k+1)/binomial(n, k))
(n + 1)/(k + 1)
"""
expr = expr.rewrite(gamma, piecewise=False)
if any(isinstance(node, gamma) and not node.args[0].is_integer
for node in preorder_traversal(expr)):
        return gammasimp(expr)
    expr = _gammasimp(expr, as_comb=True)
expr = _gamma_as_comb(expr)
return expr
def _gamma_as_comb(expr):
"""
Helper function for combsimp.
Rewrites expression in terms of factorials and binomials
"""
expr = expr.rewrite(factorial)
from .simplify import bottom_up
def f(rv):
if not rv.is_Mul:
return rv
rvd = rv.as_powers_dict()
nd_fact_args = [[], []] # numerator, denominator
for k in rvd:
if isinstance(k, factorial) and rvd[k].is_Integer:
if rvd[k].is_positive:
nd_fact_args[0].extend([k.args[0]]*rvd[k])
else:
nd_fact_args[1].extend([k.args[0]]*-rvd[k])
rvd[k] = 0
if not nd_fact_args[0] or not nd_fact_args[1]:
return rv
hit = False
for m in range(2):
i = 0
while i < len(nd_fact_args[m]):
ai = nd_fact_args[m][i]
for j in range(i + 1, len(nd_fact_args[m])):
aj = nd_fact_args[m][j]
sum = ai + aj
if sum in nd_fact_args[1 - m]:
hit = True
nd_fact_args[1 - m].remove(sum)
del nd_fact_args[m][j]
del nd_fact_args[m][i]
rvd[binomial(sum, ai if count_ops(ai) <
count_ops(aj) else aj)] += (
-1 if m == 0 else 1)
break
else:
i += 1
if hit:
return Mul(*([k**rvd[k] for k in rvd] + [factorial(k)
for k in nd_fact_args[0]]))/Mul(*[factorial(k)
for k in nd_fact_args[1]])
return rv
return bottom_up(expr, f)
|
7e51bf655cdc6a8bbe60c10138c6204747ba3d7f28108a3ec75bdab3c055b72c | from sympy.core import Add, Expr, Mul, S, sympify
from sympy.core.function import _mexpand, count_ops, expand_mul
from sympy.core.symbol import Dummy
from sympy.functions import root, sign, sqrt
from sympy.polys import Poly, PolynomialError
from sympy.utilities import default_sort_key
def is_sqrt(expr):
"""Return True if expr is a sqrt, otherwise False."""
return expr.is_Pow and expr.exp.is_Rational and abs(expr.exp) is S.Half
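# Illustrative behaviour (hand-checked, not from the original docstring):
# is_sqrt(sqrt(2)) and is_sqrt(1/sqrt(2)) are True since the exponent is +/-1/2,
# while is_sqrt(2**Rational(1, 3)) is False.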
def sqrt_depth(p):
"""Return the maximum depth of any square root argument of p.
>>> from sympy.functions.elementary.miscellaneous import sqrt
>>> from sympy.simplify.sqrtdenest import sqrt_depth
Neither of these square roots contains any other square roots
so the depth is 1:
>>> sqrt_depth(1 + sqrt(2)*(1 + sqrt(3)))
1
The sqrt(3) is contained within a square root so the depth is
2:
>>> sqrt_depth(1 + sqrt(2)*sqrt(1 + sqrt(3)))
2
"""
if p is S.ImaginaryUnit:
return 1
if p.is_Atom:
return 0
elif p.is_Add or p.is_Mul:
return max([sqrt_depth(x) for x in p.args], key=default_sort_key)
elif is_sqrt(p):
return sqrt_depth(p.base) + 1
else:
return 0
def is_algebraic(p):
"""Return True if p is comprised of only Rationals or square roots
of Rationals and algebraic operations.
Examples
========
>>> from sympy.functions.elementary.miscellaneous import sqrt
>>> from sympy.simplify.sqrtdenest import is_algebraic
>>> from sympy import cos
>>> is_algebraic(sqrt(2)*(3/(sqrt(7) + sqrt(5)*sqrt(2))))
True
>>> is_algebraic(sqrt(2)*(3/(sqrt(7) + sqrt(5)*cos(2))))
False
"""
if p.is_Rational:
return True
elif p.is_Atom:
return False
elif is_sqrt(p) or p.is_Pow and p.exp.is_Integer:
return is_algebraic(p.base)
elif p.is_Add or p.is_Mul:
return all(is_algebraic(x) for x in p.args)
else:
return False
def _subsets(n):
"""
Returns all possible subsets of the set (0, 1, ..., n-1) except the
empty set, listed in reversed lexicographical order according to binary
representation, so that the case of the fourth root is treated last.
Examples
========
>>> from sympy.simplify.sqrtdenest import _subsets
>>> _subsets(2)
[[1, 0], [0, 1], [1, 1]]
"""
if n == 1:
a = [[1]]
elif n == 2:
a = [[1, 0], [0, 1], [1, 1]]
elif n == 3:
a = [[1, 0, 0], [0, 1, 0], [1, 1, 0],
[0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 1]]
else:
b = _subsets(n - 1)
a0 = [x + [0] for x in b]
a1 = [x + [1] for x in b]
a = a0 + [[0]*(n - 1) + [1]] + a1
return a
def sqrtdenest(expr, max_iter=3):
"""Denests sqrts in an expression that contain other square roots
if possible, otherwise returns the expr unchanged. This is based on the
algorithms of [1].
Examples
========
>>> from sympy.simplify.sqrtdenest import sqrtdenest
>>> from sympy import sqrt
>>> sqrtdenest(sqrt(5 + 2 * sqrt(6)))
sqrt(2) + sqrt(3)
See Also
========
sympy.solvers.solvers.unrad
References
==========
.. [1] http://researcher.watson.ibm.com/researcher/files/us-fagin/symb85.pdf
    .. [2] D. J. Jeffrey and A. D. Rich, 'Simplifying Square Roots of Square Roots
by Denesting' (available at http://www.cybertester.com/data/denest.pdf)
"""
expr = expand_mul(sympify(expr))
for i in range(max_iter):
z = _sqrtdenest0(expr)
if expr == z:
return expr
expr = z
return expr
def _sqrt_match(p):
"""Return [a, b, r] for p.match(a + b*sqrt(r)) where, in addition to
    matching, sqrt(r) also has the maximal sqrt_depth among addends of p.
Examples
========
>>> from sympy.functions.elementary.miscellaneous import sqrt
>>> from sympy.simplify.sqrtdenest import _sqrt_match
>>> _sqrt_match(1 + sqrt(2) + sqrt(2)*sqrt(3) + 2*sqrt(1+sqrt(5)))
[1 + sqrt(2) + sqrt(6), 2, 1 + sqrt(5)]
"""
from sympy.simplify.radsimp import split_surds
p = _mexpand(p)
if p.is_Number:
res = (p, S.Zero, S.Zero)
elif p.is_Add:
pargs = sorted(p.args, key=default_sort_key)
sqargs = [x**2 for x in pargs]
if all(sq.is_Rational and sq.is_positive for sq in sqargs):
r, b, a = split_surds(p)
res = a, b, r
return list(res)
# to make the process canonical, the argument is included in the tuple
# so when the max is selected, it will be the largest arg having a
# given depth
v = [(sqrt_depth(x), x, i) for i, x in enumerate(pargs)]
nmax = max(v, key=default_sort_key)
if nmax[0] == 0:
res = []
else:
# select r
depth, _, i = nmax
r = pargs.pop(i)
v.pop(i)
b = S.One
if r.is_Mul:
bv = []
rv = []
for x in r.args:
if sqrt_depth(x) < depth:
bv.append(x)
else:
rv.append(x)
b = Mul._from_args(bv)
r = Mul._from_args(rv)
            # collect terms containing r
a1 = []
b1 = [b]
for x in v:
if x[0] < depth:
a1.append(x[1])
else:
x1 = x[1]
if x1 == r:
b1.append(1)
else:
if x1.is_Mul:
x1args = list(x1.args)
if r in x1args:
x1args.remove(r)
b1.append(Mul(*x1args))
else:
a1.append(x[1])
else:
a1.append(x[1])
a = Add(*a1)
b = Add(*b1)
res = (a, b, r**2)
else:
b, r = p.as_coeff_Mul()
if is_sqrt(r):
res = (S.Zero, b, r**2)
else:
res = []
return list(res)
class SqrtdenestStopIteration(StopIteration):
pass
def _sqrtdenest0(expr):
"""Returns expr after denesting its arguments."""
if is_sqrt(expr):
n, d = expr.as_numer_denom()
if d is S.One: # n is a square root
if n.base.is_Add:
args = sorted(n.base.args, key=default_sort_key)
if len(args) > 2 and all((x**2).is_Integer for x in args):
try:
return _sqrtdenest_rec(n)
except SqrtdenestStopIteration:
pass
expr = sqrt(_mexpand(Add(*[_sqrtdenest0(x) for x in args])))
return _sqrtdenest1(expr)
else:
n, d = [_sqrtdenest0(i) for i in (n, d)]
return n/d
if isinstance(expr, Add):
cs = []
args = []
for arg in expr.args:
c, a = arg.as_coeff_Mul()
cs.append(c)
args.append(a)
if all(c.is_Rational for c in cs) and all(is_sqrt(arg) for arg in args):
return _sqrt_ratcomb(cs, args)
if isinstance(expr, Expr):
args = expr.args
if args:
return expr.func(*[_sqrtdenest0(a) for a in args])
return expr
def _sqrtdenest_rec(expr):
"""Helper that denests the square root of three or more surds.
Explanation
===========
It returns the denested expression; if it cannot be denested it
throws SqrtdenestStopIteration
Algorithm: expr.base is in the extension Q_m = Q(sqrt(r_1),..,sqrt(r_k));
split expr.base = a + b*sqrt(r_k), where `a` and `b` are on
Q_(m-1) = Q(sqrt(r_1),..,sqrt(r_(k-1))); then a**2 - b**2*r_k is
on Q_(m-1); denest sqrt(a**2 - b**2*r_k) and so on.
See [1], section 6.
Examples
========
>>> from sympy import sqrt
>>> from sympy.simplify.sqrtdenest import _sqrtdenest_rec
>>> _sqrtdenest_rec(sqrt(-72*sqrt(2) + 158*sqrt(5) + 498))
-sqrt(10) + sqrt(2) + 9 + 9*sqrt(5)
>>> w=-6*sqrt(55)-6*sqrt(35)-2*sqrt(22)-2*sqrt(14)+2*sqrt(77)+6*sqrt(10)+65
>>> _sqrtdenest_rec(sqrt(w))
-sqrt(11) - sqrt(7) + sqrt(2) + 3*sqrt(5)
"""
from sympy.simplify.radsimp import radsimp, rad_rationalize, split_surds
if not expr.is_Pow:
return sqrtdenest(expr)
if expr.base < 0:
return sqrt(-1)*_sqrtdenest_rec(sqrt(-expr.base))
g, a, b = split_surds(expr.base)
a = a*sqrt(g)
if a < b:
a, b = b, a
c2 = _mexpand(a**2 - b**2)
if len(c2.args) > 2:
g, a1, b1 = split_surds(c2)
a1 = a1*sqrt(g)
if a1 < b1:
a1, b1 = b1, a1
c2_1 = _mexpand(a1**2 - b1**2)
c_1 = _sqrtdenest_rec(sqrt(c2_1))
d_1 = _sqrtdenest_rec(sqrt(a1 + c_1))
num, den = rad_rationalize(b1, d_1)
c = _mexpand(d_1/sqrt(2) + num/(den*sqrt(2)))
else:
c = _sqrtdenest1(sqrt(c2))
if sqrt_depth(c) > 1:
raise SqrtdenestStopIteration
ac = a + c
if len(ac.args) >= len(expr.args):
if count_ops(ac) >= count_ops(expr.base):
raise SqrtdenestStopIteration
d = sqrtdenest(sqrt(ac))
if sqrt_depth(d) > 1:
raise SqrtdenestStopIteration
num, den = rad_rationalize(b, d)
r = d/sqrt(2) + num/(den*sqrt(2))
r = radsimp(r)
return _mexpand(r)
def _sqrtdenest1(expr, denester=True):
"""Return denested expr after denesting with simpler methods or, that
failing, using the denester."""
from sympy.simplify.simplify import radsimp
if not is_sqrt(expr):
return expr
a = expr.base
if a.is_Atom:
return expr
val = _sqrt_match(a)
if not val:
return expr
a, b, r = val
# try a quick numeric denesting
d2 = _mexpand(a**2 - b**2*r)
if d2.is_Rational:
if d2.is_positive:
z = _sqrt_numeric_denest(a, b, r, d2)
if z is not None:
return z
else:
# fourth root case
# sqrtdenest(sqrt(3 + 2*sqrt(3))) =
# sqrt(2)*3**(1/4)/2 + sqrt(2)*3**(3/4)/2
dr2 = _mexpand(-d2*r)
dr = sqrt(dr2)
if dr.is_Rational:
z = _sqrt_numeric_denest(_mexpand(b*r), a, r, dr2)
if z is not None:
return z/root(r, 4)
else:
z = _sqrt_symbolic_denest(a, b, r)
if z is not None:
return z
if not denester or not is_algebraic(expr):
return expr
res = sqrt_biquadratic_denest(expr, a, b, r, d2)
if res:
return res
# now call to the denester
av0 = [a, b, r, d2]
z = _denester([radsimp(expr**2)], av0, 0, sqrt_depth(expr))[0]
if av0[1] is None:
return expr
if z is not None:
if sqrt_depth(z) == sqrt_depth(expr) and count_ops(z) > count_ops(expr):
return expr
return z
return expr
def _sqrt_symbolic_denest(a, b, r):
"""Given an expression, sqrt(a + b*sqrt(b)), return the denested
expression or None.
Explanation
===========
If r = ra + rb*sqrt(rr), try replacing sqrt(rr) in ``a`` with
(y**2 - ra)/rb, and if the result is a quadratic, ca*y**2 + cb*y + cc, and
(cb + b)**2 - 4*ca*cc is 0, then sqrt(a + b*sqrt(r)) can be rewritten as
sqrt(ca*(sqrt(r) + (cb + b)/(2*ca))**2).
Examples
========
>>> from sympy.simplify.sqrtdenest import _sqrt_symbolic_denest, sqrtdenest
>>> from sympy import sqrt, Symbol
>>> from sympy.abc import x
>>> a, b, r = 16 - 2*sqrt(29), 2, -10*sqrt(29) + 55
>>> _sqrt_symbolic_denest(a, b, r)
sqrt(11 - 2*sqrt(29)) + sqrt(5)
If the expression is numeric, it will be simplified:
>>> w = sqrt(sqrt(sqrt(3) + 1) + 1) + 1 + sqrt(2)
>>> sqrtdenest(sqrt((w**2).expand()))
1 + sqrt(2) + sqrt(1 + sqrt(1 + sqrt(3)))
Otherwise, it will only be simplified if assumptions allow:
>>> w = w.subs(sqrt(3), sqrt(x + 3))
>>> sqrtdenest(sqrt((w**2).expand()))
sqrt((sqrt(sqrt(sqrt(x + 3) + 1) + 1) + 1 + sqrt(2))**2)
Notice that the argument of the sqrt is a square. If x is made positive
then the sqrt of the square is resolved:
>>> _.subs(x, Symbol('x', positive=True))
sqrt(sqrt(sqrt(x + 3) + 1) + 1) + 1 + sqrt(2)
"""
a, b, r = map(sympify, (a, b, r))
rval = _sqrt_match(r)
if not rval:
return None
ra, rb, rr = rval
if rb:
y = Dummy('y', positive=True)
try:
newa = Poly(a.subs(sqrt(rr), (y**2 - ra)/rb), y)
except PolynomialError:
return None
if newa.degree() == 2:
ca, cb, cc = newa.all_coeffs()
cb += b
if _mexpand(cb**2 - 4*ca*cc).equals(0):
z = sqrt(ca*(sqrt(r) + cb/(2*ca))**2)
if z.is_number:
z = _mexpand(Mul._from_args(z.as_content_primitive()))
return z
def _sqrt_numeric_denest(a, b, r, d2):
r"""Helper that denest
$\sqrt{a + b \sqrt{r}}, d^2 = a^2 - b^2 r > 0$
If it cannot be denested, it returns ``None``.
"""
d = sqrt(d2)
s = a + d
# sqrt_depth(res) <= sqrt_depth(s) + 1
# sqrt_depth(expr) = sqrt_depth(r) + 2
# there is denesting if sqrt_depth(s) + 1 < sqrt_depth(r) + 2
# if s**2 is Number there is a fourth root
if sqrt_depth(s) < sqrt_depth(r) + 1 or (s**2).is_Rational:
s1, s2 = sign(s), sign(b)
if s1 == s2 == -1:
s1 = s2 = 1
res = (s1 * sqrt(a + d) + s2 * sqrt(a - d)) * sqrt(2) / 2
return res.expand()
def sqrt_biquadratic_denest(expr, a, b, r, d2):
"""denest expr = sqrt(a + b*sqrt(r))
where a, b, r are linear combinations of square roots of
positive rationals on the rationals (SQRR) and r > 0, b != 0,
d2 = a**2 - b**2*r > 0
If it cannot denest it returns None.
Explanation
===========
Search for a solution A of type SQRR of the biquadratic equation
4*A**4 - 4*a*A**2 + b**2*r = 0 (1)
sqd = sqrt(a**2 - b**2*r)
Choosing the sqrt to be positive, the possible solutions are
A = sqrt(a/2 +/- sqd/2)
Since a, b, r are SQRR, then a**2 - b**2*r is a SQRR,
so if sqd can be denested, it is done by
_sqrtdenest_rec, and the result is a SQRR.
Similarly for A.
Examples of solutions (in both cases a and sqd are positive):
Example of expr with solution sqrt(a/2 + sqd/2) but not
solution sqrt(a/2 - sqd/2):
expr = sqrt(-sqrt(15) - sqrt(2)*sqrt(-sqrt(5) + 5) - sqrt(3) + 8)
a = -sqrt(15) - sqrt(3) + 8; sqd = -2*sqrt(5) - 2 + 4*sqrt(3)
Example of expr with solution sqrt(a/2 - sqd/2) but not
solution sqrt(a/2 + sqd/2):
w = 2 + r2 + r3 + (1 + r3)*sqrt(2 + r2 + 5*r3)
expr = sqrt((w**2).expand())
a = 4*sqrt(6) + 8*sqrt(2) + 47 + 28*sqrt(3)
sqd = 29 + 20*sqrt(3)
    Define B = b/(2*A); eq.(1) implies a = A**2 + B**2*r; then
expr**2 = a + b*sqrt(r) = (A + B*sqrt(r))**2
Examples
========
>>> from sympy import sqrt
>>> from sympy.simplify.sqrtdenest import _sqrt_match, sqrt_biquadratic_denest
>>> z = sqrt((2*sqrt(2) + 4)*sqrt(2 + sqrt(2)) + 5*sqrt(2) + 8)
>>> a, b, r = _sqrt_match(z**2)
>>> d2 = a**2 - b**2*r
>>> sqrt_biquadratic_denest(z, a, b, r, d2)
sqrt(2) + sqrt(sqrt(2) + 2) + 2
"""
from sympy.simplify.radsimp import radsimp, rad_rationalize
if r <= 0 or d2 < 0 or not b or sqrt_depth(expr.base) < 2:
return None
for x in (a, b, r):
for y in x.args:
y2 = y**2
if not y2.is_Integer or not y2.is_positive:
return None
sqd = _mexpand(sqrtdenest(sqrt(radsimp(d2))))
if sqrt_depth(sqd) > 1:
return None
x1, x2 = [a/2 + sqd/2, a/2 - sqd/2]
# look for a solution A with depth 1
for x in (x1, x2):
A = sqrtdenest(sqrt(x))
if sqrt_depth(A) > 1:
continue
Bn, Bd = rad_rationalize(b, _mexpand(2*A))
B = Bn/Bd
z = A + B*sqrt(r)
if z < 0:
z = -z
return _mexpand(z)
return None
def _denester(nested, av0, h, max_depth_level):
"""Denests a list of expressions that contain nested square roots.
Explanation
===========
Algorithm based on <http://www.almaden.ibm.com/cs/people/fagin/symb85.pdf>.
It is assumed that all of the elements of 'nested' share the same
bottom-level radicand. (This is stated in the paper, on page 177, in
the paragraph immediately preceding the algorithm.)
When evaluating all of the arguments in parallel, the bottom-level
radicand only needs to be denested once. This means that calling
_denester with x arguments results in a recursive invocation with x+1
arguments; hence _denester has polynomial complexity.
However, if the arguments were evaluated separately, each call would
result in two recursive invocations, and the algorithm would have
exponential complexity.
This is discussed in the paper in the middle paragraph of page 179.
"""
from sympy.simplify.simplify import radsimp
if h > max_depth_level:
return None, None
if av0[1] is None:
return None, None
if (av0[0] is None and
all(n.is_Number for n in nested)): # no arguments are nested
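        # Each f produced by _subsets is used here as a 0/1 mask over
        # ``nested``: the product of the selected radicands is tested for
        # being a perfect square (with its sign flipped when the last
        # element is selected together with at least one other).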
for f in _subsets(len(nested)): # test subset 'f' of nested
p = _mexpand(Mul(*[nested[i] for i in range(len(f)) if f[i]]))
if f.count(1) > 1 and f[-1]:
p = -p
sqp = sqrt(p)
if sqp.is_Rational:
return sqp, f # got a perfect square so return its square root.
# Otherwise, return the radicand from the previous invocation.
return sqrt(nested[-1]), [0]*len(nested)
else:
R = None
if av0[0] is not None:
values = [av0[:2]]
R = av0[2]
nested2 = [av0[3], R]
av0[0] = None
else:
values = list(filter(None, [_sqrt_match(expr) for expr in nested]))
for v in values:
if v[2]: # Since if b=0, r is not defined
if R is not None:
if R != v[2]:
av0[1] = None
return None, None
else:
R = v[2]
if R is None:
# return the radicand from the previous invocation
return sqrt(nested[-1]), [0]*len(nested)
nested2 = [_mexpand(v[0]**2) -
_mexpand(R*v[1]**2) for v in values] + [R]
d, f = _denester(nested2, av0, h + 1, max_depth_level)
if not f:
return None, None
if not any(f[i] for i in range(len(nested))):
v = values[-1]
return sqrt(v[0] + _mexpand(v[1]*d)), f
else:
p = Mul(*[nested[i] for i in range(len(nested)) if f[i]])
v = _sqrt_match(p)
if 1 in f and f.index(1) < len(nested) - 1 and f[len(nested) - 1]:
v[0] = -v[0]
v[1] = -v[1]
if not f[len(nested)]: # Solution denests with square roots
vad = _mexpand(v[0] + d)
if vad <= 0:
# return the radicand from the previous invocation.
return sqrt(nested[-1]), [0]*len(nested)
if not(sqrt_depth(vad) <= sqrt_depth(R) + 1 or
(vad**2).is_Number):
av0[1] = None
return None, None
sqvad = _sqrtdenest1(sqrt(vad), denester=False)
if not (sqrt_depth(sqvad) <= sqrt_depth(R) + 1):
av0[1] = None
return None, None
sqvad1 = radsimp(1/sqvad)
res = _mexpand(sqvad/sqrt(2) + (v[1]*sqrt(R)*sqvad1/sqrt(2)))
return res, f
# sign(v[1])*sqrt(_mexpand(v[1]**2*R*vad1/2))), f
else: # Solution requires a fourth root
s2 = _mexpand(v[1]*R) + d
if s2 <= 0:
return sqrt(nested[-1]), [0]*len(nested)
FR, s = root(_mexpand(R), 4), sqrt(s2)
return _mexpand(s/(sqrt(2)*FR) + v[0]*FR/(sqrt(2)*s)), f
def _sqrt_ratcomb(cs, args):
"""Denest rational combinations of radicals.
Based on section 5 of [1].
Examples
========
>>> from sympy import sqrt
>>> from sympy.simplify.sqrtdenest import sqrtdenest
>>> z = sqrt(1+sqrt(3)) + sqrt(3+3*sqrt(3)) - sqrt(10+6*sqrt(3))
>>> sqrtdenest(z)
0
"""
from sympy.simplify.radsimp import radsimp
# check if there exists a pair of sqrt that can be denested
def find(a):
n = len(a)
for i in range(n - 1):
for j in range(i + 1, n):
s1 = a[i].base
s2 = a[j].base
p = _mexpand(s1 * s2)
s = sqrtdenest(sqrt(p))
if s != sqrt(p):
return s, i, j
indices = find(args)
if indices is None:
return Add(*[c * arg for c, arg in zip(cs, args)])
s, i1, i2 = indices
c2 = cs.pop(i2)
args.pop(i2)
a1 = args[i1]
# replace a2 by s/a1
cs[i1] += radsimp(c2 * s / a1.base)
return _sqrt_ratcomb(cs, args)
|
dedea045629a9e7426b27000dd5ed4e0484979f8f2fe6e670f98a1a6aff17940 | from collections import defaultdict
from sympy.core.function import expand_log, count_ops
from sympy.core import sympify, Basic, Dummy, S, Add, Mul, Pow, expand_mul, factor_terms
from sympy.core.compatibility import ordered, default_sort_key, reduce
from sympy.core.numbers import Integer, Rational
from sympy.core.mul import prod, _keep_coeff
from sympy.core.rules import Transform
from sympy.functions import exp_polar, exp, log, root, polarify, unpolarify
from sympy.polys import lcm, gcd
from sympy.ntheory.factor_ import multiplicity
def powsimp(expr, deep=False, combine='all', force=False, measure=count_ops):
"""
reduces expression by combining powers with similar bases and exponents.
Explanation
===========
If ``deep`` is ``True`` then powsimp() will also simplify arguments of
functions. By default ``deep`` is set to ``False``.
If ``force`` is ``True`` then bases will be combined without checking for
assumptions, e.g. sqrt(x)*sqrt(y) -> sqrt(x*y) which is not true
if x and y are both negative.
You can make powsimp() only combine bases or only combine exponents by
changing combine='base' or combine='exp'. By default, combine='all',
    which does both. combine='base' will only combine
    ``x**a * y**a -> (x*y)**a`` as well as things like ``2**(2*x) -> 4**x``,
    and combine='exp' will only combine
    ``x**a * x**b -> x**(a + b)``.
combine='exp' will strictly only combine exponents in the way that used
to be automatic. Also use deep=True if you need the old behavior.
When combine='all', 'exp' is evaluated first. Consider the first
example below for when there could be an ambiguity relating to this.
This is done so things like the second example can be completely
combined. If you want 'base' combined first, do something like
powsimp(powsimp(expr, combine='base'), combine='exp').
Examples
========
>>> from sympy import powsimp, exp, log, symbols
>>> from sympy.abc import x, y, z, n
>>> powsimp(x**y*x**z*y**z, combine='all')
x**(y + z)*y**z
>>> powsimp(x**y*x**z*y**z, combine='exp')
x**(y + z)*y**z
>>> powsimp(x**y*x**z*y**z, combine='base', force=True)
x**y*(x*y)**z
>>> powsimp(x**z*x**y*n**z*n**y, combine='all', force=True)
(n*x)**(y + z)
>>> powsimp(x**z*x**y*n**z*n**y, combine='exp')
n**(y + z)*x**(y + z)
>>> powsimp(x**z*x**y*n**z*n**y, combine='base', force=True)
(n*x)**y*(n*x)**z
>>> x, y = symbols('x y', positive=True)
>>> powsimp(log(exp(x)*exp(y)))
log(exp(x)*exp(y))
>>> powsimp(log(exp(x)*exp(y)), deep=True)
x + y
Radicals with Mul bases will be combined if combine='exp'
>>> from sympy import sqrt
>>> x, y = symbols('x y')
Two radicals are automatically joined through Mul:
>>> a=sqrt(x*sqrt(y))
>>> a*a**3 == a**4
True
But if an integer power of that radical has been
autoexpanded then Mul does not join the resulting factors:
>>> a**4 # auto expands to a Mul, no longer a Pow
x**2*y
>>> _*a # so Mul doesn't combine them
x**2*y*sqrt(x*sqrt(y))
>>> powsimp(_) # but powsimp will
(x*sqrt(y))**(5/2)
>>> powsimp(x*y*a) # but won't when doing so would violate assumptions
x*y*sqrt(x*sqrt(y))
"""
from sympy.matrices.expressions.matexpr import MatrixSymbol
def recurse(arg, **kwargs):
_deep = kwargs.get('deep', deep)
_combine = kwargs.get('combine', combine)
_force = kwargs.get('force', force)
_measure = kwargs.get('measure', measure)
return powsimp(arg, _deep, _combine, _force, _measure)
expr = sympify(expr)
if (not isinstance(expr, Basic) or isinstance(expr, MatrixSymbol) or (
expr.is_Atom or expr in (exp_polar(0), exp_polar(1)))):
return expr
if deep or expr.is_Add or expr.is_Mul and _y not in expr.args:
expr = expr.func(*[recurse(w) for w in expr.args])
if expr.is_Pow:
return recurse(expr*_y, deep=False)/_y
if not expr.is_Mul:
return expr
# handle the Mul
if combine in ('exp', 'all'):
# Collect base/exp data, while maintaining order in the
# non-commutative parts of the product
c_powers = defaultdict(list)
nc_part = []
newexpr = []
coeff = S.One
for term in expr.args:
if term.is_Rational:
coeff *= term
continue
if term.is_Pow:
term = _denest_pow(term)
if term.is_commutative:
b, e = term.as_base_exp()
if deep:
b, e = [recurse(i) for i in [b, e]]
if b.is_Pow or isinstance(b, exp):
                        # don't let something like sqrt(x**a) split into x**a, 1/2
# or else it will be joined as x**(a/2) later
b, e = b**e, S.One
c_powers[b].append(e)
else:
# This is the logic that combines exponents for equal,
# but non-commutative bases: A**x*A**y == A**(x+y).
if nc_part:
b1, e1 = nc_part[-1].as_base_exp()
b2, e2 = term.as_base_exp()
if (b1 == b2 and
e1.is_commutative and e2.is_commutative):
nc_part[-1] = Pow(b1, Add(e1, e2))
continue
nc_part.append(term)
# add up exponents of common bases
for b, e in ordered(iter(c_powers.items())):
# allow 2**x/4 -> 2**(x - 2); don't do this when b and e are
# Numbers since autoevaluation will undo it, e.g.
# 2**(1/3)/4 -> 2**(1/3 - 2) -> 2**(1/3)/4
if (b and b.is_Rational and not all(ei.is_Number for ei in e) and \
coeff is not S.One and
b not in (S.One, S.NegativeOne)):
m = multiplicity(abs(b), abs(coeff))
if m:
e.append(m)
coeff /= b**m
c_powers[b] = Add(*e)
if coeff is not S.One:
if coeff in c_powers:
c_powers[coeff] += S.One
else:
c_powers[coeff] = S.One
# convert to plain dictionary
c_powers = dict(c_powers)
# check for base and inverted base pairs
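        # e.g. for positive x, the pair x -> 2 and 1/x -> 3 is merged into
        # the single entry x -> -1 (i.e. x**2*(1/x)**3 -> 1/x); the key that
        # is not a pure reciprocal is the one that is kept.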
be = list(c_powers.items())
skip = set() # skip if we already saw them
for b, e in be:
if b in skip:
continue
bpos = b.is_positive or b.is_polar
if bpos:
binv = 1/b
if b != binv and binv in c_powers:
if b.as_numer_denom()[0] is S.One:
c_powers.pop(b)
c_powers[binv] -= e
else:
skip.add(binv)
e = c_powers.pop(binv)
c_powers[b] -= e
# check for base and negated base pairs
be = list(c_powers.items())
_n = S.NegativeOne
for b, e in be:
if (b.is_Symbol or b.is_Add) and -b in c_powers and b in c_powers:
if (b.is_positive is not None or e.is_integer):
if e.is_integer or b.is_negative:
c_powers[-b] += c_powers.pop(b)
else: # (-b).is_positive so use its e
e = c_powers.pop(-b)
c_powers[b] += e
if _n in c_powers:
c_powers[_n] += e
else:
c_powers[_n] = e
# filter c_powers and convert to a list
c_powers = [(b, e) for b, e in c_powers.items() if e]
# ==============================================================
# check for Mul bases of Rational powers that can be combined with
# separated bases, e.g. x*sqrt(x*y)*sqrt(x*sqrt(x*y)) ->
# (x*sqrt(x*y))**(3/2)
# ---------------- helper functions
def ratq(x):
'''Return Rational part of x's exponent as it appears in the bkey.
'''
return bkey(x)[0][1]
def bkey(b, e=None):
'''Return (b**s, c.q), c.p where e -> c*s. If e is not given then
it will be taken by using as_base_exp() on the input b.
e.g.
            x**(3/2) -> (x, 2), 3
            x**y -> (x**y, 1), 1
            x**(2*y/3) -> (x**y, 3), 2
            exp(x/2) -> (exp(x), 2), 1
'''
if e is not None: # coming from c_powers or from below
if e.is_Integer:
return (b, S.One), e
elif e.is_Rational:
return (b, Integer(e.q)), Integer(e.p)
else:
c, m = e.as_coeff_Mul(rational=True)
if c is not S.One:
if m.is_integer:
return (b, Integer(c.q)), m*Integer(c.p)
return (b**m, Integer(c.q)), Integer(c.p)
else:
return (b**e, S.One), S.One
else:
return bkey(*b.as_base_exp())
def update(b):
'''Decide what to do with base, b. If its exponent is now an
integer multiple of the Rational denominator, then remove it
and put the factors of its base in the common_b dictionary or
update the existing bases if necessary. If it has been zeroed
out, simply remove the base.
'''
newe, r = divmod(common_b[b], b[1])
if not r:
common_b.pop(b)
if newe:
for m in Mul.make_args(b[0]**newe):
b, e = bkey(m)
if b not in common_b:
common_b[b] = 0
common_b[b] += e
if b[1] != 1:
bases.append(b)
# ---------------- end of helper functions
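        # Rough plan for what follows: every commutative factor b**(p/q) is
        # keyed in common_b as (base, q) -> accumulated p; keys with q != 1
        # whose base is a Mul are collected in ``bases`` and are the
        # candidates for re-joining with factors that occur separately in
        # the product (e.g. x*sqrt(x*y)*sqrt(x*sqrt(x*y)) -> (x*sqrt(x*y))**(3/2)).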
# assemble a dictionary of the factors having a Rational power
common_b = {}
done = []
bases = []
for b, e in c_powers:
b, e = bkey(b, e)
if b in common_b:
common_b[b] = common_b[b] + e
else:
common_b[b] = e
if b[1] != 1 and b[0].is_Mul:
bases.append(b)
bases.sort(key=default_sort_key) # this makes tie-breaking canonical
bases.sort(key=measure, reverse=True) # handle longest first
for base in bases:
if base not in common_b: # it may have been removed already
continue
b, exponent = base
last = False # True when no factor of base is a radical
qlcm = 1 # the lcm of the radical denominators
while True:
bstart = b
qstart = qlcm
bb = [] # list of factors
                    ee = []  # (factor's exponent and its current value in common_b)
for bi in Mul.make_args(b):
bib, bie = bkey(bi)
if bib not in common_b or common_b[bib] < bie:
ee = bb = [] # failed
break
ee.append([bie, common_b[bib]])
bb.append(bib)
if ee:
# find the number of integral extractions possible
# e.g. [(1, 2), (2, 2)] -> min(2/1, 2/2) -> 1
min1 = ee[0][1]//ee[0][0]
for i in range(1, len(ee)):
rat = ee[i][1]//ee[i][0]
if rat < 1:
break
min1 = min(min1, rat)
else:
# update base factor counts
# e.g. if ee = [(2, 5), (3, 6)] then min1 = 2
# and the new base counts will be 5-2*2 and 6-2*3
for i in range(len(bb)):
common_b[bb[i]] -= min1*ee[i][0]
update(bb[i])
# update the count of the base
# e.g. x**2*y*sqrt(x*sqrt(y)) the count of x*sqrt(y)
# will increase by 4 to give bkey (x*sqrt(y), 2, 5)
common_b[base] += min1*qstart*exponent
if (last # no more radicals in base
or len(common_b) == 1 # nothing left to join with
or all(k[1] == 1 for k in common_b) # no rad's in common_b
):
break
# see what we can exponentiate base by to remove any radicals
# so we know what to search for
# e.g. if base were x**(1/2)*y**(1/3) then we should
# exponentiate by 6 and look for powers of x and y in the ratio
# of 2 to 3
qlcm = lcm([ratq(bi) for bi in Mul.make_args(bstart)])
if qlcm == 1:
break # we are done
b = bstart**qlcm
qlcm *= qstart
if all(ratq(bi) == 1 for bi in Mul.make_args(b)):
last = True # we are going to be done after this next pass
# this base no longer can find anything to join with and
# since it was longer than any other we are done with it
b, q = base
done.append((b, common_b.pop(base)*Rational(1, q)))
# update c_powers and get ready to continue with powsimp
c_powers = done
# there may be terms still in common_b that were bases that were
# identified as needing processing, so remove those, too
for (b, q), e in common_b.items():
if (b.is_Pow or isinstance(b, exp)) and \
q is not S.One and not b.exp.is_Rational:
b, be = b.as_base_exp()
b = b**(be/q)
else:
b = root(b, q)
c_powers.append((b, e))
check = len(c_powers)
c_powers = dict(c_powers)
assert len(c_powers) == check # there should have been no duplicates
# ==============================================================
# rebuild the expression
newexpr = expr.func(*(newexpr + [Pow(b, e) for b, e in c_powers.items()]))
if combine == 'exp':
return expr.func(newexpr, expr.func(*nc_part))
else:
return recurse(expr.func(*nc_part), combine='base') * \
recurse(newexpr, combine='base')
elif combine == 'base':
# Build c_powers and nc_part. These must both be lists not
# dicts because exp's are not combined.
c_powers = []
nc_part = []
for term in expr.args:
if term.is_commutative:
c_powers.append(list(term.as_base_exp()))
else:
nc_part.append(term)
# Pull out numerical coefficients from exponent if assumptions allow
# e.g., 2**(2*x) => 4**x
for i in range(len(c_powers)):
b, e = c_powers[i]
if not (all(x.is_nonnegative for x in b.as_numer_denom()) or e.is_integer or force or b.is_polar):
continue
exp_c, exp_t = e.as_coeff_Mul(rational=True)
if exp_c is not S.One and exp_t is not S.One:
c_powers[i] = [Pow(b, exp_c), exp_t]
# Combine bases whenever they have the same exponent and
# assumptions allow
# first gather the potential bases under the common exponent
c_exp = defaultdict(list)
for b, e in c_powers:
if deep:
e = recurse(e)
c_exp[e].append(b)
del c_powers
# Merge back in the results of the above to form a new product
c_powers = defaultdict(list)
for e in c_exp:
bases = c_exp[e]
# calculate the new base for e
if len(bases) == 1:
new_base = bases[0]
elif e.is_integer or force:
new_base = expr.func(*bases)
else:
# see which ones can be joined
unk = []
nonneg = []
neg = []
for bi in bases:
if bi.is_negative:
neg.append(bi)
elif bi.is_nonnegative:
nonneg.append(bi)
elif bi.is_polar:
nonneg.append(
bi) # polar can be treated like non-negative
else:
unk.append(bi)
if len(unk) == 1 and not neg or len(neg) == 1 and not unk:
# a single neg or a single unk can join the rest
nonneg.extend(unk + neg)
unk = neg = []
elif neg:
# their negative signs cancel in groups of 2*q if we know
# that e = p/q else we have to treat them as unknown
israt = False
if e.is_Rational:
israt = True
else:
p, d = e.as_numer_denom()
if p.is_integer and d.is_integer:
israt = True
if israt:
neg = [-w for w in neg]
unk.extend([S.NegativeOne]*len(neg))
else:
unk.extend(neg)
neg = []
del israt
# these shouldn't be joined
for b in unk:
c_powers[b].append(e)
# here is a new joined base
new_base = expr.func(*(nonneg + neg))
# if there are positive parts they will just get separated
# again unless some change is made
def _terms(e):
# return the number of terms of this expression
# when multiplied out -- assuming no joining of terms
if e.is_Add:
return sum([_terms(ai) for ai in e.args])
if e.is_Mul:
return prod([_terms(mi) for mi in e.args])
return 1
xnew_base = expand_mul(new_base, deep=False)
if len(Add.make_args(xnew_base)) < _terms(new_base):
new_base = factor_terms(xnew_base)
c_powers[new_base].append(e)
# break out the powers from c_powers now
c_part = [Pow(b, ei) for b, e in c_powers.items() for ei in e]
# we're done
return expr.func(*(c_part + nc_part))
else:
raise ValueError("combine must be one of ('all', 'exp', 'base').")
def powdenest(eq, force=False, polar=False):
r"""
Collect exponents on powers as assumptions allow.
Explanation
===========
Given ``(bb**be)**e``, this can be simplified as follows:
* if ``bb`` is positive, or
* ``e`` is an integer, or
* ``|be| < 1`` then this simplifies to ``bb**(be*e)``
Given a product of powers raised to a power, ``(bb1**be1 *
bb2**be2...)**e``, simplification can be done as follows:
- if e is positive, the gcd of all bei can be joined with e;
- all non-negative bb can be separated from those that are negative
and their gcd can be joined with e; autosimplification already
handles this separation.
- integer factors from powers that have integers in the denominator
of the exponent can be removed from any term and the gcd of such
integers can be joined with e
Setting ``force`` to ``True`` will make symbols that are not explicitly
negative behave as though they are positive, resulting in more
denesting.
Setting ``polar`` to ``True`` will do simplifications on the Riemann surface of
the logarithm, also resulting in more denestings.
When there are sums of logs in exp() then a product of powers may be
    obtained e.g. ``exp(3*(log(a) + 2*log(b)))`` -> ``a**3*b**6``.
Examples
========
>>> from sympy.abc import a, b, x, y, z
>>> from sympy import Symbol, exp, log, sqrt, symbols, powdenest
>>> powdenest((x**(2*a/3))**(3*x))
(x**(2*a/3))**(3*x)
>>> powdenest(exp(3*x*log(2)))
2**(3*x)
Assumptions may prevent expansion:
>>> powdenest(sqrt(x**2))
sqrt(x**2)
>>> p = symbols('p', positive=True)
>>> powdenest(sqrt(p**2))
p
No other expansion is done.
>>> i, j = symbols('i,j', integer=True)
>>> powdenest((x**x)**(i + j)) # -X-> (x**x)**i*(x**x)**j
x**(x*(i + j))
But exp() will be denested by moving all non-log terms outside of
the function; this may result in the collapsing of the exp to a power
with a different base:
>>> powdenest(exp(3*y*log(x)))
x**(3*y)
>>> powdenest(exp(y*(log(a) + log(b))))
(a*b)**y
>>> powdenest(exp(3*(log(a) + log(b))))
a**3*b**3
If assumptions allow, symbols can also be moved to the outermost exponent:
>>> i = Symbol('i', integer=True)
>>> powdenest(((x**(2*i))**(3*y))**x)
((x**(2*i))**(3*y))**x
>>> powdenest(((x**(2*i))**(3*y))**x, force=True)
x**(6*i*x*y)
>>> powdenest(((x**(2*a/3))**(3*y/i))**x)
((x**(2*a/3))**(3*y/i))**x
>>> powdenest((x**(2*i)*y**(4*i))**z, force=True)
(x*y**2)**(2*i*z)
>>> n = Symbol('n', negative=True)
>>> powdenest((x**i)**y, force=True)
x**(i*y)
>>> powdenest((n**i)**x, force=True)
(n**i)**x
"""
from sympy.simplify.simplify import posify
if force:
eq, rep = posify(eq)
return powdenest(eq, force=False).xreplace(rep)
if polar:
eq, rep = polarify(eq)
return unpolarify(powdenest(unpolarify(eq, exponents_only=True)), rep)
new = powsimp(sympify(eq))
return new.xreplace(Transform(
_denest_pow, filter=lambda m: m.is_Pow or isinstance(m, exp)))
_y = Dummy('y')
def _denest_pow(eq):
"""
Denest powers.
This is a helper function for powdenest that performs the actual
transformation.
"""
from sympy.simplify.simplify import logcombine
b, e = eq.as_base_exp()
if b.is_Pow or isinstance(b.func, exp) and e != 1:
new = b._eval_power(e)
if new is not None:
eq = new
b, e = new.as_base_exp()
# denest exp with log terms in exponent
if b is S.Exp1 and e.is_Mul:
logs = []
other = []
for ei in e.args:
if any(isinstance(ai, log) for ai in Add.make_args(ei)):
logs.append(ei)
else:
other.append(ei)
logs = logcombine(Mul(*logs))
return Pow(exp(logs), Mul(*other))
_, be = b.as_base_exp()
if be is S.One and not (b.is_Mul or
b.is_Rational and b.q != 1 or
b.is_positive):
return eq
# denest eq which is either pos**e or Pow**e or Mul**e or
# Mul(b1**e1, b2**e2)
# handle polar numbers specially
polars, nonpolars = [], []
for bb in Mul.make_args(b):
if bb.is_polar:
polars.append(bb.as_base_exp())
else:
nonpolars.append(bb)
if len(polars) == 1 and not polars[0][0].is_Mul:
return Pow(polars[0][0], polars[0][1]*e)*powdenest(Mul(*nonpolars)**e)
elif polars:
return Mul(*[powdenest(bb**(ee*e)) for (bb, ee) in polars]) \
*powdenest(Mul(*nonpolars)**e)
if b.is_Integer:
# use log to see if there is a power here
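        # e.g. b = 8 gives expand_log(log(8)) -> 3*log(2), so 8**e is
        # rewritten as 2**(3*e)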
logb = expand_log(log(b))
if logb.is_Mul:
c, logb = logb.args
e *= c
base = logb.args[0]
return Pow(base, e)
# if b is not a Mul or any factor is an atom then there is nothing to do
if not b.is_Mul or any(s.is_Atom for s in Mul.make_args(b)):
return eq
# let log handle the case of the base of the argument being a Mul, e.g.
    # sqrt(x**(2*i)*y**(6*i)) -> x**i*y**(3*i) if x and y are positive; we
# will take the log, expand it, and then factor out the common powers that
# now appear as coefficient. We do this manually since terms_gcd pulls out
# fractions, terms_gcd(x+x*y/2) -> x*(y + 2)/2 and we don't want the 1/2;
# gcd won't pull out numerators from a fraction: gcd(3*x, 9*x/2) -> x but
# we want 3*x. Neither work with noncommutatives.
def nc_gcd(aa, bb):
a, b = [i.as_coeff_Mul() for i in [aa, bb]]
c = gcd(a[0], b[0]).as_numer_denom()[0]
g = Mul(*(a[1].args_cnc(cset=True)[0] & b[1].args_cnc(cset=True)[0]))
return _keep_coeff(c, g)
glogb = expand_log(log(b))
if glogb.is_Add:
args = glogb.args
g = reduce(nc_gcd, args)
if g != 1:
cg, rg = g.as_coeff_Mul()
glogb = _keep_coeff(cg, rg*Add(*[a/g for a in args]))
# now put the log back together again
if isinstance(glogb, log) or not glogb.is_Mul:
if glogb.args[0].is_Pow or isinstance(glogb.args[0], exp):
glogb = _denest_pow(glogb.args[0])
if (abs(glogb.exp) < 1) == True:
return Pow(glogb.base, glogb.exp*e)
return eq
# the log(b) was a Mul so join any adds with logcombine
add = []
other = []
for a in glogb.args:
if a.is_Add:
add.append(a)
else:
other.append(a)
return Pow(exp(logcombine(Mul(*add))), e*Mul(*other))
|
0bf150af2b5a340b38eeeac388b9b62ce263c692768c3b697e04659a984923ce | from sympy.core import Function, S, Mul, Pow, Add
from sympy.core.compatibility import ordered, default_sort_key
from sympy.core.function import count_ops, expand_func
from sympy.functions.combinatorial.factorials import binomial
from sympy.functions import gamma, sqrt, sin
from sympy.polys import factor, cancel
from sympy.utilities.iterables import sift, uniq
def gammasimp(expr):
r"""
Simplify expressions with gamma functions.
Explanation
===========
This function takes as input an expression containing gamma
functions or functions that can be rewritten in terms of gamma
functions and tries to minimize the number of those functions and
reduce the size of their arguments.
    The algorithm works by rewriting all gamma functions as expressions
    involving rising factorials (Pochhammer symbols) and applying
    recurrence relations and other transformations applicable to rising
    factorials in order to reduce their arguments, possibly allowing the
    resulting rising factorials to cancel. Rising factorials with an
    integer second argument are expanded into polynomial forms and,
    finally, all remaining rising factorials are rewritten in terms of
    gamma functions.
Then the following two steps are performed.
1. Reduce the number of gammas by applying the reflection theorem
gamma(x)*gamma(1-x) == pi/sin(pi*x).
2. Reduce the number of gammas by applying the multiplication theorem
gamma(x)*gamma(x+1/n)*...*gamma(x+(n-1)/n) == C*gamma(n*x).
It then reduces the number of prefactors by absorbing them into gammas
where possible and expands gammas with rational argument.
    All transformation rules can be found in (or were derived from) the
    following references:
.. [1] http://functions.wolfram.com/GammaBetaErf/Pochhammer/17/01/02/
.. [2] http://functions.wolfram.com/GammaBetaErf/Pochhammer/27/01/0005/
Examples
========
>>> from sympy.simplify import gammasimp
>>> from sympy import gamma, Symbol
>>> from sympy.abc import x
>>> n = Symbol('n', integer = True)
>>> gammasimp(gamma(x)/gamma(x - 3))
(x - 3)*(x - 2)*(x - 1)
>>> gammasimp(gamma(n + 3))
gamma(n + 3)
"""
expr = expr.rewrite(gamma)
return _gammasimp(expr, as_comb = False)
def _gammasimp(expr, as_comb):
"""
Helper function for gammasimp and combsimp.
Explanation
===========
Simplifies expressions written in terms of gamma function. If
as_comb is True, it tries to preserve integer arguments. See
docstring of gammasimp for more information. This was part of
combsimp() in combsimp.py.
"""
expr = expr.replace(gamma,
lambda n: _rf(1, (n - 1).expand()))
if as_comb:
expr = expr.replace(_rf,
lambda a, b: gamma(b + 1))
else:
expr = expr.replace(_rf,
lambda a, b: gamma(a + b)/gamma(a))
def rule(n, k):
coeff, rewrite = S.One, False
cn, _n = n.as_coeff_Add()
if _n and cn.is_Integer and cn:
coeff *= _rf(_n + 1, cn)/_rf(_n - k + 1, cn)
rewrite = True
n = _n
# this sort of binomial has already been removed by
# rising factorials but is left here in case the order
# of rule application is changed
if k.is_Add:
ck, _k = k.as_coeff_Add()
if _k and ck.is_Integer and ck:
coeff *= _rf(n - ck - _k + 1, ck)/_rf(_k + 1, ck)
rewrite = True
k = _k
if count_ops(k) > count_ops(n - k):
rewrite = True
k = n - k
if rewrite:
return coeff*binomial(n, k)
expr = expr.replace(binomial, rule)
def rule_gamma(expr, level=0):
""" Simplify products of gamma functions further. """
if expr.is_Atom:
return expr
def gamma_rat(x):
# helper to simplify ratios of gammas
was = x.count(gamma)
xx = x.replace(gamma, lambda n: _rf(1, (n - 1).expand()
).replace(_rf, lambda a, b: gamma(a + b)/gamma(a)))
if xx.count(gamma) < was:
x = xx
return x
def gamma_factor(x):
# return True if there is a gamma factor in shallow args
if isinstance(x, gamma):
return True
if x.is_Add or x.is_Mul:
return any(gamma_factor(xi) for xi in x.args)
if x.is_Pow and (x.exp.is_integer or x.base.is_positive):
return gamma_factor(x.base)
return False
# recursion step
if level == 0:
expr = expr.func(*[rule_gamma(x, level + 1) for x in expr.args])
level += 1
if not expr.is_Mul:
return expr
# non-commutative step
if level == 1:
args, nc = expr.args_cnc()
if not args:
return expr
if nc:
return rule_gamma(Mul._from_args(args), level + 1)*Mul._from_args(nc)
level += 1
# pure gamma handling, not factor absorption
if level == 2:
T, F = sift(expr.args, gamma_factor, binary=True)
gamma_ind = Mul(*F)
d = Mul(*T)
nd, dd = d.as_numer_denom()
for ipass in range(2):
args = list(ordered(Mul.make_args(nd)))
for i, ni in enumerate(args):
if ni.is_Add:
ni, dd = Add(*[
rule_gamma(gamma_rat(a/dd), level + 1) for a in ni.args]
).as_numer_denom()
args[i] = ni
if not dd.has(gamma):
break
nd = Mul(*args)
if ipass == 0 and not gamma_factor(nd):
break
nd, dd = dd, nd # now process in reversed order
expr = gamma_ind*nd/dd
if not (expr.is_Mul and (gamma_factor(dd) or gamma_factor(nd))):
return expr
level += 1
# iteration until constant
if level == 3:
while True:
was = expr
expr = rule_gamma(expr, 4)
if expr == was:
return expr
numer_gammas = []
denom_gammas = []
numer_others = []
denom_others = []
def explicate(p):
if p is S.One:
return None, []
b, e = p.as_base_exp()
if e.is_Integer:
if isinstance(b, gamma):
return True, [b.args[0]]*e
else:
return False, [b]*e
else:
return False, [p]
newargs = list(ordered(expr.args))
while newargs:
n, d = newargs.pop().as_numer_denom()
isg, l = explicate(n)
if isg:
numer_gammas.extend(l)
elif isg is False:
numer_others.extend(l)
isg, l = explicate(d)
if isg:
denom_gammas.extend(l)
elif isg is False:
denom_others.extend(l)
# =========== level 2 work: pure gamma manipulation =========
if not as_comb:
# Try to reduce the number of gamma factors by applying the
# reflection formula gamma(x)*gamma(1-x) = pi/sin(pi*x)
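            # e.g. (illustration only) gamma(x)*gamma(1 - x) collapses to
            # pi/sin(pi*x), and shifted pairs like gamma(x)*gamma(2 - x)
            # additionally leave behind the integer-offset linear factors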
for gammas, numer, denom in [(
numer_gammas, numer_others, denom_others),
(denom_gammas, denom_others, numer_others)]:
new = []
while gammas:
g1 = gammas.pop()
if g1.is_integer:
new.append(g1)
continue
for i, g2 in enumerate(gammas):
n = g1 + g2 - 1
if not n.is_Integer:
continue
numer.append(S.Pi)
denom.append(sin(S.Pi*g1))
gammas.pop(i)
if n > 0:
for k in range(n):
numer.append(1 - g1 + k)
elif n < 0:
for k in range(-n):
denom.append(-g1 - k)
break
else:
new.append(g1)
# /!\ updating IN PLACE
gammas[:] = new
# Try to reduce the number of gammas by using the duplication
# theorem to cancel an upper and lower: gamma(2*s)/gamma(s) =
# 2**(2*s + 1)/(4*sqrt(pi))*gamma(s + 1/2). Although this could
# be done with higher argument ratios like gamma(3*x)/gamma(x),
# this would not reduce the number of gammas as in this case.
for ng, dg, no, do in [(numer_gammas, denom_gammas, numer_others,
denom_others),
(denom_gammas, numer_gammas, denom_others,
numer_others)]:
while True:
for x in ng:
for y in dg:
n = x - 2*y
if n.is_Integer:
break
else:
continue
break
else:
break
ng.remove(x)
dg.remove(y)
if n > 0:
for k in range(n):
no.append(2*y + k)
elif n < 0:
for k in range(-n):
do.append(2*y - 1 - k)
ng.append(y + S.Half)
no.append(2**(2*y - 1))
do.append(sqrt(S.Pi))
# Try to reduce the number of gamma factors by applying the
# multiplication theorem (used when n gammas with args differing
# by 1/n mod 1 are encountered).
#
# run of 2 with args differing by 1/2
#
# >>> gammasimp(gamma(x)*gamma(x+S.Half))
# 2*sqrt(2)*2**(-2*x - 1/2)*sqrt(pi)*gamma(2*x)
#
# run of 3 args differing by 1/3 (mod 1)
#
# >>> gammasimp(gamma(x)*gamma(x+S(1)/3)*gamma(x+S(2)/3))
# 6*3**(-3*x - 1/2)*pi*gamma(3*x)
# >>> gammasimp(gamma(x)*gamma(x+S(1)/3)*gamma(x+S(5)/3))
# 2*3**(-3*x - 1/2)*pi*(3*x + 2)*gamma(3*x)
#
def _run(coeffs):
# find runs in coeffs such that the difference in terms (mod 1)
# of t1, t2, ..., tn is 1/n
u = list(uniq(coeffs))
for i in range(len(u)):
dj = ([((u[j] - u[i]) % 1, j) for j in range(i + 1, len(u))])
for one, j in dj:
if one.p == 1 and one.q != 1:
n = one.q
got = [i]
get = list(range(1, n))
for d, j in dj:
m = n*d
if m.is_Integer and m in get:
get.remove(m)
got.append(j)
if not get:
break
else:
continue
for i, j in enumerate(got):
c = u[j]
coeffs.remove(c)
got[i] = c
return one.q, got[0], got[1:]
def _mult_thm(gammas, numer, denom):
# pull off and analyze the leading coefficient from each gamma arg
# looking for runs in those Rationals
# expr -> coeff + resid -> rats[resid] = coeff
rats = {}
for g in gammas:
c, resid = g.as_coeff_Add()
rats.setdefault(resid, []).append(c)
# look for runs in Rationals for each resid
keys = sorted(rats, key=default_sort_key)
for resid in keys:
coeffs = list(sorted(rats[resid]))
new = []
while True:
run = _run(coeffs)
if run is None:
break
# process the sequence that was found:
# 1) convert all the gamma functions to have the right
# argument (could be off by an integer)
# 2) append the factors corresponding to the theorem
# 3) append the new gamma function
n, ui, other = run
# (1)
for u in other:
con = resid + u - 1
for k in range(int(u - ui)):
numer.append(con - k)
con = n*(resid + ui) # for (2) and (3)
# (2)
numer.append((2*S.Pi)**(S(n - 1)/2)*
n**(S.Half - con))
# (3)
new.append(con)
# restore resid to coeffs
rats[resid] = [resid + c for c in coeffs] + new
# rebuild the gamma arguments
g = []
for resid in keys:
g += rats[resid]
# /!\ updating IN PLACE
gammas[:] = g
for l, numer, denom in [(numer_gammas, numer_others, denom_others),
(denom_gammas, denom_others, numer_others)]:
_mult_thm(l, numer, denom)
# =========== level >= 2 work: factor absorption =========
if level >= 2:
# Try to absorb factors into the gammas: x*gamma(x) -> gamma(x + 1)
# and gamma(x)/(x - 1) -> gamma(x - 1)
# This code (in particular repeated calls to find_fuzzy) can be very
# slow.
def find_fuzzy(l, x):
if not l:
return
S1, T1 = compute_ST(x)
for y in l:
S2, T2 = inv[y]
if T1 != T2 or (not S1.intersection(S2) and
(S1 != set() or S2 != set())):
continue
# XXX we want some simplification (e.g. cancel or
# simplify) but no matter what it's slow.
a = len(cancel(x/y).free_symbols)
b = len(x.free_symbols)
c = len(y.free_symbols)
# TODO is there a better heuristic?
if a == 0 and (b > 0 or c > 0):
return y
# We thus try to avoid expensive calls by building the following
# "invariants": For every factor or gamma function argument
# - the set of free symbols S
# - the set of functional components T
# We will only try to absorb if T1==T2 and (S1 intersect S2 != emptyset
# or S1 == S2 == emptyset)
inv = {}
def compute_ST(expr):
if expr in inv:
return inv[expr]
return (expr.free_symbols, expr.atoms(Function).union(
{e.exp for e in expr.atoms(Pow)}))
def update_ST(expr):
inv[expr] = compute_ST(expr)
for expr in numer_gammas + denom_gammas + numer_others + denom_others:
update_ST(expr)
for gammas, numer, denom in [(
numer_gammas, numer_others, denom_others),
(denom_gammas, denom_others, numer_others)]:
new = []
while gammas:
g = gammas.pop()
cont = True
while cont:
cont = False
y = find_fuzzy(numer, g)
if y is not None:
numer.remove(y)
if y != g:
numer.append(y/g)
update_ST(y/g)
g += 1
cont = True
y = find_fuzzy(denom, g - 1)
if y is not None:
denom.remove(y)
if y != g - 1:
numer.append((g - 1)/y)
update_ST((g - 1)/y)
g -= 1
cont = True
new.append(g)
# /!\ updating IN PLACE
gammas[:] = new
# =========== rebuild expr ==================================
return Mul(*[gamma(g) for g in numer_gammas]) \
/ Mul(*[gamma(g) for g in denom_gammas]) \
* Mul(*numer_others) / Mul(*denom_others)
# (for some reason we cannot use Basic.replace in this case)
was = factor(expr)
expr = rule_gamma(was)
if expr != was:
expr = factor(expr)
expr = expr.replace(gamma,
lambda n: expand_func(gamma(n)) if n.is_Rational else gamma(n))
return expr
class _rf(Function):
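    # Internal stand-in for the rising factorial
    # rf(a, b) = a*(a + 1)*...*(a + b - 1), so that gamma(n) can be handled
    # as _rf(1, n - 1); integer shifts in either argument are peeled off by
    # the recurrences in ``eval`` below.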
@classmethod
def eval(cls, a, b):
if b.is_Integer:
if not b:
return S.One
n, result = int(b), S.One
if n > 0:
for i in range(n):
result *= a + i
return result
elif n < 0:
for i in range(1, -n + 1):
result *= a - i
return 1/result
else:
if b.is_Add:
c, _b = b.as_coeff_Add()
if c.is_Integer:
if c > 0:
return _rf(a, _b)*_rf(a + _b, c)
elif c < 0:
return _rf(a, _b)/_rf(a + _b + c, -c)
if a.is_Add:
c, _a = a.as_coeff_Add()
if c.is_Integer:
if c > 0:
return _rf(_a, b)*_rf(_a + b, c)/_rf(_a, c)
elif c < 0:
return _rf(_a, b)*_rf(_a + c, -c)/_rf(_a + b + c, -c)
|
e381b806646ed3ea8fd1280303b457cfb59c29dfc4986c44aaab050d544235dd | from collections import defaultdict
from sympy.core import (sympify, Basic, S, Expr, expand_mul, factor_terms,
Mul, Dummy, igcd, FunctionClass, Add, symbols, Wild, expand)
from sympy.core.cache import cacheit
from sympy.core.compatibility import reduce, iterable, SYMPY_INTS
from sympy.core.function import count_ops, _mexpand
from sympy.core.numbers import I, Integer
from sympy.functions import sin, cos, exp, cosh, tanh, sinh, tan, cot, coth
from sympy.functions.elementary.hyperbolic import HyperbolicFunction
from sympy.functions.elementary.trigonometric import TrigonometricFunction
from sympy.polys import Poly, factor, cancel, parallel_poly_from_expr
from sympy.polys.domains import ZZ
from sympy.polys.polyerrors import PolificationFailed
from sympy.polys.polytools import groebner
from sympy.simplify.cse_main import cse
from sympy.strategies.core import identity
from sympy.strategies.tree import greedy
from sympy.utilities.misc import debug
def trigsimp_groebner(expr, hints=[], quick=False, order="grlex",
polynomial=False):
"""
Simplify trigonometric expressions using a groebner basis algorithm.
Explanation
===========
This routine takes a fraction involving trigonometric or hyperbolic
expressions, and tries to simplify it. The primary metric is the
total degree. Some attempts are made to choose the simplest possible
expression of the minimal degree, but this is non-rigorous, and also
very slow (see the ``quick=True`` option).
If ``polynomial`` is set to True, instead of simplifying numerator and
denominator together, this function just brings numerator and denominator
into a canonical form. This is much faster, but has potentially worse
results. However, if the input is a polynomial, then the result is
guaranteed to be an equivalent polynomial of minimal degree.
The most important option is hints. Its entries can be any of the
following:
- a natural number
- a function
- an iterable of the form (func, var1, var2, ...)
- anything else, interpreted as a generator
A number is used to indicate that the search space should be increased.
A function is used to indicate that said function is likely to occur in a
simplified expression.
    An iterable is used to indicate that func(var1 + var2 + ...) is likely to
    occur in a simplified expression.
An additional generator also indicates that it is likely to occur.
(See examples below).
This routine carries out various computationally intensive algorithms.
The option ``quick=True`` can be used to suppress one particularly slow
step (at the expense of potentially more complicated results, but never at
the expense of increased total degree).
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import sin, tan, cos, sinh, cosh, tanh
>>> from sympy.simplify.trigsimp import trigsimp_groebner
Suppose you want to simplify ``sin(x)*cos(x)``. Naively, nothing happens:
>>> ex = sin(x)*cos(x)
>>> trigsimp_groebner(ex)
sin(x)*cos(x)
This is because ``trigsimp_groebner`` only looks for a simplification
involving just ``sin(x)`` and ``cos(x)``. You can tell it to also try
``2*x`` by passing ``hints=[2]``:
>>> trigsimp_groebner(ex, hints=[2])
sin(2*x)/2
>>> trigsimp_groebner(sin(x)**2 - cos(x)**2, hints=[2])
-cos(2*x)
Increasing the search space this way can quickly become expensive. A much
faster way is to give a specific expression that is likely to occur:
>>> trigsimp_groebner(ex, hints=[sin(2*x)])
sin(2*x)/2
Hyperbolic expressions are similarly supported:
>>> trigsimp_groebner(sinh(2*x)/sinh(x))
2*cosh(x)
Note how no hints had to be passed, since the expression already involved
``2*x``.
The tangent function is also supported. You can either pass ``tan`` in the
hints, to indicate that tan should be tried whenever cosine or sine are,
or you can pass a specific generator:
>>> trigsimp_groebner(sin(x)/cos(x), hints=[tan])
tan(x)
>>> trigsimp_groebner(sinh(x)/cosh(x), hints=[tanh(x)])
tanh(x)
Finally, you can use the iterable form to suggest that angle sum formulae
should be tried:
>>> ex = (tan(x) + tan(y))/(1 - tan(x)*tan(y))
>>> trigsimp_groebner(ex, hints=[(tan, x, y)])
tan(x + y)
"""
# TODO
# - preprocess by replacing everything by funcs we can handle
# - optionally use cot instead of tan
# - more intelligent hinting.
# For example, if the ideal is small, and we have sin(x), sin(y),
# add sin(x + y) automatically... ?
# - algebraic numbers ...
# - expressions of lowest degree are not distinguished properly
# e.g. 1 - sin(x)**2
# - we could try to order the generators intelligently, so as to influence
# which monomials appear in the quotient basis
# THEORY
# ------
# Ratsimpmodprime above can be used to "simplify" a rational function
# modulo a prime ideal. "Simplify" mainly means finding an equivalent
# expression of lower total degree.
#
# We intend to use this to simplify trigonometric functions. To do that,
# we need to decide (a) which ring to use, and (b) modulo which ideal to
# simplify. In practice, (a) means settling on a list of "generators"
# a, b, c, ..., such that the fraction we want to simplify is a rational
# function in a, b, c, ..., with coefficients in ZZ (integers).
    # (b) means that we have to decide what relations to impose on the
# generators. There are two practical problems:
# (1) The ideal has to be *prime* (a technical term).
# (2) The relations have to be polynomials in the generators.
#
# We typically have two kinds of generators:
# - trigonometric expressions, like sin(x), cos(5*x), etc
# - "everything else", like gamma(x), pi, etc.
#
# Since this function is trigsimp, we will concentrate on what to do with
# trigonometric expressions. We can also simplify hyperbolic expressions,
# but the extensions should be clear.
#
# One crucial point is that all *other* generators really should behave
# like indeterminates. In particular if (say) "I" is one of them, then
# in fact I**2 + 1 = 0 and we may and will compute non-sensical
# expressions. However, we can work with a dummy and add the relation
# I**2 + 1 = 0 to our ideal, then substitute back in the end.
#
# Now regarding trigonometric generators. We split them into groups,
# according to the argument of the trigonometric functions. We want to
# organise this in such a way that most trigonometric identities apply in
# the same group. For example, given sin(x), cos(2*x) and cos(y), we would
# group as [sin(x), cos(2*x)] and [cos(y)].
#
# Our prime ideal will be built in three steps:
# (1) For each group, compute a "geometrically prime" ideal of relations.
# Geometrically prime means that it generates a prime ideal in
# CC[gens], not just ZZ[gens].
# (2) Take the union of all the generators of the ideals for all groups.
# By the geometric primality condition, this is still prime.
# (3) Add further inter-group relations which preserve primality.
#
# Step (1) works as follows. We will isolate common factors in the
# argument, so that all our generators are of the form sin(n*x), cos(n*x)
# or tan(n*x), with n an integer. Suppose first there are no tan terms.
# The ideal [sin(x)**2 + cos(x)**2 - 1] is geometrically prime, since
# X**2 + Y**2 - 1 is irreducible over CC.
    # Now, if we have a generator sin(n*x), then we can, using trig identities,
# express sin(n*x) as a polynomial in sin(x) and cos(x). We can add this
# relation to the ideal, preserving geometric primality, since the quotient
# ring is unchanged.
# Thus we have treated all sin and cos terms.
# For tan(n*x), we add a relation tan(n*x)*cos(n*x) - sin(n*x) = 0.
# (This requires of course that we already have relations for cos(n*x) and
# sin(n*x).) It is not obvious, but it seems that this preserves geometric
# primality.
# XXX A real proof would be nice. HELP!
# Sketch that <S**2 + C**2 - 1, C*T - S> is a prime ideal of
# CC[S, C, T]:
# - it suffices to show that the projective closure in CP**3 is
# irreducible
# - using the half-angle substitutions, we can express sin(x), tan(x),
# cos(x) as rational functions in tan(x/2)
# - from this, we get a rational map from CP**1 to our curve
# - this is a morphism, hence the curve is prime
#
# Step (2) is trivial.
#
# Step (3) works by adding selected relations of the form
# sin(x + y) - sin(x)*cos(y) - sin(y)*cos(x), etc. Geometric primality is
# preserved by the same argument as before.
def parse_hints(hints):
"""Split hints into (n, funcs, iterables, gens)."""
n = 1
funcs, iterables, gens = [], [], []
for e in hints:
if isinstance(e, (SYMPY_INTS, Integer)):
n = e
elif isinstance(e, FunctionClass):
funcs.append(e)
elif iterable(e):
iterables.append((e[0], e[1:]))
# XXX sin(x+2y)?
# Note: we go through polys so e.g.
# sin(-x) -> -sin(x) -> sin(x)
gens.extend(parallel_poly_from_expr(
[e[0](x) for x in e[1:]] + [e[0](Add(*e[1:]))])[1].gens)
else:
gens.append(e)
return n, funcs, iterables, gens
def build_ideal(x, terms):
"""
Build generators for our ideal. ``Terms`` is an iterable with elements of
the form (fn, coeff), indicating that we have a generator fn(coeff*x).
If any of the terms is trigonometric, sin(x) and cos(x) are guaranteed
to appear in terms. Similarly for hyperbolic functions. For tan(n*x),
sin(n*x) and cos(n*x) are guaranteed.
"""
I = []
y = Dummy('y')
for fn, coeff in terms:
for c, s, t, rel in (
[cos, sin, tan, cos(x)**2 + sin(x)**2 - 1],
[cosh, sinh, tanh, cosh(x)**2 - sinh(x)**2 - 1]):
if coeff == 1 and fn in [c, s]:
I.append(rel)
elif fn == t:
I.append(t(coeff*x)*c(coeff*x) - s(coeff*x))
elif fn in [c, s]:
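                    # expand the multiple-angle formula on a plain Dummy y
                    # first, so that only the integer multiple gets expanded,
                    # and only then substitute the actual base argument x back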
cn = fn(coeff*y).expand(trig=True).subs(y, x)
I.append(fn(coeff*x) - cn)
return list(set(I))
def analyse_gens(gens, hints):
"""
Analyse the generators ``gens``, using the hints ``hints``.
The meaning of ``hints`` is described in the main docstring.
Return a new list of generators, and also the ideal we should
work with.
"""
# First parse the hints
n, funcs, iterables, extragens = parse_hints(hints)
debug('n=%s' % n, 'funcs:', funcs, 'iterables:',
iterables, 'extragens:', extragens)
# We just add the extragens to gens and analyse them as before
gens = list(gens)
gens.extend(extragens)
# remove duplicates
funcs = list(set(funcs))
iterables = list(set(iterables))
gens = list(set(gens))
# all the functions we can do anything with
allfuncs = {sin, cos, tan, sinh, cosh, tanh}
# sin(3*x) -> ((3, x), sin)
trigterms = [(g.args[0].as_coeff_mul(), g.func) for g in gens
if g.func in allfuncs]
# Our list of new generators - start with anything that we cannot
# work with (i.e. is not a trigonometric term)
freegens = [g for g in gens if g.func not in allfuncs]
newgens = []
trigdict = {}
for (coeff, var), fn in trigterms:
trigdict.setdefault(var, []).append((coeff, fn))
res = [] # the ideal
for key, val in trigdict.items():
            # We have now assembled a dictionary. Its keys are common
# arguments in trigonometric expressions, and values are lists of
# pairs (fn, coeff). x0, (fn, coeff) in trigdict means that we
# need to deal with fn(coeff*x0). We take the rational gcd of the
# coeffs, call it ``gcd``. We then use x = x0/gcd as "base symbol",
# all other arguments are integral multiples thereof.
# We will build an ideal which works with sin(x), cos(x).
# If hint tan is provided, also work with tan(x). Moreover, if
# n > 1, also work with sin(k*x) for k <= n, and similarly for cos
# (and tan if the hint is provided). Finally, any generators which
# the ideal does not work with but we need to accommodate (either
# because it was in expr or because it was provided as a hint)
# we also build into the ideal.
# This selection process is expressed in the list ``terms``.
# build_ideal then generates the actual relations in our ideal,
# from this list.
fns = [x[1] for x in val]
val = [x[0] for x in val]
gcd = reduce(igcd, val)
terms = [(fn, v/gcd) for (fn, v) in zip(fns, val)]
fs = set(funcs + fns)
for c, s, t in ([cos, sin, tan], [cosh, sinh, tanh]):
if any(x in fs for x in (c, s, t)):
fs.add(c)
fs.add(s)
for fn in fs:
for k in range(1, n + 1):
terms.append((fn, k))
extra = []
for fn, v in terms:
if fn == tan:
extra.append((sin, v))
extra.append((cos, v))
if fn in [sin, cos] and tan in fs:
extra.append((tan, v))
if fn == tanh:
extra.append((sinh, v))
extra.append((cosh, v))
if fn in [sinh, cosh] and tanh in fs:
extra.append((tanh, v))
terms.extend(extra)
x = gcd*Mul(*key)
r = build_ideal(x, terms)
res.extend(r)
newgens.extend({fn(v*x) for fn, v in terms})
# Add generators for compound expressions from iterables
for fn, args in iterables:
if fn == tan:
# Tan expressions are recovered from sin and cos.
iterables.extend([(sin, args), (cos, args)])
elif fn == tanh:
                # Tanh expressions are recovered from sinh and cosh.
iterables.extend([(sinh, args), (cosh, args)])
else:
dummys = symbols('d:%i' % len(args), cls=Dummy)
expr = fn( Add(*dummys)).expand(trig=True).subs(list(zip(dummys, args)))
res.append(fn(Add(*args)) - expr)
if myI in gens:
res.append(myI**2 + 1)
freegens.remove(myI)
newgens.append(myI)
return res, freegens, newgens
myI = Dummy('I')
expr = expr.subs(S.ImaginaryUnit, myI)
subs = [(myI, S.ImaginaryUnit)]
num, denom = cancel(expr).as_numer_denom()
try:
(pnum, pdenom), opt = parallel_poly_from_expr([num, denom])
except PolificationFailed:
return expr
debug('initial gens:', opt.gens)
ideal, freegens, gens = analyse_gens(opt.gens, hints)
debug('ideal:', ideal)
debug('new gens:', gens, " -- len", len(gens))
    debug('free gens:', freegens, " -- len", len(freegens))
# NOTE we force the domain to be ZZ to stop polys from injecting generators
# (which is usually a sign of a bug in the way we build the ideal)
if not gens:
return expr
G = groebner(ideal, order=order, gens=gens, domain=ZZ)
debug('groebner basis:', list(G), " -- len", len(G))
# If our fraction is a polynomial in the free generators, simplify all
# coefficients separately:
from sympy.simplify.ratsimp import ratsimpmodprime
if freegens and pdenom.has_only_gens(*set(gens).intersection(pdenom.gens)):
num = Poly(num, gens=gens+freegens).eject(*gens)
res = []
for monom, coeff in num.terms():
ourgens = set(parallel_poly_from_expr([coeff, denom])[1].gens)
# We compute the transitive closure of all generators that can
# be reached from our generators through relations in the ideal.
changed = True
while changed:
changed = False
for p in ideal:
p = Poly(p)
if not ourgens.issuperset(p.gens) and \
not p.has_only_gens(*set(p.gens).difference(ourgens)):
changed = True
ourgens.update(p.exclude().gens)
# NOTE preserve order!
realgens = [x for x in gens if x in ourgens]
# The generators of the ideal have now been (implicitly) split
# into two groups: those involving ourgens and those that don't.
# Since we took the transitive closure above, these two groups
            # live in subrings generated by *disjoint* sets of variables.
            # Any sensible groebner basis algorithm will preserve this disjoint
            # structure (i.e. the elements of the groebner basis can be split
            # similarly), and the two subsets of the groebner basis then
# form groebner bases by themselves. (For the smaller generating
# sets, of course.)
ourG = [g.as_expr() for g in G.polys if
g.has_only_gens(*ourgens.intersection(g.gens))]
res.append(Mul(*[a**b for a, b in zip(freegens, monom)]) * \
ratsimpmodprime(coeff/denom, ourG, order=order,
gens=realgens, quick=quick, domain=ZZ,
polynomial=polynomial).subs(subs))
return Add(*res)
# NOTE The following is simpler and has less assumptions on the
# groebner basis algorithm. If the above turns out to be broken,
# use this.
return Add(*[Mul(*[a**b for a, b in zip(freegens, monom)]) * \
ratsimpmodprime(coeff/denom, list(G), order=order,
gens=gens, quick=quick, domain=ZZ)
for monom, coeff in num.terms()])
else:
return ratsimpmodprime(
expr, list(G), order=order, gens=freegens+gens,
quick=quick, domain=ZZ, polynomial=polynomial).subs(subs)
_trigs = (TrigonometricFunction, HyperbolicFunction)
def trigsimp(expr, **opts):
"""
reduces expression by using known trig identities
Explanation
===========
method:
- Determine the method to use. Valid choices are 'matching' (default),
'groebner', 'combined', and 'fu'. If 'matching', simplify the
expression recursively by targeting common patterns. If 'groebner', apply
an experimental groebner basis algorithm. In this case further options
are forwarded to ``trigsimp_groebner``, please refer to its docstring.
If 'combined', first run the groebner basis algorithm with small
default parameters, then run the 'matching' algorithm. 'fu' runs the
collection of trigonometric transformations described by Fu, et al.
(see the `fu` docstring).
Examples
========
>>> from sympy import trigsimp, sin, cos, log
>>> from sympy.abc import x
>>> e = 2*sin(x)**2 + 2*cos(x)**2
>>> trigsimp(e)
2
Simplification occurs wherever trigonometric functions are located.
>>> trigsimp(log(e))
log(2)
Using `method="groebner"` (or `"combined"`) might lead to greater
simplification.
    The old trigsimp routine can be accessed with ``method='old'``.
>>> from sympy import coth, tanh
>>> t = 3*tanh(x)**7 - 2/coth(x)**7
>>> trigsimp(t, method='old') == t
True
>>> trigsimp(t)
tanh(x)**7
"""
from sympy.simplify.fu import fu
expr = sympify(expr)
_eval_trigsimp = getattr(expr, '_eval_trigsimp', None)
if _eval_trigsimp is not None:
return _eval_trigsimp(**opts)
old = opts.pop('old', False)
if not old:
opts.pop('deep', None)
opts.pop('recursive', None)
method = opts.pop('method', 'matching')
else:
method = 'old'
def groebnersimp(ex, **opts):
def traverse(e):
if e.is_Atom:
return e
args = [traverse(x) for x in e.args]
if e.is_Function or e.is_Pow:
args = [trigsimp_groebner(x, **opts) for x in args]
return e.func(*args)
new = traverse(ex)
if not isinstance(new, Expr):
return new
return trigsimp_groebner(new, **opts)
trigsimpfunc = {
'fu': (lambda x: fu(x, **opts)),
'matching': (lambda x: futrig(x)),
'groebner': (lambda x: groebnersimp(x, **opts)),
'combined': (lambda x: futrig(groebnersimp(x,
polynomial=True, hints=[2, tan]))),
'old': lambda x: trigsimp_old(x, **opts),
}[method]
return trigsimpfunc(expr)
def exptrigsimp(expr):
"""
Simplifies exponential / trigonometric / hyperbolic functions.
Examples
========
>>> from sympy import exptrigsimp, exp, cosh, sinh
>>> from sympy.abc import z
>>> exptrigsimp(exp(z) + exp(-z))
2*cosh(z)
>>> exptrigsimp(cosh(z) - sinh(z))
exp(-z)
"""
from sympy.simplify.fu import hyper_as_trig, TR2i
from sympy.simplify.simplify import bottom_up
def exp_trig(e):
# select the better of e, and e rewritten in terms of exp or trig
# functions
choices = [e]
if e.has(*_trigs):
choices.append(e.rewrite(exp))
choices.append(e.rewrite(cos))
return min(*choices, key=count_ops)
newexpr = bottom_up(expr, exp_trig)
def f(rv):
if not rv.is_Mul:
return rv
commutative_part, noncommutative_part = rv.args_cnc()
# Since as_powers_dict loses order information,
# if there is more than one noncommutative factor,
# it should only be used to simplify the commutative part.
if (len(noncommutative_part) > 1):
return f(Mul(*commutative_part))*Mul(*noncommutative_part)
rvd = rv.as_powers_dict()
newd = rvd.copy()
def signlog(expr, sign=1):
if expr is S.Exp1:
return sign, 1
elif isinstance(expr, exp):
return sign, expr.args[0]
elif sign == 1:
return signlog(-expr, sign=-1)
else:
return None, None
ee = rvd[S.Exp1]
for k in rvd:
if k.is_Add and len(k.args) == 2:
# k == c*(1 + sign*E**x)
c = k.args[0]
sign, x = signlog(k.args[1]/c)
if not x:
continue
m = rvd[k]
newd[k] -= m
if ee == -x*m/2:
# sinh and cosh
newd[S.Exp1] -= ee
ee = 0
if sign == 1:
newd[2*c*cosh(x/2)] += m
else:
newd[-2*c*sinh(x/2)] += m
elif newd[1 - sign*S.Exp1**x] == -m:
# tanh
del newd[1 - sign*S.Exp1**x]
if sign == 1:
newd[-c/tanh(x/2)] += m
else:
newd[-c*tanh(x/2)] += m
else:
newd[1 + sign*S.Exp1**x] += m
newd[c] += m
return Mul(*[k**newd[k] for k in newd])
newexpr = bottom_up(newexpr, f)
# sin/cos and sinh/cosh ratios to tan and tanh, respectively
if newexpr.has(HyperbolicFunction):
e, f = hyper_as_trig(newexpr)
newexpr = f(TR2i(e))
if newexpr.has(TrigonometricFunction):
newexpr = TR2i(newexpr)
# can we ever generate an I where there was none previously?
if not (newexpr.has(I) and not expr.has(I)):
expr = newexpr
return expr
#-------------------- the old trigsimp routines ---------------------
def trigsimp_old(expr, *, first=True, **opts):
"""
Reduces expression by using known trig identities.
Notes
=====
deep:
- Apply trigsimp inside all objects with arguments
recursive:
- Use common subexpression elimination (cse()) and apply
trigsimp recursively (this is quite expensive if the
expression is large)
method:
- Determine the method to use. Valid choices are 'matching' (default),
'groebner', 'combined', 'fu' and 'futrig'. If 'matching', simplify the
expression recursively by pattern matching. If 'groebner', apply an
experimental groebner basis algorithm. In this case further options
are forwarded to ``trigsimp_groebner``, please refer to its docstring.
If 'combined', first run the groebner basis algorithm with small
default parameters, then run the 'matching' algorithm. 'fu' runs the
collection of trigonometric transformations described by Fu, et al.
(see the `fu` docstring) while `futrig` runs a subset of Fu-transforms
that mimic the behavior of `trigsimp`.
compare:
- show input and output from `trigsimp` and `futrig` when different,
but returns the `trigsimp` value.
Examples
========
>>> from sympy import trigsimp, sin, cos, log, cot
>>> from sympy.abc import x
>>> e = 2*sin(x)**2 + 2*cos(x)**2
>>> trigsimp(e, old=True)
2
>>> trigsimp(log(e), old=True)
log(2*sin(x)**2 + 2*cos(x)**2)
>>> trigsimp(log(e), deep=True, old=True)
log(2)
Using `method="groebner"` (or `"combined"`) can sometimes lead to a lot
more simplification:
>>> e = (-sin(x) + 1)/cos(x) + cos(x)/(-sin(x) + 1)
>>> trigsimp(e, old=True)
(1 - sin(x))/cos(x) + cos(x)/(1 - sin(x))
>>> trigsimp(e, method="groebner", old=True)
2/cos(x)
>>> trigsimp(1/cot(x)**2, compare=True, old=True)
futrig: tan(x)**2
cot(x)**(-2)
"""
old = expr
if first:
if not expr.has(*_trigs):
return expr
trigsyms = set().union(*[t.free_symbols for t in expr.atoms(*_trigs)])
if len(trigsyms) > 1:
from sympy.simplify.simplify import separatevars
d = separatevars(expr)
if d.is_Mul:
d = separatevars(d, dict=True) or d
if isinstance(d, dict):
expr = 1
for k, v in d.items():
# remove hollow factoring
was = v
v = expand_mul(v)
opts['first'] = False
vnew = trigsimp(v, **opts)
if vnew == v:
vnew = was
expr *= vnew
old = expr
else:
if d.is_Add:
for s in trigsyms:
r, e = expr.as_independent(s)
if r:
opts['first'] = False
expr = r + trigsimp(e, **opts)
if not expr.is_Add:
break
old = expr
recursive = opts.pop('recursive', False)
deep = opts.pop('deep', False)
method = opts.pop('method', 'matching')
def groebnersimp(ex, deep, **opts):
def traverse(e):
if e.is_Atom:
return e
args = [traverse(x) for x in e.args]
if e.is_Function or e.is_Pow:
args = [trigsimp_groebner(x, **opts) for x in args]
return e.func(*args)
if deep:
ex = traverse(ex)
return trigsimp_groebner(ex, **opts)
trigsimpfunc = {
'matching': (lambda x, d: _trigsimp(x, d)),
'groebner': (lambda x, d: groebnersimp(x, d, **opts)),
'combined': (lambda x, d: _trigsimp(groebnersimp(x,
d, polynomial=True, hints=[2, tan]),
d))
}[method]
if recursive:
w, g = cse(expr)
g = trigsimpfunc(g[0], deep)
for sub in reversed(w):
g = g.subs(sub[0], sub[1])
g = trigsimpfunc(g, deep)
result = g
else:
result = trigsimpfunc(expr, deep)
if opts.get('compare', False):
f = futrig(old)
if f != result:
print('\tfutrig:', f)
return result
def _dotrig(a, b):
"""Helper to tell whether ``a`` and ``b`` have the same sorts
of symbols in them -- no need to test hyperbolic patterns against
expressions that have no hyperbolics in them."""
return a.func == b.func and (
a.has(TrigonometricFunction) and b.has(TrigonometricFunction) or
a.has(HyperbolicFunction) and b.has(HyperbolicFunction))
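# Illustrative note (not part of the original module): ``_dotrig`` only
# approves pattern/expression pairs of the same outer type that share a
# trigonometric or hyperbolic flavor, e.g. (assumed behavior, read off the
# check above):
#     >>> from sympy import sin, sinh, Wild
#     >>> from sympy.abc import x, y
#     >>> a, b = Wild('a'), Wild('b')
#     >>> _dotrig(a*sin(b), y*sin(x))
#     True
#     >>> _dotrig(a*sin(b), y*sinh(x))
#     False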
_trigpat = None
def _trigpats():
global _trigpat
a, b, c = symbols('a b c', cls=Wild)
d = Wild('d', commutative=False)
# for the simplifications like sinh/cosh -> tanh:
# DO NOT REORDER THE FIRST 14 since these are assumed to be in this
# order in _match_div_rewrite.
matchers_division = (
(a*sin(b)**c/cos(b)**c, a*tan(b)**c, sin(b), cos(b)),
(a*tan(b)**c*cos(b)**c, a*sin(b)**c, sin(b), cos(b)),
(a*cot(b)**c*sin(b)**c, a*cos(b)**c, sin(b), cos(b)),
(a*tan(b)**c/sin(b)**c, a/cos(b)**c, sin(b), cos(b)),
(a*cot(b)**c/cos(b)**c, a/sin(b)**c, sin(b), cos(b)),
(a*cot(b)**c*tan(b)**c, a, sin(b), cos(b)),
(a*(cos(b) + 1)**c*(cos(b) - 1)**c,
a*(-sin(b)**2)**c, cos(b) + 1, cos(b) - 1),
(a*(sin(b) + 1)**c*(sin(b) - 1)**c,
a*(-cos(b)**2)**c, sin(b) + 1, sin(b) - 1),
(a*sinh(b)**c/cosh(b)**c, a*tanh(b)**c, S.One, S.One),
(a*tanh(b)**c*cosh(b)**c, a*sinh(b)**c, S.One, S.One),
(a*coth(b)**c*sinh(b)**c, a*cosh(b)**c, S.One, S.One),
(a*tanh(b)**c/sinh(b)**c, a/cosh(b)**c, S.One, S.One),
(a*coth(b)**c/cosh(b)**c, a/sinh(b)**c, S.One, S.One),
(a*coth(b)**c*tanh(b)**c, a, S.One, S.One),
(c*(tanh(a) + tanh(b))/(1 + tanh(a)*tanh(b)),
tanh(a + b)*c, S.One, S.One),
)
matchers_add = (
(c*sin(a)*cos(b) + c*cos(a)*sin(b) + d, sin(a + b)*c + d),
(c*cos(a)*cos(b) - c*sin(a)*sin(b) + d, cos(a + b)*c + d),
(c*sin(a)*cos(b) - c*cos(a)*sin(b) + d, sin(a - b)*c + d),
(c*cos(a)*cos(b) + c*sin(a)*sin(b) + d, cos(a - b)*c + d),
(c*sinh(a)*cosh(b) + c*sinh(b)*cosh(a) + d, sinh(a + b)*c + d),
(c*cosh(a)*cosh(b) + c*sinh(a)*sinh(b) + d, cosh(a + b)*c + d),
)
# for cos(x)**2 + sin(x)**2 -> 1
matchers_identity = (
(a*sin(b)**2, a - a*cos(b)**2),
(a*tan(b)**2, a*(1/cos(b))**2 - a),
(a*cot(b)**2, a*(1/sin(b))**2 - a),
(a*sin(b + c), a*(sin(b)*cos(c) + sin(c)*cos(b))),
(a*cos(b + c), a*(cos(b)*cos(c) - sin(b)*sin(c))),
(a*tan(b + c), a*((tan(b) + tan(c))/(1 - tan(b)*tan(c)))),
(a*sinh(b)**2, a*cosh(b)**2 - a),
(a*tanh(b)**2, a - a*(1/cosh(b))**2),
(a*coth(b)**2, a + a*(1/sinh(b))**2),
(a*sinh(b + c), a*(sinh(b)*cosh(c) + sinh(c)*cosh(b))),
(a*cosh(b + c), a*(cosh(b)*cosh(c) + sinh(b)*sinh(c))),
(a*tanh(b + c), a*((tanh(b) + tanh(c))/(1 + tanh(b)*tanh(c)))),
)
# Reduce any lingering artifacts, such as sin(x)**2 changing
# to 1-cos(x)**2 when sin(x)**2 was "simpler"
artifacts = (
(a - a*cos(b)**2 + c, a*sin(b)**2 + c, cos),
(a - a*(1/cos(b))**2 + c, -a*tan(b)**2 + c, cos),
(a - a*(1/sin(b))**2 + c, -a*cot(b)**2 + c, sin),
(a - a*cosh(b)**2 + c, -a*sinh(b)**2 + c, cosh),
(a - a*(1/cosh(b))**2 + c, a*tanh(b)**2 + c, cosh),
(a + a*(1/sinh(b))**2 + c, a*coth(b)**2 + c, sinh),
# same as above but with noncommutative prefactor
(a*d - a*d*cos(b)**2 + c, a*d*sin(b)**2 + c, cos),
(a*d - a*d*(1/cos(b))**2 + c, -a*d*tan(b)**2 + c, cos),
(a*d - a*d*(1/sin(b))**2 + c, -a*d*cot(b)**2 + c, sin),
(a*d - a*d*cosh(b)**2 + c, -a*d*sinh(b)**2 + c, cosh),
(a*d - a*d*(1/cosh(b))**2 + c, a*d*tanh(b)**2 + c, cosh),
(a*d + a*d*(1/sinh(b))**2 + c, a*d*coth(b)**2 + c, sinh),
)
_trigpat = (a, b, c, d, matchers_division, matchers_add,
matchers_identity, artifacts)
return _trigpat
def _replace_mul_fpowxgpow(expr, f, g, rexp, h, rexph):
"""Helper for _match_div_rewrite.
Replace f(b_)**c_*g(b_)**(rexp(c_)) with h(b)**rexph(c) if f(b_)
and g(b_) are both positive or if c_ is an integer.
"""
# assert expr.is_Mul and expr.is_commutative and f != g
fargs = defaultdict(int)
gargs = defaultdict(int)
args = []
for x in expr.args:
if x.is_Pow or x.func in (f, g):
b, e = x.as_base_exp()
if b.is_positive or e.is_integer:
if b.func == f:
fargs[b.args[0]] += e
continue
elif b.func == g:
gargs[b.args[0]] += e
continue
args.append(x)
common = set(fargs) & set(gargs)
hit = False
while common:
key = common.pop()
fe = fargs.pop(key)
ge = gargs.pop(key)
if fe == rexp(ge):
args.append(h(key)**rexph(fe))
hit = True
else:
fargs[key] = fe
gargs[key] = ge
if not hit:
return expr
while fargs:
key, e = fargs.popitem()
args.append(f(key)**e)
while gargs:
key, e = gargs.popitem()
args.append(g(key)**e)
return Mul(*args)
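# Illustrative note (not part of the original module): with f=sin, g=cos,
# rexp=negation, h=tan and rexph=identity (the first matcher used by
# _match_div_rewrite below), matching sin/cos powers collapse to tan:
#     >>> from sympy import sin, cos, tan
#     >>> from sympy.abc import x
#     >>> _replace_mul_fpowxgpow(sin(x)**2*cos(x)**-2, sin, cos,
#     ...                        lambda i: -i, tan, lambda i: i)
#     tan(x)**2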
_idn = lambda x: x
_midn = lambda x: -x
_one = lambda x: S.One
def _match_div_rewrite(expr, i):
"""helper for __trigsimp"""
if i == 0:
expr = _replace_mul_fpowxgpow(expr, sin, cos,
_midn, tan, _idn)
elif i == 1:
expr = _replace_mul_fpowxgpow(expr, tan, cos,
_idn, sin, _idn)
elif i == 2:
expr = _replace_mul_fpowxgpow(expr, cot, sin,
_idn, cos, _idn)
elif i == 3:
expr = _replace_mul_fpowxgpow(expr, tan, sin,
_midn, cos, _midn)
elif i == 4:
expr = _replace_mul_fpowxgpow(expr, cot, cos,
_midn, sin, _midn)
elif i == 5:
expr = _replace_mul_fpowxgpow(expr, cot, tan,
_idn, _one, _idn)
# i in (6, 7) is skipped
elif i == 8:
expr = _replace_mul_fpowxgpow(expr, sinh, cosh,
_midn, tanh, _idn)
elif i == 9:
expr = _replace_mul_fpowxgpow(expr, tanh, cosh,
_idn, sinh, _idn)
elif i == 10:
expr = _replace_mul_fpowxgpow(expr, coth, sinh,
_idn, cosh, _idn)
elif i == 11:
expr = _replace_mul_fpowxgpow(expr, tanh, sinh,
_midn, cosh, _midn)
elif i == 12:
expr = _replace_mul_fpowxgpow(expr, coth, cosh,
_midn, sinh, _midn)
elif i == 13:
expr = _replace_mul_fpowxgpow(expr, coth, tanh,
_idn, _one, _idn)
else:
return None
return expr
def _trigsimp(expr, deep=False):
# protect the cache from non-trig patterns; we only allow
# trig patterns to enter the cache
if expr.has(*_trigs):
return __trigsimp(expr, deep)
return expr
@cacheit
def __trigsimp(expr, deep=False):
"""recursive helper for trigsimp"""
from sympy.simplify.fu import TR10i
if _trigpat is None:
_trigpats()
a, b, c, d, matchers_division, matchers_add, \
matchers_identity, artifacts = _trigpat
if expr.is_Mul:
# do some simplifications like sin/cos -> tan:
if not expr.is_commutative:
com, nc = expr.args_cnc()
expr = _trigsimp(Mul._from_args(com), deep)*Mul._from_args(nc)
else:
for i, (pattern, simp, ok1, ok2) in enumerate(matchers_division):
if not _dotrig(expr, pattern):
continue
newexpr = _match_div_rewrite(expr, i)
if newexpr is not None:
if newexpr != expr:
expr = newexpr
break
else:
continue
# use SymPy matching instead
res = expr.match(pattern)
if res and res.get(c, 0):
if not res[c].is_integer:
ok = ok1.subs(res)
if not ok.is_positive:
continue
ok = ok2.subs(res)
if not ok.is_positive:
continue
# if "a" contains any of trig or hyperbolic funcs with
# argument "b" then skip the simplification
if any(w.args[0] == res[b] for w in res[a].atoms(
TrigonometricFunction, HyperbolicFunction)):
continue
# simplify and finish:
expr = simp.subs(res)
break # process below
if expr.is_Add:
args = []
for term in expr.args:
if not term.is_commutative:
com, nc = term.args_cnc()
nc = Mul._from_args(nc)
term = Mul._from_args(com)
else:
nc = S.One
term = _trigsimp(term, deep)
for pattern, result in matchers_identity:
res = term.match(pattern)
if res is not None:
term = result.subs(res)
break
args.append(term*nc)
if args != expr.args:
expr = Add(*args)
expr = min(expr, expand(expr), key=count_ops)
if expr.is_Add:
for pattern, result in matchers_add:
if not _dotrig(expr, pattern):
continue
expr = TR10i(expr)
if expr.has(HyperbolicFunction):
res = expr.match(pattern)
# if "d" contains any trig or hyperbolic funcs with
# argument "a" or "b" then skip the simplification;
# this isn't perfect -- see tests
if res is None or not (a in res and b in res) or any(
w.args[0] in (res[a], res[b]) for w in res[d].atoms(
TrigonometricFunction, HyperbolicFunction)):
continue
expr = result.subs(res)
break
# Reduce any lingering artifacts, such as sin(x)**2 changing
# to 1 - cos(x)**2 when sin(x)**2 was "simpler"
for pattern, result, ex in artifacts:
if not _dotrig(expr, pattern):
continue
# Substitute a new wild that excludes some function(s)
# to help influence a better match. This is because
# sometimes, for example, 'a' would match sec(x)**2
a_t = Wild('a', exclude=[ex])
pattern = pattern.subs(a, a_t)
result = result.subs(a, a_t)
m = expr.match(pattern)
was = None
while m and was != expr:
was = expr
if m[a_t] == 0 or \
-m[a_t] in m[c].args or m[a_t] + m[c] == 0:
break
if d in m and m[a_t]*m[d] + m[c] == 0:
break
expr = result.subs(m)
m = expr.match(pattern)
m.setdefault(c, S.Zero)
elif expr.is_Mul or expr.is_Pow or deep and expr.args:
expr = expr.func(*[_trigsimp(a, deep) for a in expr.args])
try:
if not expr.has(*_trigs):
raise TypeError
e = expr.atoms(exp)
new = expr.rewrite(exp, deep=deep)
if new == e:
raise TypeError
fnew = factor(new)
if fnew != new:
new = sorted([new, factor(new)], key=count_ops)[0]
# if all exp that were introduced disappeared then accept it
if not (new.atoms(exp) - e):
expr = new
except TypeError:
pass
return expr
#------------------- end of old trigsimp routines --------------------
def futrig(e, *, hyper=True, **kwargs):
"""Return simplified ``e`` using Fu-like transformations.
This is not the "Fu" algorithm. This is called by default
from ``trigsimp``. By default, hyperbolics subexpressions
will be simplified, but this can be disabled by setting
``hyper=False``.
Examples
========
>>> from sympy import trigsimp, tan, sinh, tanh
>>> from sympy.simplify.trigsimp import futrig
>>> from sympy.abc import x
>>> trigsimp(1/tan(x)**2)
tan(x)**(-2)
>>> futrig(sinh(x)/tanh(x))
cosh(x)
"""
from sympy.simplify.fu import hyper_as_trig
from sympy.simplify.simplify import bottom_up
e = sympify(e)
if not isinstance(e, Basic):
return e
if not e.args:
return e
old = e
e = bottom_up(e, _futrig)
if hyper and e.has(HyperbolicFunction):
e, f = hyper_as_trig(e)
e = f(bottom_up(e, _futrig))
if e != old and e.is_Mul and e.args[0].is_Rational:
# redistribute leading coeff on 2-arg Add
e = Mul(*e.as_coeff_Mul())
return e
def _futrig(e):
"""Helper for futrig."""
from sympy.simplify.fu import (
TR1, TR2, TR3, TR2i, TR10, L, TR10i,
TR8, TR6, TR15, TR16, TR111, TR5, TRmorrie, TR11, _TR11, TR14, TR22,
TR12)
from sympy.core.compatibility import _nodes
if not e.has(TrigonometricFunction):
return e
if e.is_Mul:
coeff, e = e.as_independent(TrigonometricFunction)
else:
coeff = None
Lops = lambda x: (L(x), x.count_ops(), _nodes(x), len(x.args), x.is_Add)
trigs = lambda x: x.has(TrigonometricFunction)
tree = [identity,
(
TR3, # canonical angles
TR1, # sec-csc -> cos-sin
TR12, # expand tan of sum
lambda x: _eapply(factor, x, trigs),
TR2, # tan-cot -> sin-cos
[identity, lambda x: _eapply(_mexpand, x, trigs)],
TR2i, # sin-cos ratio -> tan
lambda x: _eapply(lambda i: factor(i.normal()), x, trigs),
TR14, # factored identities
TR5, # sin-pow -> cos_pow
TR10, # sin-cos of sums -> sin-cos prod
TR11, _TR11, TR6, # reduce double angles and rewrite cos pows
lambda x: _eapply(factor, x, trigs),
TR14, # factored powers of identities
[identity, lambda x: _eapply(_mexpand, x, trigs)],
TR10i, # sin-cos products > sin-cos of sums
TRmorrie,
[identity, TR8], # sin-cos products -> sin-cos of sums
[identity, lambda x: TR2i(TR2(x))], # tan -> sin-cos -> tan
[
lambda x: _eapply(expand_mul, TR5(x), trigs),
lambda x: _eapply(
expand_mul, TR15(x), trigs)], # pos/neg powers of sin
[
lambda x: _eapply(expand_mul, TR6(x), trigs),
lambda x: _eapply(
expand_mul, TR16(x), trigs)], # pos/neg powers of cos
TR111, # tan, sin, cos to neg power -> cot, csc, sec
[identity, TR2i], # sin-cos ratio to tan
[identity, lambda x: _eapply(
expand_mul, TR22(x), trigs)], # tan-cot to sec-csc
TR1, TR2, TR2i,
[identity, lambda x: _eapply(
factor_terms, TR12(x), trigs)], # expand tan of sum
)]
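# Note on the tree layout above (illustrative, not part of the original
# code): in ``sympy.strategies.tree.greedy`` a tuple means "apply these
# rules in sequence" while a list means "try each alternative and keep the
# result that minimizes ``objective``".  A tiny sketch of that selection:
#     >>> from sympy.strategies.tree import greedy
#     >>> pick_smaller = greedy([lambda i: i + 1, lambda i: i - 1],
#     ...                       objective=lambda i: i)
#     >>> pick_smaller(5)
#     4
# So the outermost ``[identity, (...)]`` keeps the original expression
# whenever the full transformation chain does not improve the Lops measure.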
e = greedy(tree, objective=Lops)(e)
if coeff is not None:
e = coeff * e
return e
def _is_Expr(e):
"""_eapply helper to tell whether ``e`` and all its args
are Exprs."""
from sympy import Derivative
if isinstance(e, Derivative):
return _is_Expr(e.expr)
if not isinstance(e, Expr):
return False
return all(_is_Expr(i) for i in e.args)
def _eapply(func, e, cond=None):
"""Apply ``func`` to ``e`` if all args are Exprs else only
apply it to those args that *are* Exprs."""
if not isinstance(e, Expr):
return e
if _is_Expr(e) or not e.args:
return func(e)
return e.func(*[
_eapply(func, ei) if (cond is None or cond(ei)) else ei
for ei in e.args])
|
99eb4868fae8df81d96636f8d8b7e2d8e496eb20494cb78c3e7f605caba04a7e | """ Tools for doing common subexpression elimination.
"""
from sympy.core import Basic, Mul, Add, Pow, sympify, Symbol
from sympy.core.compatibility import iterable
from sympy.core.containers import Tuple, OrderedSet
from sympy.core.exprtools import factor_terms
from sympy.core.function import _coeff_isneg
from sympy.core.singleton import S
from sympy.utilities.iterables import numbered_symbols, sift, \
topological_sort, ordered
from . import cse_opts
# (preprocessor, postprocessor) pairs which are commonly useful. They should
# each take a sympy expression and return a possibly transformed expression.
# When used in the function ``cse()``, the target expressions will be transformed
# by each of the preprocessor functions in order. After the common
# subexpressions are eliminated, each resulting expression will have the
# postprocessor functions transform them in *reverse* order in order to undo the
# transformation if necessary. This allows the algorithm to operate on
# a representation of the expressions that allows for more optimization
# opportunities.
# ``None`` can be used to specify no transformation for either the preprocessor or
# postprocessor.
basic_optimizations = [(cse_opts.sub_pre, cse_opts.sub_post),
(factor_terms, None)]
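# Illustrative note (not part of the original module): ``cse`` accepts either
# the string 'basic' (selecting the pairs above) or an explicit list of
# (preprocessor, postprocessor) pairs.  A sketch of the 'basic' form -- the
# exact symbol numbering is whatever ``cse`` happens to produce:
#     >>> from sympy import cse, exp
#     >>> from sympy.abc import x, y
#     >>> cse((x + y)**2 + exp(x + y), optimizations='basic')
#     ([(x0, x + y)], [x0**2 + exp(x0)])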
# sometimes we want the output in a different format; non-trivial
# transformations can be put here for users
# ===============================================================
def reps_toposort(r):
"""Sort replacements ``r`` so (k1, v1) appears before (k2, v2)
if k1 is in v2's free symbols. This orders items in the
way that cse returns its results (hence, in order to use the
replacements in a substitution it would make sense
to reverse the order).
Examples
========
>>> from sympy.simplify.cse_main import reps_toposort
>>> from sympy.abc import x, y
>>> from sympy import Eq
>>> for l, r in reps_toposort([(x, y + 1), (y, 2)]):
... print(Eq(l, r))
...
Eq(y, 2)
Eq(x, y + 1)
"""
r = sympify(r)
E = []
for c1, (k1, v1) in enumerate(r):
for c2, (k2, v2) in enumerate(r):
if k1 in v2.free_symbols:
E.append((c1, c2))
return [r[i] for i in topological_sort((range(len(r)), E))]
def cse_separate(r, e):
"""Move expressions that are in the form (symbol, expr) out of the
expressions and sort them into the replacements using the reps_toposort.
Examples
========
>>> from sympy.simplify.cse_main import cse_separate
>>> from sympy.abc import x, y, z
>>> from sympy import cos, exp, cse, Eq, symbols
>>> x0, x1 = symbols('x:2')
>>> eq = (x + 1 + exp((x + 1)/(y + 1)) + cos(y + 1))
>>> cse([eq, Eq(x, z + 1), z - 2], postprocess=cse_separate) in [
... [[(x0, y + 1), (x, z + 1), (x1, x + 1)],
... [x1 + exp(x1/x0) + cos(x0), z - 2]],
... [[(x1, y + 1), (x, z + 1), (x0, x + 1)],
... [x0 + exp(x0/x1) + cos(x1), z - 2]]]
...
True
"""
d = sift(e, lambda w: w.is_Equality and w.lhs.is_Symbol)
r = r + [w.args for w in d[True]]
e = d[False]
return [reps_toposort(r), e]
# ====end of cse postprocess idioms===========================
def preprocess_for_cse(expr, optimizations):
""" Preprocess an expression to optimize for common subexpression
elimination.
Parameters
==========
expr : sympy expression
The target expression to optimize.
optimizations : list of (callable, callable) pairs
The (preprocessor, postprocessor) pairs.
Returns
=======
expr : sympy expression
The transformed expression.
"""
for pre, post in optimizations:
if pre is not None:
expr = pre(expr)
return expr
def postprocess_for_cse(expr, optimizations):
""" Postprocess an expression after common subexpression elimination to
return the expression to canonical sympy form.
Parameters
==========
expr : sympy expression
The target expression to transform.
optimizations : list of (callable, callable) pairs, optional
The (preprocessor, postprocessor) pairs. The postprocessors will be
applied in reversed order to undo the effects of the preprocessors
correctly.
Returns
=======
expr : sympy expression
The transformed expression.
"""
for pre, post in reversed(optimizations):
if post is not None:
expr = post(expr)
return expr
class FuncArgTracker:
"""
A class which manages a mapping from functions to arguments and an inverse
mapping from arguments to functions.
"""
def __init__(self, funcs):
# To minimize the number of symbolic comparisons, all function arguments
# get assigned a value number.
self.value_numbers = {}
self.value_number_to_value = []
# Both of these maps use integer indices for arguments / functions.
self.arg_to_funcset = []
self.func_to_argset = []
for func_i, func in enumerate(funcs):
func_argset = OrderedSet()
for func_arg in func.args:
arg_number = self.get_or_add_value_number(func_arg)
func_argset.add(arg_number)
self.arg_to_funcset[arg_number].add(func_i)
self.func_to_argset.append(func_argset)
def get_args_in_value_order(self, argset):
"""
Return the list of arguments in sorted order according to their value
numbers.
"""
return [self.value_number_to_value[argn] for argn in sorted(argset)]
def get_or_add_value_number(self, value):
"""
Return the value number for the given argument.
"""
nvalues = len(self.value_numbers)
value_number = self.value_numbers.setdefault(value, nvalues)
if value_number == nvalues:
self.value_number_to_value.append(value)
self.arg_to_funcset.append(OrderedSet())
return value_number
def stop_arg_tracking(self, func_i):
"""
Remove the function func_i from the argument to function mapping.
"""
for arg in self.func_to_argset[func_i]:
self.arg_to_funcset[arg].remove(func_i)
def get_common_arg_candidates(self, argset, min_func_i=0):
"""Return a dict whose keys are function numbers. The entries of the dict are
the number of arguments said function has in common with
``argset``. Entries have at least 2 items in common. All keys have
value at least ``min_func_i``.
"""
from collections import defaultdict
count_map = defaultdict(lambda: 0)
funcsets = [self.arg_to_funcset[arg] for arg in argset]
# As an optimization below, we handle the largest funcset separately from
# the others.
largest_funcset = max(funcsets, key=len)
for funcset in funcsets:
if largest_funcset is funcset:
continue
for func_i in funcset:
if func_i >= min_func_i:
count_map[func_i] += 1
# We pick the smaller of the two containers (count_map, largest_funcset)
# to iterate over to reduce the number of iterations needed.
(smaller_funcs_container,
larger_funcs_container) = sorted(
[largest_funcset, count_map],
key=len)
for func_i in smaller_funcs_container:
# Not already in count_map? It can't possibly be in the output, so
# skip it.
if count_map[func_i] < 1:
continue
if func_i in larger_funcs_container:
count_map[func_i] += 1
return {k: v for k, v in count_map.items() if v >= 2}
def get_subset_candidates(self, argset, restrict_to_funcset=None):
"""
Return the set of functions whose argument lists contain ``argset``,
optionally filtered to contain only functions in
``restrict_to_funcset``.
"""
iarg = iter(argset)
indices = OrderedSet(
fi for fi in self.arg_to_funcset[next(iarg)])
if restrict_to_funcset is not None:
indices &= restrict_to_funcset
for arg in iarg:
indices &= self.arg_to_funcset[arg]
return indices
def update_func_argset(self, func_i, new_argset):
"""
Update a function with a new set of arguments.
"""
new_args = OrderedSet(new_argset)
old_args = self.func_to_argset[func_i]
for deleted_arg in old_args - new_args:
self.arg_to_funcset[deleted_arg].remove(func_i)
for added_arg in new_args - old_args:
self.arg_to_funcset[added_arg].add(func_i)
self.func_to_argset[func_i].clear()
self.func_to_argset[func_i].update(new_args)
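# Illustrative note (not part of the original module): a sketch of how the
# tracker reports shared arguments between two Adds.  Function 1 (``x + y``)
# shares two value-numbered arguments with function 0 (``x + y + z``):
#     >>> from sympy import Add
#     >>> from sympy.abc import x, y, z
#     >>> tracker = FuncArgTracker([Add(x, y, z), Add(x, y)])
#     >>> tracker.get_common_arg_candidates(
#     ...     tracker.func_to_argset[0], min_func_i=1)
#     {1: 2}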
class Unevaluated:
def __init__(self, func, args):
self.func = func
self.args = args
def __str__(self):
return "Uneval<{}>({})".format(
self.func, ", ".join(str(a) for a in self.args))
def as_unevaluated_basic(self):
return self.func(*self.args, evaluate=False)
@property
def free_symbols(self):
return set().union(*[a.free_symbols for a in self.args])
__repr__ = __str__
def match_common_args(func_class, funcs, opt_subs):
"""
Recognize and extract common subexpressions of function arguments within a
set of function calls. For instance, for the following function calls::
x + z + y
sin(x + y)
this will extract a common subexpression of `x + y`::
w = x + y
w + z
sin(w)
The function we work with is assumed to be associative and commutative.
Parameters
==========
func_class: class
The function class (e.g. Add, Mul)
funcs: list of functions
A list of function calls.
opt_subs: dict
A dictionary of substitutions which this function may update.
"""
# Sort to ensure that whole-function subexpressions come before the items
# that use them.
funcs = sorted(funcs, key=lambda f: len(f.args))
arg_tracker = FuncArgTracker(funcs)
changed = OrderedSet()
for i in range(len(funcs)):
common_arg_candidates_counts = arg_tracker.get_common_arg_candidates(
arg_tracker.func_to_argset[i], min_func_i=i + 1)
# Sort the candidates in order of match size.
# This makes us try combining smaller matches first.
common_arg_candidates = OrderedSet(sorted(
common_arg_candidates_counts.keys(),
key=lambda k: (common_arg_candidates_counts[k], k)))
while common_arg_candidates:
j = common_arg_candidates.pop(last=False)
com_args = arg_tracker.func_to_argset[i].intersection(
arg_tracker.func_to_argset[j])
if len(com_args) <= 1:
# This may happen if a set of common arguments was already
# combined in a previous iteration.
continue
# For all sets, replace the common symbols by the function
# over them, to allow recursive matches.
diff_i = arg_tracker.func_to_argset[i].difference(com_args)
if diff_i:
# com_func needs to be unevaluated to allow for recursive matches.
com_func = Unevaluated(
func_class, arg_tracker.get_args_in_value_order(com_args))
com_func_number = arg_tracker.get_or_add_value_number(com_func)
arg_tracker.update_func_argset(i, diff_i | OrderedSet([com_func_number]))
changed.add(i)
else:
# Treat the whole expression as a CSE.
#
# The reason this needs to be done is somewhat subtle. Within
# tree_cse(), to_eliminate only contains expressions that are
# seen more than once. The problem is unevaluated expressions
# do not compare equal to the evaluated equivalent. So
# tree_cse() won't mark funcs[i] as a CSE if we use an
# unevaluated version.
com_func_number = arg_tracker.get_or_add_value_number(funcs[i])
diff_j = arg_tracker.func_to_argset[j].difference(com_args)
arg_tracker.update_func_argset(j, diff_j | OrderedSet([com_func_number]))
changed.add(j)
for k in arg_tracker.get_subset_candidates(
com_args, common_arg_candidates):
diff_k = arg_tracker.func_to_argset[k].difference(com_args)
arg_tracker.update_func_argset(k, diff_k | OrderedSet([com_func_number]))
changed.add(k)
if i in changed:
opt_subs[funcs[i]] = Unevaluated(func_class,
arg_tracker.get_args_in_value_order(arg_tracker.func_to_argset[i]))
arg_tracker.stop_arg_tracking(i)
def opt_cse(exprs, order='canonical'):
"""Find optimization opportunities in Adds, Muls, Pows and negative
coefficient Muls.
Parameters
==========
exprs : list of sympy expressions
The expressions to optimize.
order : string, 'none' or 'canonical'
The order by which Mul and Add arguments are processed. For large
expressions where speed is a concern, use the setting order='none'.
Returns
=======
opt_subs : dictionary of expression substitutions
The expression substitutions which can be useful to optimize CSE.
Examples
========
>>> from sympy.simplify.cse_main import opt_cse
>>> from sympy.abc import x
>>> opt_subs = opt_cse([x**-2])
>>> k, v = list(opt_subs.keys())[0], list(opt_subs.values())[0]
>>> print((k, v.as_unevaluated_basic()))
(x**(-2), 1/(x**2))
"""
from sympy.matrices.expressions import MatAdd, MatMul, MatPow
opt_subs = dict()
adds = OrderedSet()
muls = OrderedSet()
seen_subexp = set()
def _find_opts(expr):
if not isinstance(expr, (Basic, Unevaluated)):
return
if expr.is_Atom or expr.is_Order:
return
if iterable(expr):
list(map(_find_opts, expr))
return
if expr in seen_subexp:
return expr
seen_subexp.add(expr)
list(map(_find_opts, expr.args))
if _coeff_isneg(expr):
neg_expr = -expr
if not neg_expr.is_Atom:
opt_subs[expr] = Unevaluated(Mul, (S.NegativeOne, neg_expr))
seen_subexp.add(neg_expr)
expr = neg_expr
if isinstance(expr, (Mul, MatMul)):
muls.add(expr)
elif isinstance(expr, (Add, MatAdd)):
adds.add(expr)
elif isinstance(expr, (Pow, MatPow)):
base, exp = expr.base, expr.exp
if _coeff_isneg(exp):
opt_subs[expr] = Unevaluated(Pow, (Pow(base, -exp), -1))
for e in exprs:
if isinstance(e, (Basic, Unevaluated)):
_find_opts(e)
# split muls into commutative
commutative_muls = OrderedSet()
for m in muls:
c, nc = m.args_cnc(cset=False)
if c:
c_mul = m.func(*c)
if nc:
if c_mul == 1:
new_obj = m.func(*nc)
else:
new_obj = m.func(c_mul, m.func(*nc), evaluate=False)
opt_subs[m] = new_obj
if len(c) > 1:
commutative_muls.add(c_mul)
match_common_args(Add, adds, opt_subs)
match_common_args(Mul, commutative_muls, opt_subs)
return opt_subs
def tree_cse(exprs, symbols, opt_subs=None, order='canonical', ignore=()):
"""Perform raw CSE on expression tree, taking opt_subs into account.
Parameters
==========
exprs : list of sympy expressions
The expressions to reduce.
symbols : infinite iterator yielding unique Symbols
The symbols used to label the common subexpressions which are pulled
out.
opt_subs : dictionary of expression substitutions
The expressions to be substituted before any CSE action is performed.
order : string, 'none' or 'canonical'
The order by which Mul and Add arguments are processed. For large
expressions where speed is a concern, use the setting order='none'.
ignore : iterable of Symbols
Substitutions containing any Symbol from ``ignore`` will be ignored.
"""
from sympy.matrices.expressions import MatrixExpr, MatrixSymbol, MatMul, MatAdd
from sympy.polys.rootoftools import RootOf
if opt_subs is None:
opt_subs = dict()
## Find repeated sub-expressions
to_eliminate = set()
seen_subexp = set()
excluded_symbols = set()
def _find_repeated(expr):
if not isinstance(expr, (Basic, Unevaluated)):
return
if isinstance(expr, RootOf):
return
if isinstance(expr, Basic) and (expr.is_Atom or expr.is_Order):
if expr.is_Symbol:
excluded_symbols.add(expr)
return
if iterable(expr):
args = expr
else:
if expr in seen_subexp:
for ign in ignore:
if ign in expr.free_symbols:
break
else:
to_eliminate.add(expr)
return
seen_subexp.add(expr)
if expr in opt_subs:
expr = opt_subs[expr]
args = expr.args
list(map(_find_repeated, args))
for e in exprs:
if isinstance(e, Basic):
_find_repeated(e)
## Rebuild tree
# Remove symbols from the generator that conflict with names in the expressions.
symbols = (symbol for symbol in symbols if symbol not in excluded_symbols)
replacements = []
subs = dict()
def _rebuild(expr):
if not isinstance(expr, (Basic, Unevaluated)):
return expr
if not expr.args:
return expr
if iterable(expr):
new_args = [_rebuild(arg) for arg in expr]
return expr.func(*new_args)
if expr in subs:
return subs[expr]
orig_expr = expr
if expr in opt_subs:
expr = opt_subs[expr]
# If enabled, sort the arguments of Muls and Adds so that the
# replacement order is independent of hashes
if order != 'none':
if isinstance(expr, (Mul, MatMul)):
c, nc = expr.args_cnc()
if c == [1]:
args = nc
else:
args = list(ordered(c)) + nc
elif isinstance(expr, (Add, MatAdd)):
args = list(ordered(expr.args))
else:
args = expr.args
else:
args = expr.args
new_args = list(map(_rebuild, args))
if isinstance(expr, Unevaluated) or new_args != args:
new_expr = expr.func(*new_args)
else:
new_expr = expr
if orig_expr in to_eliminate:
try:
sym = next(symbols)
except StopIteration:
raise ValueError("Symbols iterator ran out of symbols.")
if isinstance(orig_expr, MatrixExpr):
sym = MatrixSymbol(sym.name, orig_expr.rows,
orig_expr.cols)
subs[orig_expr] = sym
replacements.append((sym, new_expr))
return sym
else:
return new_expr
reduced_exprs = []
for e in exprs:
if isinstance(e, Basic):
reduced_e = _rebuild(e)
else:
reduced_e = e
reduced_exprs.append(reduced_e)
return replacements, reduced_exprs
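# Illustrative note (not part of the original module): ``tree_cse`` can be
# driven directly with a symbol stream when the ``opt_cse`` pass is not
# wanted; a minimal sketch:
#     >>> from sympy.abc import x, y
#     >>> from sympy.utilities.iterables import numbered_symbols
#     >>> tree_cse([(x + y)**2, x + y], numbered_symbols())
#     ([(x0, x + y)], [x0**2, x0])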
def cse(exprs, symbols=None, optimizations=None, postprocess=None,
order='canonical', ignore=()):
""" Perform common subexpression elimination on an expression.
Parameters
==========
exprs : list of sympy expressions, or a single sympy expression
The expressions to reduce.
symbols : infinite iterator yielding unique Symbols
The symbols used to label the common subexpressions which are pulled
out. The ``numbered_symbols`` generator is useful. The default is a
stream of symbols of the form "x0", "x1", etc. This must be an
infinite iterator.
optimizations : list of (callable, callable) pairs
The (preprocessor, postprocessor) pairs of external optimization
functions. Optionally 'basic' can be passed for a set of predefined
basic optimizations. Such 'basic' optimizations were used by default
in old implementation, however they can be really slow on larger
expressions. Now, no pre or post optimizations are made by default.
postprocess : a function which accepts the two return values of cse and
returns the desired form of output from cse, e.g. if you want the
replacements reversed the function might be the following lambda:
lambda r, e: (list(reversed(r)), e)
order : string, 'none' or 'canonical'
The order by which Mul and Add arguments are processed. If set to
'canonical', arguments will be canonically ordered. If set to 'none',
ordering will be faster but dependent on expressions hashes, thus
machine dependent and variable. For large expressions where speed is a
concern, use the setting order='none'.
ignore : iterable of Symbols
Substitutions containing any Symbol from ``ignore`` will be ignored.
Returns
=======
replacements : list of (Symbol, expression) pairs
All of the common subexpressions that were replaced. Subexpressions
earlier in this list might show up in subexpressions later in this
list.
reduced_exprs : list of sympy expressions
The reduced expressions with all of the replacements above.
Examples
========
>>> from sympy import cse, SparseMatrix
>>> from sympy.abc import x, y, z, w
>>> cse(((w + x + y + z)*(w + y + z))/(w + x)**3)
([(x0, y + z), (x1, w + x)], [(w + x0)*(x0 + x1)/x1**3])
Note that currently, y + z will not get substituted if -y - z is used.
>>> cse(((w + x + y + z)*(w - y - z))/(w + x)**3)
([(x0, w + x)], [(w - y - z)*(x0 + y + z)/x0**3])
List of expressions with recursive substitutions:
>>> m = SparseMatrix([x + y, x + y + z])
>>> cse([(x+y)**2, x + y + z, y + z, x + z + y, m])
([(x0, x + y), (x1, x0 + z)], [x0**2, x1, y + z, x1, Matrix([
[x0],
[x1]])])
Note: the type and mutability of input matrices is retained.
>>> isinstance(_[1][-1], SparseMatrix)
True
The user may disallow substitutions containing certain symbols:
>>> cse([y**2*(x + 1), 3*y**2*(x + 1)], ignore=(y,))
([(x0, x + 1)], [x0*y**2, 3*x0*y**2])
"""
from sympy.matrices import (MatrixBase, Matrix, ImmutableMatrix,
SparseMatrix, ImmutableSparseMatrix)
if isinstance(exprs, (int, float)):
exprs = sympify(exprs)
# Handle the case if just one expression was passed.
if isinstance(exprs, (Basic, MatrixBase)):
exprs = [exprs]
copy = exprs
temp = []
for e in exprs:
if isinstance(e, (Matrix, ImmutableMatrix)):
temp.append(Tuple(*e._mat))
elif isinstance(e, (SparseMatrix, ImmutableSparseMatrix)):
temp.append(Tuple(*e._smat.items()))
else:
temp.append(e)
exprs = temp
del temp
if optimizations is None:
optimizations = list()
elif optimizations == 'basic':
optimizations = basic_optimizations
# Preprocess the expressions to give us better optimization opportunities.
reduced_exprs = [preprocess_for_cse(e, optimizations) for e in exprs]
if symbols is None:
symbols = numbered_symbols(cls=Symbol)
else:
# In case we get passed an iterable with an __iter__ method instead of
# an actual iterator.
symbols = iter(symbols)
# Find other optimization opportunities.
opt_subs = opt_cse(reduced_exprs, order)
# Main CSE algorithm.
replacements, reduced_exprs = tree_cse(reduced_exprs, symbols, opt_subs,
order, ignore)
# Postprocess the expressions to return the expressions to canonical form.
exprs = copy
for i, (sym, subtree) in enumerate(replacements):
subtree = postprocess_for_cse(subtree, optimizations)
replacements[i] = (sym, subtree)
reduced_exprs = [postprocess_for_cse(e, optimizations)
for e in reduced_exprs]
# Get the matrices back
for i, e in enumerate(exprs):
if isinstance(e, (Matrix, ImmutableMatrix)):
reduced_exprs[i] = Matrix(e.rows, e.cols, reduced_exprs[i])
if isinstance(e, ImmutableMatrix):
reduced_exprs[i] = reduced_exprs[i].as_immutable()
elif isinstance(e, (SparseMatrix, ImmutableSparseMatrix)):
m = SparseMatrix(e.rows, e.cols, {})
for k, v in reduced_exprs[i]:
m[k] = v
if isinstance(e, ImmutableSparseMatrix):
m = m.as_immutable()
reduced_exprs[i] = m
if postprocess is None:
return replacements, reduced_exprs
return postprocess(replacements, reduced_exprs)
|
daa9e086514933f6bdcd1d99ff39fd392d2ebd56fbe575774ab4e0421f77ffbe | from collections import defaultdict
from sympy.core.add import Add
from sympy.core.basic import S
from sympy.core.compatibility import ordered
from sympy.core.expr import Expr
from sympy.core.exprtools import Factors, gcd_terms, factor_terms
from sympy.core.function import expand_mul
from sympy.core.mul import Mul
from sympy.core.numbers import pi, I
from sympy.core.power import Pow
from sympy.core.symbol import Dummy
from sympy.core.sympify import sympify
from sympy.functions.combinatorial.factorials import binomial
from sympy.functions.elementary.hyperbolic import (
cosh, sinh, tanh, coth, sech, csch, HyperbolicFunction)
from sympy.functions.elementary.trigonometric import (
cos, sin, tan, cot, sec, csc, sqrt, TrigonometricFunction)
from sympy.ntheory.factor_ import perfect_power
from sympy.polys.polytools import factor
from sympy.simplify.simplify import bottom_up
from sympy.strategies.tree import greedy
from sympy.strategies.core import identity, debug
from sympy import SYMPY_DEBUG
# ================== Fu-like tools ===========================
def TR0(rv):
"""Simplification of rational polynomials, trying to simplify
the expression, e.g. combine things like 3*x + 2*x, etc....
"""
# although it would be nice to use cancel, it doesn't work
# with noncommutatives
return rv.normal().factor().expand()
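# Illustrative note (not part of the original module): TR0 just normalizes,
# factors and expands, so obviously-cancelling polynomial pieces collapse:
#     >>> from sympy.abc import x
#     >>> TR0((x + 1)*(x - 1) - x**2)
#     -1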
def TR1(rv):
"""Replace sec, csc with 1/cos, 1/sin
Examples
========
>>> from sympy.simplify.fu import TR1, sec, csc
>>> from sympy.abc import x
>>> TR1(2*csc(x) + sec(x))
1/cos(x) + 2/sin(x)
"""
def f(rv):
if isinstance(rv, sec):
a = rv.args[0]
return S.One/cos(a)
elif isinstance(rv, csc):
a = rv.args[0]
return S.One/sin(a)
return rv
return bottom_up(rv, f)
def TR2(rv):
"""Replace tan and cot with sin/cos and cos/sin
Examples
========
>>> from sympy.simplify.fu import TR2
>>> from sympy.abc import x
>>> from sympy import tan, cot, sin, cos
>>> TR2(tan(x))
sin(x)/cos(x)
>>> TR2(cot(x))
cos(x)/sin(x)
>>> TR2(tan(tan(x) - sin(x)/cos(x)))
0
"""
def f(rv):
if isinstance(rv, tan):
a = rv.args[0]
return sin(a)/cos(a)
elif isinstance(rv, cot):
a = rv.args[0]
return cos(a)/sin(a)
return rv
return bottom_up(rv, f)
def TR2i(rv, half=False):
"""Converts ratios involving sin and cos as follows::
sin(x)/cos(x) -> tan(x)
sin(x)/(cos(x) + 1) -> tan(x/2) if half=True
Examples
========
>>> from sympy.simplify.fu import TR2i
>>> from sympy.abc import x, a
>>> from sympy import sin, cos
>>> TR2i(sin(x)/cos(x))
tan(x)
Powers of the numerator and denominator are also recognized
>>> TR2i(sin(x)**2/(cos(x) + 1)**2, half=True)
tan(x/2)**2
The transformation does not take place unless assumptions allow
(i.e. the base must be positive or the exponent must be an integer
for both numerator and denominator)
>>> TR2i(sin(x)**a/(cos(x) + 1)**a)
(cos(x) + 1)**(-a)*sin(x)**a
"""
def f(rv):
if not rv.is_Mul:
return rv
n, d = rv.as_numer_denom()
if n.is_Atom or d.is_Atom:
return rv
def ok(k, e):
# initial filtering of factors
return (
(e.is_integer or k.is_positive) and (
k.func in (sin, cos) or (half and
k.is_Add and
len(k.args) >= 2 and
any(any(isinstance(ai, cos) or ai.is_Pow and ai.base is cos
for ai in Mul.make_args(a)) for a in k.args))))
n = n.as_powers_dict()
ndone = [(k, n.pop(k)) for k in list(n.keys()) if not ok(k, n[k])]
if not n:
return rv
d = d.as_powers_dict()
ddone = [(k, d.pop(k)) for k in list(d.keys()) if not ok(k, d[k])]
if not d:
return rv
# factoring if necessary
def factorize(d, ddone):
newk = []
for k in d:
if k.is_Add and len(k.args) > 1:
knew = factor(k) if half else factor_terms(k)
if knew != k:
newk.append((k, knew))
if newk:
for i, (k, knew) in enumerate(newk):
del d[k]
newk[i] = knew
newk = Mul(*newk).as_powers_dict()
for k in newk:
v = d[k] + newk[k]
if ok(k, v):
d[k] = v
else:
ddone.append((k, v))
del newk
factorize(n, ndone)
factorize(d, ddone)
# joining
t = []
for k in n:
if isinstance(k, sin):
a = cos(k.args[0], evaluate=False)
if a in d and d[a] == n[k]:
t.append(tan(k.args[0])**n[k])
n[k] = d[a] = None
elif half:
a1 = 1 + a
if a1 in d and d[a1] == n[k]:
t.append((tan(k.args[0]/2))**n[k])
n[k] = d[a1] = None
elif isinstance(k, cos):
a = sin(k.args[0], evaluate=False)
if a in d and d[a] == n[k]:
t.append(tan(k.args[0])**-n[k])
n[k] = d[a] = None
elif half and k.is_Add and k.args[0] is S.One and \
isinstance(k.args[1], cos):
a = sin(k.args[1].args[0], evaluate=False)
if a in d and d[a] == n[k] and (d[a].is_integer or \
a.is_positive):
t.append(tan(a.args[0]/2)**-n[k])
n[k] = d[a] = None
if t:
rv = Mul(*(t + [b**e for b, e in n.items() if e]))/\
Mul(*[b**e for b, e in d.items() if e])
rv *= Mul(*[b**e for b, e in ndone])/Mul(*[b**e for b, e in ddone])
return rv
return bottom_up(rv, f)
def TR3(rv):
"""Induced formula: example sin(-a) = -sin(a)
Examples
========
>>> from sympy.simplify.fu import TR3
>>> from sympy.abc import x, y
>>> from sympy import pi
>>> from sympy import cos
>>> TR3(cos(y - x*(y - x)))
cos(x*(x - y) + y)
>>> cos(pi/2 + x)
-sin(x)
>>> cos(30*pi/2 + x)
-cos(x)
"""
from sympy.simplify.simplify import signsimp
# Negative argument (already automatic for funcs like sin(-x) -> -sin(x)
# but more complicated expressions can use it, too). Also, trig angles
# between pi/4 and pi/2 are not reduced to an angle between 0 and pi/4.
# The following are automatically handled:
# Argument of type: pi/2 +/- angle
# Argument of type: pi +/- angle
# Argument of type : 2k*pi +/- angle
def f(rv):
if not isinstance(rv, TrigonometricFunction):
return rv
rv = rv.func(signsimp(rv.args[0]))
if not isinstance(rv, TrigonometricFunction):
return rv
if (rv.args[0] - S.Pi/4).is_positive is (S.Pi/2 - rv.args[0]).is_positive is True:
fmap = {cos: sin, sin: cos, tan: cot, cot: tan, sec: csc, csc: sec}
rv = fmap[rv.func](S.Pi/2 - rv.args[0])
return rv
return bottom_up(rv, f)
def TR4(rv):
"""Identify values of special angles.
a =      0      pi/6       pi/4       pi/3      pi/2
----------------------------------------------------
cos(a)   1   sqrt(3)/2   sqrt(2)/2     1/2        0
sin(a)   0      1/2      sqrt(2)/2   sqrt(3)/2    1
tan(a)   0   sqrt(3)/3       1        sqrt(3)    --
Examples
========
>>> from sympy import pi
>>> from sympy import cos, sin, tan, cot
>>> for s in (0, pi/6, pi/4, pi/3, pi/2):
... print('%s %s %s %s' % (cos(s), sin(s), tan(s), cot(s)))
...
1 0 0 zoo
sqrt(3)/2 1/2 sqrt(3)/3 sqrt(3)
sqrt(2)/2 sqrt(2)/2 1 1
1/2 sqrt(3)/2 sqrt(3) sqrt(3)/3
0 1 zoo 0
"""
# special values at 0, pi/6, pi/4, pi/3, pi/2 already handled
return rv
def _TR56(rv, f, g, h, max, pow):
"""Helper for TR5 and TR6 to replace f**2 with h(g**2)
Options
=======
max : controls size of exponent that can appear on f
e.g. if max=4 then f**4 will be changed to h(g**2)**2.
pow : controls whether the exponent must be a perfect power of 2
e.g. if pow=True (and max >= 6) then f**6 will not be changed
but f**8 will be changed to h(g**2)**4
>>> from sympy.simplify.fu import _TR56 as T
>>> from sympy.abc import x
>>> from sympy import sin, cos
>>> h = lambda x: 1 - x
>>> T(sin(x)**3, sin, cos, h, 4, False)
sin(x)**3
>>> T(sin(x)**6, sin, cos, h, 6, False)
(1 - cos(x)**2)**3
>>> T(sin(x)**6, sin, cos, h, 6, True)
sin(x)**6
>>> T(sin(x)**8, sin, cos, h, 10, True)
(1 - cos(x)**2)**4
"""
def _f(rv):
# I'm not sure if this transformation should target all even powers
# or only those expressible as powers of 2. Also, should it only
# make the changes in powers that appear in sums -- making an isolated
# change is not going to allow a simplification as far as I can tell.
if not (rv.is_Pow and rv.base.func == f):
return rv
if not rv.exp.is_real:
return rv
if (rv.exp < 0) == True:
return rv
if (rv.exp > max) == True:
return rv
if rv.exp == 2:
return h(g(rv.base.args[0])**2)
else:
if rv.exp == 4:
e = 2
elif not pow:
if rv.exp % 2:
return rv
e = rv.exp//2
else:
p = perfect_power(rv.exp)
if not p:
return rv
e = rv.exp//2
return h(g(rv.base.args[0])**2)**e
return bottom_up(rv, _f)
def TR5(rv, max=4, pow=False):
"""Replacement of sin**2 with 1 - cos(x)**2.
See _TR56 docstring for advanced use of ``max`` and ``pow``.
Examples
========
>>> from sympy.simplify.fu import TR5
>>> from sympy.abc import x
>>> from sympy import sin
>>> TR5(sin(x)**2)
1 - cos(x)**2
>>> TR5(sin(x)**-2) # unchanged
sin(x)**(-2)
>>> TR5(sin(x)**4)
(1 - cos(x)**2)**2
"""
return _TR56(rv, sin, cos, lambda x: 1 - x, max=max, pow=pow)
def TR6(rv, max=4, pow=False):
"""Replacement of cos**2 with 1 - sin(x)**2.
See _TR56 docstring for advanced use of ``max`` and ``pow``.
Examples
========
>>> from sympy.simplify.fu import TR6
>>> from sympy.abc import x
>>> from sympy import cos
>>> TR6(cos(x)**2)
1 - sin(x)**2
>>> TR6(cos(x)**-2) #unchanged
cos(x)**(-2)
>>> TR6(cos(x)**4)
(1 - sin(x)**2)**2
"""
return _TR56(rv, cos, sin, lambda x: 1 - x, max=max, pow=pow)
def TR7(rv):
"""Lowering the degree of cos(x)**2.
Examples
========
>>> from sympy.simplify.fu import TR7
>>> from sympy.abc import x
>>> from sympy import cos
>>> TR7(cos(x)**2)
cos(2*x)/2 + 1/2
>>> TR7(cos(x)**2 + 1)
cos(2*x)/2 + 3/2
"""
def f(rv):
if not (rv.is_Pow and rv.base.func == cos and rv.exp == 2):
return rv
return (1 + cos(2*rv.base.args[0]))/2
return bottom_up(rv, f)
def TR8(rv, first=True):
"""Converting products of ``cos`` and/or ``sin`` to a sum or
difference of ``cos`` and/or ``sin`` terms.
Examples
========
>>> from sympy.simplify.fu import TR8
>>> from sympy import cos, sin
>>> TR8(cos(2)*cos(3))
cos(5)/2 + cos(1)/2
>>> TR8(cos(2)*sin(3))
sin(5)/2 + sin(1)/2
>>> TR8(sin(2)*sin(3))
-cos(5)/2 + cos(1)/2
"""
def f(rv):
if not (
rv.is_Mul or
rv.is_Pow and
rv.base.func in (cos, sin) and
(rv.exp.is_integer or rv.base.is_positive)):
return rv
if first:
n, d = [expand_mul(i) for i in rv.as_numer_denom()]
newn = TR8(n, first=False)
newd = TR8(d, first=False)
if newn != n or newd != d:
rv = gcd_terms(newn/newd)
if rv.is_Mul and rv.args[0].is_Rational and \
len(rv.args) == 2 and rv.args[1].is_Add:
rv = Mul(*rv.as_coeff_Mul())
return rv
args = {cos: [], sin: [], None: []}
for a in ordered(Mul.make_args(rv)):
if a.func in (cos, sin):
args[a.func].append(a.args[0])
elif (a.is_Pow and a.exp.is_Integer and a.exp > 0 and \
a.base.func in (cos, sin)):
# XXX this is ok but pathological expressions could be handled
# more efficiently as in TRmorrie
args[a.base.func].extend([a.base.args[0]]*a.exp)
else:
args[None].append(a)
c = args[cos]
s = args[sin]
if not (c and s or len(c) > 1 or len(s) > 1):
return rv
args = args[None]
n = min(len(c), len(s))
for i in range(n):
a1 = s.pop()
a2 = c.pop()
args.append((sin(a1 + a2) + sin(a1 - a2))/2)
while len(c) > 1:
a1 = c.pop()
a2 = c.pop()
args.append((cos(a1 + a2) + cos(a1 - a2))/2)
if c:
args.append(cos(c.pop()))
while len(s) > 1:
a1 = s.pop()
a2 = s.pop()
args.append((-cos(a1 + a2) + cos(a1 - a2))/2)
if s:
args.append(sin(s.pop()))
return TR8(expand_mul(Mul(*args)))
return bottom_up(rv, f)
def TR9(rv):
"""Sum of ``cos`` or ``sin`` terms as a product of ``cos`` or ``sin``.
Examples
========
>>> from sympy.simplify.fu import TR9
>>> from sympy import cos, sin
>>> TR9(cos(1) + cos(2))
2*cos(1/2)*cos(3/2)
>>> TR9(cos(1) + 2*sin(1) + 2*sin(2))
cos(1) + 4*sin(3/2)*cos(1/2)
If no change is made by TR9, no re-arrangement of the
expression will be made. For example, though factoring
of the common term is attempted, if the factored expression
wasn't changed, the original expression will be returned:
>>> TR9(cos(3) + cos(3)*cos(2))
cos(3) + cos(2)*cos(3)
"""
def f(rv):
if not rv.is_Add:
return rv
def do(rv, first=True):
# cos(a)+/-cos(b) can be combined into a product of cosines and
# sin(a)+/-sin(b) can be combined into a product of cosine and
# sine.
#
# If there are more than two args, the pairs which "work" will
# have a gcd extractable and the remaining two terms will have
# the above structure -- all pairs must be checked to find the
# ones that work. args that don't have a common set of symbols
# are skipped since this doesn't lead to a simpler formula and
# also has the arbitrariness of combining, for example, the x
# and y term instead of the y and z term in something like
# cos(x) + cos(y) + cos(z).
if not rv.is_Add:
return rv
args = list(ordered(rv.args))
if len(args) != 2:
hit = False
for i in range(len(args)):
ai = args[i]
if ai is None:
continue
for j in range(i + 1, len(args)):
aj = args[j]
if aj is None:
continue
was = ai + aj
new = do(was)
if new != was:
args[i] = new # update in place
args[j] = None
hit = True
break # go to next i
if hit:
rv = Add(*[_f for _f in args if _f])
if rv.is_Add:
rv = do(rv)
return rv
# two-arg Add
split = trig_split(*args)
if not split:
return rv
gcd, n1, n2, a, b, iscos = split
# application of rule if possible
if iscos:
if n1 == n2:
return gcd*n1*2*cos((a + b)/2)*cos((a - b)/2)
if n1 < 0:
a, b = b, a
return -2*gcd*sin((a + b)/2)*sin((a - b)/2)
else:
if n1 == n2:
return gcd*n1*2*sin((a + b)/2)*cos((a - b)/2)
if n1 < 0:
a, b = b, a
return 2*gcd*cos((a + b)/2)*sin((a - b)/2)
return process_common_addends(rv, do) # DON'T sift by free symbols
return bottom_up(rv, f)
def TR10(rv, first=True):
"""Separate sums in ``cos`` and ``sin``.
Examples
========
>>> from sympy.simplify.fu import TR10
>>> from sympy.abc import a, b, c
>>> from sympy import cos, sin
>>> TR10(cos(a + b))
-sin(a)*sin(b) + cos(a)*cos(b)
>>> TR10(sin(a + b))
sin(a)*cos(b) + sin(b)*cos(a)
>>> TR10(sin(a + b + c))
(-sin(a)*sin(b) + cos(a)*cos(b))*sin(c) + \
(sin(a)*cos(b) + sin(b)*cos(a))*cos(c)
"""
def f(rv):
if not rv.func in (cos, sin):
return rv
f = rv.func
arg = rv.args[0]
if arg.is_Add:
if first:
args = list(ordered(arg.args))
else:
args = list(arg.args)
a = args.pop()
b = Add._from_args(args)
if b.is_Add:
if f == sin:
return sin(a)*TR10(cos(b), first=False) + \
cos(a)*TR10(sin(b), first=False)
else:
return cos(a)*TR10(cos(b), first=False) - \
sin(a)*TR10(sin(b), first=False)
else:
if f == sin:
return sin(a)*cos(b) + cos(a)*sin(b)
else:
return cos(a)*cos(b) - sin(a)*sin(b)
return rv
return bottom_up(rv, f)
def TR10i(rv):
"""Sum of products to function of sum.
Examples
========
>>> from sympy.simplify.fu import TR10i
>>> from sympy import cos, sin, sqrt
>>> from sympy.abc import x
>>> TR10i(cos(1)*cos(3) + sin(1)*sin(3))
cos(2)
>>> TR10i(cos(1)*sin(3) + sin(1)*cos(3) + cos(3))
cos(3) + sin(4)
>>> TR10i(sqrt(2)*cos(x)*x + sqrt(6)*sin(x)*x)
2*sqrt(2)*x*sin(x + pi/6)
"""
global _ROOT2, _ROOT3, _invROOT3
if _ROOT2 is None:
_roots()
def f(rv):
if not rv.is_Add:
return rv
def do(rv, first=True):
# args which can be expressed as A*(cos(a)*cos(b)+/-sin(a)*sin(b))
# or B*(cos(a)*sin(b)+/-cos(b)*sin(a)) can be combined into
# A*f(a+/-b) where f is either sin or cos.
#
# If there are more than two args, the pairs which "work" will have
# a gcd extractable and the remaining two terms will have the above
# structure -- all pairs must be checked to find the ones that
# work.
if not rv.is_Add:
return rv
args = list(ordered(rv.args))
if len(args) != 2:
hit = False
for i in range(len(args)):
ai = args[i]
if ai is None:
continue
for j in range(i + 1, len(args)):
aj = args[j]
if aj is None:
continue
was = ai + aj
new = do(was)
if new != was:
args[i] = new # update in place
args[j] = None
hit = True
break # go to next i
if hit:
rv = Add(*[_f for _f in args if _f])
if rv.is_Add:
rv = do(rv)
return rv
# two-arg Add
split = trig_split(*args, two=True)
if not split:
return rv
gcd, n1, n2, a, b, same = split
# identify and get c1 to be cos then apply rule if possible
if same: # coscos, sinsin
gcd = n1*gcd
if n1 == n2:
return gcd*cos(a - b)
return gcd*cos(a + b)
else: #cossin, cossin
gcd = n1*gcd
if n1 == n2:
return gcd*sin(a + b)
return gcd*sin(b - a)
rv = process_common_addends(
rv, do, lambda x: tuple(ordered(x.free_symbols)))
# need to check for inducible pairs in ratio of sqrt(3):1 that
# appeared in different lists when sorting by coefficient
while rv.is_Add:
byrad = defaultdict(list)
for a in rv.args:
hit = 0
if a.is_Mul:
for ai in a.args:
if ai.is_Pow and ai.exp is S.Half and \
ai.base.is_Integer:
byrad[ai].append(a)
hit = 1
break
if not hit:
byrad[S.One].append(a)
# no need to check all pairs -- just check for the ones
# that have the right ratio
args = []
for a in byrad:
for b in [_ROOT3*a, _invROOT3]:
if b in byrad:
for i in range(len(byrad[a])):
if byrad[a][i] is None:
continue
for j in range(len(byrad[b])):
if byrad[b][j] is None:
continue
was = Add(byrad[a][i] + byrad[b][j])
new = do(was)
if new != was:
args.append(new)
byrad[a][i] = None
byrad[b][j] = None
break
if args:
rv = Add(*(args + [Add(*[_f for _f in v if _f])
for v in byrad.values()]))
else:
rv = do(rv) # final pass to resolve any new inducible pairs
break
return rv
return bottom_up(rv, f)
def TR11(rv, base=None):
"""Function of double angle to product. The ``base`` argument can be used
to indicate the un-doubled argument, e.g. if 3*pi/7 is the base
then cosine and sine functions with argument 6*pi/7 will be replaced.
Examples
========
>>> from sympy.simplify.fu import TR11
>>> from sympy import cos, sin, pi
>>> from sympy.abc import x
>>> TR11(sin(2*x))
2*sin(x)*cos(x)
>>> TR11(cos(2*x))
-sin(x)**2 + cos(x)**2
>>> TR11(sin(4*x))
4*(-sin(x)**2 + cos(x)**2)*sin(x)*cos(x)
>>> TR11(sin(4*x/3))
4*(-sin(x/3)**2 + cos(x/3)**2)*sin(x/3)*cos(x/3)
If the arguments are simply integers, no change is made
unless a base is provided:
>>> TR11(cos(2))
cos(2)
>>> TR11(cos(4), 2)
-sin(2)**2 + cos(2)**2
There is a subtle issue here in that autosimplification will convert
some higher angles to lower angles
>>> cos(6*pi/7) + cos(3*pi/7)
-cos(pi/7) + cos(3*pi/7)
The 6*pi/7 angle is now pi/7 but can be targeted with TR11 by supplying
the 3*pi/7 base:
>>> TR11(_, 3*pi/7)
-sin(3*pi/7)**2 + cos(3*pi/7)**2 + cos(3*pi/7)
"""
def f(rv):
if not rv.func in (cos, sin):
return rv
if base:
f = rv.func
t = f(base*2)
co = S.One
if t.is_Mul:
co, t = t.as_coeff_Mul()
if not t.func in (cos, sin):
return rv
if rv.args[0] == t.args[0]:
c = cos(base)
s = sin(base)
if f is cos:
return (c**2 - s**2)/co
else:
return 2*c*s/co
return rv
elif not rv.args[0].is_Number:
# make a change if the leading coefficient's numerator is
# divisible by 2
c, m = rv.args[0].as_coeff_Mul(rational=True)
if c.p % 2 == 0:
arg = c.p//2*m/c.q
c = TR11(cos(arg))
s = TR11(sin(arg))
if rv.func == sin:
rv = 2*s*c
else:
rv = c**2 - s**2
return rv
return bottom_up(rv, f)
def _TR11(rv):
"""
Helper for TR11 to find half-arguments for sin in factors of
num/den that appear in cos or sin factors in the den/num.
Examples
========
>>> from sympy.simplify.fu import TR11, _TR11
>>> from sympy import cos, sin
>>> from sympy.abc import x
>>> TR11(sin(x/3)/(cos(x/6)))
sin(x/3)/cos(x/6)
>>> _TR11(sin(x/3)/(cos(x/6)))
2*sin(x/6)
>>> TR11(sin(x/6)/(sin(x/3)))
sin(x/6)/sin(x/3)
>>> _TR11(sin(x/6)/(sin(x/3)))
1/(2*cos(x/6))
"""
def f(rv):
if not isinstance(rv, Expr):
return rv
def sincos_args(flat):
# find arguments of sin and cos that appear as bases
# in the factors of flat and have positive Integer exponents
args = defaultdict(set)
for fi in Mul.make_args(flat):
b, e = fi.as_base_exp()
if e.is_Integer and e > 0:
if b.func in (cos, sin):
args[b.func].add(b.args[0])
return args
num_args, den_args = map(sincos_args, rv.as_numer_denom())
def handle_match(rv, num_args, den_args):
# for arg in sin args of num_args, look for arg/2
# in den_args and pass this half-angle to TR11
# for handling in rv
for narg in num_args[sin]:
half = narg/2
if half in den_args[cos]:
func = cos
elif half in den_args[sin]:
func = sin
else:
continue
rv = TR11(rv, half)
den_args[func].remove(half)
return rv
# sin in num, sin or cos in den
rv = handle_match(rv, num_args, den_args)
# sin in den, sin or cos in num
rv = handle_match(rv, den_args, num_args)
return rv
return bottom_up(rv, f)
def TR12(rv, first=True):
"""Separate sums in ``tan``.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import tan
>>> from sympy.simplify.fu import TR12
>>> TR12(tan(x + y))
(tan(x) + tan(y))/(-tan(x)*tan(y) + 1)
"""
def f(rv):
if not rv.func == tan:
return rv
arg = rv.args[0]
if arg.is_Add:
if first:
args = list(ordered(arg.args))
else:
args = list(arg.args)
a = args.pop()
b = Add._from_args(args)
if b.is_Add:
tb = TR12(tan(b), first=False)
else:
tb = tan(b)
return (tan(a) + tb)/(1 - tan(a)*tb)
return rv
return bottom_up(rv, f)
def TR12i(rv):
"""Combine tan arguments as
(tan(y) + tan(x))/(tan(x)*tan(y) - 1) -> -tan(x + y).
Examples
========
>>> from sympy.simplify.fu import TR12i
>>> from sympy import tan
>>> from sympy.abc import a, b, c
>>> ta, tb, tc = [tan(i) for i in (a, b, c)]
>>> TR12i((ta + tb)/(-ta*tb + 1))
tan(a + b)
>>> TR12i((ta + tb)/(ta*tb - 1))
-tan(a + b)
>>> TR12i((-ta - tb)/(ta*tb - 1))
tan(a + b)
>>> eq = (ta + tb)/(-ta*tb + 1)**2*(-3*ta - 3*tc)/(2*(ta*tc - 1))
>>> TR12i(eq.expand())
    -3*tan(a + b)*tan(a + c)/(2*(tan(a)*tan(b) - 1))
"""
from sympy import factor
def f(rv):
if not (rv.is_Add or rv.is_Mul or rv.is_Pow):
return rv
n, d = rv.as_numer_denom()
if not d.args or not n.args:
return rv
dok = {}
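        # dok maps the sum of tan arguments (x + y) of denominator factors of
        # the form g*(tan(x)*tan(y) - 1) to the exponent with which they occur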
def ok(di):
m = as_f_sign_1(di)
if m:
g, f, s = m
if s is S.NegativeOne and f.is_Mul and len(f.args) == 2 and \
all(isinstance(fi, tan) for fi in f.args):
return g, f
d_args = list(Mul.make_args(d))
for i, di in enumerate(d_args):
m = ok(di)
if m:
g, t = m
s = Add(*[_.args[0] for _ in t.args])
dok[s] = S.One
d_args[i] = g
continue
if di.is_Add:
di = factor(di)
if di.is_Mul:
d_args.extend(di.args)
d_args[i] = S.One
elif di.is_Pow and (di.exp.is_integer or di.base.is_positive):
m = ok(di.base)
if m:
g, t = m
s = Add(*[_.args[0] for _ in t.args])
dok[s] = di.exp
d_args[i] = g**di.exp
else:
di = factor(di)
if di.is_Mul:
d_args.extend(di.args)
d_args[i] = S.One
if not dok:
return rv
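        # now scan the numerator for factors of the form tan(x) + tan(y) whose
        # argument sum matches a key in dok; each such match consumes one power
        # of the corresponding denominator factor and becomes -tan(x + y)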
def ok(ni):
if ni.is_Add and len(ni.args) == 2:
a, b = ni.args
if isinstance(a, tan) and isinstance(b, tan):
return a, b
n_args = list(Mul.make_args(factor_terms(n)))
hit = False
for i, ni in enumerate(n_args):
m = ok(ni)
if not m:
m = ok(-ni)
if m:
n_args[i] = S.NegativeOne
else:
if ni.is_Add:
ni = factor(ni)
if ni.is_Mul:
n_args.extend(ni.args)
n_args[i] = S.One
continue
elif ni.is_Pow and (
ni.exp.is_integer or ni.base.is_positive):
m = ok(ni.base)
if m:
n_args[i] = S.One
else:
ni = factor(ni)
if ni.is_Mul:
n_args.extend(ni.args)
n_args[i] = S.One
continue
else:
continue
else:
n_args[i] = S.One
hit = True
s = Add(*[_.args[0] for _ in m])
ed = dok[s]
newed = ed.extract_additively(S.One)
if newed is not None:
if newed:
dok[s] = newed
else:
dok.pop(s)
n_args[i] *= -tan(s)
if hit:
            rv = Mul(*n_args)/Mul(*d_args)/Mul(*[(Mul(*[
tan(a) for a in i.args]) - 1)**e for i, e in dok.items()])
return rv
return bottom_up(rv, f)
def TR13(rv):
"""Change products of ``tan`` or ``cot``.
Examples
========
>>> from sympy.simplify.fu import TR13
>>> from sympy import tan, cot
>>> TR13(tan(3)*tan(2))
-tan(2)/tan(5) - tan(3)/tan(5) + 1
>>> TR13(cot(3)*cot(2))
cot(2)*cot(5) + 1 + cot(3)*cot(5)
"""
def f(rv):
if not rv.is_Mul:
return rv
# XXX handle products of powers? or let power-reducing handle it?
args = {tan: [], cot: [], None: []}
for a in ordered(Mul.make_args(rv)):
if a.func in (tan, cot):
args[a.func].append(a.args[0])
else:
args[None].append(a)
t = args[tan]
c = args[cot]
if len(t) < 2 and len(c) < 2:
return rv
args = args[None]
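        # pairwise contract the collected arguments using
        # tan(a)*tan(b) = 1 - (tan(a) + tan(b))/tan(a + b) and
        # cot(a)*cot(b) = 1 + (cot(a) + cot(b))*cot(a + b)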
while len(t) > 1:
t1 = t.pop()
t2 = t.pop()
args.append(1 - (tan(t1)/tan(t1 + t2) + tan(t2)/tan(t1 + t2)))
if t:
args.append(tan(t.pop()))
while len(c) > 1:
t1 = c.pop()
t2 = c.pop()
args.append(1 + cot(t1)*cot(t1 + t2) + cot(t2)*cot(t1 + t2))
if c:
args.append(cot(c.pop()))
return Mul(*args)
return bottom_up(rv, f)
def TRmorrie(rv):
"""Returns cos(x)*cos(2*x)*...*cos(2**(k-1)*x) -> sin(2**k*x)/(2**k*sin(x))
Examples
========
>>> from sympy.simplify.fu import TRmorrie, TR8, TR3
>>> from sympy.abc import x
>>> from sympy import Mul, cos, pi
>>> TRmorrie(cos(x)*cos(2*x))
sin(4*x)/(4*sin(x))
>>> TRmorrie(7*Mul(*[cos(x) for x in range(10)]))
7*sin(12)*sin(16)*cos(5)*cos(7)*cos(9)/(64*sin(1)*sin(3))
    Sometimes autosimplification will prevent a power from being
    recognized; e.g. in the following, cos(4*pi/7) automatically
    simplifies to -cos(3*pi/7) so only 2 of the 3 terms are
    recognized:
>>> TRmorrie(cos(pi/7)*cos(2*pi/7)*cos(4*pi/7))
-sin(3*pi/7)*cos(3*pi/7)/(4*sin(pi/7))
A touch by TR8 resolves the expression to a Rational
>>> TR8(_)
-1/8
In this case, if eq is unsimplified, the answer is obtained
directly:
>>> eq = cos(pi/9)*cos(2*pi/9)*cos(3*pi/9)*cos(4*pi/9)
>>> TRmorrie(eq)
1/16
But if angles are made canonical with TR3 then the answer
is not simplified without further work:
>>> TR3(eq)
sin(pi/18)*cos(pi/9)*cos(2*pi/9)/2
>>> TRmorrie(_)
sin(pi/18)*sin(4*pi/9)/(8*sin(pi/9))
>>> TR8(_)
cos(7*pi/18)/(16*sin(pi/9))
>>> TR3(_)
1/16
    The original expression would have resolved to 1/16 directly with TR8,
however:
>>> TR8(eq)
1/16
References
==========
.. [1] https://en.wikipedia.org/wiki/Morrie%27s_law
"""
def f(rv, first=True):
if not rv.is_Mul:
return rv
if first:
n, d = rv.as_numer_denom()
return f(n, 0)/f(d, 0)
args = defaultdict(list)
coss = {}
other = []
for c in rv.args:
b, e = c.as_base_exp()
if e.is_Integer and isinstance(b, cos):
co, a = b.args[0].as_coeff_Mul()
args[a].append(co)
coss[b] = e
else:
other.append(c)
new = []
for a in args:
c = args[a]
c.sort()
no = []
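            # find the longest doubling chain ci, 2*ci, 4*ci, ... among the
            # coefficients; a chain of length k > 1 collapses by Morrie's law
            # to sin(2**k*ci*a)/(2**k*sin(ci*a))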
while c:
k = 0
cc = ci = c[0]
while cc in c:
k += 1
cc *= 2
if k > 1:
newarg = sin(2**k*ci*a)/2**k/sin(ci*a)
# see how many times this can be taken
take = None
ccs = []
for i in range(k):
cc /= 2
key = cos(a*cc, evaluate=False)
ccs.append(cc)
take = min(coss[key], take or coss[key])
# update exponent counts
for i in range(k):
cc = ccs.pop()
key = cos(a*cc, evaluate=False)
coss[key] -= take
if not coss[key]:
c.remove(cc)
new.append(newarg**take)
else:
no.append(c.pop(0))
c[:] = no
if new:
rv = Mul(*(new + other + [
cos(k*a, evaluate=False) for a in args for k in args[a]]))
return rv
return bottom_up(rv, f)
def TR14(rv, first=True):
"""Convert factored powers of sin and cos identities into simpler
expressions.
Examples
========
>>> from sympy.simplify.fu import TR14
>>> from sympy.abc import x, y
>>> from sympy import cos, sin
>>> TR14((cos(x) - 1)*(cos(x) + 1))
-sin(x)**2
>>> TR14((sin(x) - 1)*(sin(x) + 1))
-cos(x)**2
>>> p1 = (cos(x) + 1)*(cos(x) - 1)
>>> p2 = (cos(y) - 1)*2*(cos(y) + 1)
>>> p3 = (3*(cos(y) - 1))*(3*(cos(y) + 1))
>>> TR14(p1*p2*p3*(x - 1))
-18*(x - 1)*sin(x)**2*sin(y)**4
"""
def f(rv):
if not rv.is_Mul:
return rv
if first:
# sort them by location in numerator and denominator
# so the code below can just deal with positive exponents
n, d = rv.as_numer_denom()
if d is not S.One:
newn = TR14(n, first=False)
newd = TR14(d, first=False)
if newn != n or newd != d:
rv = newn/newd
return rv
other = []
process = []
for a in rv.args:
if a.is_Pow:
b, e = a.as_base_exp()
if not (e.is_integer or b.is_positive):
other.append(a)
continue
a = b
else:
e = S.One
m = as_f_sign_1(a)
if not m or m[1].func not in (cos, sin):
if e is S.One:
other.append(a)
else:
other.append(a**e)
continue
g, f, si = m
process.append((g, e.is_Number, e, f, si, a))
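        # each entry is (gcd, exponent-is-number flag, exponent, the cos/sin
        # factor, the sign (+/-1), and the original base)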
# sort them to get like terms next to each other
process = list(ordered(process))
# keep track of whether there was any change
nother = len(other)
# access keys
keys = (g, t, e, f, si, a) = list(range(6))
while process:
A = process.pop(0)
if process:
B = process[0]
if A[e].is_Number and B[e].is_Number:
# both exponents are numbers
if A[f] == B[f]:
if A[si] != B[si]:
B = process.pop(0)
take = min(A[e], B[e])
# reinsert any remainder
# the B will likely sort after A so check it first
if B[e] != take:
rem = [B[i] for i in keys]
rem[e] -= take
process.insert(0, rem)
elif A[e] != take:
rem = [A[i] for i in keys]
rem[e] -= take
process.insert(0, rem)
if isinstance(A[f], cos):
t = sin
else:
t = cos
other.append((-A[g]*B[g]*t(A[f].args[0])**2)**take)
continue
elif A[e] == B[e]:
# both exponents are equal symbols
if A[f] == B[f]:
if A[si] != B[si]:
B = process.pop(0)
take = A[e]
if isinstance(A[f], cos):
t = sin
else:
t = cos
other.append((-A[g]*B[g]*t(A[f].args[0])**2)**take)
continue
# either we are done or neither condition above applied
other.append(A[a]**A[e])
if len(other) != nother:
rv = Mul(*other)
return rv
return bottom_up(rv, f)
def TR15(rv, max=4, pow=False):
"""Convert sin(x)*-2 to 1 + cot(x)**2.
See _TR56 docstring for advanced use of ``max`` and ``pow``.
Examples
========
>>> from sympy.simplify.fu import TR15
>>> from sympy.abc import x
>>> from sympy import sin
>>> TR15(1 - 1/sin(x)**2)
-cot(x)**2
"""
def f(rv):
if not (isinstance(rv, Pow) and isinstance(rv.base, sin)):
return rv
ia = 1/rv
a = _TR56(ia, sin, cot, lambda x: 1 + x, max=max, pow=pow)
if a != ia:
rv = a
return rv
return bottom_up(rv, f)
def TR16(rv, max=4, pow=False):
"""Convert cos(x)*-2 to 1 + tan(x)**2.
See _TR56 docstring for advanced use of ``max`` and ``pow``.
Examples
========
>>> from sympy.simplify.fu import TR16
>>> from sympy.abc import x
>>> from sympy import cos
>>> TR16(1 - 1/cos(x)**2)
-tan(x)**2
"""
def f(rv):
if not (isinstance(rv, Pow) and isinstance(rv.base, cos)):
return rv
ia = 1/rv
a = _TR56(ia, cos, tan, lambda x: 1 + x, max=max, pow=pow)
if a != ia:
rv = a
return rv
return bottom_up(rv, f)
def TR111(rv):
"""Convert f(x)**-i to g(x)**i where either ``i`` is an integer
or the base is positive and f, g are: tan, cot; sin, csc; or cos, sec.
Examples
========
>>> from sympy.simplify.fu import TR111
>>> from sympy.abc import x
>>> from sympy import tan
>>> TR111(1 - 1/tan(x)**2)
1 - cot(x)**2
"""
def f(rv):
if not (
isinstance(rv, Pow) and
(rv.base.is_positive or rv.exp.is_integer and rv.exp.is_negative)):
return rv
if isinstance(rv.base, tan):
return cot(rv.base.args[0])**-rv.exp
elif isinstance(rv.base, sin):
return csc(rv.base.args[0])**-rv.exp
elif isinstance(rv.base, cos):
return sec(rv.base.args[0])**-rv.exp
return rv
return bottom_up(rv, f)
def TR22(rv, max=4, pow=False):
"""Convert tan(x)**2 to sec(x)**2 - 1 and cot(x)**2 to csc(x)**2 - 1.
See _TR56 docstring for advanced use of ``max`` and ``pow``.
Examples
========
>>> from sympy.simplify.fu import TR22
>>> from sympy.abc import x
>>> from sympy import tan, cot
>>> TR22(1 + tan(x)**2)
sec(x)**2
>>> TR22(1 + cot(x)**2)
csc(x)**2
"""
def f(rv):
if not (isinstance(rv, Pow) and rv.base.func in (cot, tan)):
return rv
rv = _TR56(rv, tan, sec, lambda x: x - 1, max=max, pow=pow)
rv = _TR56(rv, cot, csc, lambda x: x - 1, max=max, pow=pow)
return rv
return bottom_up(rv, f)
def TRpower(rv):
"""Convert sin(x)**n and cos(x)**n with positive n to sums.
Examples
========
>>> from sympy.simplify.fu import TRpower
>>> from sympy.abc import x
>>> from sympy import cos, sin
>>> TRpower(sin(x)**6)
-15*cos(2*x)/32 + 3*cos(4*x)/16 - cos(6*x)/32 + 5/16
>>> TRpower(sin(x)**3*cos(2*x)**4)
(3*sin(x)/4 - sin(3*x)/4)*(cos(4*x)/2 + cos(8*x)/8 + 3/8)
References
==========
.. [1] https://en.wikipedia.org/wiki/List_of_trigonometric_identities#Power-reduction_formulae
"""
def f(rv):
if not (isinstance(rv, Pow) and isinstance(rv.base, (sin, cos))):
return rv
b, n = rv.as_base_exp()
x = b.args[0]
if n.is_Integer and n.is_positive:
if n.is_odd and isinstance(b, cos):
rv = 2**(1-n)*Add(*[binomial(n, k)*cos((n - 2*k)*x)
for k in range((n + 1)/2)])
elif n.is_odd and isinstance(b, sin):
rv = 2**(1-n)*(-1)**((n-1)/2)*Add(*[binomial(n, k)*
(-1)**k*sin((n - 2*k)*x) for k in range((n + 1)/2)])
elif n.is_even and isinstance(b, cos):
rv = 2**(1-n)*Add(*[binomial(n, k)*cos((n - 2*k)*x)
for k in range(n/2)])
elif n.is_even and isinstance(b, sin):
rv = 2**(1-n)*(-1)**(n/2)*Add(*[binomial(n, k)*
(-1)**k*cos((n - 2*k)*x) for k in range(n/2)])
if n.is_even:
rv += 2**(-n)*binomial(n, n/2)
return rv
return bottom_up(rv, f)
def L(rv):
"""Return count of trigonometric functions in expression.
Examples
========
>>> from sympy.simplify.fu import L
>>> from sympy.abc import x
>>> from sympy import cos, sin
>>> L(cos(x)+sin(x))
2
"""
return S(rv.count(TrigonometricFunction))
# ============== end of basic Fu-like tools =====================
if SYMPY_DEBUG:
(TR0, TR1, TR2, TR3, TR4, TR5, TR6, TR7, TR8, TR9, TR10, TR11, TR12, TR13,
TR2i, TRmorrie, TR14, TR15, TR16, TR12i, TR111, TR22
)= list(map(debug,
(TR0, TR1, TR2, TR3, TR4, TR5, TR6, TR7, TR8, TR9, TR10, TR11, TR12, TR13,
TR2i, TRmorrie, TR14, TR15, TR16, TR12i, TR111, TR22)))
# tuples are chains -- (f, g) -> lambda x: g(f(x))
# lists are choices -- [f, g] -> lambda x: min(f(x), g(x), key=objective)
CTR1 = [(TR5, TR0), (TR6, TR0), identity]
CTR2 = (TR11, [(TR5, TR0), (TR6, TR0), TR0])
CTR3 = [(TRmorrie, TR8, TR0), (TRmorrie, TR8, TR10i, TR0), identity]
CTR4 = [(TR4, TR10i), identity]
RL1 = (TR4, TR3, TR4, TR12, TR4, TR13, TR4, TR0)
# XXX it's a little unclear how this one is to be implemented
# see the Fu paper in the references, page 7. What is the Union symbol referring to?
# The diagram shows all these as one chain of transformations, but the
# text refers to them being applied independently. Also, a break
# if L starts to increase has not been implemented.
RL2 = [
(TR4, TR3, TR10, TR4, TR3, TR11),
(TR5, TR7, TR11, TR4),
(CTR3, CTR1, TR9, CTR2, TR4, TR9, TR9, CTR4),
identity,
]
def fu(rv, measure=lambda x: (L(x), x.count_ops())):
"""Attempt to simplify expression by using transformation rules given
in the algorithm by Fu et al.
:func:`fu` will try to minimize the objective function ``measure``.
By default this first minimizes the number of trig terms and then minimizes
the number of total operations.
Examples
========
>>> from sympy.simplify.fu import fu
>>> from sympy import cos, sin, tan, pi, S, sqrt
>>> from sympy.abc import x, y, a, b
>>> fu(sin(50)**2 + cos(50)**2 + sin(pi/6))
3/2
>>> fu(sqrt(6)*cos(x) + sqrt(2)*sin(x))
2*sqrt(2)*sin(x + pi/3)
CTR1 example
>>> eq = sin(x)**4 - cos(y)**2 + sin(y)**2 + 2*cos(x)**2
>>> fu(eq)
cos(x)**4 - 2*cos(y)**2 + 2
CTR2 example
>>> fu(S.Half - cos(2*x)/2)
sin(x)**2
CTR3 example
>>> fu(sin(a)*(cos(b) - sin(b)) + cos(a)*(sin(b) + cos(b)))
sqrt(2)*sin(a + b + pi/4)
CTR4 example
>>> fu(sqrt(3)*cos(x)/2 + sin(x)/2)
sin(x + pi/3)
Example 1
>>> fu(1-sin(2*x)**2/4-sin(y)**2-cos(x)**4)
-cos(x)**2 + cos(y)**2
Example 2
>>> fu(cos(4*pi/9))
sin(pi/18)
>>> fu(cos(pi/9)*cos(2*pi/9)*cos(3*pi/9)*cos(4*pi/9))
1/16
Example 3
>>> fu(tan(7*pi/18)+tan(5*pi/18)-sqrt(3)*tan(5*pi/18)*tan(7*pi/18))
-sqrt(3)
Objective function example
>>> fu(sin(x)/cos(x)) # default objective function
tan(x)
>>> fu(sin(x)/cos(x), measure=lambda x: -x.count_ops()) # maximize op count
sin(x)/cos(x)
References
==========
.. [1] https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.657.2478&rep=rep1&type=pdf
"""
fRL1 = greedy(RL1, measure)
fRL2 = greedy(RL2, measure)
was = rv
rv = sympify(rv)
if not isinstance(rv, Expr):
return rv.func(*[fu(a, measure=measure) for a in rv.args])
rv = TR1(rv)
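    # the tan/cot stage (RL1) is tried first; after any remaining tan/cot are
    # rewritten by TR2, the sin/cos stage (RL2) is applied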
if rv.has(tan, cot):
rv1 = fRL1(rv)
if (measure(rv1) < measure(rv)):
rv = rv1
if rv.has(tan, cot):
rv = TR2(rv)
if rv.has(sin, cos):
rv1 = fRL2(rv)
rv2 = TR8(TRmorrie(rv1))
rv = min([was, rv, rv1, rv2], key=measure)
return min(TR2i(rv), rv, key=measure)
def process_common_addends(rv, do, key2=None, key1=True):
"""Apply ``do`` to addends of ``rv`` that (if ``key1=True``) share at least
a common absolute value of their coefficient and the value of ``key2`` when
applied to the argument. If ``key1`` is False ``key2`` must be supplied and
will be the only key applied.
"""
# collect by absolute value of coefficient and key2
absc = defaultdict(list)
if key1:
for a in rv.args:
c, a = a.as_coeff_Mul()
if c < 0:
c = -c
a = -a # put the sign on `a`
absc[(c, key2(a) if key2 else 1)].append(a)
elif key2:
for a in rv.args:
absc[(S.One, key2(a))].append(a)
else:
raise ValueError('must have at least one key')
args = []
hit = False
for k in absc:
v = absc[k]
c, _ = k
if len(v) > 1:
e = Add(*v, evaluate=False)
new = do(e)
if new != e:
e = new
hit = True
args.append(c*e)
else:
args.append(c*v[0])
if hit:
rv = Add(*args)
return rv
fufuncs = '''
TR0 TR1 TR2 TR3 TR4 TR5 TR6 TR7 TR8 TR9 TR10 TR10i TR11
TR12 TR13 L TR2i TRmorrie TR12i
TR14 TR15 TR16 TR111 TR22'''.split()
FU = dict(list(zip(fufuncs, list(map(locals().get, fufuncs)))))
def _roots():
global _ROOT2, _ROOT3, _invROOT3
_ROOT2, _ROOT3 = sqrt(2), sqrt(3)
_invROOT3 = 1/_ROOT3
_ROOT2 = None
def trig_split(a, b, two=False):
"""Return the gcd, s1, s2, a1, a2, bool where
If two is False (default) then::
a + b = gcd*(s1*f(a1) + s2*f(a2)) where f = cos if bool else sin
else:
if bool, a + b was +/- cos(a1)*cos(a2) +/- sin(a1)*sin(a2) and equals
n1*gcd*cos(a - b) if n1 == n2 else
n1*gcd*cos(a + b)
else a + b was +/- cos(a1)*sin(a2) +/- sin(a1)*cos(a2) and equals
            n1*gcd*sin(a + b) if n1 == n2 else
n1*gcd*sin(b - a)
Examples
========
>>> from sympy.simplify.fu import trig_split
>>> from sympy.abc import x, y, z
>>> from sympy import cos, sin, sqrt
>>> trig_split(cos(x), cos(y))
(1, 1, 1, x, y, True)
>>> trig_split(2*cos(x), -2*cos(y))
(2, 1, -1, x, y, True)
>>> trig_split(cos(x)*sin(y), cos(y)*sin(y))
(sin(y), 1, 1, x, y, True)
>>> trig_split(cos(x), -sqrt(3)*sin(x), two=True)
(2, 1, -1, x, pi/6, False)
>>> trig_split(cos(x), sin(x), two=True)
(sqrt(2), 1, 1, x, pi/4, False)
>>> trig_split(cos(x), -sin(x), two=True)
(sqrt(2), 1, -1, x, pi/4, False)
>>> trig_split(sqrt(2)*cos(x), -sqrt(6)*sin(x), two=True)
(2*sqrt(2), 1, -1, x, pi/6, False)
>>> trig_split(-sqrt(6)*cos(x), -sqrt(2)*sin(x), two=True)
(-2*sqrt(2), 1, 1, x, pi/3, False)
>>> trig_split(cos(x)/sqrt(6), sin(x)/sqrt(2), two=True)
(sqrt(6)/3, 1, 1, x, pi/6, False)
>>> trig_split(-sqrt(6)*cos(x)*sin(y), -sqrt(2)*sin(x)*sin(y), two=True)
(-2*sqrt(2)*sin(y), 1, 1, x, pi/3, False)
>>> trig_split(cos(x), sin(x))
>>> trig_split(cos(x), sin(z))
>>> trig_split(2*cos(x), -sin(x))
>>> trig_split(cos(x), -sqrt(3)*sin(x))
>>> trig_split(cos(x)*cos(y), sin(x)*sin(z))
>>> trig_split(cos(x)*cos(y), sin(x)*sin(y))
>>> trig_split(-sqrt(6)*cos(x), sqrt(2)*sin(x)*sin(y), two=True)
"""
global _ROOT2, _ROOT3, _invROOT3
if _ROOT2 is None:
_roots()
a, b = [Factors(i) for i in (a, b)]
ua, ub = a.normal(b)
gcd = a.gcd(b).as_expr()
n1 = n2 = 1
if S.NegativeOne in ua.factors:
ua = ua.quo(S.NegativeOne)
n1 = -n1
elif S.NegativeOne in ub.factors:
ub = ub.quo(S.NegativeOne)
n2 = -n2
a, b = [i.as_expr() for i in (ua, ub)]
def pow_cos_sin(a, two):
"""Return ``a`` as a tuple (r, c, s) such that
``a = (r or 1)*(c or 1)*(s or 1)``.
Three arguments are returned (radical, c-factor, s-factor) as
long as the conditions set by ``two`` are met; otherwise None is
returned. If ``two`` is True there will be one or two non-None
values in the tuple: c and s or c and r or s and r or s or c with c
being a cosine function (if possible) else a sine, and s being a sine
        function (if possible) else a cosine. If ``two`` is False then there
will only be a c or s term in the tuple.
        ``two`` also requires that either two cos and/or sin be present (with
the condition that if the functions are the same the arguments are
different or vice versa) or that a single cosine or a single sine
be present with an optional radical.
If the above conditions dictated by ``two`` are not met then None
is returned.
"""
c = s = None
co = S.One
if a.is_Mul:
co, a = a.as_coeff_Mul()
if len(a.args) > 2 or not two:
return None
if a.is_Mul:
args = list(a.args)
else:
args = [a]
a = args.pop(0)
if isinstance(a, cos):
c = a
elif isinstance(a, sin):
s = a
elif a.is_Pow and a.exp is S.Half: # autoeval doesn't allow -1/2
co *= a
else:
return None
if args:
b = args[0]
if isinstance(b, cos):
if c:
s = b
else:
c = b
elif isinstance(b, sin):
if s:
c = b
else:
s = b
elif b.is_Pow and b.exp is S.Half:
co *= b
else:
return None
return co if co is not S.One else None, c, s
elif isinstance(a, cos):
c = a
elif isinstance(a, sin):
s = a
if c is None and s is None:
return
co = co if co is not S.One else None
return co, c, s
# get the parts
m = pow_cos_sin(a, two)
if m is None:
return
coa, ca, sa = m
m = pow_cos_sin(b, two)
if m is None:
return
cob, cb, sb = m
# check them
if (not ca) and cb or ca and isinstance(ca, sin):
coa, ca, sa, cob, cb, sb = cob, cb, sb, coa, ca, sa
n1, n2 = n2, n1
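        # the swap puts the part containing a cosine (if any) first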
if not two: # need cos(x) and cos(y) or sin(x) and sin(y)
c = ca or sa
s = cb or sb
if not isinstance(c, s.func):
return None
return gcd, n1, n2, c.args[0], s.args[0], isinstance(c, cos)
else:
if not coa and not cob:
if (ca and cb and sa and sb):
if isinstance(ca, sa.func) is not isinstance(cb, sb.func):
return
args = {j.args for j in (ca, sa)}
if not all(i.args in args for i in (cb, sb)):
return
return gcd, n1, n2, ca.args[0], sa.args[0], isinstance(ca, sa.func)
if ca and sa or cb and sb or \
two and (ca is None and sa is None or cb is None and sb is None):
return
c = ca or sa
s = cb or sb
if c.args != s.args:
return
if not coa:
coa = S.One
if not cob:
cob = S.One
if coa is cob:
gcd *= _ROOT2
return gcd, n1, n2, c.args[0], pi/4, False
elif coa/cob == _ROOT3:
gcd *= 2*cob
return gcd, n1, n2, c.args[0], pi/3, False
elif coa/cob == _invROOT3:
gcd *= 2*coa
return gcd, n1, n2, c.args[0], pi/6, False
def as_f_sign_1(e):
"""If ``e`` is a sum that can be written as ``g*(a + s)`` where
``s`` is ``+/-1``, return ``g``, ``a``, and ``s`` where ``a`` does
not have a leading negative coefficient.
Examples
========
>>> from sympy.simplify.fu import as_f_sign_1
>>> from sympy.abc import x
>>> as_f_sign_1(x + 1)
(1, x, 1)
>>> as_f_sign_1(x - 1)
(1, x, -1)
>>> as_f_sign_1(-x + 1)
(-1, x, -1)
>>> as_f_sign_1(-x - 1)
(-1, x, 1)
>>> as_f_sign_1(2*x + 2)
(2, x, 1)
"""
if not e.is_Add or len(e.args) != 2:
return
# exact match
a, b = e.args
if a in (S.NegativeOne, S.One):
g = S.One
if b.is_Mul and b.args[0].is_Number and b.args[0] < 0:
a, b = -a, -b
g = -g
return g, b, a
# gcd match
a, b = [Factors(i) for i in e.args]
ua, ub = a.normal(b)
gcd = a.gcd(b).as_expr()
if S.NegativeOne in ua.factors:
ua = ua.quo(S.NegativeOne)
n1 = -1
n2 = 1
elif S.NegativeOne in ub.factors:
ub = ub.quo(S.NegativeOne)
n1 = 1
n2 = -1
else:
n1 = n2 = 1
a, b = [i.as_expr() for i in (ua, ub)]
if a is S.One:
a, b = b, a
n1, n2 = n2, n1
if n1 == -1:
gcd = -gcd
n2 = -n2
if b is S.One:
return gcd, a, n2
def _osborne(e, d):
"""Replace all hyperbolic functions with trig functions using
the Osborne rule.
Notes
=====
``d`` is a dummy variable to prevent automatic evaluation
of trigonometric/hyperbolic functions.
References
==========
.. [1] https://en.wikipedia.org/wiki/Hyperbolic_function
"""
def f(rv):
if not isinstance(rv, HyperbolicFunction):
return rv
a = rv.args[0]
a = a*d if not a.is_Add else Add._from_args([i*d for i in a.args])
if isinstance(rv, sinh):
return I*sin(a)
elif isinstance(rv, cosh):
return cos(a)
elif isinstance(rv, tanh):
return I*tan(a)
elif isinstance(rv, coth):
return cot(a)/I
elif isinstance(rv, sech):
return sec(a)
elif isinstance(rv, csch):
return csc(a)/I
else:
raise NotImplementedError('unhandled %s' % rv.func)
return bottom_up(e, f)
def _osbornei(e, d):
"""Replace all trig functions with hyperbolic functions using
the Osborne rule.
Notes
=====
``d`` is a dummy variable to prevent automatic evaluation
of trigonometric/hyperbolic functions.
References
==========
.. [1] https://en.wikipedia.org/wiki/Hyperbolic_function
"""
def f(rv):
if not isinstance(rv, TrigonometricFunction):
return rv
const, x = rv.args[0].as_independent(d, as_Add=True)
a = x.xreplace({d: S.One}) + const*I
if isinstance(rv, sin):
return sinh(a)/I
elif isinstance(rv, cos):
return cosh(a)
elif isinstance(rv, tan):
return tanh(a)/I
elif isinstance(rv, cot):
return coth(a)*I
elif isinstance(rv, sec):
return sech(a)
elif isinstance(rv, csc):
return csch(a)*I
else:
raise NotImplementedError('unhandled %s' % rv.func)
return bottom_up(e, f)
def hyper_as_trig(rv):
"""Return an expression containing hyperbolic functions in terms
of trigonometric functions. Any trigonometric functions initially
present are replaced with Dummy symbols and the function to undo
the masking and the conversion back to hyperbolics is also returned. It
should always be true that::
t, f = hyper_as_trig(expr)
expr == f(t)
Examples
========
>>> from sympy.simplify.fu import hyper_as_trig, fu
>>> from sympy.abc import x
>>> from sympy import cosh, sinh
>>> eq = sinh(x)**2 + cosh(x)**2
>>> t, f = hyper_as_trig(eq)
>>> f(fu(t))
cosh(2*x)
References
==========
.. [1] https://en.wikipedia.org/wiki/Hyperbolic_function
"""
from sympy.simplify.simplify import signsimp
from sympy.simplify.radsimp import collect
# mask off trig functions
trigs = rv.atoms(TrigonometricFunction)
reps = [(t, Dummy()) for t in trigs]
masked = rv.xreplace(dict(reps))
# get inversion substitutions in place
reps = [(v, k) for k, v in reps]
d = Dummy()
return _osborne(masked, d), lambda x: collect(signsimp(
_osbornei(x, d).xreplace(dict(reps))), S.ImaginaryUnit)
def sincos_to_sum(expr):
"""Convert products and powers of sin and cos to sums.
Explanation
===========
Applied power reduction TRpower first, then expands products, and
converts products to sums with TR8.
Examples
========
>>> from sympy.simplify.fu import sincos_to_sum
>>> from sympy.abc import x
>>> from sympy import cos, sin
>>> sincos_to_sum(16*sin(x)**3*cos(2*x)**2)
7*sin(x) - 5*sin(3*x) + 3*sin(5*x) - sin(7*x)
"""
if not expr.has(cos, sin):
return expr
else:
return TR8(expand_mul(TRpower(expr)))
|
55e107182ad69fdd4bea9061664d2eaebfee9062558cb6462b580df38506facc | """ This module cooks up a docstring when imported. Its only purpose is to
be displayed in the sphinx documentation. """
from sympy import latex, Eq, hyper
from sympy.simplify.hyperexpand import FormulaCollection
c = FormulaCollection()
doc = ""
for f in c.formulae:
obj = Eq(hyper(f.func.ap, f.func.bq, f.z),
f.closed_form.rewrite('nonrepsmall'))
doc += ".. math::\n %s\n" % latex(obj)
__doc__ = doc
|
1ab8af4bff62a9ce031d149a748a0b74ec49773e9eb9dcad3d90fc3c5830a346 | from itertools import combinations_with_replacement
from sympy.core import symbols, Add, Dummy
from sympy.core.numbers import Rational
from sympy.polys import cancel, ComputationFailed, parallel_poly_from_expr, reduced, Poly
from sympy.polys.monomials import Monomial, monomial_div
from sympy.polys.polyerrors import DomainError, PolificationFailed
from sympy.utilities.misc import debug
def ratsimp(expr):
"""
Put an expression over a common denominator, cancel and reduce.
Examples
========
>>> from sympy import ratsimp
>>> from sympy.abc import x, y
>>> ratsimp(1/x + 1/y)
(x + y)/(x*y)
"""
f, g = cancel(expr).as_numer_denom()
try:
Q, r = reduced(f, [g], field=True, expand=False)
except ComputationFailed:
return f/g
return Add(*Q) + cancel(r/g)
def ratsimpmodprime(expr, G, *gens, quick=True, polynomial=False, **args):
"""
Simplifies a rational expression ``expr`` modulo the prime ideal
generated by ``G``. ``G`` should be a Groebner basis of the
ideal.
Examples
========
>>> from sympy.simplify.ratsimp import ratsimpmodprime
>>> from sympy.abc import x, y
>>> eq = (x + y**5 + y)/(x - y)
>>> ratsimpmodprime(eq, [x*y**5 - x - y], x, y, order='lex')
(-x**2 - x*y - x - y)/(-x**2 + x*y)
If ``polynomial`` is ``False``, the algorithm computes a rational
simplification which minimizes the sum of the total degrees of
the numerator and the denominator.
If ``polynomial`` is ``True``, this function just brings numerator and
denominator into a canonical form. This is much faster, but has
potentially worse results.
References
==========
.. [1] M. Monagan, R. Pearce, Rational Simplification Modulo a Polynomial
Ideal, http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.163.6984
(specifically, the second algorithm)
"""
from sympy import solve
debug('ratsimpmodprime', expr)
# usual preparation of polynomials:
num, denom = cancel(expr).as_numer_denom()
try:
polys, opt = parallel_poly_from_expr([num, denom] + G, *gens, **args)
except PolificationFailed:
return expr
domain = opt.domain
if domain.has_assoc_Field:
opt.domain = domain.get_field()
else:
raise DomainError(
"can't compute rational simplification over %s" % domain)
# compute only once
leading_monomials = [g.LM(opt.order) for g in polys[2:]]
tested = set()
def staircase(n):
"""
Compute all monomials with degree less than ``n`` that are
not divisible by any element of ``leading_monomials``.
"""
if n == 0:
return [1]
S = []
for mi in combinations_with_replacement(range(len(opt.gens)), n):
m = [0]*len(opt.gens)
for i in mi:
m[i] += 1
if all([monomial_div(m, lmg) is None for lmg in
leading_monomials]):
S.append(m)
return [Monomial(s).as_expr(*opt.gens) for s in S] + staircase(n - 1)
def _ratsimpmodprime(a, b, allsol, N=0, D=0):
r"""
Computes a rational simplification of ``a/b`` which minimizes
the sum of the total degrees of the numerator and the denominator.
Explanation
===========
The algorithm proceeds by looking at ``a * d - b * c`` modulo
the ideal generated by ``G`` for some ``c`` and ``d`` with degree
less than ``a`` and ``b`` respectively.
The coefficients of ``c`` and ``d`` are indeterminates and thus
the coefficients of the normalform of ``a * d - b * c`` are
linear polynomials in these indeterminates.
If these linear polynomials, considered as system of
equations, have a nontrivial solution, then `\frac{a}{b}
\equiv \frac{c}{d}` modulo the ideal generated by ``G``. So,
by construction, the degree of ``c`` and ``d`` is less than
the degree of ``a`` and ``b``, so a simpler representation
has been found.
After a simpler representation has been found, the algorithm
tries to reduce the degree of the numerator and denominator
and returns the result afterwards.
As an extension, if quick=False, we look at all possible degrees such
that the total degree is less than *or equal to* the best current
solution. We retain a list of all solutions of minimal degree, and try
to find the best one at the end.
"""
c, d = a, b
steps = 0
maxdeg = a.total_degree() + b.total_degree()
if quick:
bound = maxdeg - 1
else:
bound = maxdeg
while N + D <= bound:
if (N, D) in tested:
break
tested.add((N, D))
M1 = staircase(N)
M2 = staircase(D)
debug('%s / %s: %s, %s' % (N, D, M1, M2))
Cs = symbols("c:%d" % len(M1), cls=Dummy)
Ds = symbols("d:%d" % len(M2), cls=Dummy)
ng = Cs + Ds
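            # build generic candidates c_hat and d_hat as linear combinations
            # of the staircase monomials with undetermined coefficients Cs, Ds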
c_hat = Poly(
sum([Cs[i] * M1[i] for i in range(len(M1))]), opt.gens + ng)
d_hat = Poly(
sum([Ds[i] * M2[i] for i in range(len(M2))]), opt.gens + ng)
r = reduced(a * d_hat - b * c_hat, G, opt.gens + ng,
order=opt.order, polys=True)[1]
S = Poly(r, gens=opt.gens).coeffs()
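            # the coefficients of the normal form of a*d_hat - b*c_hat are
            # linear in Cs and Ds; a nontrivial solution gives an equivalent
            # fraction c/d of lower total degree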
sol = solve(S, Cs + Ds, particular=True, quick=True)
if sol and not all([s == 0 for s in sol.values()]):
c = c_hat.subs(sol)
d = d_hat.subs(sol)
# The "free" variables occurring before as parameters
# might still be in the substituted c, d, so set them
# to the value chosen before:
c = c.subs(dict(list(zip(Cs + Ds, [1] * (len(Cs) + len(Ds))))))
d = d.subs(dict(list(zip(Cs + Ds, [1] * (len(Cs) + len(Ds))))))
c = Poly(c, opt.gens)
d = Poly(d, opt.gens)
if d == 0:
raise ValueError('Ideal not prime?')
allsol.append((c_hat, d_hat, S, Cs + Ds))
if N + D != maxdeg:
allsol = [allsol[-1]]
break
steps += 1
N += 1
D += 1
if steps > 0:
c, d, allsol = _ratsimpmodprime(c, d, allsol, N, D - steps)
c, d, allsol = _ratsimpmodprime(c, d, allsol, N - steps, D)
return c, d, allsol
# preprocessing. this improves performance a bit when deg(num)
# and deg(denom) are large:
num = reduced(num, G, opt.gens, order=opt.order)[1]
denom = reduced(denom, G, opt.gens, order=opt.order)[1]
if polynomial:
return (num/denom).cancel()
c, d, allsol = _ratsimpmodprime(
Poly(num, opt.gens, domain=opt.domain), Poly(denom, opt.gens, domain=opt.domain), [])
if not quick and allsol:
debug('Looking for best minimal solution. Got: %s' % len(allsol))
newsol = []
for c_hat, d_hat, S, ng in allsol:
sol = solve(S, ng, particular=True, quick=False)
newsol.append((c_hat.subs(sol), d_hat.subs(sol)))
c, d = min(newsol, key=lambda x: len(x[0].terms()) + len(x[1].terms()))
if not domain.is_Field:
cn, c = c.clear_denoms(convert=True)
dn, d = d.clear_denoms(convert=True)
r = Rational(cn, dn)
else:
r = Rational(1)
return (c*r.q)/(d*r.p)
|
cab556bc3f59419547746c326c3d37caedeaaa73a68a721e961ad6c1290d80bf | from typing import Any, Set
from itertools import permutations
from sympy.combinatorics import Permutation
from sympy.core import (
Basic, Expr, Function, diff,
Pow, Mul, Add, Atom, Lambda, S, Tuple, Dict
)
from sympy.core.cache import cacheit
from sympy.core.compatibility import reduce
from sympy.core.symbol import Symbol, Dummy
from sympy.core.symbol import Str
from sympy.core.sympify import _sympify
from sympy.functions import factorial
from sympy.matrices import ImmutableDenseMatrix as Matrix
from sympy.simplify import simplify
from sympy.solvers import solve
from sympy.utilities.exceptions import SymPyDeprecationWarning
# TODO you are a bit excessive in the use of Dummies
# TODO dummy point, literal field
# TODO too often one needs to call doit or simplify on the output, check the
# tests and find out why
from sympy.tensor.array import ImmutableDenseNDimArray
class Manifold(Atom):
"""A mathematical manifold.
Explanation
===========
A manifold is a topological space that locally resembles
Euclidean space near each point [1].
This class does not provide any means to study the topological
characteristics of the manifold that it represents, though.
Parameters
==========
name : str
The name of the manifold.
dim : int
The dimension of the manifold.
Examples
========
>>> from sympy.diffgeom import Manifold
>>> m = Manifold('M', 2)
>>> m
M
>>> m.dim
2
References
==========
.. [1] https://en.wikipedia.org/wiki/Manifold
"""
def __new__(cls, name, dim, **kwargs):
if not isinstance(name, Str):
name = Str(name)
dim = _sympify(dim)
obj = super().__new__(cls, name, dim)
obj.patches = _deprecated_list(
"Manifold.patches",
"external container for registry",
19321,
"1.7",
[]
)
return obj
@property
def name(self):
return self.args[0]
@property
def dim(self):
return self.args[1]
class Patch(Atom):
"""A patch on a manifold.
Explanation
===========
    A coordinate patch, or patch for short, is a simply-connected open set around a point
in the manifold [1]. On a manifold one can have many patches that do not always
include the whole manifold. On these patches coordinate charts can be defined that
permit the parameterization of any point on the patch in terms of a tuple of
real numbers (the coordinates).
This class does not provide any means to study the topological
characteristics of the patch that it represents.
Parameters
==========
name : str
The name of the patch.
manifold : Manifold
The manifold on which the patch is defined.
Examples
========
>>> from sympy.diffgeom import Manifold, Patch
>>> m = Manifold('M', 2)
>>> p = Patch('P', m)
>>> p
P
>>> p.dim
2
References
==========
.. [1] G. Sussman, J. Wisdom, W. Farr, Functional Differential Geometry (2013)
"""
def __new__(cls, name, manifold, **kwargs):
if not isinstance(name, Str):
name = Str(name)
obj = super().__new__(cls, name, manifold)
obj.manifold.patches.append(obj) # deprecated
obj.coord_systems = _deprecated_list(
"Patch.coord_systems",
"external container for registry",
19321,
"1.7",
[]
)
return obj
@property
def name(self):
return self.args[0]
@property
def manifold(self):
return self.args[1]
@property
def dim(self):
return self.manifold.dim
class CoordSystem(Atom):
"""A coordinate system defined on the patch.
Explanation
===========
    A coordinate system is a system that uses one or more coordinates to uniquely
    determine the position of points or other geometric elements on a manifold [1].
    By passing Symbols to the *symbols* parameter, the user can define the names and
    assumptions of the coordinate symbols of the coordinate system. If not passed,
    these symbols are generated automatically and are assumed to be real valued.
    By passing the *relations* parameter, the user can define the transform relations
    of the coordinate systems. Inverse and indirect transformations can be found
    automatically. If this parameter is not passed, coordinate transformation cannot
    be done.
Parameters
==========
name : str
The name of the coordinate system.
patch : Patch
The patch where the coordinate system is defined.
symbols : list of Symbols, optional
Defines the names and assumptions of coordinate symbols.
relations : dict, optional
        - key : tuple of two strings, which are the names of the systems where
            the coordinates transform from and transform to.
- value : Lambda returning the transformed coordinates.
Examples
========
>>> from sympy import symbols, pi, Lambda, Matrix, sqrt, atan2, cos, sin
>>> from sympy.diffgeom import Manifold, Patch, CoordSystem
>>> m = Manifold('M', 2)
>>> p = Patch('P', m)
>>> x, y = symbols('x y', real=True)
>>> r, theta = symbols('r theta', nonnegative=True)
>>> relation_dict = {
... ('Car2D', 'Pol'): Lambda((x, y), Matrix([sqrt(x**2 + y**2), atan2(y, x)])),
... ('Pol', 'Car2D'): Lambda((r, theta), Matrix([r*cos(theta), r*sin(theta)]))
... }
>>> Car2D = CoordSystem('Car2D', p, [x, y], relation_dict)
>>> Pol = CoordSystem('Pol', p, [r, theta], relation_dict)
>>> Car2D
Car2D
>>> Car2D.dim
2
>>> Car2D.symbols
[x, y]
>>> Car2D.transformation(Pol)
Lambda((x, y), Matrix([
[sqrt(x**2 + y**2)],
[ atan2(y, x)]]))
>>> Car2D.transform(Pol)
Matrix([
[sqrt(x**2 + y**2)],
[ atan2(y, x)]])
>>> Car2D.transform(Pol, [1, 2])
Matrix([
[sqrt(5)],
[atan(2)]])
>>> Pol.jacobian(Car2D)
Matrix([
[cos(theta), -r*sin(theta)],
[sin(theta), r*cos(theta)]])
>>> Pol.jacobian(Car2D, [1, pi/2])
Matrix([
[0, -1],
[1, 0]])
References
==========
.. [1] https://en.wikipedia.org/wiki/Coordinate_system
"""
def __new__(cls, name, patch, symbols=None, relations={}, **kwargs):
if not isinstance(name, Str):
name = Str(name)
        # canonicalize the symbols
if symbols is None:
names = kwargs.get('names', None)
if names is None:
symbols = Tuple(
*[Symbol('%s_%s' % (name.name, i), real=True) for i in range(patch.dim)]
)
else:
SymPyDeprecationWarning(
feature="Class signature 'names' of CoordSystem",
useinstead="class signature 'symbols'",
issue=19321,
deprecated_since_version="1.7"
).warn()
symbols = Tuple(
*[Symbol(n, real=True) for n in names]
)
else:
syms = []
for s in symbols:
if isinstance(s, Symbol):
syms.append(Symbol(s.name, **s._assumptions.generator))
elif isinstance(s, str):
SymPyDeprecationWarning(
feature="Passing str as coordinate symbol's name",
useinstead="Symbol which contains the name and assumption for coordinate symbol",
issue=19321,
deprecated_since_version="1.7"
).warn()
syms.append(Symbol(s, real=True))
symbols = Tuple(*syms)
        # canonicalize the relations
rel_temp = {}
for k,v in relations.items():
s1, s2 = k
if not isinstance(s1, Str):
s1 = Str(s1)
if not isinstance(s2, Str):
s2 = Str(s2)
key = Tuple(s1, s2)
rel_temp[key] = v
relations = Dict(rel_temp)
# construct the object
obj = super().__new__(cls, name, patch, symbols, relations)
# Add deprecated attributes
obj.transforms = _deprecated_dict(
"Mutable CoordSystem.transforms",
"'relations' parameter in class signature",
19321,
"1.7",
{}
)
obj._names = [str(n) for n in symbols]
obj.patch.coord_systems.append(obj) # deprecated
obj._dummies = [Dummy(str(n)) for n in symbols] # deprecated
obj._dummy = Dummy()
return obj
@property
def name(self):
return self.args[0]
@property
def patch(self):
return self.args[1]
@property
def manifold(self):
return self.patch.manifold
@property
def symbols(self):
return [
CoordinateSymbol(
self, i, **s._assumptions.generator
) for i,s in enumerate(self.args[2])
]
@property
def relations(self):
return self.args[3]
@property
def dim(self):
return self.patch.dim
##########################################################################
# Finding transformation relation
##########################################################################
def transformation(self, sys):
"""
Return coordinate transform relation from *self* to *sys* as Lambda.
"""
if self.relations != sys.relations:
raise TypeError(
"Two coordinate systems have different relations")
key = Tuple(self.name, sys.name)
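        # try a registered direct relation first, then the inverse of a
        # registered relation, and finally a path through intermediate systems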
if key in self.relations:
return self.relations[key]
elif key[::-1] in self.relations:
return self._inverse_transformation(sys, self)
else:
return self._indirect_transformation(self, sys)
@staticmethod
def _inverse_transformation(sys1, sys2):
# Find the transformation relation from sys2 to sys1
forward_transform = sys1.transform(sys2)
forward_syms, forward_results = forward_transform.args
inv_syms = [i.as_dummy() for i in forward_syms]
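        # solve new_symbol - forward_expression = 0 for the original symbols
        # to obtain the inverse map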
inv_results = solve(
[t[0] - t[1] for t in zip(inv_syms, forward_results)],
list(forward_syms), dict=True)[0]
inv_results = [inv_results[s] for s in forward_syms]
signature = tuple(inv_syms)
expr = Matrix(inv_results)
return Lambda(signature, expr)
@classmethod
@cacheit
def _indirect_transformation(cls, sys1, sys2):
# Find the transformation relation between two indirectly connected coordinate systems
path = cls._dijkstra(sys1, sys2)
Lambdas = []
for i in range(len(path) - 1):
s1, s2 = path[i], path[i + 1]
Lambdas.append(s1.transformation(s2))
syms = Lambdas[-1].signature
expr = syms
for l in reversed(Lambdas):
expr = l(*expr)
return Lambda(syms, expr)
@staticmethod
def _dijkstra(sys1, sys2):
# Use Dijkstra algorithm to find the shortest path between two indirectly-connected
# coordinate systems
relations = sys1.relations
graph = {}
for s1, s2 in relations.keys():
if s1 not in graph:
graph[s1] = {s2}
else:
graph[s1].add(s2)
if s2 not in graph:
graph[s2] = {s1}
else:
graph[s2].add(s1)
path_dict = {sys:[0, [], 0] for sys in graph} # minimum distance, path, times of visited
def visit(sys):
path_dict[sys][2] = 1
for newsys in graph[sys]:
distance = path_dict[sys][0] + 1
if path_dict[newsys][0] >= distance or not path_dict[newsys][1]:
path_dict[newsys][0] = distance
path_dict[newsys][1] = [i for i in path_dict[sys][1]]
path_dict[newsys][1].append(sys)
visit(sys1)
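        # repeatedly visit the unvisited system with the smallest known
        # nonzero distance until none remains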
while True:
min_distance = max(path_dict.values(), key=lambda x:x[0])[0]
newsys = None
for sys, lst in path_dict.items():
if 0 < lst[0] <= min_distance and not lst[2]:
min_distance = lst[0]
newsys = sys
if newsys is None:
break
visit(newsys)
result = path_dict[sys2][1]
result.append(sys2)
if result == [sys2]:
raise KeyError("Two coordinate systems are not connected.")
return result
def connect_to(self, to_sys, from_coords, to_exprs, inverse=True, fill_in_gaps=False):
SymPyDeprecationWarning(
feature="CoordSystem.connect_to",
useinstead="new instance generated with new 'transforms' parameter",
issue=19321,
deprecated_since_version="1.7"
).warn()
from_coords, to_exprs = dummyfy(from_coords, to_exprs)
self.transforms[to_sys] = Matrix(from_coords), Matrix(to_exprs)
if inverse:
to_sys.transforms[self] = self._inv_transf(from_coords, to_exprs)
if fill_in_gaps:
self._fill_gaps_in_transformations()
@staticmethod
def _inv_transf(from_coords, to_exprs):
# Will be removed when connect_to is removed
inv_from = [i.as_dummy() for i in from_coords]
inv_to = solve(
[t[0] - t[1] for t in zip(inv_from, to_exprs)],
list(from_coords), dict=True)[0]
inv_to = [inv_to[fc] for fc in from_coords]
return Matrix(inv_from), Matrix(inv_to)
@staticmethod
def _fill_gaps_in_transformations():
# Will be removed when connect_to is removed
raise NotImplementedError
##########################################################################
# Coordinate transformations
##########################################################################
def transform(self, sys, coordinates=None):
"""
Return the result of coordinate transformation from *self* to *sys*.
If coordinates are not given, coordinate symbols of *self* are used.
"""
if coordinates is None:
coordinates = Matrix(self.symbols)
else:
coordinates = Matrix(coordinates)
if self != sys:
transf = self.transformation(sys)
coordinates = transf(*coordinates)
return coordinates
def coord_tuple_transform_to(self, to_sys, coords):
"""Transform ``coords`` to coord system ``to_sys``."""
SymPyDeprecationWarning(
feature="CoordSystem.coord_tuple_transform_to",
useinstead="CoordSystem.transform",
issue=19321,
deprecated_since_version="1.7"
).warn()
coords = Matrix(coords)
if self != to_sys:
transf = self.transforms[to_sys]
coords = transf[1].subs(list(zip(transf[0], coords)))
return coords
def jacobian(self, sys, coordinates=None):
"""
Return the jacobian matrix of a transformation.
"""
result = self.transform(sys).jacobian(self.symbols)
if coordinates is not None:
result = result.subs(list(zip(self.symbols, coordinates)))
return result
jacobian_matrix = jacobian
def jacobian_determinant(self, sys, coordinates=None):
"""Return the jacobian determinant of a transformation."""
return self.jacobian(sys, coordinates).det()
##########################################################################
# Points
##########################################################################
def point(self, coords):
"""Create a ``Point`` with coordinates given in this coord system."""
return Point(self, coords)
def point_to_coords(self, point):
"""Calculate the coordinates of a point in this coord system."""
return point.coords(self)
##########################################################################
# Base fields.
##########################################################################
def base_scalar(self, coord_index):
"""Return ``BaseScalarField`` that takes a point and returns one of the coordinates."""
return BaseScalarField(self, coord_index)
coord_function = base_scalar
def base_scalars(self):
"""Returns a list of all coordinate functions.
For more details see the ``base_scalar`` method of this class."""
return [self.base_scalar(i) for i in range(self.dim)]
coord_functions = base_scalars
def base_vector(self, coord_index):
"""Return a basis vector field.
The basis vector field for this coordinate system. It is also an
operator on scalar fields."""
return BaseVectorField(self, coord_index)
def base_vectors(self):
"""Returns a list of all base vectors.
For more details see the ``base_vector`` method of this class."""
return [self.base_vector(i) for i in range(self.dim)]
def base_oneform(self, coord_index):
"""Return a basis 1-form field.
The basis one-form field for this coordinate system. It is also an
operator on vector fields."""
return Differential(self.coord_function(coord_index))
def base_oneforms(self):
"""Returns a list of all base oneforms.
For more details see the ``base_oneform`` method of this class."""
return [self.base_oneform(i) for i in range(self.dim)]
class CoordinateSymbol(Symbol):
"""A symbol which denotes an abstract value of i-th coordinate of
the coordinate system with given context.
Explanation
===========
    Each coordinate in a coordinate system is represented by a unique symbol,
    such as x, y, z in the Cartesian coordinate system.
    You may not construct this class directly. Instead, use the `symbols` method
    of CoordSystem.
Parameters
==========
coord_sys : CoordSystem
index : integer
Examples
========
>>> from sympy import symbols
>>> from sympy.diffgeom import Manifold, Patch, CoordSystem
>>> m = Manifold('M', 2)
>>> p = Patch('P', m)
>>> _x, _y = symbols('x y', nonnegative=True)
>>> C = CoordSystem('C', p, [_x, _y])
>>> x, y = C.symbols
>>> x.name
'x'
>>> x.coord_sys == C
True
>>> x.index
0
>>> x.is_nonnegative
True
"""
def __new__(cls, coord_sys, index, **assumptions):
name = coord_sys.args[2][index].name
obj = super().__new__(cls, name, **assumptions)
obj.coord_sys = coord_sys
obj.index = index
return obj
def __getnewargs__(self):
return (self.coord_sys, self.index)
def _hashable_content(self):
return (
self.coord_sys, self.index
) + tuple(sorted(self.assumptions0.items()))
class Point(Basic):
"""Point defined in a coordinate system.
Explanation
===========
    Mathematically, a point is defined in the manifold and does not have any
    coordinates by itself. A coordinate system is what imbues the point with
    coordinates, via a coordinate chart. However, due to the difficulty of
    realizing such logic, you must supply a coordinate system and coordinates
    to define a Point here.
The usage of this object after its definition is independent of the
coordinate system that was used in order to define it, however due to
limitations in the simplification routines you can arrive at complicated
expressions if you use inappropriate coordinate systems.
Parameters
==========
coord_sys : CoordSystem
coords : list
The coordinates of the point.
Examples
========
>>> from sympy import pi
>>> from sympy.diffgeom import Point
>>> from sympy.diffgeom.rn import R2, R2_r, R2_p
>>> rho, theta = R2_p.symbols
>>> p = Point(R2_p, [rho, 3*pi/4])
>>> p.manifold == R2
True
>>> p.coords()
Matrix([
[ rho],
[3*pi/4]])
>>> p.coords(R2_r)
Matrix([
[-sqrt(2)*rho/2],
[ sqrt(2)*rho/2]])
"""
def __new__(cls, coord_sys, coords, **kwargs):
coords = Matrix(coords)
obj = super().__new__(cls, coord_sys, coords)
obj._coord_sys = coord_sys
obj._coords = coords
return obj
@property
def patch(self):
return self._coord_sys.patch
@property
def manifold(self):
return self._coord_sys.manifold
@property
def dim(self):
return self.manifold.dim
def coords(self, sys=None):
"""
        Coordinates of the point in the given coordinate system. If no
        coordinate system is passed, the coordinates in the coordinate system
        in which the point was defined are returned.
"""
if sys is None:
return self._coords
else:
return self._coord_sys.transform(sys, self._coords)
@property
def free_symbols(self):
return self._coords.free_symbols
class BaseScalarField(Expr):
"""Base scalar field over a manifold for a given coordinate system.
Explanation
===========
A scalar field takes a point as an argument and returns a scalar.
A base scalar field of a coordinate system takes a point and returns one of
the coordinates of that point in the coordinate system in question.
To define a scalar field you need to choose the coordinate system and the
index of the coordinate.
The use of the scalar field after its definition is independent of the
coordinate system in which it was defined, however due to limitations in
the simplification routines you may arrive at more complicated
    expressions if you use inappropriate coordinate systems.
You can build complicated scalar fields by just building up SymPy
expressions containing ``BaseScalarField`` instances.
Parameters
==========
coord_sys : CoordSystem
index : integer
Examples
========
>>> from sympy import Function, pi
>>> from sympy.diffgeom import BaseScalarField
>>> from sympy.diffgeom.rn import R2_r, R2_p
>>> rho, _ = R2_p.symbols
>>> point = R2_p.point([rho, 0])
>>> fx, fy = R2_r.base_scalars()
>>> ftheta = BaseScalarField(R2_r, 1)
>>> fx(point)
rho
>>> fy(point)
0
>>> (fx**2+fy**2).rcall(point)
rho**2
>>> g = Function('g')
>>> fg = g(ftheta-pi)
>>> fg.rcall(point)
g(-pi)
"""
is_commutative = True
def __new__(cls, coord_sys, index, **kwargs):
index = _sympify(index)
obj = super().__new__(cls, coord_sys, index)
obj._coord_sys = coord_sys
obj._index = index
return obj
@property
def coord_sys(self):
return self.args[0]
@property
def index(self):
return self.args[1]
@property
def patch(self):
return self.coord_sys.patch
@property
def manifold(self):
return self.coord_sys.manifold
@property
def dim(self):
return self.manifold.dim
def __call__(self, *args):
"""Evaluating the field at a point or doing nothing.
If the argument is a ``Point`` instance, the field is evaluated at that
        point. The field itself is returned if the argument is any other
        object. This is done in order to have working recursive calling mechanics
for all fields (check the ``__call__`` method of ``Expr``).
"""
point = args[0]
if len(args) != 1 or not isinstance(point, Point):
return self
coords = point.coords(self._coord_sys)
# XXX Calling doit is necessary with all the Subs expressions
# XXX Calling simplify is necessary with all the trig expressions
return simplify(coords[self._index]).doit()
# XXX Workaround for limitations on the content of args
free_symbols = set() # type: Set[Any]
def doit(self):
return self
class BaseVectorField(Expr):
r"""Base vector field over a manifold for a given coordinate system.
Explanation
===========
A vector field is an operator taking a scalar field and returning a
directional derivative (which is also a scalar field).
A base vector field is the same type of operator, however the derivation is
specifically done with respect to a chosen coordinate.
To define a base vector field you need to choose the coordinate system and
the index of the coordinate.
The use of the vector field after its definition is independent of the
coordinate system in which it was defined, however due to limitations in the
    simplification routines you may arrive at more complicated expressions if you
    use inappropriate coordinate systems.
Parameters
==========
coord_sys : CoordSystem
index : integer
Examples
========
>>> from sympy import Function
>>> from sympy.diffgeom.rn import R2_p, R2_r
>>> from sympy.diffgeom import BaseVectorField
>>> from sympy import pprint
>>> x, y = R2_r.symbols
>>> rho, theta = R2_p.symbols
>>> fx, fy = R2_r.base_scalars()
>>> point_p = R2_p.point([rho, theta])
>>> point_r = R2_r.point([x, y])
>>> g = Function('g')
>>> s_field = g(fx, fy)
>>> v = BaseVectorField(R2_r, 1)
>>> pprint(v(s_field))
/ d \|
|---(g(x, xi))||
\dxi /|xi=y
>>> pprint(v(s_field).rcall(point_r).doit())
d
--(g(x, y))
dy
>>> pprint(v(s_field).rcall(point_p))
/ d \|
|---(g(rho*cos(theta), xi))||
\dxi /|xi=rho*sin(theta)
"""
is_commutative = False
def __new__(cls, coord_sys, index, **kwargs):
index = _sympify(index)
obj = super().__new__(cls, coord_sys, index)
obj._coord_sys = coord_sys
obj._index = index
return obj
@property
def coord_sys(self):
return self.args[0]
@property
def index(self):
return self.args[1]
@property
def patch(self):
return self.coord_sys.patch
@property
def manifold(self):
return self.coord_sys.manifold
@property
def dim(self):
return self.manifold.dim
def __call__(self, scalar_field):
"""Apply on a scalar field.
The action of a vector field on a scalar field is a directional
differentiation.
If the argument is not a scalar field an error is raised.
"""
if covariant_order(scalar_field) or contravariant_order(scalar_field):
raise ValueError('Only scalar fields can be supplied as arguments to vector fields.')
if scalar_field is None:
return self
base_scalars = list(scalar_field.atoms(BaseScalarField))
# First step: e_x(x+r**2) -> e_x(x) + 2*r*e_x(r)
d_var = self._coord_sys._dummy
# TODO: you need a real dummy function for the next line
d_funcs = [Function('_#_%s' % i)(d_var) for i,
b in enumerate(base_scalars)]
d_result = scalar_field.subs(list(zip(base_scalars, d_funcs)))
d_result = d_result.diff(d_var)
# Second step: e_x(x) -> 1 and e_x(r) -> cos(atan2(x, y))
coords = self._coord_sys.symbols
d_funcs_deriv = [f.diff(d_var) for f in d_funcs]
d_funcs_deriv_sub = []
for b in base_scalars:
jac = self._coord_sys.jacobian(b._coord_sys, coords)
d_funcs_deriv_sub.append(jac[b._index, self._index])
d_result = d_result.subs(list(zip(d_funcs_deriv, d_funcs_deriv_sub)))
# Remove the dummies
result = d_result.subs(list(zip(d_funcs, base_scalars)))
result = result.subs(list(zip(coords, self._coord_sys.coord_functions())))
return result.doit()
def _find_coords(expr):
# Finds CoordinateSystems existing in expr
fields = expr.atoms(BaseScalarField, BaseVectorField)
result = set()
for f in fields:
result.add(f._coord_sys)
return result
class Commutator(Expr):
r"""Commutator of two vector fields.
Explanation
===========
The commutator of two vector fields `v_1` and `v_2` is defined as the
vector field `[v_1, v_2]` that evaluated on each scalar field `f` is equal
to `v_1(v_2(f)) - v_2(v_1(f))`.
Examples
========
>>> from sympy.diffgeom.rn import R2_p, R2_r
>>> from sympy.diffgeom import Commutator
>>> from sympy.simplify import simplify
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> e_r = R2_p.base_vector(0)
>>> c_xy = Commutator(e_x, e_y)
>>> c_xr = Commutator(e_x, e_r)
>>> c_xy
0
Unfortunately, the current code is not able to compute everything:
>>> c_xr
Commutator(e_x, e_rho)
>>> simplify(c_xr(fy**2))
-2*cos(theta)*y**2/(x**2 + y**2)
"""
def __new__(cls, v1, v2):
if (covariant_order(v1) or contravariant_order(v1) != 1
or covariant_order(v2) or contravariant_order(v2) != 1):
raise ValueError(
'Only commutators of vector fields are supported.')
if v1 == v2:
return S.Zero
coord_sys = set().union(*[_find_coords(v) for v in (v1, v2)])
if len(coord_sys) == 1:
# Only one coordinate system is used, hence it is easy enough to
# actually evaluate the commutator.
if all(isinstance(v, BaseVectorField) for v in (v1, v2)):
return S.Zero
bases_1, bases_2 = [list(v.atoms(BaseVectorField))
for v in (v1, v2)]
coeffs_1 = [v1.expand().coeff(b) for b in bases_1]
coeffs_2 = [v2.expand().coeff(b) for b in bases_2]
res = 0
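# In a single coordinate basis the base vectors commute, so for
# v1 = sum_i c1_i*b1_i and v2 = sum_j c2_j*b2_j the commutator reduces to
# [v1, v2] = sum_{i,j} (c1_i*b1_i(c2_j)*b2_j - c2_j*b2_j(c1_i)*b1_i):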
for c1, b1 in zip(coeffs_1, bases_1):
for c2, b2 in zip(coeffs_2, bases_2):
res += c1*b1(c2)*b2 - c2*b2(c1)*b1
return res
else:
obj = super().__new__(cls, v1, v2)
obj._v1 = v1 # deprecated assignment
obj._v2 = v2 # deprecated assignment
return obj
@property
def v1(self):
return self.args[0]
@property
def v2(self):
return self.args[1]
def __call__(self, scalar_field):
"""Apply on a scalar field.
If the argument is not a scalar field an error is raised.
"""
return self.v1(self.v2(scalar_field)) - self.v2(self.v1(scalar_field))
class Differential(Expr):
r"""Return the differential (exterior derivative) of a form field.
Explanation
===========
The differential of a form (i.e. the exterior derivative) has a complicated
definition in the general case.
The differential `df` of the 0-form `f` is defined for any vector field `v`
as `df(v) = v(f)`.
Examples
========
>>> from sympy import Function
>>> from sympy.diffgeom.rn import R2_r
>>> from sympy.diffgeom import Differential
>>> from sympy import pprint
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> g = Function('g')
>>> s_field = g(fx, fy)
>>> dg = Differential(s_field)
>>> dg
d(g(x, y))
>>> pprint(dg(e_x))
/ d \|
|---(g(xi, y))||
\dxi /|xi=x
>>> pprint(dg(e_y))
/ d \|
|---(g(x, xi))||
\dxi /|xi=y
Applying the exterior derivative operator twice always results in zero:
>>> Differential(dg)
0
"""
is_commutative = False
def __new__(cls, form_field):
if contravariant_order(form_field):
raise ValueError(
'A vector field was supplied as an argument to Differential.')
if isinstance(form_field, Differential):
return S.Zero
else:
obj = super().__new__(cls, form_field)
obj._form_field = form_field # deprecated assignment
return obj
@property
def form_field(self):
return self.args[0]
def __call__(self, *vector_fields):
"""Apply on a list of vector_fields.
Explanation
===========
If the number of vector fields supplied is not equal to 1 + the order of
the form field inside the differential the result is undefined.
For 1-forms (i.e. differentials of scalar fields) the evaluation is
done as `df(v)=v(f)`. However if `v` is ``None`` instead of a vector
field, the differential is returned unchanged. This is done in order to
permit partial contractions for higher forms.
In the general case the evaluation is done by applying the form field
inside the differential on a list with one element fewer than the number
of elements in the original list. Lowering the number of vector fields
is achieved through replacing each pair of fields by their
commutator.
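Explicitly, this is the invariant formula cited in the implementation
(``no v_i`` meaning that the argument `v_i` is omitted):
`df(v_1, ... v_n) = sum_i (-1)**(i+1) v_i(f(v_1, .. no v_i .., v_n))
+ sum_(i<j) (-1)**(i+j) f([v_i, v_j], v_1, .. no v_i, no v_j .., v_n)`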
If the arguments are not vectors or ``None``s an error is raised.
"""
if any((contravariant_order(a) != 1 or covariant_order(a)) and a is not None
for a in vector_fields):
raise ValueError('The arguments supplied to Differential should be vector fields or Nones.')
k = len(vector_fields)
if k == 1:
if vector_fields[0]:
return vector_fields[0].rcall(self._form_field)
return self
else:
# For higher forms it is more complicated:
# Invariant formula:
# https://en.wikipedia.org/wiki/Exterior_derivative#Invariant_formula
# df(v1, ... vn) = +/- vi(f(v1..no i..vn))
# +/- f([vi,vj],v1..no i, no j..vn)
f = self._form_field
v = vector_fields
ret = 0
for i in range(k):
t = v[i].rcall(f.rcall(*v[:i] + v[i + 1:]))
ret += (-1)**i*t
for j in range(i + 1, k):
c = Commutator(v[i], v[j])
if c: # TODO this is ugly - the Commutator can be Zero and
# this causes the next line to fail
t = f.rcall(*(c,) + v[:i] + v[i + 1:j] + v[j + 1:])
ret += (-1)**(i + j)*t
return ret
class TensorProduct(Expr):
"""Tensor product of forms.
Explanation
===========
The tensor product permits the creation of multilinear functionals (i.e.
higher order tensors) out of lower order fields (e.g. 1-forms and vector
fields). However, the higher tensors thus created lack the interesting
features provided by the other type of product, the wedge product, namely
they are not antisymmetric and hence are not form fields.
Examples
========
>>> from sympy.diffgeom.rn import R2_r
>>> from sympy.diffgeom import TensorProduct
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> dx, dy = R2_r.base_oneforms()
>>> TensorProduct(dx, dy)(e_x, e_y)
1
>>> TensorProduct(dx, dy)(e_y, e_x)
0
>>> TensorProduct(dx, fx*dy)(fx*e_x, e_y)
x**2
>>> TensorProduct(e_x, e_y)(fx**2, fy**2)
4*x*y
>>> TensorProduct(e_y, dx)(fy)
dx
You can nest tensor products.
>>> tp1 = TensorProduct(dx, dy)
>>> TensorProduct(tp1, dx)(e_x, e_y, e_x)
1
You can make partial contraction for instance when 'raising an index'.
Putting ``None`` in the second argument of ``rcall`` means that the
respective position in the tensor product is left as it is.
>>> TP = TensorProduct
>>> metric = TP(dx, dx) + 3*TP(dy, dy)
>>> metric.rcall(e_y, None)
3*dy
Or automatically pad the args with ``None`` without specifying them.
>>> metric.rcall(e_y)
3*dy
"""
def __new__(cls, *args):
scalar = Mul(*[m for m in args if covariant_order(m) + contravariant_order(m) == 0])
multifields = [m for m in args if covariant_order(m) + contravariant_order(m)]
if multifields:
if len(multifields) == 1:
return scalar*multifields[0]
return scalar*super().__new__(cls, *multifields)
else:
return scalar
def __call__(self, *fields):
"""Apply on a list of fields.
If the number of input fields supplied is not equal to the order of
the tensor product field, the list of arguments is padded with ``None``'s.
The list of arguments is divided in sublists depending on the order of
the forms inside the tensor product. The sublists are provided as
arguments to these forms and the resulting expressions are given to the
constructor of ``TensorProduct``.
"""
tot_order = covariant_order(self) + contravariant_order(self)
tot_args = len(fields)
if tot_args != tot_order:
fields = list(fields) + [None]*(tot_order - tot_args)
orders = [covariant_order(f) + contravariant_order(f) for f in self._args]
indices = [sum(orders[:i + 1]) for i in range(len(orders) - 1)]
fields = [fields[i:j] for i, j in zip([0] + indices, indices + [None])]
multipliers = [t[0].rcall(*t[1]) for t in zip(self._args, fields)]
return TensorProduct(*multipliers)
class WedgeProduct(TensorProduct):
"""Wedge product of forms.
Explanation
===========
In the context of integration only completely antisymmetric forms make
sense. The wedge product permits the creation of such forms.
Examples
========
>>> from sympy.diffgeom.rn import R2_r
>>> from sympy.diffgeom import WedgeProduct
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> dx, dy = R2_r.base_oneforms()
>>> WedgeProduct(dx, dy)(e_x, e_y)
1
>>> WedgeProduct(dx, dy)(e_y, e_x)
-1
>>> WedgeProduct(dx, fx*dy)(fx*e_x, e_y)
x**2
>>> WedgeProduct(e_x, e_y)(fy, None)
-e_x
You can nest wedge products.
>>> wp1 = WedgeProduct(dx, dy)
>>> WedgeProduct(wp1, dx)(e_x, e_y, e_x)
0
"""
# TODO the calculation of signatures is slow
# TODO you do not need all these permutations (neither the prefactor)
def __call__(self, *fields):
"""Apply on a list of vector_fields.
The expression is rewritten internally in terms of tensor products and evaluated."""
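# The wedge product is the antisymmetrized tensor product: sum the underlying
# tensor product over all permutations of the supplied fields, weighted by the
# signature of each permutation, and normalize by the product of the
# factorials of the orders of the factors.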
orders = (covariant_order(e) + contravariant_order(e) for e in self.args)
mul = 1/Mul(*(factorial(o) for o in orders))
perms = permutations(fields)
perms_par = (Permutation(
p).signature() for p in permutations(list(range(len(fields)))))
tensor_prod = TensorProduct(*self.args)
return mul*Add(*[tensor_prod(*p[0])*p[1] for p in zip(perms, perms_par)])
class LieDerivative(Expr):
"""Lie derivative with respect to a vector field.
Explanation
===========
The transport operator that defines the Lie derivative is the pushforward of
the field to be derived along the integral curve of the field with respect
to which one derives.
Examples
========
>>> from sympy.diffgeom.rn import R2_r, R2_p
>>> from sympy.diffgeom import (LieDerivative, TensorProduct)
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> e_rho, e_theta = R2_p.base_vectors()
>>> dx, dy = R2_r.base_oneforms()
>>> LieDerivative(e_x, fy)
0
>>> LieDerivative(e_x, fx)
1
>>> LieDerivative(e_x, e_x)
0
The Lie derivative of a vector field with respect to another vector field is
equal to their commutator:
>>> LieDerivative(e_x, e_rho)
Commutator(e_x, e_rho)
>>> LieDerivative(e_x + e_y, fx)
1
>>> tp = TensorProduct(dx, dy)
>>> LieDerivative(e_x, tp)
LieDerivative(e_x, TensorProduct(dx, dy))
"""
def __new__(cls, v_field, expr):
expr_form_ord = covariant_order(expr)
if contravariant_order(v_field) != 1 or covariant_order(v_field):
raise ValueError('Lie derivatives are defined only with respect to'
' vector fields. The supplied argument was not a '
'vector field.')
if expr_form_ord > 0:
obj = super().__new__(cls, v_field, expr)
# deprecated assignments
obj._v_field = v_field
obj._expr = expr
return obj
if expr.atoms(BaseVectorField):
return Commutator(v_field, expr)
else:
return v_field.rcall(expr)
@property
def v_field(self):
return self.args[0]
@property
def expr(self):
return self.args[1]
def __call__(self, *args):
v = self.v_field
expr = self.expr
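# The Lie derivative of a tensor obeys the Leibniz-type rule
# (L_v T)(a_1, ..., a_n) = v(T(a_1, ..., a_n)) - sum_i T(a_1, ..., [v, a_i], ..., a_n);
# ``lead_term`` below is the first term and ``rest`` collects the commutator terms.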
lead_term = v(expr(*args))
rest = Add(*[Mul(*args[:i] + (Commutator(v, args[i]),) + args[i + 1:])
for i in range(len(args))])
return lead_term - rest
class BaseCovarDerivativeOp(Expr):
"""Covariant derivative operator with respect to a base vector.
Examples
========
>>> from sympy.diffgeom.rn import R2_r
>>> from sympy.diffgeom import BaseCovarDerivativeOp
>>> from sympy.diffgeom import metric_to_Christoffel_2nd, TensorProduct
>>> TP = TensorProduct
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> dx, dy = R2_r.base_oneforms()
>>> ch = metric_to_Christoffel_2nd(TP(dx, dx) + TP(dy, dy))
>>> ch
[[[0, 0], [0, 0]], [[0, 0], [0, 0]]]
>>> cvd = BaseCovarDerivativeOp(R2_r, 0, ch)
>>> cvd(fx)
1
>>> cvd(fx*e_x)
e_x
"""
def __new__(cls, coord_sys, index, christoffel):
index = _sympify(index)
christoffel = ImmutableDenseNDimArray(christoffel)
obj = super().__new__(cls, coord_sys, index, christoffel)
# deprecated assignments
obj._coord_sys = coord_sys
obj._index = index
obj._christoffel = christoffel
return obj
@property
def coord_sys(self):
return self.args[0]
@property
def index(self):
return self.args[1]
@property
def christoffel(self):
return self.args[2]
def __call__(self, field):
"""Apply on a scalar field.
The action of a vector field on a scalar field is a directional
differentiation.
If the argument is not a scalar field the behaviour is undefined.
"""
if covariant_order(field) != 0:
raise NotImplementedError()
field = vectors_in_basis(field, self._coord_sys)
wrt_vector = self._coord_sys.base_vector(self._index)
wrt_scalar = self._coord_sys.coord_function(self._index)
vectors = list(field.atoms(BaseVectorField))
# First step: replace all vectors with something susceptible to
# derivation and do the derivation
# TODO: you need a real dummy function for the next line
d_funcs = [Function('_#_%s' % i)(wrt_scalar) for i,
b in enumerate(vectors)]
d_result = field.subs(list(zip(vectors, d_funcs)))
d_result = wrt_vector(d_result)
# Second step: backsubstitute the vectors in
d_result = d_result.subs(list(zip(d_funcs, vectors)))
# Third step: evaluate the derivatives of the vectors
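# The covariant derivative of a base vector along the differentiation
# direction is given by the Christoffel symbols of the second kind:
# nabla_{e_i} e_j = Gamma^k_{i j} e_k.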
derivs = []
for v in vectors:
d = Add(*[(self._christoffel[k, wrt_vector._index, v._index]
*v._coord_sys.base_vector(k))
for k in range(v._coord_sys.dim)])
derivs.append(d)
to_subs = [wrt_vector(d) for d in d_funcs]
# XXX: This substitution can fail when there are Dummy symbols and the
# cache is disabled: https://github.com/sympy/sympy/issues/17794
result = d_result.subs(list(zip(to_subs, derivs)))
# Remove the dummies
result = result.subs(list(zip(d_funcs, vectors)))
return result.doit()
class CovarDerivativeOp(Expr):
"""Covariant derivative operator.
Examples
========
>>> from sympy.diffgeom.rn import R2_r
>>> from sympy.diffgeom import CovarDerivativeOp
>>> from sympy.diffgeom import metric_to_Christoffel_2nd, TensorProduct
>>> TP = TensorProduct
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> dx, dy = R2_r.base_oneforms()
>>> ch = metric_to_Christoffel_2nd(TP(dx, dx) + TP(dy, dy))
>>> ch
[[[0, 0], [0, 0]], [[0, 0], [0, 0]]]
>>> cvd = CovarDerivativeOp(fx*e_x, ch)
>>> cvd(fx)
x
>>> cvd(fx*e_x)
x*e_x
"""
def __new__(cls, wrt, christoffel):
if len({v._coord_sys for v in wrt.atoms(BaseVectorField)}) > 1:
raise NotImplementedError()
if contravariant_order(wrt) != 1 or covariant_order(wrt):
raise ValueError('Covariant derivatives are defined only with '
'respect to vector fields. The supplied argument '
'was not a vector field.')
obj = super().__new__(cls, wrt, christoffel)
# deprecated assignments
obj._wrt = wrt
obj._christoffel = christoffel
return obj
@property
def wrt(self):
return self.args[0]
@property
def christoffel(self):
return self.args[1]
def __call__(self, field):
vectors = list(self._wrt.atoms(BaseVectorField))
base_ops = [BaseCovarDerivativeOp(v._coord_sys, v._index, self._christoffel)
for v in vectors]
return self._wrt.subs(list(zip(vectors, base_ops))).rcall(field)
###############################################################################
# Integral curves on vector fields
###############################################################################
def intcurve_series(vector_field, param, start_point, n=6, coord_sys=None, coeffs=False):
r"""Return the series expansion for an integral curve of the field.
Explanation
===========
Integral curve is a function `\gamma` taking a parameter in `R` to a point
in the manifold. It verifies the equation:
`V(f)\big(\gamma(t)\big) = \frac{d}{dt}f\big(\gamma(t)\big)`
where the given ``vector_field`` is denoted as `V`. This holds for any
value `t` for the parameter and any scalar field `f`.
This equation can also be decomposed in terms of a basis of coordinate functions:
`V(f_i)\big(\gamma(t)\big) = \frac{d}{dt}f_i\big(\gamma(t)\big) \quad \forall i`
This function returns a series expansion of `\gamma(t)` in terms of the
coordinate system ``coord_sys``. The equations and expansions are necessarily
done in a coordinate-system-dependent way as there is no other way to
represent movement between points on the manifold (i.e. there is no such
thing as a difference of points for a general manifold).
Parameters
==========
vector_field
the vector field for which an integral curve will be given
param
the argument of the function `\gamma` from R to the curve
start_point
the point which corresponds to `\gamma(0)`
n
the order to which to expand
coord_sys
the coordinate system in which to expand
coeffs (default False)
if True return a list of elements of the expansion
Examples
========
Use the predefined R2 manifold:
>>> from sympy.abc import t, x, y
>>> from sympy.diffgeom.rn import R2_p, R2_r
>>> from sympy.diffgeom import intcurve_series
Specify a starting point and a vector field:
>>> start_point = R2_r.point([x, y])
>>> vector_field = R2_r.e_x
Calculate the series:
>>> intcurve_series(vector_field, t, start_point, n=3)
Matrix([
[t + x],
[ y]])
Or get the elements of the expansion in a list:
>>> series = intcurve_series(vector_field, t, start_point, n=3, coeffs=True)
>>> series[0]
Matrix([
[x],
[y]])
>>> series[1]
Matrix([
[t],
[0]])
>>> series[2]
Matrix([
[0],
[0]])
The series in the polar coordinate system:
>>> series = intcurve_series(vector_field, t, start_point,
... n=3, coord_sys=R2_p, coeffs=True)
>>> series[0]
Matrix([
[sqrt(x**2 + y**2)],
[ atan2(y, x)]])
>>> series[1]
Matrix([
[t*x/sqrt(x**2 + y**2)],
[ -t*y/(x**2 + y**2)]])
>>> series[2]
Matrix([
[t**2*(-x**2/(x**2 + y**2)**(3/2) + 1/sqrt(x**2 + y**2))/2],
[ t**2*x*y/(x**2 + y**2)**2]])
See Also
========
intcurve_diffequ
"""
if contravariant_order(vector_field) != 1 or covariant_order(vector_field):
raise ValueError('The supplied field was not a vector field.')
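# Iterating the defining equation V(f)(gamma(t)) = d/dt f(gamma(t)) shows that
# the n-th Taylor coefficient of f_i(gamma(t)) at t=0 is V^n(f_i) evaluated at
# the start point, i.e. gamma_i(t) ~ sum_n t**n/n! * V^n(f_i)(start_point).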
def iter_vfield(scalar_field, i):
"""Return ``vector_field`` called `i` times on ``scalar_field``."""
return reduce(lambda s, v: v.rcall(s), [vector_field, ]*i, scalar_field)
def taylor_terms_per_coord(coord_function):
"""Return the series for one of the coordinates."""
return [param**i*iter_vfield(coord_function, i).rcall(start_point)/factorial(i)
for i in range(n)]
coord_sys = coord_sys if coord_sys else start_point._coord_sys
coord_functions = coord_sys.coord_functions()
taylor_terms = [taylor_terms_per_coord(f) for f in coord_functions]
if coeffs:
return [Matrix(t) for t in zip(*taylor_terms)]
else:
return Matrix([sum(c) for c in taylor_terms])
def intcurve_diffequ(vector_field, param, start_point, coord_sys=None):
r"""Return the differential equation for an integral curve of the field.
Explanation
===========
Integral curve is a function `\gamma` taking a parameter in `R` to a point
in the manifold. It verifies the equation:
`V(f)\big(\gamma(t)\big) = \frac{d}{dt}f\big(\gamma(t)\big)`
where the given ``vector_field`` is denoted as `V`. This holds for any
value `t` for the parameter and any scalar field `f`.
This function returns the differential equation of `\gamma(t)` in terms of the
coordinate system ``coord_sys``. The equations and expansions are necessarily
done in a coordinate-system-dependent way as there is no other way to
represent movement between points on the manifold (i.e. there is no such
thing as a difference of points for a general manifold).
Parameters
==========
vector_field
the vector field for which an integral curve will be given
param
the argument of the function `\gamma` from R to the curve
start_point
the point which corresponds to `\gamma(0)`
coord_sys
the coordinate system in which to give the equations
Returns
=======
a tuple of (equations, initial conditions)
Examples
========
Use the predefined R2 manifold:
>>> from sympy.abc import t
>>> from sympy.diffgeom.rn import R2, R2_p, R2_r
>>> from sympy.diffgeom import intcurve_diffequ
Specify a starting point and a vector field:
>>> start_point = R2_r.point([0, 1])
>>> vector_field = -R2.y*R2.e_x + R2.x*R2.e_y
Get the equation:
>>> equations, init_cond = intcurve_diffequ(vector_field, t, start_point)
>>> equations
[f_1(t) + Derivative(f_0(t), t), -f_0(t) + Derivative(f_1(t), t)]
>>> init_cond
[f_0(0), f_1(0) - 1]
The series in the polar coordinate system:
>>> equations, init_cond = intcurve_diffequ(vector_field, t, start_point, R2_p)
>>> equations
[Derivative(f_0(t), t), Derivative(f_1(t), t) - 1]
>>> init_cond
[f_0(0) - 1, f_1(0) - pi/2]
See Also
========
intcurve_series
"""
if contravariant_order(vector_field) != 1 or covariant_order(vector_field):
raise ValueError('The supplied field was not a vector field.')
coord_sys = coord_sys if coord_sys else start_point._coord_sys
gammas = [Function('f_%d' % i)(param) for i in range(
start_point._coord_sys.dim)]
arbitrary_p = Point(coord_sys, gammas)
coord_functions = coord_sys.coord_functions()
equations = [simplify(diff(cf.rcall(arbitrary_p), param) - vector_field.rcall(cf).rcall(arbitrary_p))
for cf in coord_functions]
init_cond = [simplify(cf.rcall(arbitrary_p).subs(param, 0) - cf.rcall(start_point))
for cf in coord_functions]
return equations, init_cond
###############################################################################
# Helpers
###############################################################################
def dummyfy(args, exprs):
# TODO Is this a good idea?
d_args = Matrix([s.as_dummy() for s in args])
reps = dict(zip(args, d_args))
d_exprs = Matrix([_sympify(expr).subs(reps) for expr in exprs])
return d_args, d_exprs
###############################################################################
# Helpers
###############################################################################
def contravariant_order(expr, _strict=False):
"""Return the contravariant order of an expression.
Examples
========
>>> from sympy.diffgeom import contravariant_order
>>> from sympy.diffgeom.rn import R2
>>> from sympy.abc import a
>>> contravariant_order(a)
0
>>> contravariant_order(a*R2.x + 2)
0
>>> contravariant_order(a*R2.x*R2.e_y + R2.e_x)
1
"""
# TODO move some of this to class methods.
# TODO rewrite using the .as_blah_blah methods
if isinstance(expr, Add):
orders = [contravariant_order(e) for e in expr.args]
if len(set(orders)) != 1:
raise ValueError('Misformed expression containing contravariant fields of varying order.')
return orders[0]
elif isinstance(expr, Mul):
orders = [contravariant_order(e) for e in expr.args]
not_zero = [o for o in orders if o != 0]
if len(not_zero) > 1:
raise ValueError('Misformed expression containing multiplication between vectors.')
return 0 if not not_zero else not_zero[0]
elif isinstance(expr, Pow):
if covariant_order(expr.base) or covariant_order(expr.exp):
raise ValueError(
'Misformed expression containing a power of a vector.')
return 0
elif isinstance(expr, BaseVectorField):
return 1
elif isinstance(expr, TensorProduct):
return sum(contravariant_order(a) for a in expr.args)
elif not _strict or expr.atoms(BaseScalarField):
return 0
else: # If it does not contain anything related to the diffgeom module and it is _strict
return -1
def covariant_order(expr, _strict=False):
"""Return the covariant order of an expression.
Examples
========
>>> from sympy.diffgeom import covariant_order
>>> from sympy.diffgeom.rn import R2
>>> from sympy.abc import a
>>> covariant_order(a)
0
>>> covariant_order(a*R2.x + 2)
0
>>> covariant_order(a*R2.x*R2.dy + R2.dx)
1
"""
# TODO move some of this to class methods.
# TODO rewrite using the .as_blah_blah methods
if isinstance(expr, Add):
orders = [covariant_order(e) for e in expr.args]
if len(set(orders)) != 1:
raise ValueError('Misformed expression containing form fields of varying order.')
return orders[0]
elif isinstance(expr, Mul):
orders = [covariant_order(e) for e in expr.args]
not_zero = [o for o in orders if o != 0]
if len(not_zero) > 1:
raise ValueError('Misformed expression containing multiplication between forms.')
return 0 if not not_zero else not_zero[0]
elif isinstance(expr, Pow):
if covariant_order(expr.base) or covariant_order(expr.exp):
raise ValueError(
'Misformed expression containing a power of a form.')
return 0
elif isinstance(expr, Differential):
return covariant_order(*expr.args) + 1
elif isinstance(expr, TensorProduct):
return sum(covariant_order(a) for a in expr.args)
elif not _strict or expr.atoms(BaseScalarField):
return 0
else: # If it does not contain anything related to the diffgeom module and it is _strict
return -1
###############################################################################
# Coordinate transformation functions
###############################################################################
def vectors_in_basis(expr, to_sys):
"""Transform all base vectors in base vectors of a specified coord basis.
While the new base vectors are in the new coordinate system basis, any
coefficients are kept in the old system.
Examples
========
>>> from sympy.diffgeom import vectors_in_basis
>>> from sympy.diffgeom.rn import R2_r, R2_p
>>> vectors_in_basis(R2_r.e_x, R2_p)
-y*e_theta/(x**2 + y**2) + x*e_rho/sqrt(x**2 + y**2)
>>> vectors_in_basis(R2_p.e_r, R2_r)
sin(theta)*e_y + cos(theta)*e_x
"""
vectors = list(expr.atoms(BaseVectorField))
new_vectors = []
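# By the chain rule each base vector transforms with the Jacobian of the
# coordinate change: e_i = sum_j (d y_j / d x_i)*e'_j, where y are the
# coordinates of ``to_sys`` expressed through the old coordinates x.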
for v in vectors:
cs = v._coord_sys
jac = cs.jacobian(to_sys, cs.coord_functions())
new = (jac.T*Matrix(to_sys.base_vectors()))[v._index]
new_vectors.append(new)
return expr.subs(list(zip(vectors, new_vectors)))
###############################################################################
# Coordinate-dependent functions
###############################################################################
def twoform_to_matrix(expr):
"""Return the matrix representing the twoform.
For the twoform `w` return the matrix `M` such that `M[i,j]=w(e_i, e_j)`,
where `e_i` is the i-th base vector field for the coordinate system in
which the expression of `w` is given.
Examples
========
>>> from sympy.diffgeom.rn import R2
>>> from sympy.diffgeom import twoform_to_matrix, TensorProduct
>>> TP = TensorProduct
>>> twoform_to_matrix(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
Matrix([
[1, 0],
[0, 1]])
>>> twoform_to_matrix(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
Matrix([
[x, 0],
[0, 1]])
>>> twoform_to_matrix(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy) - TP(R2.dx, R2.dy)/2)
Matrix([
[ 1, 0],
[-1/2, 1]])
"""
if covariant_order(expr) != 2 or contravariant_order(expr):
raise ValueError('The input expression is not a two-form.')
coord_sys = _find_coords(expr)
if len(coord_sys) != 1:
raise ValueError('The input expression concerns more than one '
'coordinate system, hence there is no unambiguous '
'way to choose a coordinate system for the matrix.')
coord_sys = coord_sys.pop()
vectors = coord_sys.base_vectors()
expr = expr.expand()
matrix_content = [[expr.rcall(v1, v2) for v1 in vectors]
for v2 in vectors]
return Matrix(matrix_content)
def metric_to_Christoffel_1st(expr):
"""Return the nested list of Christoffel symbols for the given metric.
This returns the Christoffel symbol of first kind that represents the
Levi-Civita connection for the given metric.
Examples
========
>>> from sympy.diffgeom.rn import R2
>>> from sympy.diffgeom import metric_to_Christoffel_1st, TensorProduct
>>> TP = TensorProduct
>>> metric_to_Christoffel_1st(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
[[[0, 0], [0, 0]], [[0, 0], [0, 0]]]
>>> metric_to_Christoffel_1st(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
[[[1/2, 0], [0, 0]], [[0, 0], [0, 0]]]
"""
matrix = twoform_to_matrix(expr)
if not matrix.is_symmetric():
raise ValueError(
'The two-form representing the metric is not symmetric.')
coord_sys = _find_coords(expr).pop()
deriv_matrices = [matrix.applyfunc(lambda a: d(a))
for d in coord_sys.base_vectors()]
indices = list(range(coord_sys.dim))
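# Christoffel symbols of the first kind:
# Gamma_{ijk} = (d_j(g_{ik}) + d_k(g_{ij}) - d_i(g_{jk}))/2,
# where d_l denotes differentiation along the l-th base vector.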
christoffel = [[[(deriv_matrices[k][i, j] + deriv_matrices[j][i, k] - deriv_matrices[i][j, k])/2
for k in indices]
for j in indices]
for i in indices]
return ImmutableDenseNDimArray(christoffel)
def metric_to_Christoffel_2nd(expr):
"""Return the nested list of Christoffel symbols for the given metric.
This returns the Christoffel symbol of second kind that represents the
Levi-Civita connection for the given metric.
Examples
========
>>> from sympy.diffgeom.rn import R2
>>> from sympy.diffgeom import metric_to_Christoffel_2nd, TensorProduct
>>> TP = TensorProduct
>>> metric_to_Christoffel_2nd(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
[[[0, 0], [0, 0]], [[0, 0], [0, 0]]]
>>> metric_to_Christoffel_2nd(R2.x*TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
[[[1/(2*x), 0], [0, 0]], [[0, 0], [0, 0]]]
"""
ch_1st = metric_to_Christoffel_1st(expr)
coord_sys = _find_coords(expr).pop()
indices = list(range(coord_sys.dim))
# XXX workaround: inverting a matrix does not work if it contains non-Symbol
# entries, so the base scalar fields are temporarily replaced by plain symbols.
#matrix = twoform_to_matrix(expr).inv()
matrix = twoform_to_matrix(expr)
s_fields = set()
for e in matrix:
s_fields.update(e.atoms(BaseScalarField))
s_fields = list(s_fields)
dums = coord_sys.symbols
matrix = matrix.subs(list(zip(s_fields, dums))).inv().subs(list(zip(dums, s_fields)))
# XXX end of workaround
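# Christoffel symbols of the second kind are obtained by raising the first
# index with the inverse metric: Gamma^i_{jk} = g^{il}*Gamma_{ljk}.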
christoffel = [[[Add(*[matrix[i, l]*ch_1st[l, j, k] for l in indices])
for k in indices]
for j in indices]
for i in indices]
return ImmutableDenseNDimArray(christoffel)
def metric_to_Riemann_components(expr):
"""Return the components of the Riemann tensor expressed in a given basis.
Given a metric it calculates the components of the Riemann tensor in the
canonical basis of the coordinate system in which the metric expression is
given.
Examples
========
>>> from sympy import exp
>>> from sympy.diffgeom.rn import R2
>>> from sympy.diffgeom import metric_to_Riemann_components, TensorProduct
>>> TP = TensorProduct
>>> metric_to_Riemann_components(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
[[[[0, 0], [0, 0]], [[0, 0], [0, 0]]], [[[0, 0], [0, 0]], [[0, 0], [0, 0]]]]
>>> non_trivial_metric = exp(2*R2.r)*TP(R2.dr, R2.dr) + \
R2.r**2*TP(R2.dtheta, R2.dtheta)
>>> non_trivial_metric
exp(2*rho)*TensorProduct(drho, drho) + rho**2*TensorProduct(dtheta, dtheta)
>>> riemann = metric_to_Riemann_components(non_trivial_metric)
>>> riemann[0, :, :, :]
[[[0, 0], [0, 0]], [[0, exp(-2*rho)*rho], [-exp(-2*rho)*rho, 0]]]
>>> riemann[1, :, :, :]
[[[0, -1/rho], [1/rho, 0]], [[0, 0], [0, 0]]]
"""
ch_2nd = metric_to_Christoffel_2nd(expr)
coord_sys = _find_coords(expr).pop()
indices = list(range(coord_sys.dim))
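# Components of the Riemann tensor in the coordinate basis:
# R^rho_{sigma mu nu} = d_mu(Gamma^rho_{nu sigma}) - d_nu(Gamma^rho_{mu sigma})
#     + Gamma^rho_{mu lam}*Gamma^lam_{nu sigma} - Gamma^rho_{nu lam}*Gamma^lam_{mu sigma}.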
deriv_ch = [[[[d(ch_2nd[i, j, k])
for d in coord_sys.base_vectors()]
for k in indices]
for j in indices]
for i in indices]
riemann_a = [[[[deriv_ch[rho][sig][nu][mu] - deriv_ch[rho][sig][mu][nu]
for nu in indices]
for mu in indices]
for sig in indices]
for rho in indices]
riemann_b = [[[[Add(*[ch_2nd[rho, l, mu]*ch_2nd[l, sig, nu] - ch_2nd[rho, l, nu]*ch_2nd[l, sig, mu] for l in indices])
for nu in indices]
for mu in indices]
for sig in indices]
for rho in indices]
riemann = [[[[riemann_a[rho][sig][mu][nu] + riemann_b[rho][sig][mu][nu]
for nu in indices]
for mu in indices]
for sig in indices]
for rho in indices]
return ImmutableDenseNDimArray(riemann)
def metric_to_Ricci_components(expr):
"""Return the components of the Ricci tensor expressed in a given basis.
Given a metric it calculates the components of the Ricci tensor in the
canonical basis of the coordinate system in which the metric expression is
given.
Examples
========
>>> from sympy import exp
>>> from sympy.diffgeom.rn import R2
>>> from sympy.diffgeom import metric_to_Ricci_components, TensorProduct
>>> TP = TensorProduct
>>> metric_to_Ricci_components(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
[[0, 0], [0, 0]]
>>> non_trivial_metric = exp(2*R2.r)*TP(R2.dr, R2.dr) + \
R2.r**2*TP(R2.dtheta, R2.dtheta)
>>> non_trivial_metric
exp(2*rho)*TensorProduct(drho, drho) + rho**2*TensorProduct(dtheta, dtheta)
>>> metric_to_Ricci_components(non_trivial_metric)
[[1/rho, 0], [0, exp(-2*rho)*rho]]
"""
riemann = metric_to_Riemann_components(expr)
coord_sys = _find_coords(expr).pop()
indices = list(range(coord_sys.dim))
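# The Ricci tensor is the contraction of the Riemann tensor on its first and
# third indices: R_{ij} = sum_k R^k_{i k j}.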
ricci = [[Add(*[riemann[k, i, k, j] for k in indices])
for j in indices]
for i in indices]
return ImmutableDenseNDimArray(ricci)
###############################################################################
# Classes for deprecation
###############################################################################
class _deprecated_container:
# This class gives deprecation warning.
# When deprecated features are completely deleted, this should be removed as well.
# See https://github.com/sympy/sympy/pull/19368
def __init__(self, feature, useinstead, issue, version, data):
super().__init__(data)
self.feature = feature
self.useinstead = useinstead
self.issue = issue
self.version = version
def warn(self):
SymPyDeprecationWarning(
feature=self.feature,
useinstead=self.useinstead,
issue=self.issue,
deprecated_since_version=self.version).warn()
def __iter__(self):
self.warn()
return super().__iter__()
def __getitem__(self, key):
self.warn()
return super().__getitem__(key)
def __contains__(self, key):
self.warn()
return super().__contains__(key)
class _deprecated_list(_deprecated_container, list):
pass
class _deprecated_dict(_deprecated_container, dict):
pass
|
e41fcf0c8e414118cb92fc5bd29ab382457035653c415a29bdf5ac158d9c48c9 | """
AST nodes specific to the C family of languages
"""
from sympy.codegen.ast import (
Attribute, Declaration, Node, String, Token, Type, none,
FunctionCall, CodeBlock
)
from sympy.core.basic import Basic
from sympy.core.containers import Tuple
from sympy.core.sympify import sympify
void = Type('void')
restrict = Attribute('restrict') # guarantees no pointer aliasing
volatile = Attribute('volatile')
static = Attribute('static')
def alignof(arg):
""" Generate of FunctionCall instance for calling 'alignof' """
return FunctionCall('alignof', [String(arg) if isinstance(arg, str) else arg])
def sizeof(arg):
""" Generate of FunctionCall instance for calling 'sizeof'
Examples
========
>>> from sympy.codegen.ast import real
>>> from sympy.codegen.cnodes import sizeof
>>> from sympy.printing import ccode
>>> ccode(sizeof(real))
'sizeof(double)'
"""
return FunctionCall('sizeof', [String(arg) if isinstance(arg, str) else arg])
class CommaOperator(Basic):
""" Represents the comma operator in C """
def __new__(cls, *args):
return Basic.__new__(cls, *[sympify(arg) for arg in args])
class Label(Node):
""" Label for use with e.g. goto statement.
Examples
========
>>> from sympy import Symbol
>>> from sympy.codegen.cnodes import Label, PreIncrement
>>> from sympy.printing import ccode
>>> print(ccode(Label('foo')))
foo:
>>> print(ccode(Label('bar', [PreIncrement(Symbol('a'))])))
bar:
++(a);
"""
__slots__ = ('name', 'body')
defaults = {'body': none}
_construct_name = String
@classmethod
def _construct_body(cls, itr):
if isinstance(itr, CodeBlock):
return itr
else:
return CodeBlock(*itr)
class goto(Token):
""" Represents goto in C """
__slots__ = ('label',)
_construct_label = Label
class PreDecrement(Basic):
""" Represents the pre-decrement operator
Examples
========
>>> from sympy.abc import x
>>> from sympy.codegen.cnodes import PreDecrement
>>> from sympy.printing import ccode
>>> ccode(PreDecrement(x))
'--(x)'
"""
nargs = 1
class PostDecrement(Basic):
""" Represents the post-decrement operator """
nargs = 1
class PreIncrement(Basic):
""" Represents the pre-increment operator """
nargs = 1
class PostIncrement(Basic):
""" Represents the post-increment operator """
nargs = 1
class struct(Node):
""" Represents a struct in C """
__slots__ = ('name', 'declarations')
defaults = {'name': none}
_construct_name = String
@classmethod
def _construct_declarations(cls, args):
return Tuple(*[Declaration(arg) for arg in args])
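# Illustrative sketch (not part of the original module): a struct node can be
# assembled from plain variables, e.g.
#     from sympy import Symbol
#     from sympy.codegen.ast import Variable, float32
#     vec2 = struct('vec2', [Variable(Symbol('x'), type=float32),
#                            Variable(Symbol('y'), type=float32)])
# _construct_declarations then wraps each Variable in a Declaration.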
class union(struct):
""" Represents a union in C """
|
7ea671bab2fca64a2d592c8f137a7460a29848eb7c40afa0cb1c0176202f1380 | import bisect
import itertools
from functools import reduce
from collections import defaultdict
from sympy import Indexed, IndexedBase, Tuple, Sum, Add, S, Integer, diagonalize_vector, DiagMatrix
from sympy.combinatorics import Permutation
from sympy.core.basic import Basic
from sympy.core.compatibility import accumulate, default_sort_key
from sympy.core.mul import Mul
from sympy.core.sympify import _sympify
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.matrices.expressions import (MatAdd, MatMul, Trace, Transpose,
MatrixSymbol)
from sympy.matrices.expressions.matexpr import MatrixExpr, MatrixElement
from sympy.tensor.array import NDimArray
class _CodegenArrayAbstract(Basic):
@property
def subranks(self):
"""
Returns the ranks of the objects in the uppermost tensor product inside
the current object. In case no tensor products are contained, return
the atomic ranks.
Examples
========
>>> from sympy.codegen.array_utils import CodegenArrayTensorProduct, CodegenArrayContraction
>>> from sympy import MatrixSymbol
>>> M = MatrixSymbol("M", 3, 3)
>>> N = MatrixSymbol("N", 3, 3)
>>> P = MatrixSymbol("P", 3, 3)
Important: do not confuse the rank of the matrix with the rank of an array.
>>> tp = CodegenArrayTensorProduct(M, N, P)
>>> tp.subranks
[2, 2, 2]
>>> co = CodegenArrayContraction(tp, (1, 2), (3, 4))
>>> co.subranks
[2, 2, 2]
"""
return self._subranks[:]
def subrank(self):
"""
The sum of ``subranks``.
"""
return sum(self.subranks)
@property
def shape(self):
return self._shape
class CodegenArrayContraction(_CodegenArrayAbstract):
r"""
This class is meant to represent contractions of arrays in a form easily
processable by the code printers.
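Examples
========
The matrix product `A*B`, for instance, corresponds to contracting the second
index of `A` with the first index of `B` in their tensor product (indices are
counted over the flattened tensor-product axes):
>>> from sympy.codegen.array_utils import CodegenArrayContraction, CodegenArrayTensorProduct
>>> from sympy import MatrixSymbol
>>> A = MatrixSymbol("A", 3, 3)
>>> B = MatrixSymbol("B", 3, 3)
>>> cg = CodegenArrayContraction(CodegenArrayTensorProduct(A, B), (1, 2))
>>> cg
CodegenArrayContraction(CodegenArrayTensorProduct(A, B), (1, 2))
>>> cg.shape
(3, 3)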
"""
def __new__(cls, expr, *contraction_indices, **kwargs):
contraction_indices = _sort_contraction_indices(contraction_indices)
expr = _sympify(expr)
if len(contraction_indices) == 0:
return expr
if isinstance(expr, CodegenArrayContraction):
return cls._flatten(expr, *contraction_indices)
obj = Basic.__new__(cls, expr, *contraction_indices)
obj._subranks = _get_subranks(expr)
obj._mapping = _get_mapping_from_subranks(obj._subranks)
free_indices_to_position = {i: i for i in range(sum(obj._subranks)) if all([i not in cind for cind in contraction_indices])}
obj._free_indices_to_position = free_indices_to_position
shape = expr.shape
cls._validate(expr, *contraction_indices)
if shape:
shape = tuple(shp for i, shp in enumerate(shape) if not any(i in j for j in contraction_indices))
obj._shape = shape
return obj
def __mul__(self, other):
if other == 1:
return self
else:
raise NotImplementedError("Product of N-dim arrays is not uniquely defined. Use another method.")
def __rmul__(self, other):
if other == 1:
return self
else:
raise NotImplementedError("Product of N-dim arrays is not uniquely defined. Use another method.")
@staticmethod
def _validate(expr, *contraction_indices):
shape = expr.shape
if shape is None:
return
# Check that no contraction happens when the shape is mismatched:
for i in contraction_indices:
if len({shape[j] for j in i if shape[j] != -1}) != 1:
raise ValueError("contracting indices of different dimensions")
@classmethod
def _push_indices_down(cls, contraction_indices, indices):
flattened_contraction_indices = [j for i in contraction_indices for j in i]
flattened_contraction_indices.sort()
transform = _build_push_indices_down_func_transformation(flattened_contraction_indices)
return _apply_recursively_over_nested_lists(transform, indices)
@classmethod
def _push_indices_up(cls, contraction_indices, indices):
flattened_contraction_indices = [j for i in contraction_indices for j in i]
flattened_contraction_indices.sort()
transform = _build_push_indices_up_func_transformation(flattened_contraction_indices)
return _apply_recursively_over_nested_lists(transform, indices)
def split_multiple_contractions(self):
"""
Recognize multiple contractions and attempt to rewrite them as paired contractions.
"""
from sympy import ask, Q
contraction_indices = self.contraction_indices
if isinstance(self.expr, CodegenArrayTensorProduct):
args = list(self.expr.args)
else:
args = [self.expr]
# TODO: unify API, best location in CodegenArrayTensorProduct
subranks = [get_rank(i) for i in args]
# TODO: unify API
mapping = _get_mapping_from_subranks(subranks)
reverse_mapping = {v:k for k, v in mapping.items()}
new_contraction_indices = []
for indl, links in enumerate(contraction_indices):
if len(links) <= 2:
new_contraction_indices.append(links)
continue
# Check multiple contractions:
#
# Examples:
#
# * `A_ij b_j0 C_jk` ===> `A*DiagMatrix(b)*C`
#
# Care for:
# - matrix being diagonalized (i.e. `A_ii`)
# - vectors being diagonalized (i.e. `a_i0`)
# Also consider the case of diagonal matrices being contracted:
current_dimension = self.expr.shape[links[0]]
tuple_links = [mapping[i] for i in links]
arg_indices, arg_positions = zip(*tuple_links)
args_updates = {}
if len(arg_indices) != len(set(arg_indices)):
# Maybe trace should be supported?
raise NotImplementedError
not_vectors = []
vectors = []
for arg_ind, arg_pos in tuple_links:
mat = args[arg_ind]
other_arg_pos = 1-arg_pos
other_arg_abs = reverse_mapping[arg_ind, other_arg_pos]
if (((1 not in mat.shape) and (not ask(Q.diagonal(mat)))) or
((current_dimension == 1) is True and mat.shape != (1, 1)) or
any([other_arg_abs in l for li, l in enumerate(contraction_indices) if li != indl])
):
not_vectors.append((arg_ind, arg_pos))
continue
args_updates[arg_ind] = diagonalize_vector(mat)
vectors.append((arg_ind, arg_pos))
vectors.append((arg_ind, 1-arg_pos))
if len(not_vectors) > 2:
new_contraction_indices.append(links)
continue
if len(not_vectors) == 0:
new_sequence = vectors[:1] + vectors[2:]
elif len(not_vectors) == 1:
new_sequence = not_vectors[:1] + vectors[:-1]
else:
new_sequence = not_vectors[:1] + vectors + not_vectors[1:]
for i in range(0, len(new_sequence) - 1, 2):
arg1, pos1 = new_sequence[i]
arg2, pos2 = new_sequence[i+1]
if arg1 == arg2:
raise NotImplementedError
abspos1 = reverse_mapping[arg1, pos1]
abspos2 = reverse_mapping[arg2, pos2]
new_contraction_indices.append((abspos1, abspos2))
for ind, newarg in args_updates.items():
args[ind] = newarg
return CodegenArrayContraction(
CodegenArrayTensorProduct(*args),
*new_contraction_indices
)
def flatten_contraction_of_diagonal(self):
if not isinstance(self.expr, CodegenArrayDiagonal):
return self
contraction_down = self.expr._push_indices_down(self.expr.diagonal_indices, self.contraction_indices)
new_contraction_indices = []
diagonal_indices = self.expr.diagonal_indices[:]
for i in contraction_down:
contraction_group = list(i)
for j in i:
diagonal_with = [k for k in diagonal_indices if j in k]
contraction_group.extend([l for k in diagonal_with for l in k])
diagonal_indices = [k for k in diagonal_indices if k not in diagonal_with]
new_contraction_indices.append(sorted(set(contraction_group)))
new_contraction_indices = CodegenArrayDiagonal._push_indices_up(diagonal_indices, new_contraction_indices)
return CodegenArrayContraction(
CodegenArrayDiagonal(
self.expr.expr,
*diagonal_indices
),
*new_contraction_indices
)
@staticmethod
def _get_free_indices_to_position_map(free_indices, contraction_indices):
free_indices_to_position = {}
flattened_contraction_indices = [j for i in contraction_indices for j in i]
counter = 0
for ind in free_indices:
while counter in flattened_contraction_indices:
counter += 1
free_indices_to_position[ind] = counter
counter += 1
return free_indices_to_position
@staticmethod
def _get_index_shifts(expr):
"""
Get the mapping of indices at the positions before the contraction
occurs.
Examples
========
>>> from sympy.codegen.array_utils import CodegenArrayContraction, CodegenArrayTensorProduct
>>> from sympy import MatrixSymbol
>>> M = MatrixSymbol("M", 3, 3)
>>> N = MatrixSymbol("N", 3, 3)
>>> cg = CodegenArrayContraction(CodegenArrayTensorProduct(M, N), [1, 2])
>>> cg._get_index_shifts(cg)
[0, 2]
Indeed, ``cg`` after the contraction has two dimensions, 0 and 1. They
need to be shifted by 0 and 2 to get the corresponding positions before
the contraction (that is, 0 and 3).
"""
inner_contraction_indices = expr.contraction_indices
all_inner = [j for i in inner_contraction_indices for j in i]
all_inner.sort()
# TODO: add API for total rank and cumulative rank:
total_rank = _get_subrank(expr)
inner_rank = len(all_inner)
outer_rank = total_rank - inner_rank
shifts = [0 for i in range(outer_rank)]
counter = 0
pointer = 0
for i in range(outer_rank):
while pointer < inner_rank and counter >= all_inner[pointer]:
counter += 1
pointer += 1
shifts[i] += pointer
counter += 1
return shifts
@staticmethod
def _convert_outer_indices_to_inner_indices(expr, *outer_contraction_indices):
shifts = CodegenArrayContraction._get_index_shifts(expr)
outer_contraction_indices = tuple(tuple(shifts[j] + j for j in i) for i in outer_contraction_indices)
return outer_contraction_indices
@staticmethod
def _flatten(expr, *outer_contraction_indices):
inner_contraction_indices = expr.contraction_indices
outer_contraction_indices = CodegenArrayContraction._convert_outer_indices_to_inner_indices(expr, *outer_contraction_indices)
contraction_indices = inner_contraction_indices + outer_contraction_indices
return CodegenArrayContraction(expr.expr, *contraction_indices)
def _get_contraction_tuples(self):
r"""
Return, for each contraction, tuples containing the argument index and the
position of the contracted index within that argument.
Examples
========
>>> from sympy import MatrixSymbol
>>> from sympy.abc import N
>>> from sympy.codegen.array_utils import CodegenArrayContraction, CodegenArrayTensorProduct
>>> A = MatrixSymbol("A", N, N)
>>> B = MatrixSymbol("B", N, N)
>>> cg = CodegenArrayContraction(CodegenArrayTensorProduct(A, B), (1, 2))
>>> cg._get_contraction_tuples()
[[(0, 1), (1, 0)]]
Here the contraction pair `(1, 2)`, meaning that the 2nd and 3rd indices
of the tensor product `A\otimes B` are contracted, has been transformed
into `(0, 1)` and `(1, 0)`, identifying the same indices in a different
notation. `(0, 1)` is the second index (1) of the first argument (i.e.
0 or `A`). `(1, 0)` is the first index (i.e. 0) of the second
argument (i.e. 1 or `B`).
"""
mapping = self._mapping
return [[mapping[j] for j in i] for i in self.contraction_indices]
@staticmethod
def _contraction_tuples_to_contraction_indices(expr, contraction_tuples):
# TODO: check that `expr` has `.subranks`:
ranks = expr.subranks
cumulative_ranks = [0] + list(accumulate(ranks))
return [tuple(cumulative_ranks[j]+k for j, k in i) for i in contraction_tuples]
@property
def free_indices(self):
return self._free_indices[:]
@property
def free_indices_to_position(self):
return dict(self._free_indices_to_position)
@property
def expr(self):
return self.args[0]
@property
def contraction_indices(self):
return self.args[1:]
def _contraction_indices_to_components(self):
expr = self.expr
if not isinstance(expr, CodegenArrayTensorProduct):
raise NotImplementedError("only for contractions of tensor products")
ranks = expr.subranks
mapping = {}
counter = 0
for i, rank in enumerate(ranks):
for j in range(rank):
mapping[counter] = (i, j)
counter += 1
return mapping
def sort_args_by_name(self):
"""
Sort arguments in the tensor product so that their order is lexicographical.
Examples
========
>>> from sympy import MatrixSymbol
>>> from sympy.abc import N
>>> from sympy.codegen.array_utils import parse_matrix_expression
>>> A = MatrixSymbol("A", N, N)
>>> B = MatrixSymbol("B", N, N)
>>> C = MatrixSymbol("C", N, N)
>>> D = MatrixSymbol("D", N, N)
>>> cg = parse_matrix_expression(C*D*A*B)
>>> cg
CodegenArrayContraction(CodegenArrayTensorProduct(C, D, A, B), (1, 2), (3, 4), (5, 6))
>>> cg.sort_args_by_name()
CodegenArrayContraction(CodegenArrayTensorProduct(A, B, C, D), (0, 7), (1, 2), (5, 6))
"""
expr = self.expr
if not isinstance(expr, CodegenArrayTensorProduct):
return self
args = expr.args
sorted_data = sorted(enumerate(args), key=lambda x: default_sort_key(x[1]))
pos_sorted, args_sorted = zip(*sorted_data)
reordering_map = {i: pos_sorted.index(i) for i, arg in enumerate(args)}
contraction_tuples = self._get_contraction_tuples()
contraction_tuples = [[(reordering_map[j], k) for j, k in i] for i in contraction_tuples]
c_tp = CodegenArrayTensorProduct(*args_sorted)
new_contr_indices = self._contraction_tuples_to_contraction_indices(
c_tp,
contraction_tuples
)
return CodegenArrayContraction(c_tp, *new_contr_indices)
def _get_contraction_links(self):
r"""
Returns a dictionary of links between arguments in the tensor product
being contracted.
See the example for an explanation of the values.
Examples
========
>>> from sympy import MatrixSymbol
>>> from sympy.abc import N
>>> from sympy.codegen.array_utils import parse_matrix_expression
>>> A = MatrixSymbol("A", N, N)
>>> B = MatrixSymbol("B", N, N)
>>> C = MatrixSymbol("C", N, N)
>>> D = MatrixSymbol("D", N, N)
Matrix multiplications are pairwise contractions between neighboring
matrices:
`A_{ij} B_{jk} C_{kl} D_{lm}`
>>> cg = parse_matrix_expression(A*B*C*D)
>>> cg
CodegenArrayContraction(CodegenArrayTensorProduct(A, B, C, D), (1, 2), (3, 4), (5, 6))
>>> cg._get_contraction_links()
{0: {1: (1, 0)}, 1: {0: (0, 1), 1: (2, 0)}, 2: {0: (1, 1), 1: (3, 0)}, 3: {0: (2, 1)}}
This dictionary is interpreted as follows: the argument in position 0 (i.e.
matrix `A`) has its second index (i.e. 1) contracted to `(1, 0)`, that is,
to the first index slot of the argument in position 1 (matrix `B`); this is
the contraction provided by the index `j` from `A`.
The argument in position 1 (that is, matrix `B`) has two contractions, the
ones provided by the indices `j` and `k`, acting respectively on its first
and second index slots (0 and 1 in the sub-dict); the corresponding links are
`(0, 1)` and `(2, 0)`. `(0, 1)` is the index slot 1 (the 2nd) of the
argument in position 0 (that is, `A_{\cdot j}`), and so on.
"""
args, dlinks = _get_contraction_links([self], self.subranks, *self.contraction_indices)
return dlinks
def get_shape(expr):
if hasattr(expr, "shape"):
return expr.shape
return ()
class CodegenArrayTensorProduct(_CodegenArrayAbstract):
r"""
Class to represent the tensor product of array-like objects.
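Examples
========
The shape of a tensor product is the concatenation of the shapes of its
factors:
>>> from sympy.codegen.array_utils import CodegenArrayTensorProduct
>>> from sympy import MatrixSymbol
>>> M = MatrixSymbol("M", 3, 3)
>>> N = MatrixSymbol("N", 3, 2)
>>> CodegenArrayTensorProduct(M, N).shape
(3, 3, 3, 2)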
"""
def __new__(cls, *args):
args = [_sympify(arg) for arg in args]
args = cls._flatten(args)
ranks = [_get_subrank(arg) for arg in args]
if len(args) == 1:
return args[0]
# If there are contraction objects inside, transform the whole
# expression into `CodegenArrayContraction`:
contractions = {i: arg for i, arg in enumerate(args) if isinstance(arg, CodegenArrayContraction)}
if contractions:
cumulative_ranks = list(accumulate([0] + ranks))[:-1]
tp = cls(*[arg.expr if isinstance(arg, CodegenArrayContraction) else arg for arg in args])
contraction_indices = [tuple(cumulative_ranks[i] + k for k in j) for i, arg in contractions.items() for j in arg.contraction_indices]
return CodegenArrayContraction(tp, *contraction_indices)
#newargs = [i for i in args if hasattr(i, "shape")]
#coeff = reduce(lambda x, y: x*y, [i for i in args if not hasattr(i, "shape")], S.One)
#newargs[0] *= coeff
obj = Basic.__new__(cls, *args)
obj._subranks = ranks
shapes = [get_shape(i) for i in args]
if any(i is None for i in shapes):
obj._shape = None
else:
obj._shape = tuple(j for i in shapes for j in i)
return obj
@classmethod
def _flatten(cls, args):
args = [i for arg in args for i in (arg.args if isinstance(arg, cls) else [arg])]
return args
class CodegenArrayElementwiseAdd(_CodegenArrayAbstract):
r"""
Class for elementwise array additions.
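Examples
========
All summands must have the same rank (and matching shapes, when these are
known):
>>> from sympy.codegen.array_utils import CodegenArrayElementwiseAdd
>>> from sympy import MatrixSymbol
>>> M = MatrixSymbol("M", 3, 3)
>>> N = MatrixSymbol("N", 3, 3)
>>> CodegenArrayElementwiseAdd(M, N).shape
(3, 3)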
"""
def __new__(cls, *args):
args = [_sympify(arg) for arg in args]
obj = Basic.__new__(cls, *args)
ranks = [get_rank(arg) for arg in args]
ranks = list(set(ranks))
if len(ranks) != 1:
raise ValueError("summing arrays of different ranks")
obj._subranks = ranks
shapes = [arg.shape for arg in args]
if len({i for i in shapes if i is not None}) > 1:
raise ValueError("mismatching shapes in addition")
if any(i is None for i in shapes):
obj._shape = None
else:
obj._shape = shapes[0]
return obj
class CodegenArrayPermuteDims(_CodegenArrayAbstract):
r"""
Class to represent permutation of axes of arrays.
Examples
========
>>> from sympy.codegen.array_utils import CodegenArrayPermuteDims
>>> from sympy import MatrixSymbol
>>> M = MatrixSymbol("M", 3, 3)
>>> cg = CodegenArrayPermuteDims(M, [1, 0])
The object ``cg`` represents the transposition of ``M``, as the permutation
``[1, 0]`` will act on its indices by switching them:
`M_{ij} \Rightarrow M_{ji}`
This is evident when transforming back to matrix form:
>>> from sympy.codegen.array_utils import recognize_matrix_expression
>>> recognize_matrix_expression(cg)
M.T
>>> N = MatrixSymbol("N", 3, 2)
>>> cg = CodegenArrayPermuteDims(N, [1, 0])
>>> cg.shape
(2, 3)
"""
def __new__(cls, expr, permutation):
from sympy.combinatorics import Permutation
expr = _sympify(expr)
permutation = Permutation(permutation)
plist = permutation.array_form
if plist == sorted(plist):
return expr
obj = Basic.__new__(cls, expr, permutation)
obj._subranks = [get_rank(expr)]
shape = expr.shape
if shape is None:
obj._shape = None
else:
obj._shape = tuple(shape[permutation(i)] for i in range(len(shape)))
return obj
@property
def expr(self):
return self.args[0]
@property
def permutation(self):
return self.args[1]
def nest_permutation(self):
r"""
Nest the permutation down the expression tree.
Examples
========
>>> from sympy.codegen.array_utils import (CodegenArrayPermuteDims, CodegenArrayTensorProduct, nest_permutation)
>>> from sympy import MatrixSymbol
>>> M = MatrixSymbol("M", 3, 3)
>>> N = MatrixSymbol("N", 3, 3)
>>> cg = CodegenArrayPermuteDims(CodegenArrayTensorProduct(M, N), [1, 0, 3, 2])
>>> cg
CodegenArrayPermuteDims(CodegenArrayTensorProduct(M, N), (0 1)(2 3))
>>> nest_permutation(cg)
CodegenArrayTensorProduct(CodegenArrayPermuteDims(M, (0 1)), CodegenArrayPermuteDims(N, (0 1)))
In ``cg`` both ``M`` and ``N`` are transposed. The cyclic
representation of the permutation after the tensor product is
`(0 1)(2 3)`. After nesting it down the expression tree, the usual
transposition permutation `(0 1)` appears.
"""
expr = self.expr
if isinstance(expr, CodegenArrayTensorProduct):
# Check if the permutation keeps the subranks separated:
subranks = expr.subranks
subrank = expr.subrank()
l = list(range(subrank))
p = [self.permutation(i) for i in l]
dargs = {}
counter = 0
for i, arg in zip(subranks, expr.args):
p0 = p[counter:counter+i]
counter += i
s0 = sorted(p0)
if not all([s0[j+1]-s0[j] == 1 for j in range(len(s0)-1)]):
# Cross-argument permutations, impossible to nest the object:
return self
subpermutation = [p0.index(j) for j in s0]
dargs[s0[0]] = CodegenArrayPermuteDims(arg, subpermutation)
# Read the arguments, sorting them according to the keys of the dict:
args = [dargs[i] for i in sorted(dargs)]
return CodegenArrayTensorProduct(*args)
elif isinstance(expr, CodegenArrayContraction):
# Invert tree hierarchy: put the contraction above.
cycles = self.permutation.cyclic_form
newcycles = CodegenArrayContraction._convert_outer_indices_to_inner_indices(expr, *cycles)
newpermutation = Permutation(newcycles)
new_contr_indices = [tuple(newpermutation(j) for j in i) for i in expr.contraction_indices]
return CodegenArrayContraction(CodegenArrayPermuteDims(expr.expr, newpermutation), *new_contr_indices)
elif isinstance(expr, CodegenArrayElementwiseAdd):
return CodegenArrayElementwiseAdd(*[CodegenArrayPermuteDims(arg, self.permutation) for arg in expr.args])
return self
def nest_permutation(expr):
if isinstance(expr, CodegenArrayPermuteDims):
return expr.nest_permutation()
else:
return expr
class CodegenArrayDiagonal(_CodegenArrayAbstract):
r"""
Class to represent the diagonal operator.
In a 2-dimensional array it returns the diagonal; this looks like the
operation:
`A_{ij} \rightarrow A_{ii}`
The diagonal over axes 1 and 2 (the second and third) of the tensor product
of two 2-dimensional arrays `A \otimes B` is
`\Big[ A_{ab} B_{cd} \Big]_{abcd} \rightarrow \Big[ A_{ai} B_{id} \Big]_{adi}`
In this last example the array expression has been reduced from
4-dimensional to 3-dimensional. Notice that no contraction has occurred;
rather, there is a new index `i` for the diagonal (a contraction would have
reduced the array to 2 dimensions).
Notice that the diagonalized out dimensions are added as new dimensions at
the end of the indices.
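Examples
========
Taking the diagonal across axes 1 and 2 of the tensor product of a 3x4 and a
4x5 matrix leaves axes 0 and 3 free and appends the new diagonal axis at the
end:
>>> from sympy.codegen.array_utils import CodegenArrayDiagonal, CodegenArrayTensorProduct
>>> from sympy import MatrixSymbol
>>> A = MatrixSymbol("A", 3, 4)
>>> B = MatrixSymbol("B", 4, 5)
>>> CodegenArrayDiagonal(CodegenArrayTensorProduct(A, B), (1, 2)).shape
(3, 5, 4)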
"""
def __new__(cls, expr, *diagonal_indices):
expr = _sympify(expr)
diagonal_indices = [Tuple(*sorted(i)) for i in diagonal_indices]
if isinstance(expr, CodegenArrayDiagonal):
return cls._flatten(expr, *diagonal_indices)
shape = expr.shape
if shape is not None:
diagonal_indices = [i for i in diagonal_indices if len(i) > 1]
cls._validate(expr, *diagonal_indices)
#diagonal_indices = cls._remove_trivial_dimensions(shape, *diagonal_indices)
# Get new shape:
shp1 = tuple(shp for i,shp in enumerate(shape) if not any(i in j for j in diagonal_indices))
shp2 = tuple(shape[i[0]] for i in diagonal_indices)
shape = shp1 + shp2
if len(diagonal_indices) == 0:
return expr
obj = Basic.__new__(cls, expr, *diagonal_indices)
obj._subranks = _get_subranks(expr)
obj._shape = shape
return obj
@staticmethod
def _validate(expr, *diagonal_indices):
# Check that no diagonalization happens on indices with mismatched
# dimensions:
shape = expr.shape
for i in diagonal_indices:
if len({shape[j] for j in i}) != 1:
raise ValueError("diagonalizing indices of different dimensions")
@staticmethod
def _remove_trivial_dimensions(shape, *diagonal_indices):
return [tuple(j for j in i) for i in diagonal_indices if shape[i[0]] != 1]
@property
def expr(self):
return self.args[0]
@property
def diagonal_indices(self):
return self.args[1:]
@staticmethod
def _flatten(expr, *outer_diagonal_indices):
inner_diagonal_indices = expr.diagonal_indices
all_inner = [j for i in inner_diagonal_indices for j in i]
all_inner.sort()
# TODO: add API for total rank and cumulative rank:
total_rank = _get_subrank(expr)
inner_rank = len(all_inner)
outer_rank = total_rank - inner_rank
shifts = [0 for i in range(outer_rank)]
counter = 0
pointer = 0
for i in range(outer_rank):
while pointer < inner_rank and counter >= all_inner[pointer]:
counter += 1
pointer += 1
shifts[i] += pointer
counter += 1
outer_diagonal_indices = tuple(tuple(shifts[j] + j for j in i) for i in outer_diagonal_indices)
diagonal_indices = inner_diagonal_indices + outer_diagonal_indices
return CodegenArrayDiagonal(expr.expr, *diagonal_indices)
@classmethod
def _push_indices_down(cls, diagonal_indices, indices):
flattened_contraction_indices = [j for i in diagonal_indices for j in i[1:]]
flattened_contraction_indices.sort()
transform = _build_push_indices_down_func_transformation(flattened_contraction_indices)
return _apply_recursively_over_nested_lists(transform, indices)
@classmethod
def _push_indices_up(cls, diagonal_indices, indices):
flattened_contraction_indices = [j for i in diagonal_indices for j in i[1:]]
flattened_contraction_indices.sort()
transform = _build_push_indices_up_func_transformation(flattened_contraction_indices)
return _apply_recursively_over_nested_lists(transform, indices)
def transform_to_product(self):
from sympy import ask, Q
diagonal_indices = self.diagonal_indices
if isinstance(self.expr, CodegenArrayContraction):
# invert Diagonal and Contraction:
diagonal_down = CodegenArrayContraction._push_indices_down(
self.expr.contraction_indices,
diagonal_indices
)
newexpr = CodegenArrayDiagonal(
self.expr.expr,
*diagonal_down
).transform_to_product()
contraction_up = newexpr._push_indices_up(
diagonal_down,
self.expr.contraction_indices
)
return CodegenArrayContraction(
newexpr,
*contraction_up
)
if not isinstance(self.expr, CodegenArrayTensorProduct):
return self
args = list(self.expr.args)
# TODO: unify API
subranks = [get_rank(i) for i in args]
# TODO: unify API
mapping = _get_mapping_from_subranks(subranks)
new_contraction_indices = []
drop_diagonal_indices = []
for indl, links in enumerate(diagonal_indices):
if len(links) > 2:
continue
# Also consider the case of diagonal matrices being contracted:
current_dimension = self.expr.shape[links[0]]
if current_dimension == 1:
drop_diagonal_indices.append(indl)
continue
tuple_links = [mapping[i] for i in links]
arg_indices, arg_positions = zip(*tuple_links)
if len(arg_indices) != len(set(arg_indices)):
# Maybe trace should be supported?
raise NotImplementedError
args_updates = {}
count_nondiagonal = 0
last = None
expression_is_square = False
# Check that all args are vectors:
for arg_ind, arg_pos in tuple_links:
mat = args[arg_ind]
if 1 in mat.shape and mat.shape != (1, 1):
args_updates[arg_ind] = DiagMatrix(mat)
last = arg_ind
else:
expression_is_square = True
if not ask(Q.diagonal(mat)):
count_nondiagonal += 1
if count_nondiagonal > 1:
break
if count_nondiagonal > 1:
continue
# TODO: if count_nondiagonal == 0 then the sub-expression can be recognized as HadamardProduct.
for arg_ind, newmat in args_updates.items():
if not expression_is_square and arg_ind == last:
continue
#pass
args[arg_ind] = newmat
drop_diagonal_indices.append(indl)
new_contraction_indices.append(links)
new_diagonal_indices = CodegenArrayContraction._push_indices_up(
new_contraction_indices,
[e for i, e in enumerate(diagonal_indices) if i not in drop_diagonal_indices]
)
return CodegenArrayDiagonal(
CodegenArrayContraction(
CodegenArrayTensorProduct(*args),
*new_contraction_indices
),
*new_diagonal_indices
)
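# Illustrative sketch (not part of the original module): the situation
# described in the class docstring above, i.e. the diagonal over axes 1 and 2
# of a tensor product of two matrices. The function name is hypothetical and
# only serves as a usage example.
def _demo_diagonal_of_tensor_product():
    from sympy import MatrixSymbol
    A = MatrixSymbol("A", 3, 3)
    B = MatrixSymbol("B", 3, 3)
    # A x B has shape (3, 3, 3, 3); diagonalizing axes (1, 2) keeps axes 0 and
    # 3 and appends the new diagonal axis, so the result has shape (3, 3, 3).
    return CodegenArrayDiagonal(CodegenArrayTensorProduct(A, B), (1, 2))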
def get_rank(expr):
if isinstance(expr, (MatrixExpr, MatrixElement)):
return 2
if isinstance(expr, _CodegenArrayAbstract):
return len(expr.shape)
if isinstance(expr, NDimArray):
return expr.rank()
if isinstance(expr, Indexed):
return expr.rank
if isinstance(expr, IndexedBase):
shape = expr.shape
if shape is None:
return -1
else:
return len(shape)
if isinstance(expr, _RecognizeMatOp):
return expr.rank()
if isinstance(expr, _RecognizeMatMulLines):
return expr.rank()
return 0
def _get_subrank(expr):
if isinstance(expr, _CodegenArrayAbstract):
return expr.subrank()
return get_rank(expr)
def _get_subranks(expr):
if isinstance(expr, _CodegenArrayAbstract):
return expr.subranks
else:
return [get_rank(expr)]
def _get_mapping_from_subranks(subranks):
mapping = {}
counter = 0
for i, rank in enumerate(subranks):
for j in range(rank):
mapping[counter] = (i, j)
counter += 1
return mapping
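# Worked example (illustrative, not part of the original module): for two
# matrices the subranks are [2, 2], so the absolute axis numbers 0..3 map to
# (argument index, local axis) pairs as
#     _get_mapping_from_subranks([2, 2]) == {0: (0, 0), 1: (0, 1),
#                                            2: (1, 0), 3: (1, 1)}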
def _get_contraction_links(args, subranks, *contraction_indices):
mapping = _get_mapping_from_subranks(subranks)
contraction_tuples = [[mapping[j] for j in i] for i in contraction_indices]
dlinks = defaultdict(dict)
for links in contraction_tuples:
if len(links) == 2:
(arg1, pos1), (arg2, pos2) = links
dlinks[arg1][pos1] = (arg2, pos2)
dlinks[arg2][pos2] = (arg1, pos1)
continue
return args, dict(dlinks)
def _sort_contraction_indices(pairing_indices):
pairing_indices = [Tuple(*sorted(i)) for i in pairing_indices]
pairing_indices.sort(key=lambda x: min(x))
return pairing_indices
def _get_diagonal_indices(flattened_indices):
axes_contraction = defaultdict(list)
for i, ind in enumerate(flattened_indices):
if isinstance(ind, (int, Integer)):
# If the index is a number, there can be no diagonal operation:
continue
axes_contraction[ind].append(i)
axes_contraction = {k: v for k, v in axes_contraction.items() if len(v) > 1}
# Put the diagonalized indices at the end:
ret_indices = [i for i in flattened_indices if i not in axes_contraction]
diag_indices = list(axes_contraction)
diag_indices.sort(key=lambda x: flattened_indices.index(x))
diagonal_indices = [tuple(axes_contraction[i]) for i in diag_indices]
ret_indices += diag_indices
ret_indices = tuple(ret_indices)
return diagonal_indices, ret_indices
def _get_argindex(subindices, ind):
for i, sind in enumerate(subindices):
if ind == sind:
return i
if isinstance(sind, (set, frozenset)) and ind in sind:
return i
raise IndexError("%s not found in %s" % (ind, subindices))
def _codegen_array_parse(expr):
if isinstance(expr, Sum):
function = expr.function
summation_indices = expr.variables
subexpr, subindices = _codegen_array_parse(function)
# Check dimensional consistency:
shape = subexpr.shape
if shape:
for ind, istart, iend in expr.limits:
i = _get_argindex(subindices, ind)
if istart != 0 or iend+1 != shape[i]:
raise ValueError("summation index and array dimension mismatch: %s" % ind)
contraction_indices = []
subindices = list(subindices)
if isinstance(subexpr, CodegenArrayDiagonal):
diagonal_indices = list(subexpr.diagonal_indices)
dindices = subindices[-len(diagonal_indices):]
subindices = subindices[:-len(diagonal_indices)]
for index in summation_indices:
if index in dindices:
position = dindices.index(index)
contraction_indices.append(diagonal_indices[position])
diagonal_indices[position] = None
diagonal_indices = [i for i in diagonal_indices if i is not None]
for i, ind in enumerate(subindices):
if ind in summation_indices:
pass
if diagonal_indices:
subexpr = CodegenArrayDiagonal(subexpr.expr, *diagonal_indices)
else:
subexpr = subexpr.expr
axes_contraction = defaultdict(list)
for i, ind in enumerate(subindices):
if ind in summation_indices:
axes_contraction[ind].append(i)
subindices[i] = None
for k, v in axes_contraction.items():
contraction_indices.append(tuple(v))
free_indices = [i for i in subindices if i is not None]
indices_ret = list(free_indices)
indices_ret.sort(key=lambda x: free_indices.index(x))
return CodegenArrayContraction(
subexpr,
*contraction_indices,
free_indices=free_indices
), tuple(indices_ret)
if isinstance(expr, Mul):
args, indices = zip(*[_codegen_array_parse(arg) for arg in expr.args])
# Check if there are KroneckerDelta objects:
kronecker_delta_repl = {}
for arg in args:
if not isinstance(arg, KroneckerDelta):
continue
# Diagonalize two indices:
i, j = arg.indices
kindices = set(arg.indices)
if i in kronecker_delta_repl:
kindices.update(kronecker_delta_repl[i])
if j in kronecker_delta_repl:
kindices.update(kronecker_delta_repl[j])
kindices = frozenset(kindices)
for index in kindices:
kronecker_delta_repl[index] = kindices
# Remove KroneckerDelta objects, their relations should be handled by
# CodegenArrayDiagonal:
newargs = []
newindices = []
for arg, loc_indices in zip(args, indices):
if isinstance(arg, KroneckerDelta):
continue
newargs.append(arg)
newindices.append(loc_indices)
flattened_indices = [kronecker_delta_repl.get(j, j) for i in newindices for j in i]
diagonal_indices, ret_indices = _get_diagonal_indices(flattened_indices)
tp = CodegenArrayTensorProduct(*newargs)
if diagonal_indices:
return (CodegenArrayDiagonal(tp, *diagonal_indices), ret_indices)
else:
return tp, ret_indices
if isinstance(expr, MatrixElement):
indices = expr.args[1:]
diagonal_indices, ret_indices = _get_diagonal_indices(indices)
if diagonal_indices:
return (CodegenArrayDiagonal(expr.args[0], *diagonal_indices), ret_indices)
else:
return expr.args[0], ret_indices
if isinstance(expr, Indexed):
indices = expr.indices
diagonal_indices, ret_indices = _get_diagonal_indices(indices)
if diagonal_indices:
return (CodegenArrayDiagonal(expr.base, *diagonal_indices), ret_indices)
else:
return expr.args[0], ret_indices
if isinstance(expr, IndexedBase):
raise NotImplementedError
if isinstance(expr, KroneckerDelta):
return expr, expr.indices
if isinstance(expr, Add):
args, indices = zip(*[_codegen_array_parse(arg) for arg in expr.args])
args = list(args)
# Check if all indices are compatible. Otherwise expand the dimensions:
index0set = set(indices[0])
index0 = indices[0]
for i in range(1, len(args)):
if set(indices[i]) != index0set:
raise NotImplementedError("indices must be the same")
permutation = Permutation([index0.index(j) for j in indices[i]])
# Perform index permutations:
args[i] = CodegenArrayPermuteDims(args[i], permutation)
return CodegenArrayElementwiseAdd(*args), index0
return expr, ()
def parse_matrix_expression(expr: MatrixExpr) -> Basic:
if isinstance(expr, MatMul):
args_nonmat = []
args = []
for arg in expr.args:
if isinstance(arg, MatrixExpr):
args.append(arg)
else:
args_nonmat.append(arg)
contractions = [(2*i+1, 2*i+2) for i in range(len(args)-1)]
scalar = Mul.fromiter(args_nonmat)
if scalar == 1:
tprod = CodegenArrayTensorProduct(
*[parse_matrix_expression(arg) for arg in args])
else:
tprod = CodegenArrayTensorProduct(
scalar,
*[parse_matrix_expression(arg) for arg in args])
return CodegenArrayContraction(
tprod,
*contractions
)
elif isinstance(expr, MatAdd):
return CodegenArrayElementwiseAdd(
*[parse_matrix_expression(arg) for arg in expr.args]
)
elif isinstance(expr, Transpose):
return CodegenArrayPermuteDims(
parse_matrix_expression(expr.args[0]), [1, 0]
)
elif isinstance(expr, Trace):
inner_expr = parse_matrix_expression(expr.arg)
return CodegenArrayContraction(inner_expr, (0, len(inner_expr.shape) - 1))
else:
return expr
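# Illustrative sketch (not part of the original module): how a plain matrix
# product is translated by ``parse_matrix_expression``. The function name is
# hypothetical and only serves as a usage example.
def _demo_parse_matmul():
    from sympy import MatrixSymbol, symbols
    n = symbols("n")
    A = MatrixSymbol("A", n, n)
    B = MatrixSymbol("B", n, n)
    # A*B becomes a tensor product contracted over the two adjacent axes:
    # CodegenArrayContraction(CodegenArrayTensorProduct(A, B), (1, 2))
    return parse_matrix_expression(A*B)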
def parse_indexed_expression(expr, first_indices=None):
r"""
Parse indexed expression into a form useful for code generation.
Examples
========
>>> from sympy.codegen.array_utils import parse_indexed_expression
>>> from sympy import MatrixSymbol, Sum, symbols
>>> i, j, k, d = symbols("i j k d")
>>> M = MatrixSymbol("M", d, d)
>>> N = MatrixSymbol("N", d, d)
Recognize the trace in summation form:
>>> expr = Sum(M[i, i], (i, 0, d-1))
>>> parse_indexed_expression(expr)
CodegenArrayContraction(M, (0, 1))
Recognize the extraction of the diagonal by using the same index `i` on
both axes of the matrix:
>>> expr = M[i, i]
>>> parse_indexed_expression(expr)
CodegenArrayDiagonal(M, (0, 1))
This function can help perform the transformation expressed in two
different mathematical notations as:
`\sum_{j=0}^{N-1} A_{i,j} B_{j,k} \Longrightarrow \mathbf{A}\cdot \mathbf{B}`
Recognize the matrix multiplication in summation form:
>>> expr = Sum(M[i, j]*N[j, k], (j, 0, d-1))
>>> parse_indexed_expression(expr)
CodegenArrayContraction(CodegenArrayTensorProduct(M, N), (1, 2))
Specify that ``k`` has to be the starting index:
>>> parse_indexed_expression(expr, first_indices=[k])
CodegenArrayPermuteDims(CodegenArrayContraction(CodegenArrayTensorProduct(M, N), (1, 2)), (0 1))
"""
result, indices = _codegen_array_parse(expr)
if not first_indices:
return result
for i in first_indices:
if i not in indices:
first_indices.remove(i)
#raise ValueError("index %s not found or not a free index" % i)
first_indices.extend([i for i in indices if i not in first_indices])
permutation = [first_indices.index(i) for i in indices]
return CodegenArrayPermuteDims(result, permutation)
def _has_multiple_lines(expr):
if isinstance(expr, _RecognizeMatMulLines):
return True
if isinstance(expr, _RecognizeMatOp):
return expr.multiple_lines
return False
class _RecognizeMatOp:
"""
Class to help parsing matrix multiplication lines.
"""
def __init__(self, operator, args):
self.operator = operator
self.args = args
if any(_has_multiple_lines(arg) for arg in args):
multiple_lines = True
else:
multiple_lines = False
self.multiple_lines = multiple_lines
def rank(self):
if self.operator == Trace:
return 0
# TODO: check
return 2
    def __repr__(self):
        # For MatMul/MatAdd join the argument reprs with the operator symbol;
        # any other operator falls back to the generic two-argument form.
        op = self.operator
        if op == MatMul:
            return "_RecognizeMatOp(%s)" % "*".join(repr(i) for i in self.args)
        if op == MatAdd:
            return "_RecognizeMatOp(%s)" % "+".join(repr(i) for i in self.args)
        return "_RecognizeMatOp(%s, %s)" % (op.__name__, repr(self.args))
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
if self.operator != other.operator:
return False
if self.args != other.args:
return False
return True
def __iter__(self):
return iter(self.args)
class _RecognizeMatMulLines(list):
"""
This class handles multiple parsed multiplication lines.
"""
def __new__(cls, args):
if len(args) == 1:
return args[0]
return list.__new__(cls, args)
def rank(self):
return reduce(lambda x, y: x*y, [get_rank(i) for i in self], S.One)
def __repr__(self):
return "_RecognizeMatMulLines(%s)" % super().__repr__()
def _support_function_tp1_recognize(contraction_indices, args):
if not isinstance(args, list):
args = [args]
subranks = [get_rank(i) for i in args]
coeff = reduce(lambda x, y: x*y, [arg for arg, srank in zip(args, subranks) if srank == 0], S.One)
mapping = _get_mapping_from_subranks(subranks)
reverse_mapping = {v:k for k, v in mapping.items()}
args, dlinks = _get_contraction_links(args, subranks, *contraction_indices)
flatten_contractions = [j for i in contraction_indices for j in i]
total_rank = sum(subranks)
# TODO: turn `free_indices` into a list?
free_indices = {i: i for i in range(total_rank) if i not in flatten_contractions}
return_list = []
while dlinks:
if free_indices:
first_index, starting_argind = min(free_indices.items(), key=lambda x: x[1])
free_indices.pop(first_index)
starting_argind, starting_pos = mapping[starting_argind]
else:
# Maybe a Trace
first_index = None
starting_argind = min(dlinks)
starting_pos = 0
current_argind, current_pos = starting_argind, starting_pos
matmul_args = []
last_index = None
while True:
elem = args[current_argind]
if current_pos == 1:
elem = _RecognizeMatOp(Transpose, [elem])
matmul_args.append(elem)
other_pos = 1 - current_pos
if current_argind not in dlinks:
other_absolute = reverse_mapping[current_argind, other_pos]
free_indices.pop(other_absolute, None)
break
link_dict = dlinks.pop(current_argind)
if other_pos not in link_dict:
if free_indices:
last_index = [i for i, j in free_indices.items() if mapping[j] == (current_argind, other_pos)][0]
else:
last_index = None
break
if len(link_dict) > 2:
raise NotImplementedError("not a matrix multiplication line")
# Get the last element of `link_dict` as the next link. The last
# element is the correct start for trace expressions:
current_argind, current_pos = link_dict[other_pos]
if current_argind == starting_argind:
# This is a trace:
if len(matmul_args) > 1:
matmul_args = [_RecognizeMatOp(Trace, [_RecognizeMatOp(MatMul, matmul_args)])]
elif args[current_argind].shape != (1, 1):
matmul_args = [_RecognizeMatOp(Trace, matmul_args)]
break
dlinks.pop(starting_argind, None)
free_indices.pop(last_index, None)
return_list.append(_RecognizeMatOp(MatMul, matmul_args))
if coeff != 1:
# Let's inject the coefficient:
return_list[0].args.insert(0, coeff)
return _RecognizeMatMulLines(return_list)
def recognize_matrix_expression(expr):
r"""
Recognize matrix expressions in codegen objects.
If more than one matrix multiplication line has been detected, return a
list of the matrix expressions.
Examples
========
>>> from sympy import MatrixSymbol, Sum
>>> from sympy.abc import i, j, k, l, N
>>> from sympy.codegen.array_utils import CodegenArrayContraction, CodegenArrayTensorProduct
>>> from sympy.codegen.array_utils import recognize_matrix_expression, parse_indexed_expression, parse_matrix_expression
>>> A = MatrixSymbol("A", N, N)
>>> B = MatrixSymbol("B", N, N)
>>> C = MatrixSymbol("C", N, N)
>>> D = MatrixSymbol("D", N, N)
>>> expr = Sum(A[i, j]*B[j, k], (j, 0, N-1))
>>> cg = parse_indexed_expression(expr)
>>> recognize_matrix_expression(cg)
A*B
>>> cg = parse_indexed_expression(expr, first_indices=[k])
>>> recognize_matrix_expression(cg)
(A*B).T
Transposition is detected:
>>> expr = Sum(A[j, i]*B[j, k], (j, 0, N-1))
>>> cg = parse_indexed_expression(expr)
>>> recognize_matrix_expression(cg)
A.T*B
>>> cg = parse_indexed_expression(expr, first_indices=[k])
>>> recognize_matrix_expression(cg)
(A.T*B).T
Detect the trace:
>>> expr = Sum(A[i, i], (i, 0, N-1))
>>> cg = parse_indexed_expression(expr)
>>> recognize_matrix_expression(cg)
Trace(A)
Recognize some more complex traces:
>>> expr = Sum(A[i, j]*B[j, i], (i, 0, N-1), (j, 0, N-1))
>>> cg = parse_indexed_expression(expr)
>>> recognize_matrix_expression(cg)
Trace(A*B)
More complicated expressions:
>>> expr = Sum(A[i, j]*B[k, j]*A[l, k], (j, 0, N-1), (k, 0, N-1))
>>> cg = parse_indexed_expression(expr)
>>> recognize_matrix_expression(cg)
A*B.T*A.T
Expressions constructed from matrix expressions do not contain literal
indices; the positions of the free indices are used instead:
>>> expr = A*B
>>> cg = parse_matrix_expression(expr)
>>> recognize_matrix_expression(cg)
A*B
If more than one line of matrix multiplications is detected, return
separate matrix multiplication factors:
>>> cg = CodegenArrayContraction(CodegenArrayTensorProduct(A, B, C, D), (1, 2), (5, 6))
>>> recognize_matrix_expression(cg)
[A*B, C*D]
The two lines have free indices at axes 0, 3 and 4, 7, respectively.
"""
# TODO: expr has to be a CodegenArray... type
rec = _recognize_matrix_expression(expr)
return _unfold_recognized_expr(rec)
def _recognize_matrix_expression(expr):
if isinstance(expr, CodegenArrayContraction):
# Apply some transformations:
expr = expr.flatten_contraction_of_diagonal()
expr = expr.split_multiple_contractions()
args = _recognize_matrix_expression(expr.expr)
contraction_indices = expr.contraction_indices
if isinstance(args, _RecognizeMatOp) and args.operator == MatAdd:
addends = []
for arg in args.args:
addends.append(_support_function_tp1_recognize(contraction_indices, arg))
return _RecognizeMatOp(MatAdd, addends)
elif isinstance(args, _RecognizeMatMulLines):
return _support_function_tp1_recognize(contraction_indices, args)
return _support_function_tp1_recognize(contraction_indices, [args])
elif isinstance(expr, CodegenArrayElementwiseAdd):
add_args = []
for arg in expr.args:
add_args.append(_recognize_matrix_expression(arg))
return _RecognizeMatOp(MatAdd, add_args)
elif isinstance(expr, (MatrixSymbol, IndexedBase)):
return expr
elif isinstance(expr, CodegenArrayPermuteDims):
if expr.permutation.array_form == [1, 0]:
return _RecognizeMatOp(Transpose, [_recognize_matrix_expression(expr.expr)])
elif isinstance(expr.expr, CodegenArrayTensorProduct):
ranks = expr.expr.subranks
newrange = [expr.permutation(i) for i in range(sum(ranks))]
newpos = []
counter = 0
for rank in ranks:
newpos.append(newrange[counter:counter+rank])
counter += rank
newargs = []
for pos, arg in zip(newpos, expr.expr.args):
if pos == sorted(pos):
newargs.append((_recognize_matrix_expression(arg), pos[0]))
elif len(pos) == 2:
newargs.append((_RecognizeMatOp(Transpose, [_recognize_matrix_expression(arg)]), pos[0]))
else:
raise NotImplementedError
newargs.sort(key=lambda x: x[1])
newargs = [i[0] for i in newargs]
return _RecognizeMatMulLines(newargs)
else:
raise NotImplementedError
elif isinstance(expr, CodegenArrayTensorProduct):
args = [_recognize_matrix_expression(arg) for arg in expr.args]
multiple_lines = [_has_multiple_lines(arg) for arg in args]
if any(multiple_lines):
if any(a.operator != MatAdd for i, a in enumerate(args) if multiple_lines[i] and isinstance(a, _RecognizeMatOp)):
raise NotImplementedError
getargs = lambda x: x.args if isinstance(x, _RecognizeMatOp) else list(x)
expand_args = [getargs(arg) if multiple_lines[i] else [arg] for i, arg in enumerate(args)]
it = itertools.product(*expand_args)
ret = _RecognizeMatOp(MatAdd, [_RecognizeMatMulLines([k for j in i for k in (j if isinstance(j, _RecognizeMatMulLines) else [j])]) for i in it])
return ret
return _RecognizeMatMulLines(args)
elif isinstance(expr, CodegenArrayDiagonal):
pexpr = expr.transform_to_product()
if expr == pexpr:
return expr
return _recognize_matrix_expression(pexpr)
elif isinstance(expr, Transpose):
return expr
elif isinstance(expr, MatrixExpr):
return expr
return expr
def _suppress_trivial_dims_in_tensor_product(mat_list):
# Recognize expressions like [x, y] with shape (k, 1, k, 1) as `x*y.T`.
# The matrix expression has to be equivalent to the tensor product of the matrices, with trivial dimensions (i.e. dim=1) dropped.
# That is, add contractions over trivial dimensions:
mat_11 = []
mat_k1 = []
for mat in mat_list:
if mat.shape == (1, 1):
mat_11.append(mat)
elif 1 in mat.shape:
if mat.shape[0] == 1:
mat_k1.append(mat.T)
else:
mat_k1.append(mat)
else:
return mat_list
if len(mat_k1) > 2:
return mat_list
a = MatMul.fromiter(mat_k1[:1])
b = MatMul.fromiter(mat_k1[1:])
x = MatMul.fromiter(mat_11)
return a*x*b.T
def _unfold_recognized_expr(expr):
if isinstance(expr, _RecognizeMatOp):
return expr.operator(*[_unfold_recognized_expr(i) for i in expr.args])
elif isinstance(expr, _RecognizeMatMulLines):
unfolded = [_unfold_recognized_expr(i) for i in expr]
mat_list = [i for i in unfolded if isinstance(i, MatrixExpr)]
scalar_list = [i for i in unfolded if i not in mat_list]
scalar = Mul.fromiter(scalar_list)
mat_list = [i.doit() for i in mat_list]
mat_list = [i for i in mat_list if not (i.shape == (1, 1) and i.is_Identity)]
if mat_list:
mat_list[0] *= scalar
if len(mat_list) == 1:
return mat_list[0].doit()
else:
return _suppress_trivial_dims_in_tensor_product(mat_list)
else:
return scalar
else:
return expr
def _apply_recursively_over_nested_lists(func, arr):
    # Nested sequences (including SymPy ``Tuple``) are rebuilt as plain
    # tuples of transformed elements; anything else is passed to ``func``.
    if isinstance(arr, (tuple, list, Tuple)):
        return tuple(_apply_recursively_over_nested_lists(func, i) for i in arr)
    else:
        return func(arr)
def _build_push_indices_up_func_transformation(flattened_contraction_indices):
shifts = {0: 0}
i = 0
cumulative = 0
while i < len(flattened_contraction_indices):
j = 1
while i+j < len(flattened_contraction_indices):
if flattened_contraction_indices[i] + j != flattened_contraction_indices[i+j]:
break
j += 1
cumulative += j
shifts[flattened_contraction_indices[i]] = cumulative
i += j
shift_keys = sorted(shifts.keys())
def func(idx):
return shifts[shift_keys[bisect.bisect_right(shift_keys, idx)-1]]
def transform(j):
if j in flattened_contraction_indices:
return None
else:
return j - func(j)
return transform
def _build_push_indices_down_func_transformation(flattened_contraction_indices):
N = flattened_contraction_indices[-1]+2
shifts = [i for i in range(N) if i not in flattened_contraction_indices]
def transform(j):
if j < len(shifts):
return shifts[j]
else:
return j + shifts[-1] - len(shifts) + 1
return transform
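# Worked example (illustrative, not part of the original module): with
# contracted/diagonalized positions (1, 2), the "up" transformation renumbers
# the remaining free axes and sends the removed ones to None, while the
# "down" transformation maps the compacted positions back to the originals:
#     up = _build_push_indices_up_func_transformation([1, 2])
#     [up(i) for i in range(4)]      # -> [0, None, None, 1]
#     down = _build_push_indices_down_func_transformation([1, 2])
#     [down(i) for i in range(2)]    # -> [0, 3]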
|
9a00e866fba102613418c91f5e9295fef7da15cefed39ca180fb376eea1fb675 | """
Additional AST nodes for operations on matrices. The nodes in this module
are meant to represent optimization of matrix expressions within codegen's
target languages that cannot be represented by SymPy expressions.
As an example, we can use :meth:`sympy.codegen.rewriting.optimize` and the
``matinv_opt`` optimization provided in :mod:`sympy.codegen.rewriting` to
transform matrix multiplication under certain assumptions:
>>> from sympy import symbols, MatrixSymbol
>>> n = symbols('n', integer=True)
>>> A = MatrixSymbol('A', n, n)
>>> x = MatrixSymbol('x', n, 1)
>>> expr = A**(-1) * x
>>> from sympy.assumptions import assuming, Q
>>> from sympy.codegen.rewriting import matinv_opt, optimize
>>> with assuming(Q.fullrank(A)):
... optimize(expr, [matinv_opt])
MatrixSolve(A, vector=x)
"""
from .ast import Token
from sympy.matrices import MatrixExpr
from sympy.core.sympify import sympify
class MatrixSolve(Token, MatrixExpr):
"""Represents an operation to solve a linear matrix equation.
Parameters
==========
matrix : MatrixSymbol
Matrix representing the coefficients of variables in the linear
equation. This matrix must be square and full-rank (i.e. all columns must
be linearly independent) for the solving operation to be valid.
vector : MatrixSymbol
One-column matrix representing the right-hand side of the linear
system, so that ``MatrixSolve(matrix, vector)`` stands for
``matrix**(-1) * vector``.
Examples
========
>>> from sympy import symbols, MatrixSymbol
>>> from sympy.codegen.matrix_nodes import MatrixSolve
>>> n = symbols('n', integer=True)
>>> A = MatrixSymbol('A', n, n)
>>> x = MatrixSymbol('x', n, 1)
>>> from sympy.printing.pycode import NumPyPrinter
>>> NumPyPrinter().doprint(MatrixSolve(A, x))
'numpy.linalg.solve(A, x)'
>>> from sympy.printing import octave_code
>>> octave_code(MatrixSolve(A, x))
'A \\\\ x'
"""
__slots__ = ('matrix', 'vector')
_construct_matrix = staticmethod(sympify)
@property
def shape(self):
return self.vector.shape
|
af2a3731ef0028c090a6a0908bd2ba653a6c17a26cec5b11a4473d1ab5369c2b | """
This file contains some classical ciphers and routines
implementing a linear-feedback shift register (LFSR)
and the Diffie-Hellman key exchange.
.. warning::
This module is intended for educational purposes only. Do not use the
functions in this module for real cryptographic applications. If you wish
to encrypt real data, we recommend using something like the `cryptography
<https://cryptography.io/en/latest/>`_ module.
"""
from string import whitespace, ascii_uppercase as uppercase, printable
from functools import reduce
import warnings
from itertools import cycle
from sympy import nextprime
from sympy.core import Rational, Symbol
from sympy.core.numbers import igcdex, mod_inverse, igcd
from sympy.core.compatibility import as_int
from sympy.matrices import Matrix
from sympy.ntheory import isprime, primitive_root, factorint
from sympy.polys.domains import FF
from sympy.polys.polytools import gcd, Poly
from sympy.utilities.misc import filldedent, translate
from sympy.utilities.iterables import uniq, multiset
from sympy.testing.randtest import _randrange, _randint
class NonInvertibleCipherWarning(RuntimeWarning):
"""A warning raised if the cipher is not invertible."""
def __init__(self, msg):
self.fullMessage = msg
def __str__(self):
return '\n\t' + self.fullMessage
def warn(self, stacklevel=2):
warnings.warn(self, stacklevel=stacklevel)
def AZ(s=None):
"""Return the letters of ``s`` in uppercase. In case more than
one string is passed, each of them will be processed and a list
of upper case strings will be returned.
Examples
========
>>> from sympy.crypto.crypto import AZ
>>> AZ('Hello, world!')
'HELLOWORLD'
>>> AZ('Hello, world!'.split())
['HELLO', 'WORLD']
See Also
========
check_and_join
"""
if not s:
return uppercase
t = type(s) is str
if t:
s = [s]
rv = [check_and_join(i.upper().split(), uppercase, filter=True)
for i in s]
if t:
return rv[0]
return rv
bifid5 = AZ().replace('J', '')
bifid6 = AZ() + '0123456789'
bifid10 = printable
def padded_key(key, symbols):
"""Return a string of the distinct characters of ``symbols`` with
those of ``key`` appearing first. A ValueError is raised if
a) there are duplicate characters in ``symbols`` or
b) there are characters in ``key`` that are not in ``symbols``.
Examples
========
>>> from sympy.crypto.crypto import padded_key
>>> padded_key('PUPPY', 'OPQRSTUVWXY')
'PUYOQRSTVWX'
>>> padded_key('RSA', 'ARTIST')
Traceback (most recent call last):
...
ValueError: duplicate characters in symbols: T
"""
syms = list(uniq(symbols))
if len(syms) != len(symbols):
extra = ''.join(sorted({
i for i in symbols if symbols.count(i) > 1}))
raise ValueError('duplicate characters in symbols: %s' % extra)
extra = set(key) - set(syms)
if extra:
raise ValueError(
'characters in key but not symbols: %s' % ''.join(
sorted(extra)))
key0 = ''.join(list(uniq(key)))
# remove from syms characters in key0
return key0 + translate(''.join(syms), None, key0)
def check_and_join(phrase, symbols=None, filter=None):
"""
Joins characters of ``phrase`` and if ``symbols`` is given, raises
an error if any character in ``phrase`` is not in ``symbols``.
Parameters
==========
phrase
String or list of strings to be returned as a string.
symbols
Iterable of characters allowed in ``phrase``.
If ``symbols`` is ``None``, no checking is performed.
Examples
========
>>> from sympy.crypto.crypto import check_and_join
>>> check_and_join('a phrase')
'a phrase'
>>> check_and_join('a phrase'.upper().split())
'APHRASE'
>>> check_and_join('a phrase!'.upper().split(), 'ARE', filter=True)
'ARAE'
>>> check_and_join('a phrase!'.upper().split(), 'ARE')
Traceback (most recent call last):
...
ValueError: characters in phrase but not symbols: "!HPS"
"""
rv = ''.join(''.join(phrase))
if symbols is not None:
symbols = check_and_join(symbols)
missing = ''.join(list(sorted(set(rv) - set(symbols))))
if missing:
if not filter:
raise ValueError(
'characters in phrase but not symbols: "%s"' % missing)
rv = translate(rv, None, missing)
return rv
def _prep(msg, key, alp, default=None):
if not alp:
if not default:
alp = AZ()
msg = AZ(msg)
key = AZ(key)
else:
alp = default
else:
alp = ''.join(alp)
key = check_and_join(key, alp, filter=True)
msg = check_and_join(msg, alp, filter=True)
return msg, key, alp
def cycle_list(k, n):
"""
Returns the elements of the list ``range(n)`` shifted to the
left by ``k`` (so the list starts with ``k`` (mod ``n``)).
Examples
========
>>> from sympy.crypto.crypto import cycle_list
>>> cycle_list(3, 10)
[3, 4, 5, 6, 7, 8, 9, 0, 1, 2]
"""
k = k % n
return list(range(k, n)) + list(range(k))
######## shift cipher examples ############
def encipher_shift(msg, key, symbols=None):
"""
Performs shift cipher encryption on plaintext msg, and returns the
ciphertext.
Parameters
==========
key : int
The secret key.
msg : str
Plaintext of upper-case letters.
Returns
=======
str
Ciphertext of upper-case letters.
Examples
========
>>> from sympy.crypto.crypto import encipher_shift, decipher_shift
>>> msg = "GONAVYBEATARMY"
>>> ct = encipher_shift(msg, 1); ct
'HPOBWZCFBUBSNZ'
To decipher the shifted text, change the sign of the key:
>>> encipher_shift(ct, -1)
'GONAVYBEATARMY'
There is also a convenience function that does this with the
original key:
>>> decipher_shift(ct, 1)
'GONAVYBEATARMY'
Notes
=====
ALGORITHM:
STEPS:
0. Number the letters of the alphabet from 0, ..., N-1
1. Compute from the string ``msg`` a list ``L1`` of
corresponding integers.
2. Compute from the list ``L1`` a new list ``L2``, given by
adding ``(key mod N)`` to each element in ``L1``.
3. Compute from the list ``L2`` a string ``ct`` of
corresponding letters.
The shift cipher is also called the Caesar cipher, after
Julius Caesar, who, according to Suetonius, used it with a
shift of three to protect messages of military significance.
Caesar's nephew Augustus reportedly used a similar cipher, but
with a right shift of 1.
References
==========
.. [1] https://en.wikipedia.org/wiki/Caesar_cipher
.. [2] http://mathworld.wolfram.com/CaesarsMethod.html
See Also
========
decipher_shift
"""
msg, _, A = _prep(msg, '', symbols)
shift = len(A) - key % len(A)
key = A[shift:] + A[:shift]
return translate(msg, key, A)
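# Illustrative sketch (not part of the original module): the shift cipher
# written directly from the STEPS above with plain modular arithmetic instead
# of ``translate``. The helper name is hypothetical; ``msg`` is assumed to
# consist only of characters of ``alphabet``.
def _shift_cipher_reference(msg, key, alphabet=uppercase):
    N = len(alphabet)
    L1 = [alphabet.index(c) for c in msg]      # step 1: letters -> numbers
    L2 = [(x + key) % N for x in L1]           # step 2: shift modulo N
    return ''.join(alphabet[x] for x in L2)    # step 3: numbers -> letters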
def decipher_shift(msg, key, symbols=None):
"""
Return the text by shifting the characters of ``msg`` to the
left by the amount given by ``key``.
Examples
========
>>> from sympy.crypto.crypto import encipher_shift, decipher_shift
>>> msg = "GONAVYBEATARMY"
>>> ct = encipher_shift(msg, 1); ct
'HPOBWZCFBUBSNZ'
To decipher the shifted text, change the sign of the key:
>>> encipher_shift(ct, -1)
'GONAVYBEATARMY'
Or use this function with the original key:
>>> decipher_shift(ct, 1)
'GONAVYBEATARMY'
"""
return encipher_shift(msg, -key, symbols)
def encipher_rot13(msg, symbols=None):
"""
Performs the ROT13 encryption on a given plaintext ``msg``.
Explanation
===========
ROT13 is a substitution cipher which substitutes each letter
in the plaintext message for the letter furthest away from it
in the English alphabet.
Equivalently, it is just a Caesar (shift) cipher with a shift
key of 13 (midway point of the alphabet).
References
==========
.. [1] https://en.wikipedia.org/wiki/ROT13
See Also
========
decipher_rot13
encipher_shift
"""
return encipher_shift(msg, 13, symbols)
def decipher_rot13(msg, symbols=None):
"""
Performs the ROT13 decryption on a given plaintext ``msg``.
Explanation
============
``decipher_rot13`` is equivalent to ``encipher_rot13`` as both
``decipher_shift`` with a key of 13 and ``encipher_shift`` key with a
key of 13 will return the same results. Nonetheless,
``decipher_rot13`` has nonetheless been explicitly defined here for
consistency.
Examples
========
>>> from sympy.crypto.crypto import encipher_rot13, decipher_rot13
>>> msg = 'GONAVYBEATARMY'
>>> ciphertext = encipher_rot13(msg);ciphertext
'TBANILORNGNEZL'
>>> decipher_rot13(ciphertext)
'GONAVYBEATARMY'
>>> encipher_rot13(msg) == decipher_rot13(msg)
True
>>> msg == decipher_rot13(ciphertext)
True
"""
return decipher_shift(msg, 13, symbols)
######## affine cipher examples ############
def encipher_affine(msg, key, symbols=None, _inverse=False):
r"""
Performs the affine cipher encryption on plaintext ``msg``, and
returns the ciphertext.
Explanation
===========
Encryption is based on the map `x \rightarrow ax+b` (mod `N`)
where ``N`` is the number of characters in the alphabet.
Decryption is based on the map `x \rightarrow cx+d` (mod `N`),
where `c = a^{-1}` (mod `N`) and `d = -a^{-1}b` (mod `N`).
In particular, for the map to be invertible, we need
`\mathrm{gcd}(a, N) = 1` and an error will be raised if this is
not true.
Parameters
==========
msg : str
Characters that appear in ``symbols``.
a, b : int, int
A pair of integers, with ``gcd(a, N) = 1`` (the secret key).
symbols
String of characters (default = uppercase letters).
When no symbols are given, ``msg`` is converted to upper case
letters and all other characters are ignored.
Returns
=======
ct
String of characters (the ciphertext message)
Notes
=====
ALGORITHM:
STEPS:
0. Number the letters of the alphabet from 0, ..., N-1
1. Compute from the string ``msg`` a list ``L1`` of
corresponding integers.
2. Compute from the list ``L1`` a new list ``L2``, given by
replacing ``x`` by ``a*x + b (mod N)``, for each element
``x`` in ``L1``.
3. Compute from the list ``L2`` a string ``ct`` of
corresponding letters.
This is a straightforward generalization of the shift cipher with
the added complexity of requiring 2 characters to be deciphered in
order to recover the key.
References
==========
.. [1] https://en.wikipedia.org/wiki/Affine_cipher
See Also
========
decipher_affine
"""
msg, _, A = _prep(msg, '', symbols)
N = len(A)
a, b = key
assert gcd(a, N) == 1
if _inverse:
c = mod_inverse(a, N)
d = -b*c
a, b = c, d
B = ''.join([A[(a*i + b) % N] for i in range(N)])
return translate(msg, A, B)
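# Illustrative sketch (not part of the original module): the affine map
# x -> a*x + b (mod N) applied character by character, mirroring the STEPS
# above. The helper name is hypothetical; ``msg`` is assumed to consist only
# of characters of ``alphabet``.
def _affine_cipher_reference(msg, key, alphabet=uppercase):
    a, b = key
    N = len(alphabet)
    return ''.join(alphabet[(a*alphabet.index(c) + b) % N] for c in msg)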
def decipher_affine(msg, key, symbols=None):
r"""
Return the deciphered text that was made from the mapping,
`x \rightarrow ax+b` (mod `N`), where ``N`` is the
number of characters in the alphabet. Deciphering is done by
reciphering with a new key: `x \rightarrow cx+d` (mod `N`),
where `c = a^{-1}` (mod `N`) and `d = -a^{-1}b` (mod `N`).
Examples
========
>>> from sympy.crypto.crypto import encipher_affine, decipher_affine
>>> msg = "GO NAVY BEAT ARMY"
>>> key = (3, 1)
>>> encipher_affine(msg, key)
'TROBMVENBGBALV'
>>> decipher_affine(_, key)
'GONAVYBEATARMY'
See Also
========
encipher_affine
"""
return encipher_affine(msg, key, symbols, _inverse=True)
def encipher_atbash(msg, symbols=None):
r"""
Enciphers a given ``msg`` into its Atbash ciphertext and returns it.
Explanation
===========
Atbash is a substitution cipher originally used to encrypt the Hebrew
alphabet. It works by mapping each letter of the alphabet to its
reverse-order counterpart (i.e. a maps to z, b to y, etc.).
Atbash is functionally equivalent to the affine cipher with ``a = 25``
and ``b = 25``.
See Also
========
decipher_atbash
"""
return encipher_affine(msg, (25, 25), symbols)
def decipher_atbash(msg, symbols=None):
r"""
Deciphers a given ``msg`` using Atbash cipher and returns it.
Explanation
===========
``decipher_atbash`` is functionally equivalent to ``encipher_atbash``.
However, it has still been added as a separate function to maintain
consistency.
Examples
========
>>> from sympy.crypto.crypto import encipher_atbash, decipher_atbash
>>> msg = 'GONAVYBEATARMY'
>>> encipher_atbash(msg)
'TLMZEBYVZGZINB'
>>> decipher_atbash(msg)
'TLMZEBYVZGZINB'
>>> encipher_atbash(msg) == decipher_atbash(msg)
True
>>> msg == encipher_atbash(encipher_atbash(msg))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Atbash
See Also
========
encipher_atbash
"""
return decipher_affine(msg, (25, 25), symbols)
#################### substitution cipher ###########################
def encipher_substitution(msg, old, new=None):
r"""
Returns the ciphertext obtained by replacing each character that
appears in ``old`` with the corresponding character in ``new``.
If ``old`` is a mapping, then new is ignored and the replacements
defined by ``old`` are used.
Explanation
===========
This is more general than the affine cipher in that the key can
only be recovered by determining the mapping for each symbol.
In practice, though, once a few symbols are recognized the mappings
for other characters can be quickly guessed.
Examples
========
>>> from sympy.crypto.crypto import encipher_substitution, AZ
>>> old = 'OEYAG'
>>> new = '034^6'
>>> msg = AZ("go navy! beat army!")
>>> ct = encipher_substitution(msg, old, new); ct
'60N^V4B3^T^RM4'
To decrypt a substitution, reverse the last two arguments:
>>> encipher_substitution(ct, new, old)
'GONAVYBEATARMY'
In the special case where ``old`` and ``new`` are a permutation of
order 2 (representing a transposition of characters) their order
is immaterial:
>>> old = 'NAVY'
>>> new = 'ANYV'
>>> encipher = lambda x: encipher_substitution(x, old, new)
>>> encipher('NAVY')
'ANYV'
>>> encipher(_)
'NAVY'
The substitution cipher, in general, is a method
whereby "units" (not necessarily single characters) of plaintext
are replaced with ciphertext according to a regular system.
>>> ords = dict(zip('abc', ['\\%i' % ord(i) for i in 'abc']))
>>> print(encipher_substitution('abc', ords))
\97\98\99
References
==========
.. [1] https://en.wikipedia.org/wiki/Substitution_cipher
"""
return translate(msg, old, new)
######################################################################
#################### Vigenere cipher examples ########################
######################################################################
def encipher_vigenere(msg, key, symbols=None):
"""
Performs the Vigenere cipher encryption on plaintext ``msg``, and
returns the ciphertext.
Examples
========
>>> from sympy.crypto.crypto import encipher_vigenere, AZ
>>> key = "encrypt"
>>> msg = "meet me on monday"
>>> encipher_vigenere(msg, key)
'QRGKKTHRZQEBPR'
Section 1 of the Kryptos sculpture at the CIA headquarters
uses this cipher and also changes the order of the
alphabet [2]_. Here is the first line of that section of
the sculpture:
>>> from sympy.crypto.crypto import decipher_vigenere, padded_key
>>> alp = padded_key('KRYPTOS', AZ())
>>> key = 'PALIMPSEST'
>>> msg = 'EMUFPHZLRFAXYUSDJKZLDKRNSHGNFIVJ'
>>> decipher_vigenere(msg, key, alp)
'BETWEENSUBTLESHADINGANDTHEABSENC'
Explanation
===========
The Vigenere cipher is named after Blaise de Vigenere, a sixteenth
century diplomat and cryptographer, by a historical accident.
Vigenere actually invented a different and more complicated cipher.
The so-called *Vigenere cipher* was actually invented
by Giovan Batista Belaso in 1553.
This cipher was used in the 1800's, for example, during the American
Civil War. The Confederacy used a brass cipher disk to implement the
Vigenere cipher (now on display in the NSA Museum in Fort
Meade) [1]_.
The Vigenere cipher is a generalization of the shift cipher.
Whereas the shift cipher shifts each letter by the same amount
(that amount being the key of the shift cipher) the Vigenere
cipher shifts a letter by an amount determined by the key (which is
a word or phrase known only to the sender and receiver).
For example, if the key was a single letter, such as "C", then the
so-called Vigenere cipher is actually a shift cipher with a
shift of `2` (since "C" is the 2nd letter of the alphabet, if
you start counting at `0`). If the key was a word with two
letters, such as "CA", then the so-called Vigenere cipher will
shift letters in even positions by `2` and letters in odd positions
are left alone (shifted by `0`, since "A" is the 0th letter, if
you start counting at `0`).
ALGORITHM:
INPUT:
``msg``: string of characters that appear in ``symbols``
(the plaintext)
``key``: a string of characters that appear in ``symbols``
(the secret key)
``symbols``: a string of letters defining the alphabet
OUTPUT:
``ct``: string of characters (the ciphertext message)
STEPS:
0. Number the letters of the alphabet from 0, ..., N-1
1. Compute from the string ``key`` a list ``L1`` of
corresponding integers. Let ``n1 = len(L1)``.
2. Compute from the string ``msg`` a list ``L2`` of
corresponding integers. Let ``n2 = len(L2)``.
3. Break ``L2`` up sequentially into sublists of size
``n1``; the last sublist may be smaller than ``n1``
4. For each of these sublists ``L`` of ``L2``, compute a
new list ``C`` given by ``C[i] = L[i] + L1[i] (mod N)``
to the ``i``-th element in the sublist, for each ``i``.
5. Assemble these lists ``C`` by concatenation into a new
list of length ``n2``.
6. Compute from the new list a string ``ct`` of
corresponding letters.
Once it is known that the key is, say, `n` characters long,
frequency analysis can be applied to every `n`-th letter of
the ciphertext to determine the plaintext. This method is
called *Kasiski examination* (although it was first discovered
by Babbage). If the key is as long as the message and is
comprised of randomly selected characters -- a one-time pad -- the
message is theoretically unbreakable.
The cipher Vigenere actually discovered is an "auto-key" cipher
described as follows.
ALGORITHM:
INPUT:
``key``: a string of letters (the secret key)
``msg``: string of letters (the plaintext message)
OUTPUT:
``ct``: string of upper-case letters (the ciphertext message)
STEPS:
0. Number the letters of the alphabet from 0, ..., N-1
1. Compute from the string ``msg`` a list ``L2`` of
corresponding integers. Let ``n2 = len(L2)``.
2. Let ``n1`` be the length of the key. Append to the
string ``key`` the first ``n2 - n1`` characters of
the plaintext message. Compute from this string (also of
length ``n2``) a list ``L1`` of integers corresponding
to the letter numbers in the first step.
3. Compute a new list ``C`` given by
``C[i] = L1[i] + L2[i] (mod N)``.
4. Compute from the new list a string ``ct`` of letters
corresponding to the new integers.
To decipher the auto-key ciphertext, the key is used to decipher
the first ``n1`` characters and then those characters become the
key to decipher the next ``n1`` characters, etc...:
>>> m = AZ('go navy, beat army! yes you can'); m
'GONAVYBEATARMYYESYOUCAN'
>>> key = AZ('gold bug'); n1 = len(key); n2 = len(m)
>>> auto_key = key + m[:n2 - n1]; auto_key
'GOLDBUGGONAVYBEATARMYYE'
>>> ct = encipher_vigenere(m, auto_key); ct
'MCYDWSHKOGAMKZCELYFGAYR'
>>> n1 = len(key)
>>> pt = []
>>> while ct:
... part, ct = ct[:n1], ct[n1:]
... pt.append(decipher_vigenere(part, key))
... key = pt[-1]
...
>>> ''.join(pt) == m
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Vigenere_cipher
.. [2] http://web.archive.org/web/20071116100808/
.. [3] http://filebox.vt.edu/users/batman/kryptos.html
(short URL: https://goo.gl/ijr22d)
"""
msg, key, A = _prep(msg, key, symbols)
map = {c: i for i, c in enumerate(A)}
key = [map[c] for c in key]
N = len(map)
k = len(key)
rv = []
for i, m in enumerate(msg):
rv.append(A[(map[m] + key[i % k]) % N])
rv = ''.join(rv)
return rv
def decipher_vigenere(msg, key, symbols=None):
"""
Decode using the Vigenere cipher.
Examples
========
>>> from sympy.crypto.crypto import decipher_vigenere
>>> key = "encrypt"
>>> ct = "QRGK kt HRZQE BPR"
>>> decipher_vigenere(ct, key)
'MEETMEONMONDAY'
"""
msg, key, A = _prep(msg, key, symbols)
map = {c: i for i, c in enumerate(A)}
N = len(A) # normally, 26
K = [map[c] for c in key]
n = len(K)
C = [map[c] for c in msg]
rv = ''.join([A[(-K[i % n] + c) % N] for i, c in enumerate(C)])
return rv
#################### Hill cipher ########################
def encipher_hill(msg, key, symbols=None, pad="Q"):
r"""
Return the Hill cipher encryption of ``msg``.
Explanation
===========
The Hill cipher [1]_, invented by Lester S. Hill in the 1920's [2]_,
was the first polygraphic cipher in which it was practical
(though barely) to operate on more than three symbols at once.
The following discussion assumes an elementary knowledge of
matrices.
First, each letter is first encoded as a number starting with 0.
Suppose your message `msg` consists of `n` capital letters, with no
spaces. This may be regarded as an `n`-tuple M of elements of
`Z_{26}` (if the letters are those of the English alphabet). A key
in the Hill cipher is a `k \times k` matrix `K`, all of whose entries
are in `Z_{26}`, such that the matrix `K` is invertible (i.e., the
linear transformation `K: Z_{N}^k \rightarrow Z_{N}^k`
is one-to-one).
Parameters
==========
msg
Plaintext message of `n` upper-case letters.
key
A `k \times k` invertible matrix `K`, all of whose entries are
in `Z_{26}` (or whatever number of symbols are being used).
pad
Character (default "Q") to use to make length of text be a
multiple of ``k``.
Returns
=======
ct
Ciphertext of upper-case letters.
Notes
=====
ALGORITHM:
STEPS:
0. Number the letters of the alphabet from 0, ..., N-1
1. Compute from the string ``msg`` a list ``L`` of
corresponding integers. Let ``n = len(L)``.
2. Break the list ``L`` up into ``t = ceiling(n/k)``
sublists ``L_1``, ..., ``L_t`` of size ``k`` (with
the last list "padded" to ensure its size is
``k``).
3. Compute new list ``C_1``, ..., ``C_t`` given by
``C[i] = K*L_i`` (arithmetic is done mod N), for each
``i``.
4. Concatenate these into a list ``C = C_1 + ... + C_t``.
5. Compute from ``C`` a string ``ct`` of corresponding
letters. This has length ``k*t``.
References
==========
.. [1] https://en.wikipedia.org/wiki/Hill_cipher
.. [2] Lester S. Hill, Cryptography in an Algebraic Alphabet,
The American Mathematical Monthly Vol.36, June-July 1929,
pp.306-312.
See Also
========
decipher_hill
"""
assert key.is_square
assert len(pad) == 1
msg, pad, A = _prep(msg, pad, symbols)
map = {c: i for i, c in enumerate(A)}
P = [map[c] for c in msg]
N = len(A)
k = key.cols
n = len(P)
m, r = divmod(n, k)
if r:
P = P + [map[pad]]*(k - r)
m += 1
rv = ''.join([A[c % N] for j in range(m) for c in
list(key*Matrix(k, 1, [P[i]
for i in range(k*j, k*(j + 1))]))])
return rv
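# Illustrative sketch (not part of the original module): enciphering a single
# block of k letters with the Hill cipher, i.e. step 3 above, C_i = K*L_i
# (mod N). The helper name is hypothetical; ``block`` is a list of k integers
# and ``key`` a k x k Matrix.
def _hill_block_reference(block, key, N=26):
    col = Matrix(len(block), 1, block)
    return [int(x) % N for x in key*col]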
def decipher_hill(msg, key, symbols=None):
"""
Deciphering is the same as enciphering but using the inverse of the
key matrix.
Examples
========
>>> from sympy.crypto.crypto import encipher_hill, decipher_hill
>>> from sympy import Matrix
>>> key = Matrix([[1, 2], [3, 5]])
>>> encipher_hill("meet me on monday", key)
'UEQDUEODOCTCWQ'
>>> decipher_hill(_, key)
'MEETMEONMONDAY'
When the length of the plaintext (stripped of invalid characters)
is not a multiple of the key dimension, extra characters will
appear at the end of the enciphered and deciphered text. In order to
decipher the text, those characters must be included in the text to
be deciphered. In the following, the key has a dimension of 4 but
the text is 2 short of being a multiple of 4 so two characters will
be added.
>>> key = Matrix([[1, 1, 1, 2], [0, 1, 1, 0],
... [2, 2, 3, 4], [1, 1, 0, 1]])
>>> msg = "ST"
>>> encipher_hill(msg, key)
'HJEB'
>>> decipher_hill(_, key)
'STQQ'
>>> encipher_hill(msg, key, pad="Z")
'ISPK'
>>> decipher_hill(_, key)
'STZZ'
If the last two characters of the ciphertext were ignored in
either case, the wrong plaintext would be recovered:
>>> decipher_hill("HD", key)
'ORMV'
>>> decipher_hill("IS", key)
'UIKY'
See Also
========
encipher_hill
"""
assert key.is_square
msg, _, A = _prep(msg, '', symbols)
map = {c: i for i, c in enumerate(A)}
C = [map[c] for c in msg]
N = len(A)
k = key.cols
n = len(C)
m, r = divmod(n, k)
if r:
C = C + [0]*(k - r)
m += 1
key_inv = key.inv_mod(N)
rv = ''.join([A[p % N] for j in range(m) for p in
list(key_inv*Matrix(
k, 1, [C[i] for i in range(k*j, k*(j + 1))]))])
return rv
#################### Bifid cipher ########################
def encipher_bifid(msg, key, symbols=None):
r"""
Performs the Bifid cipher encryption on plaintext ``msg``, and
returns the ciphertext.
This is the version of the Bifid cipher that uses an `n \times n`
Polybius square.
Parameters
==========
msg
Plaintext string.
key
Short string for key.
Duplicate characters are ignored and then it is padded with the
characters in ``symbols`` that were not in the short key.
symbols
`n \times n` characters defining the alphabet.
(default is string.printable)
Returns
=======
ciphertext
Ciphertext using Bifid5 cipher without spaces.
See Also
========
decipher_bifid, encipher_bifid5, encipher_bifid6
References
==========
.. [1] https://en.wikipedia.org/wiki/Bifid_cipher
"""
msg, key, A = _prep(msg, key, symbols, bifid10)
long_key = ''.join(uniq(key)) or A
n = len(A)**.5
if n != int(n):
raise ValueError(
'Length of alphabet (%s) is not a square number.' % len(A))
N = int(n)
if len(long_key) < N**2:
long_key = list(long_key) + [x for x in A if x not in long_key]
# the fractionalization
row_col = {ch: divmod(i, N) for i, ch in enumerate(long_key)}
r, c = zip(*[row_col[x] for x in msg])
rc = r + c
ch = {i: ch for ch, i in row_col.items()}
rv = ''.join(ch[i] for i in zip(rc[::2], rc[1::2]))
return rv
def decipher_bifid(msg, key, symbols=None):
r"""
Performs the Bifid cipher decryption on ciphertext ``msg``, and
returns the plaintext.
This is the version of the Bifid cipher that uses the `n \times n`
Polybius square.
Parameters
==========
msg
Ciphertext string.
key
Short string for key.
Duplicate characters are ignored and then it is padded with the
characters in symbols that were not in the short key.
symbols
`n \times n` characters defining the alphabet.
(default=string.printable, a `10 \times 10` matrix)
Returns
=======
deciphered
Deciphered text.
Examples
========
>>> from sympy.crypto.crypto import (
... encipher_bifid, decipher_bifid, AZ)
Do an encryption using the bifid5 alphabet:
>>> alp = AZ().replace('J', '')
>>> ct = AZ("meet me on monday!")
>>> key = AZ("gold bug")
>>> encipher_bifid(ct, key, alp)
'IEILHHFSTSFQYE'
When entering the text or ciphertext, spaces are ignored so it
can be formatted as desired. Re-entering the ciphertext from the
preceding, putting 4 characters per line and padding with an extra
J, does not cause problems for the deciphering:
>>> decipher_bifid('''
... IEILH
... HFSTS
... FQYEJ''', key, alp)
'MEETMEONMONDAY'
When no alphabet is given, all 100 printable characters will be
used:
>>> key = ''
>>> encipher_bifid('hello world!', key)
'bmtwmg-bIo*w'
>>> decipher_bifid(_, key)
'hello world!'
If the key is changed, a different encryption is obtained:
>>> key = 'gold bug'
>>> encipher_bifid('hello world!', 'gold_bug')
'hg2sfuei7t}w'
And if the key used to decrypt the message is not exact, the
original text will not be perfectly obtained:
>>> decipher_bifid(_, 'gold pug')
'heldo~wor6d!'
"""
msg, _, A = _prep(msg, '', symbols, bifid10)
long_key = ''.join(uniq(key)) or A
n = len(A)**.5
if n != int(n):
raise ValueError(
'Length of alphabet (%s) is not a square number.' % len(A))
N = int(n)
if len(long_key) < N**2:
long_key = list(long_key) + [x for x in A if x not in long_key]
# the reverse fractionalization
row_col = {
ch: divmod(i, N) for i, ch in enumerate(long_key)}
rc = [i for c in msg for i in row_col[c]]
n = len(msg)
rc = zip(*(rc[:n], rc[n:]))
ch = {i: ch for ch, i in row_col.items()}
rv = ''.join(ch[i] for i in rc)
return rv
def bifid_square(key):
"""Return characters of ``key`` arranged in a square.
Examples
========
>>> from sympy.crypto.crypto import (
... bifid_square, AZ, padded_key, bifid5)
>>> bifid_square(AZ().replace('J', ''))
Matrix([
[A, B, C, D, E],
[F, G, H, I, K],
[L, M, N, O, P],
[Q, R, S, T, U],
[V, W, X, Y, Z]])
>>> bifid_square(padded_key(AZ('gold bug!'), bifid5))
Matrix([
[G, O, L, D, B],
[U, A, C, E, F],
[H, I, K, M, N],
[P, Q, R, S, T],
[V, W, X, Y, Z]])
See Also
========
padded_key
"""
A = ''.join(uniq(''.join(key)))
n = len(A)**.5
if n != int(n):
raise ValueError(
'Length of alphabet (%s) is not a square number.' % len(A))
n = int(n)
f = lambda i, j: Symbol(A[n*i + j])
rv = Matrix(n, n, f)
return rv
def encipher_bifid5(msg, key):
r"""
Performs the Bifid cipher encryption on plaintext ``msg``, and
returns the ciphertext.
Explanation
===========
This is the version of the Bifid cipher that uses the `5 \times 5`
Polybius square. The letter "J" is ignored so it must be replaced
with something else (traditionally an "I") before encryption.
ALGORITHM: (5x5 case)
STEPS:
0. Create the `5 \times 5` Polybius square ``S`` associated
to ``key`` as follows:
a) moving from left-to-right, top-to-bottom,
place the letters of the key into a `5 \times 5`
matrix,
b) if the key has less than 25 letters, add the
letters of the alphabet not in the key until the
`5 \times 5` square is filled.
1. Create a list ``P`` of pairs of numbers which are the
coordinates in the Polybius square of the letters in
``msg``.
2. Let ``L1`` be the list of all first coordinates of ``P``
(length of ``L1 = n``), let ``L2`` be the list of all
second coordinates of ``P`` (so the length of ``L2``
is also ``n``).
3. Let ``L`` be the concatenation of ``L1`` and ``L2``
(length ``L = 2*n``), except that consecutive numbers
are paired ``(L[2*i], L[2*i + 1])``. You can regard
``L`` as a list of pairs of length ``n``.
4. Let ``C`` be the list of all letters which are of the
form ``S[i, j]``, for all ``(i, j)`` in ``L``. As a
string, this is the ciphertext of ``msg``.
Parameters
==========
msg : str
Plaintext string.
Converted to upper case and filtered of everything except letters;
the letter "J" is also removed.
key
Short string for key; non-alphabetic letters, J and duplicated
characters are ignored and then, if the length is less than 25
characters, it is padded with other letters of the alphabet
(in alphabetical order).
Returns
=======
ct
Ciphertext (all caps, no spaces).
Examples
========
>>> from sympy.crypto.crypto import (
... encipher_bifid5, decipher_bifid5)
"J" will be omitted unless it is replaced with something else:
>>> round_trip = lambda m, k: \
... decipher_bifid5(encipher_bifid5(m, k), k)
>>> key = 'a'
>>> msg = "JOSIE"
>>> round_trip(msg, key)
'OSIE'
>>> round_trip(msg.replace("J", "I"), key)
'IOSIE'
>>> j = "QIQ"
>>> round_trip(msg.replace("J", j), key).replace(j, "J")
'JOSIE'
Notes
=====
The Bifid cipher was invented around 1901 by Felix Delastelle.
It is a *fractional substitution* cipher, where letters are
replaced by pairs of symbols from a smaller alphabet. The
cipher uses a `5 \times 5` square filled with some ordering of the
alphabet, except that "J" is replaced with "I" (this is a so-called
Polybius square; there is a `6 \times 6` analog obtained by adding "J"
back in and appending the digits 0, 1, ..., 9 to the usual 26-letter
alphabet).
According to Helen Gaines' book *Cryptanalysis*, this type of cipher
was used in the field by the German Army during World War I.
See Also
========
decipher_bifid5, encipher_bifid
"""
msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid5)
key = padded_key(key, bifid5)
return encipher_bifid(msg, '', key)
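# Illustrative sketch (not part of the original module): the Bifid
# fractionation from the STEPS above, written out directly for a 5x5 square
# given as a 25-character string read row by row. The helper name is
# hypothetical; ``msg`` is assumed to consist only of characters of ``square``.
def _bifid5_reference(msg, square):
    coords = [divmod(square.index(c), 5) for c in msg]   # step 1
    rows = [r for r, _ in coords]                        # step 2: L1
    cols = [c for _, c in coords]                        # step 2: L2
    seq = rows + cols                                    # step 3: L
    pairs = zip(seq[::2], seq[1::2])
    return ''.join(square[5*r + c] for r, c in pairs)    # step 4: C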
def decipher_bifid5(msg, key):
r"""
Return the Bifid cipher decryption of ``msg``.
Explanation
===========
This is the version of the Bifid cipher that uses the `5 \times 5`
Polybius square; the letter "J" is ignored unless a ``key`` of
length 25 is used.
Parameters
==========
msg
Ciphertext string.
key
Short string for key; duplicated characters are ignored and if
the length is less than 25 characters, it will be padded with
other letters from the alphabet omitting "J".
Non-alphabetic characters are ignored.
Returns
=======
plaintext
Plaintext from Bifid5 cipher (all caps, no spaces).
Examples
========
>>> from sympy.crypto.crypto import encipher_bifid5, decipher_bifid5
>>> key = "gold bug"
>>> encipher_bifid5('meet me on friday', key)
'IEILEHFSTSFXEE'
>>> encipher_bifid5('meet me on monday', key)
'IEILHHFSTSFQYE'
>>> decipher_bifid5(_, key)
'MEETMEONMONDAY'
"""
msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid5)
key = padded_key(key, bifid5)
return decipher_bifid(msg, '', key)
def bifid5_square(key=None):
r"""
5x5 Polybius square.
Produce the Polybius square for the `5 \times 5` Bifid cipher.
Examples
========
>>> from sympy.crypto.crypto import bifid5_square
>>> bifid5_square("gold bug")
Matrix([
[G, O, L, D, B],
[U, A, C, E, F],
[H, I, K, M, N],
[P, Q, R, S, T],
[V, W, X, Y, Z]])
"""
if not key:
key = bifid5
else:
_, key, _ = _prep('', key.upper(), None, bifid5)
key = padded_key(key, bifid5)
return bifid_square(key)
def encipher_bifid6(msg, key):
r"""
Performs the Bifid cipher encryption on plaintext ``msg``, and
returns the ciphertext.
This is the version of the Bifid cipher that uses the `6 \times 6`
Polybius square.
Parameters
==========
msg
Plaintext string (digits okay).
key
Short string for key (digits okay).
If ``key`` is less than 36 characters long, the square will be
filled with letters A through Z and digits 0 through 9.
Returns
=======
ciphertext
Ciphertext from Bifid cipher (all caps, no spaces).
See Also
========
decipher_bifid6, encipher_bifid
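Examples
========
Using the same key as in the ``decipher_bifid6`` example:
>>> from sympy.crypto.crypto import encipher_bifid6
>>> key = "gold bug"
>>> encipher_bifid6('meet me on monday at 8am', key)
'KFKLJJHF5MMMKTFRGPL'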
"""
msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid6)
key = padded_key(key, bifid6)
return encipher_bifid(msg, '', key)
def decipher_bifid6(msg, key):
r"""
Performs the Bifid cipher decryption on ciphertext ``msg``, and
returns the plaintext.
This is the version of the Bifid cipher that uses the `6 \times 6`
Polybius square.
Parameters
==========
msg
Ciphertext string (digits okay); converted to upper case
key
Short string for key (digits okay).
If ``key`` is less than 36 characters long, the square will be
filled with letters A through Z and digits 0 through 9.
All letters are converted to uppercase.
Returns
=======
plaintext
Plaintext from Bifid cipher (all caps, no spaces).
Examples
========
>>> from sympy.crypto.crypto import encipher_bifid6, decipher_bifid6
>>> key = "gold bug"
>>> encipher_bifid6('meet me on monday at 8am', key)
'KFKLJJHF5MMMKTFRGPL'
>>> decipher_bifid6(_, key)
'MEETMEONMONDAYAT8AM'
"""
msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid6)
key = padded_key(key, bifid6)
return decipher_bifid(msg, '', key)
def bifid6_square(key=None):
r"""
6x6 Polybius square.
Produces the Polybius square for the `6 \times 6` Bifid cipher.
Assumes alphabet of symbols is "A", ..., "Z", "0", ..., "9".
Examples
========
>>> from sympy.crypto.crypto import bifid6_square
>>> key = "gold bug"
>>> bifid6_square(key)
Matrix([
[G, O, L, D, B, U],
[A, C, E, F, H, I],
[J, K, M, N, P, Q],
[R, S, T, V, W, X],
[Y, Z, 0, 1, 2, 3],
[4, 5, 6, 7, 8, 9]])
"""
if not key:
key = bifid6
else:
_, key, _ = _prep('', key.upper(), None, bifid6)
key = padded_key(key, bifid6)
return bifid_square(key)
#################### RSA #############################
def _decipher_rsa_crt(i, d, factors):
"""Decipher RSA using chinese remainder theorem from the information
of the relatively-prime factors of the modulus.
Parameters
==========
i : integer
Ciphertext
d : integer
The exponent component.
factors : list of relatively-prime integers
The integers given must be coprime and the product must equal
the modulus component of the original RSA key.
Examples
========
How to decrypt RSA with CRT:
>>> from sympy.crypto.crypto import rsa_public_key, rsa_private_key
>>> primes = [61, 53]
>>> e = 17
>>> args = primes + [e]
>>> puk = rsa_public_key(*args)
>>> prk = rsa_private_key(*args)
>>> from sympy.crypto.crypto import encipher_rsa, _decipher_rsa_crt
>>> msg = 65
>>> crt_primes = primes
>>> encrypted = encipher_rsa(msg, puk)
>>> decrypted = _decipher_rsa_crt(encrypted, prk[1], primes)
>>> decrypted
65
"""
from sympy.ntheory.modular import crt
moduluses = [pow(i, d, p) for p in factors]
result = crt(factors, moduluses)
if not result:
raise ValueError("CRT failed")
return result[0]
def _rsa_key(*args, public=True, private=True, totient='Euler', index=None, multipower=None):
r"""A private subroutine to generate RSA key
Parameters
==========
public, private : bool, optional
Flags to generate either a public key or a private key.
totient : 'Euler' or 'Carmichael'
The totient convention to use.
multipower : bool, optional
Flag to bypass warning for multipower RSA.
"""
from sympy.ntheory import totient as _euler
from sympy.ntheory import reduced_totient as _carmichael
if len(args) < 2:
return False
if totient not in ('Euler', 'Carmichael'):
raise ValueError(
"The argument totient={} should either be " \
"'Euler', 'Carmichalel'." \
.format(totient))
if totient == 'Euler':
_totient = _euler
else:
_totient = _carmichael
if index is not None:
index = as_int(index)
if totient != 'Carmichael':
raise ValueError(
"Setting the 'index' keyword argument requires totient"
"notation to be specified as 'Carmichael'.")
primes, e = args[:-1], args[-1]
if any(not isprime(p) for p in primes):
new_primes = []
for i in primes:
new_primes.extend(factorint(i, multiple=True))
primes = new_primes
n = reduce(lambda i, j: i*j, primes)
tally = multiset(primes)
if all(v == 1 for v in tally.values()):
multiple = list(tally.keys())
phi = _totient._from_distinct_primes(*multiple)
else:
if not multipower:
NonInvertibleCipherWarning(
'Non-distinctive primes found in the factors {}. '
'The cipher may not be decryptable for some numbers '
'in the complete residue system Z[{}], but the cipher '
'can still be valid if you restrict the domain to be '
'the reduced residue system Z*[{}]. You can pass '
'the flag multipower=True if you want to suppress this '
'warning.'
.format(primes, n, n)
).warn()
phi = _totient._from_factors(tally)
if igcd(e, phi) == 1:
if public and not private:
if isinstance(index, int):
e = e % phi
e += index * phi
return n, e
if private and not public:
d = mod_inverse(e, phi)
if isinstance(index, int):
d += index * phi
return n, d
return False
def rsa_public_key(*args, **kwargs):
r"""Return the RSA *public key* pair, `(n, e)`
Parameters
==========
args : naturals
If specified as `p, q, e` where `p` and `q` are distinct primes
and `e` is a desired public exponent of the RSA, `n = p q` and
`e` will be verified against the totient
`\phi(n)` (Euler totient) or `\lambda(n)` (Carmichael totient)
to be `\gcd(e, \phi(n)) = 1` or `\gcd(e, \lambda(n)) = 1`.
If specified as `p_1, p_2, ..., p_n, e` where
`p_1, p_2, ..., p_n` are specified as primes,
and `e` is specified as a desired public exponent of the RSA,
it will be able to form a multi-prime RSA, which is a more
generalized form of the popular 2-prime RSA.
It can also be possible to form a single-prime RSA by specifying
the argument as `p, e`, which can be considered a trivial case
of a multiprime RSA.
Furthermore, it can be possible to form a multi-power RSA by
specifying two or more pairs of the primes to be same.
However, unlike the two-distinct-prime RSA or multi-prime
RSA, not every number in the complete residue system
(`\mathbb{Z}_n`) will be decryptable since the mapping
`\mathbb{Z}_{n} \rightarrow \mathbb{Z}_{n}`
will not be bijective.
(Except for the trivial case when
`e = 1`
or more generally,
.. math::
e \in \left \{ 1 + k \lambda(n)
\mid k \in \mathbb{Z} \land k \geq 0 \right \}
when RSA reduces to the identity.)
However, the RSA can still be decryptable for the numbers in the
reduced residue system (`\mathbb{Z}_n^{\times}`), since the
mapping
`\mathbb{Z}_{n}^{\times} \rightarrow \mathbb{Z}_{n}^{\times}`
can still be bijective.
If you pass a non-prime integer to the arguments
`p_1, p_2, ..., p_n`, the particular number will be
prime-factored and it will become either a multi-prime RSA or a
multi-power RSA in its canonical form, depending on whether the
product equals its radical or not.
`p_1 p_2 ... p_n = \text{rad}(p_1 p_2 ... p_n)`
totient : 'Euler' or 'Carmichael', optional
If ``'Euler'``, it uses Euler's totient `\phi(n)` which is
:meth:`sympy.ntheory.factor_.totient` in SymPy.
If ``'Carmichael'``, it uses Carmichael's totient `\lambda(n)`
which is :meth:`sympy.ntheory.factor_.reduced_totient` in SymPy.
Unlike private key generation, this is a trivial keyword for
public key generation because
`\gcd(e, \phi(n)) = 1 \iff \gcd(e, \lambda(n)) = 1`.
index : nonnegative integer, optional
Returns an arbitrary solution of an RSA public key at the index
specified at `0, 1, 2, ...`. This parameter needs to be
specified along with ``totient='Carmichael'``.
Similarly to the non-uniqueness of an RSA private key as described
in the ``index`` parameter documentation in
:meth:`rsa_private_key`, RSA public key is also not unique and
there is an infinite number of RSA public exponents which
can behave in the same manner.
From any given RSA public exponent `e`, there can be
another RSA public exponent `e + k \lambda(n)` where `k` is an
integer and `\lambda` is the Carmichael totient function.
However, considering only the positive cases, there can be
a principal solution of a RSA public exponent `e_0` in
`0 < e_0 < \lambda(n)`, and all the other solutions
can be canonicalized in the form `e_0 + k \lambda(n)`.
``index`` specifies the `k` notation to yield any possible value
an RSA public key can have.
An example of computing any arbitrary RSA public key:
>>> from sympy.crypto.crypto import rsa_public_key
>>> rsa_public_key(61, 53, 17, totient='Carmichael', index=0)
(3233, 17)
>>> rsa_public_key(61, 53, 17, totient='Carmichael', index=1)
(3233, 797)
>>> rsa_public_key(61, 53, 17, totient='Carmichael', index=2)
(3233, 1577)
multipower : bool, optional
Any pair of non-distinct primes found in the RSA specification
will restrict the domain of the cryptosystem, as noted in the
explanation of the parameter ``args``.
SymPy RSA key generator may give a warning before dispatching it
as a multi-power RSA, however, you can disable the warning if
you pass ``True`` to this keyword.
Returns
=======
(n, e) : int, int
`n` is a product of any arbitrary number of primes given as
the argument.
`e` is relatively prime (coprime) to the Euler totient
`\phi(n)`.
False
Returned if less than two arguments are given, or `e` is
not relatively prime to the totient of the modulus.
Examples
========
>>> from sympy.crypto.crypto import rsa_public_key
A public key of a two-prime RSA:
>>> p, q, e = 3, 5, 7
>>> rsa_public_key(p, q, e)
(15, 7)
>>> rsa_public_key(p, q, 30)
False
A public key of a multiprime RSA:
>>> primes = [2, 3, 5, 7, 11, 13]
>>> e = 7
>>> args = primes + [e]
>>> rsa_public_key(*args)
(30030, 7)
Notes
=====
Although RSA can be generalized over any modulus `n`, using
two large primes became the most popular specification because a
product of two large primes is usually the hardest to factor
relative to the number of digits `n` has.
However, verifying this claim requires a closer look at the time
complexities of the various prime-factoring algorithms.
See Also
========
rsa_private_key
encipher_rsa
decipher_rsa
References
==========
.. [1] https://en.wikipedia.org/wiki/RSA_%28cryptosystem%29
.. [2] http://cacr.uwaterloo.ca/techreports/2006/cacr2006-16.pdf
.. [3] https://link.springer.com/content/pdf/10.1007%2FBFb0055738.pdf
.. [4] http://www.itiis.org/digital-library/manuscript/1381
"""
return _rsa_key(*args, public=True, private=False, **kwargs)
def rsa_private_key(*args, **kwargs):
r"""Return the RSA *private key* pair, `(n, d)`
Parameters
==========
args : naturals
The keyword is identical to the ``args`` in
:meth:`rsa_public_key`.
totient : 'Euler' or 'Carmichael', optional
If ``'Euler'``, it uses Euler's totient convention `\phi(n)`
which is :meth:`sympy.ntheory.factor_.totient` in SymPy.
If ``'Carmichael'``, it uses Carmichael's totient convention
`\lambda(n)` which is
:meth:`sympy.ntheory.factor_.reduced_totient` in SymPy.
There can be some output differences for private key generation
as examples below.
Example using Euler's totient:
>>> from sympy.crypto.crypto import rsa_private_key
>>> rsa_private_key(61, 53, 17, totient='Euler')
(3233, 2753)
Example using Carmichael's totient:
>>> from sympy.crypto.crypto import rsa_private_key
>>> rsa_private_key(61, 53, 17, totient='Carmichael')
(3233, 413)
index : nonnegative integer, optional
Returns an arbitrary solution of an RSA private key at the index
specified at `0, 1, 2, ...`. This parameter needs to be
specified along with ``totient='Carmichael'``.
The RSA private exponent is a non-unique solution of
`e d \mod \lambda(n) = 1` and can take any form
`d + k \lambda(n)`, where `d` is another
already-computed private exponent, `\lambda` is the
Carmichael totient function, and `k` is any integer.
However, considering only the positive cases, there can be
a principal solution of a RSA private exponent `d_0` in
`0 < d_0 < \lambda(n)`, and all the other solutions
can be canonicalized in the form `d_0 + k \lambda(n)`.
``index`` specifies the `k` notation to yield any possible value
an RSA private key can have.
An example of computing any arbitrary RSA private key:
>>> from sympy.crypto.crypto import rsa_private_key
>>> rsa_private_key(61, 53, 17, totient='Carmichael', index=0)
(3233, 413)
>>> rsa_private_key(61, 53, 17, totient='Carmichael', index=1)
(3233, 1193)
>>> rsa_private_key(61, 53, 17, totient='Carmichael', index=2)
(3233, 1973)
multipower : bool, optional
The keyword is identical to the ``multipower`` in
:meth:`rsa_public_key`.
Returns
=======
(n, d) : int, int
`n` is a product of any arbitrary number of primes given as
the argument.
`d` is the inverse of `e` (mod `\phi(n)`) where `e` is the
exponent given, and `\phi` is a Euler totient.
False
Returned if less than two arguments are given, or `e` is
not relatively prime to the totient of the modulus.
Examples
========
>>> from sympy.crypto.crypto import rsa_private_key
A private key of a two-prime RSA:
>>> p, q, e = 3, 5, 7
>>> rsa_private_key(p, q, e)
(15, 7)
>>> rsa_private_key(p, q, 30)
False
A private key of a multiprime RSA:
>>> primes = [2, 3, 5, 7, 11, 13]
>>> e = 7
>>> args = primes + [e]
>>> rsa_private_key(*args)
(30030, 823)
See Also
========
rsa_public_key
encipher_rsa
decipher_rsa
References
==========
.. [1] https://en.wikipedia.org/wiki/RSA_%28cryptosystem%29
.. [2] http://cacr.uwaterloo.ca/techreports/2006/cacr2006-16.pdf
.. [3] https://link.springer.com/content/pdf/10.1007%2FBFb0055738.pdf
.. [4] http://www.itiis.org/digital-library/manuscript/1381
"""
return _rsa_key(*args, public=False, private=True, **kwargs)
def _encipher_decipher_rsa(i, key, factors=None):
n, d = key
if not factors:
return pow(i, d, n)
def _is_coprime_set(l):
is_coprime_set = True
for i in range(len(l)):
for j in range(i+1, len(l)):
if igcd(l[i], l[j]) != 1:
is_coprime_set = False
break
return is_coprime_set
prod = reduce(lambda i, j: i*j, factors)
if prod == n and _is_coprime_set(factors):
return _decipher_rsa_crt(i, d, factors)
return _encipher_decipher_rsa(i, key, factors=None)
def encipher_rsa(i, key, factors=None):
r"""Encrypt the plaintext with RSA.
Parameters
==========
i : integer
The plaintext to be encrypted.
key : (n, e) where n, e are integers
`n` is the modulus of the key and `e` is the exponent of the
key. The encryption is computed by `i^e \bmod n`.
The key can either be a public key or a private key, however,
the message encrypted by a public key can only be decrypted by
a private key, and vice versa, as RSA is an asymmetric
cryptography system.
factors : list of coprime integers
This is identical to the keyword ``factors`` in
:meth:`decipher_rsa`.
Notes
=====
Some specifications may make the RSA not cryptographically
meaningful.
For example, `0` and `1` always remain the same after any
number of exponentiations and thus should be avoided.
Furthermore, if `i^e < n`, `i` may easily be figured out by taking
the `e`-th root.
Also, if the exponent is specified as `1`, or more generally in the
form `1 + k \lambda(n)` where `k` is a nonnegative integer and
`\lambda` is the Carmichael totient, the RSA becomes an identity
mapping.
Examples
========
>>> from sympy.crypto.crypto import encipher_rsa
>>> from sympy.crypto.crypto import rsa_public_key, rsa_private_key
Public Key Encryption:
>>> p, q, e = 3, 5, 7
>>> puk = rsa_public_key(p, q, e)
>>> msg = 12
>>> encipher_rsa(msg, puk)
3
Private Key Encryption:
>>> p, q, e = 3, 5, 7
>>> prk = rsa_private_key(p, q, e)
>>> msg = 12
>>> encipher_rsa(msg, prk)
3
Encryption using chinese remainder theorem:
>>> encipher_rsa(msg, prk, factors=[p, q])
3
"""
return _encipher_decipher_rsa(i, key, factors=factors)
def decipher_rsa(i, key, factors=None):
r"""Decrypt the ciphertext with RSA.
Parameters
==========
i : integer
The ciphertext to be decrypted.
key : (n, d) where n, d are integers
`n` is the modulus of the key and `d` is the exponent of the
key. The decryption is computed by `i^d \bmod n`.
The key can either be a public key or a private key, however,
the message encrypted by a public key can only be decrypted by
a private key, and vice versa, as RSA is an asymmetric
cryptography system.
factors : list of coprime integers
As the modulus `n` created from RSA key generation is composed
of arbitrary prime factors
`n = {p_1}^{k_1}{p_2}^{k_2}...{p_n}^{k_n}` where
`p_1, p_2, ..., p_n` are distinct primes and
`k_1, k_2, ..., k_n` are positive integers, the Chinese remainder
theorem can be used to compute `i^d \bmod n` from the
fragmented modulo operations like
.. math::
i^d \bmod {p_1}^{k_1}, i^d \bmod {p_2}^{k_2}, ... ,
i^d \bmod {p_n}^{k_n}
or like
.. math::
i^d \bmod {p_1}^{k_1}{p_2}^{k_2},
i^d \bmod {p_3}^{k_3}, ... ,
i^d \bmod {p_n}^{k_n}
as long as the moduli are pairwise coprime.
The raw primes used in generating the RSA key pair can be a good
option.
Note that the speed advantage of using this is only viable for
very large cases (like 2048-bit RSA keys) since the
overhead of the pure Python implementation of
:meth:`sympy.ntheory.modular.crt` may outweigh the
theoretical speed advantage.
Notes
=====
See the ``Notes`` section in the documentation of
:meth:`encipher_rsa`
Examples
========
>>> from sympy.crypto.crypto import decipher_rsa, encipher_rsa
>>> from sympy.crypto.crypto import rsa_public_key, rsa_private_key
Public Key Encryption and Decryption:
>>> p, q, e = 3, 5, 7
>>> prk = rsa_private_key(p, q, e)
>>> puk = rsa_public_key(p, q, e)
>>> msg = 12
>>> new_msg = encipher_rsa(msg, prk)
>>> new_msg
3
>>> decipher_rsa(new_msg, puk)
12
Private Key Encryption and Decryption:
>>> p, q, e = 3, 5, 7
>>> prk = rsa_private_key(p, q, e)
>>> puk = rsa_public_key(p, q, e)
>>> msg = 12
>>> new_msg = encipher_rsa(msg, puk)
>>> new_msg
3
>>> decipher_rsa(new_msg, prk)
12
Decryption using chinese remainder theorem:
>>> decipher_rsa(new_msg, prk, factors=[p, q])
12
See Also
========
encipher_rsa
"""
return _encipher_decipher_rsa(i, key, factors=factors)
#################### kid krypto (kid RSA) #############################
def kid_rsa_public_key(a, b, A, B):
r"""
Kid RSA is a version of RSA useful to teach grade school children
since it does not involve exponentiation.
Explanation
===========
Alice wants to talk to Bob. Bob generates keys as follows.
Key generation:
* Select positive integers `a, b, A, B` at random.
* Compute `M = a b - 1`, `e = A M + a`, `d = B M + b`,
`n = (e d - 1)//M`.
* The *public key* is `(n, e)`. Bob sends these to Alice.
* The *private key* is `(n, d)`, which Bob keeps secret.
Encryption: If `p` is the plaintext message then the
ciphertext is `c = p e \pmod n`.
Decryption: If `c` is the ciphertext message then the
plaintext is `p = c d \pmod n`.
Examples
========
>>> from sympy.crypto.crypto import kid_rsa_public_key
>>> a, b, A, B = 3, 4, 5, 6
>>> kid_rsa_public_key(a, b, A, B)
(369, 58)
"""
M = a*b - 1
e = A*M + a
d = B*M + b
n = (e*d - 1)//M
return n, e
def kid_rsa_private_key(a, b, A, B):
"""
Compute `M = a b - 1`, `e = A M + a`, `d = B M + b`,
`n = (e d - 1) / M`. The *private key* is `(n, d)`, which Bob
keeps secret.
Examples
========
>>> from sympy.crypto.crypto import kid_rsa_private_key
>>> a, b, A, B = 3, 4, 5, 6
>>> kid_rsa_private_key(a, b, A, B)
(369, 70)
"""
M = a*b - 1
e = A*M + a
d = B*M + b
n = (e*d - 1)//M
return n, d
def encipher_kid_rsa(msg, key):
"""
Here ``msg`` is the plaintext and ``key`` is the public key.
Examples
========
>>> from sympy.crypto.crypto import (
... encipher_kid_rsa, kid_rsa_public_key)
>>> msg = 200
>>> a, b, A, B = 3, 4, 5, 6
>>> key = kid_rsa_public_key(a, b, A, B)
>>> encipher_kid_rsa(msg, key)
161
"""
n, e = key
return (msg*e) % n
def decipher_kid_rsa(msg, key):
"""
Here ``msg`` is the ciphertext and ``key`` is the private key.
Examples
========
>>> from sympy.crypto.crypto import (
... kid_rsa_public_key, kid_rsa_private_key,
... decipher_kid_rsa, encipher_kid_rsa)
>>> a, b, A, B = 3, 4, 5, 6
>>> d = kid_rsa_private_key(a, b, A, B)
>>> msg = 200
>>> pub = kid_rsa_public_key(a, b, A, B)
>>> pri = kid_rsa_private_key(a, b, A, B)
>>> ct = encipher_kid_rsa(msg, pub)
>>> decipher_kid_rsa(ct, pri)
200
"""
n, d = key
return (msg*d) % n
#################### Morse Code ######################################
morse_char = {
".-": "A", "-...": "B",
"-.-.": "C", "-..": "D",
".": "E", "..-.": "F",
"--.": "G", "....": "H",
"..": "I", ".---": "J",
"-.-": "K", ".-..": "L",
"--": "M", "-.": "N",
"---": "O", ".--.": "P",
"--.-": "Q", ".-.": "R",
"...": "S", "-": "T",
"..-": "U", "...-": "V",
".--": "W", "-..-": "X",
"-.--": "Y", "--..": "Z",
"-----": "0", ".----": "1",
"..---": "2", "...--": "3",
"....-": "4", ".....": "5",
"-....": "6", "--...": "7",
"---..": "8", "----.": "9",
".-.-.-": ".", "--..--": ",",
"---...": ":", "-.-.-.": ";",
"..--..": "?", "-....-": "-",
"..--.-": "_", "-.--.": "(",
"-.--.-": ")", ".----.": "'",
"-...-": "=", ".-.-.": "+",
"-..-.": "/", ".--.-.": "@",
"...-..-": "$", "-.-.--": "!"}
char_morse = {v: k for k, v in morse_char.items()}
def encode_morse(msg, sep='|', mapping=None):
"""
Encodes a plaintext into popular Morse Code with letters
separated by ``sep`` and words by a double ``sep``.
Examples
========
>>> from sympy.crypto.crypto import encode_morse
>>> msg = 'ATTACK RIGHT FLANK'
>>> encode_morse(msg)
'.-|-|-|.-|-.-.|-.-||.-.|..|--.|....|-||..-.|.-..|.-|-.|-.-'
References
==========
.. [1] https://en.wikipedia.org/wiki/Morse_code
"""
mapping = mapping or char_morse
assert sep not in mapping
word_sep = 2*sep
mapping[" "] = word_sep
suffix = msg and msg[-1] in whitespace
# normalize whitespace
msg = (' ' if word_sep else '').join(msg.split())
# omit unmapped chars
chars = set(''.join(msg.split()))
ok = set(mapping.keys())
msg = translate(msg, None, ''.join(chars - ok))
morsestring = []
words = msg.split()
for word in words:
morseword = []
for letter in word:
morseletter = mapping[letter]
morseword.append(morseletter)
word = sep.join(morseword)
morsestring.append(word)
return word_sep.join(morsestring) + (word_sep if suffix else '')
def decode_morse(msg, sep='|', mapping=None):
"""
Decodes a Morse Code with letters separated by ``sep``
(default is ``'|'``) and words separated by ``word_sep``
(default is ``'||'``) into plaintext.
Examples
========
>>> from sympy.crypto.crypto import decode_morse
>>> mc = '--|---|...-|.||.|.-|...|-'
>>> decode_morse(mc)
'MOVE EAST'
References
==========
.. [1] https://en.wikipedia.org/wiki/Morse_code
"""
mapping = mapping or morse_char
word_sep = 2*sep
characterstring = []
words = msg.strip(word_sep).split(word_sep)
for word in words:
letters = word.split(sep)
chars = [mapping[c] for c in letters]
word = ''.join(chars)
characterstring.append(word)
rv = " ".join(characterstring)
return rv
#################### LFSRs ##########################################
def lfsr_sequence(key, fill, n):
r"""
This function creates an LFSR sequence.
Parameters
==========
key : list
A list of finite field elements, `[c_0, c_1, \ldots, c_k].`
fill : list
The list of the initial terms of the LFSR sequence,
`[x_0, x_1, \ldots, x_k].`
n
Number of terms of the sequence that the function returns.
Returns
=======
L
The LFSR sequence defined by
`x_{n+1} = c_k x_n + \ldots + c_0 x_{n-k}`, for
`n \geq k`.
Notes
=====
S. Golomb [G]_ gives a list of three statistical properties a
sequence of numbers `a = \{a_n\}_{n=1}^\infty`,
`a_n \in \{0,1\}`, should display to be considered
"random". Define the autocorrelation of `a` to be
.. math::
C(k) = C(k,a) = \lim_{N\rightarrow \infty} {1\over N}\sum_{n=1}^N (-1)^{a_n + a_{n+k}}.
In the case where `a` is periodic with period
`P` then this reduces to
.. math::
C(k) = {1\over P}\sum_{n=1}^P (-1)^{a_n + a_{n+k}}.
Assume `a` is periodic with period `P`.
- balance:
.. math::
\left|\sum_{n=1}^P(-1)^{a_n}\right| \leq 1.
- low autocorrelation:
.. math::
C(k) = \left\{ \begin{array}{cc} 1,& k = 0,\\ \epsilon, & k \ne 0. \end{array} \right.
(For sequences satisfying these first two properties, it is known
that `\epsilon = -1/P` must hold.)
- proportional runs property: In each period, half the runs have
length `1`, one-fourth have length `2`, etc.
Moreover, there are as many runs of `1`'s as there are of
`0`'s.
Examples
========
>>> from sympy.crypto.crypto import lfsr_sequence
>>> from sympy.polys.domains import FF
>>> F = FF(2)
>>> fill = [F(1), F(1), F(0), F(1)]
>>> key = [F(1), F(0), F(0), F(1)]
>>> lfsr_sequence(key, fill, 10)
[1 mod 2, 1 mod 2, 0 mod 2, 1 mod 2, 0 mod 2,
1 mod 2, 1 mod 2, 0 mod 2, 0 mod 2, 1 mod 2]
References
==========
.. [G] Solomon Golomb, Shift register sequences, Aegean Park Press,
Laguna Hills, Ca, 1967
"""
if not isinstance(key, list):
raise TypeError("key must be a list")
if not isinstance(fill, list):
raise TypeError("fill must be a list")
p = key[0].mod
F = FF(p)
s = fill
k = len(fill)
L = []
for i in range(n):
s0 = s[:]
L.append(s[0])
s = s[1:k]
x = sum([int(key[i]*s0[i]) for i in range(k)])
s.append(F(x))
return L # use [x.to_int() for x in L] for int version
def lfsr_autocorrelation(L, P, k):
"""
This function computes the LFSR autocorrelation function.
Parameters
==========
L
A periodic sequence of elements of `GF(2)`.
L must have length larger than P.
P
The period of L.
k : int
An integer `k` (`0 < k < P`).
Returns
=======
autocorrelation
The k-th value of the autocorrelation of the LFSR L.
Examples
========
>>> from sympy.crypto.crypto import (
... lfsr_sequence, lfsr_autocorrelation)
>>> from sympy.polys.domains import FF
>>> F = FF(2)
>>> fill = [F(1), F(1), F(0), F(1)]
>>> key = [F(1), F(0), F(0), F(1)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_autocorrelation(s, 15, 7)
-1/15
>>> lfsr_autocorrelation(s, 15, 0)
1
"""
if not isinstance(L, list):
raise TypeError("L (=%s) must be a list" % L)
P = int(P)
k = int(k)
L0 = L[:P] # slices makes a copy
L1 = L0 + L0[:k]
L2 = [(-1)**(L1[i].to_int() + L1[i + k].to_int()) for i in range(P)]
tot = sum(L2)
return Rational(tot, P)
def lfsr_connection_polynomial(s):
"""
This function computes the LFSR connection polynomial.
Parameters
==========
s
A sequence of even length, with entries in a finite
field.
Returns
=======
C(x)
The connection polynomial of a minimal LFSR yielding s.
This implements the algorithm in section 3 of J. L. Massey's
article [M]_.
Examples
========
>>> from sympy.crypto.crypto import (
... lfsr_sequence, lfsr_connection_polynomial)
>>> from sympy.polys.domains import FF
>>> F = FF(2)
>>> fill = [F(1), F(1), F(0), F(1)]
>>> key = [F(1), F(0), F(0), F(1)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_connection_polynomial(s)
x**4 + x + 1
>>> fill = [F(1), F(0), F(0), F(1)]
>>> key = [F(1), F(1), F(0), F(1)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_connection_polynomial(s)
x**3 + 1
>>> fill = [F(1), F(0), F(1)]
>>> key = [F(1), F(1), F(0)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_connection_polynomial(s)
x**3 + x**2 + 1
>>> fill = [F(1), F(0), F(1)]
>>> key = [F(1), F(0), F(1)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_connection_polynomial(s)
x**3 + x + 1
References
==========
.. [M] James L. Massey, "Shift-Register Synthesis and BCH Decoding."
IEEE Trans. on Information Theory, vol. 15(1), pp. 122-127,
Jan 1969.
"""
# Initialization:
p = s[0].mod
x = Symbol("x")
C = 1*x**0
B = 1*x**0
m = 1
b = 1*x**0
L = 0
N = 0
while N < len(s):
if L > 0:
dC = Poly(C).degree()
r = min(L + 1, dC + 1)
coeffsC = [C.subs(x, 0)] + [C.coeff(x**i)
for i in range(1, dC + 1)]
d = (s[N].to_int() + sum([coeffsC[i]*s[N - i].to_int()
for i in range(1, r)])) % p
if L == 0:
d = s[N].to_int()*x**0
if d == 0:
m += 1
N += 1
if d > 0:
if 2*L > N:
C = (C - d*((b**(p - 2)) % p)*x**m*B).expand()
m += 1
N += 1
else:
T = C
C = (C - d*((b**(p - 2)) % p)*x**m*B).expand()
L = N + 1 - L
m = 1
b = d
B = T
N += 1
dC = Poly(C).degree()
coeffsC = [C.subs(x, 0)] + [C.coeff(x**i) for i in range(1, dC + 1)]
return sum([coeffsC[i] % p*x**i for i in range(dC + 1)
if coeffsC[i] is not None])
#################### ElGamal #############################
def elgamal_private_key(digit=10, seed=None):
r"""
Return three number tuple as private key.
Explanation
===========
ElGamal encryption is based on the mathematical problem
called the Discrete Logarithm Problem (DLP). For example,
`a^{b} \equiv c \pmod p`
In general, if ``a`` and ``b`` are known, ``c`` is easily
calculated. If ``b`` is unknown, it is hard to use
``a`` and ``c`` to get ``b``.
Parameters
==========
digit : int
Minimum number of binary digits for key.
Returns
=======
tuple : (p, r, d)
p = prime number.
r = primitive root.
d = random number.
Notes
=====
For testing purposes, the ``seed`` parameter may be set to control
the output of this routine. See sympy.testing.randtest._randrange.
Examples
========
>>> from sympy.crypto.crypto import elgamal_private_key
>>> from sympy.ntheory import is_primitive_root, isprime
>>> a, b, _ = elgamal_private_key()
>>> isprime(a)
True
>>> is_primitive_root(b, a)
True
"""
randrange = _randrange(seed)
p = nextprime(2**digit)
return p, primitive_root(p), randrange(2, p)
def elgamal_public_key(key):
r"""
Return three number tuple as public key.
Parameters
==========
key : (p, r, d)
Tuple generated by ``elgamal_private_key``.
Returns
=======
tuple : (p, r, e)
`e = r**d \bmod p`
`d` is a random number in private key.
Examples
========
>>> from sympy.crypto.crypto import elgamal_public_key
>>> elgamal_public_key((1031, 14, 636))
(1031, 14, 212)
"""
p, r, e = key
return p, r, pow(r, e, p)
def encipher_elgamal(i, key, seed=None):
r"""
Encrypt message with public key.
Explanation
===========
``i`` is a plaintext message expressed as an integer.
``key`` is public key (p, r, e). In order to encrypt
a message, a random number ``a`` in ``range(2, p)``
is generated and the encrypted message is returned as
`c_{1}` and `c_{2}` where:
`c_{1} \equiv r^{a} \pmod p`
`c_{2} \equiv m e^{a} \pmod p`
Parameters
==========
i
Plaintext message expressed as an integer.
key
Public key.
Returns
=======
tuple : (c1, c2)
The encrypted message as a pair of numbers.
Notes
=====
For testing purposes, the ``seed`` parameter may be set to control
the output of this routine. See sympy.testing.randtest._randrange.
Examples
========
>>> from sympy.crypto.crypto import encipher_elgamal, elgamal_private_key, elgamal_public_key
>>> pri = elgamal_private_key(5, seed=[3]); pri
(37, 2, 3)
>>> pub = elgamal_public_key(pri); pub
(37, 2, 8)
>>> msg = 36
>>> encipher_elgamal(msg, pub, seed=[3])
(8, 6)
"""
p, r, e = key
if i < 0 or i >= p:
raise ValueError(
'Message (%s) should be in range(%s)' % (i, p))
randrange = _randrange(seed)
a = randrange(2, p)
return pow(r, a, p), i*pow(e, a, p) % p
def decipher_elgamal(msg, key):
r"""
Decrypt message with private key.
`msg = (c_{1}, c_{2})`
`key = (p, r, d)`
By the extended Euclidean algorithm,
`u c_{1}^{d} + p n = 1`
`u \equiv 1/{{c_{1}}^d} \pmod p`
`u c_{2} \equiv \frac{1}{c_{1}^d} c_{2} \equiv \frac{1}{r^{ad}} c_{2} \pmod p`
`\frac{1}{r^{ad}} m e^a \equiv \frac{1}{r^{ad}} m {r^{d a}} \equiv m \pmod p`
Examples
========
>>> from sympy.crypto.crypto import decipher_elgamal
>>> from sympy.crypto.crypto import encipher_elgamal
>>> from sympy.crypto.crypto import elgamal_private_key
>>> from sympy.crypto.crypto import elgamal_public_key
>>> pri = elgamal_private_key(5, seed=[3])
>>> pub = elgamal_public_key(pri); pub
(37, 2, 8)
>>> msg = 17
>>> decipher_elgamal(encipher_elgamal(msg, pub), pri) == msg
True
"""
p, _, d = key
c1, c2 = msg
u = igcdex(c1**d, p)[0]
return u * c2 % p
################ Diffie-Hellman Key Exchange #########################
def dh_private_key(digit=10, seed=None):
r"""
Return three integer tuple as private key.
Explanation
===========
Diffie-Hellman key exchange is based on the mathematical problem
called the Discrete Logarithm Problem (see ElGamal).
Diffie-Hellman key exchange is divided into the following steps:
* Alice and Bob agree on a base that consists of a prime ``p``
and a primitive root of ``p`` called ``g``
* Alice chooses a number ``a`` and Bob chooses a number ``b`` where
``a`` and ``b`` are random numbers in range `[2, p)`. These are
their private keys.
* Alice then publicly sends Bob `g^{a} \pmod p` while Bob sends
Alice `g^{b} \pmod p`
* They both raise the received value to their secretly chosen
number (``a`` or ``b``) and now have both as their shared key
`g^{ab} \pmod p`
Parameters
==========
digit
Minimum number of binary digits required in key.
Returns
=======
tuple : (p, g, a)
p = prime number.
g = primitive root of p.
a = random number from 2 through p - 1.
Notes
=====
For testing purposes, the ``seed`` parameter may be set to control
the output of this routine. See sympy.testing.randtest._randrange.
Examples
========
>>> from sympy.crypto.crypto import dh_private_key
>>> from sympy.ntheory import isprime, is_primitive_root
>>> p, g, _ = dh_private_key()
>>> isprime(p)
True
>>> is_primitive_root(g, p)
True
>>> p, g, _ = dh_private_key(5)
>>> isprime(p)
True
>>> is_primitive_root(g, p)
True
"""
p = nextprime(2**digit)
g = primitive_root(p)
randrange = _randrange(seed)
a = randrange(2, p)
return p, g, a
def dh_public_key(key):
r"""
Return three number tuple as public key.
This is the tuple that Alice sends to Bob.
Parameters
==========
key : (p, g, a)
A tuple generated by ``dh_private_key``.
Returns
=======
tuple : int, int, int
A tuple of `(p, g, g^a \mod p)` with `p`, `g` and `a` given as
parameters.
Examples
========
>>> from sympy.crypto.crypto import dh_private_key, dh_public_key
>>> p, g, a = dh_private_key();
>>> _p, _g, x = dh_public_key((p, g, a))
>>> p == _p and g == _g
True
>>> x == pow(g, a, p)
True
"""
p, g, a = key
return p, g, pow(g, a, p)
def dh_shared_key(key, b):
"""
Return an integer that is the shared key.
This is what Bob and Alice can both calculate using the public
keys they received from each other and their private keys.
Parameters
==========
key : (p, g, x)
Tuple `(p, g, x)` generated by ``dh_public_key``.
b
Random number in the range of `2` to `p - 1`
(Chosen by second key exchange member (Bob)).
Returns
=======
int
A shared key.
Examples
========
>>> from sympy.crypto.crypto import (
... dh_private_key, dh_public_key, dh_shared_key)
>>> prk = dh_private_key();
>>> p, g, x = dh_public_key(prk);
>>> sk = dh_shared_key((p, g, x), 1000)
>>> sk == pow(x, 1000, p)
True
"""
p, _, x = key
if 1 >= b or b >= p:
raise ValueError(filldedent('''
Value of b should be greater than 1 and less
than prime %s.''' % p))
return pow(x, b, p)
################ Goldwasser-Micali Encryption #########################
def _legendre(a, p):
"""
Returns the Legendre symbol of ``a`` and ``p``
assuming that ``p`` is a prime.
i.e. 1 if a is a quadratic residue mod p,
-1 if a is not a quadratic residue mod p,
0 if a is divisible by p.
Parameters
==========
a : int
The number to test.
p : prime
The prime to test ``a`` against.
Returns
=======
int
Legendre symbol (a / p).
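Examples
========
For example, modulo the prime ``7``:
>>> from sympy.crypto.crypto import _legendre
>>> [_legendre(i, 7) for i in (0, 1, 3)]
[0, 1, -1]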
"""
sig = pow(a, (p - 1)//2, p)
if sig == 1:
return 1
elif sig == 0:
return 0
else:
return -1
def _random_coprime_stream(n, seed=None):
randrange = _randrange(seed)
while True:
y = randrange(n)
if gcd(y, n) == 1:
yield y
def gm_private_key(p, q, a=None):
"""
Check if ``p`` and ``q`` can be used as private keys for
the Goldwasser-Micali encryption. The method works
roughly as follows.
Explanation
===========
$\\cdot$ Pick two large primes $p$ and $q$.
$\\cdot$ Call their product $N$.
$\\cdot$ Given a message as an integer $i$, write $i$ in its
bit representation $b_0$ , $\\dotsc$ , $b_n$ .
$\\cdot$ For each $k$ ,
if $b_k$ = 0:
let $a_k$ be a random square
(quadratic residue) modulo $p q$
such that $jacobi \\_symbol(a, p q) = 1$
if $b_k$ = 1:
let $a_k$ be a random non-square
(non-quadratic residue) modulo $p q$
such that $jacobi \\_ symbol(a, p q) = 1$
returns [$a_1$ , $a_2$ , $\\dotsc$ ]
$b_k$ can be recovered by checking whether or not
$a_k$ is a residue. And from the $b_k$ 's, the message
can be reconstructed.
The idea is that, while $jacobi \\_ symbol(a, p q)$
can be easily computed (and when it is equal to $-1$ will
tell you that $a$ is not a square mod $p q$ ), quadratic
residuosity modulo a composite number is hard to compute
without knowing its factorization.
Moreover, approximately half the numbers coprime to $p q$ have
$jacobi \\_ symbol$ equal to $1$ . And among those, approximately half
are residues and approximately half are not. This maximizes the
entropy of the code.
Parameters
==========
p, q, a
Initialization variables.
Returns
=======
tuple : (p, q)
The input value ``p`` and ``q``.
Raises
======
ValueError
If ``p`` and ``q`` are not distinct odd primes.
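Examples
========
Two distinct odd primes are returned unchanged:
>>> from sympy.crypto.crypto import gm_private_key
>>> gm_private_key(3, 7)
(3, 7)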
"""
if p == q:
raise ValueError("expected distinct primes, "
"got two copies of %i" % p)
elif not isprime(p) or not isprime(q):
raise ValueError("first two arguments must be prime, "
"got %i of %i" % (p, q))
elif p == 2 or q == 2:
raise ValueError("first two arguments must not be even, "
"got %i of %i" % (p, q))
return p, q
def gm_public_key(p, q, a=None, seed=None):
"""
Compute public keys for ``p`` and ``q``.
Note that in Goldwasser-Micali Encryption,
public keys are randomly selected.
Parameters
==========
p, q, a : int, int, int
Initialization variables.
Returns
=======
tuple : (a, N)
``a`` is the input ``a`` if it is not ``None`` otherwise
some random integer coprime to ``p`` and ``q``.
``N`` is the product of ``p`` and ``q``.
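Examples
========
With ``a`` given explicitly (here a non-residue modulo both primes),
the result is deterministic:
>>> from sympy.crypto.crypto import gm_public_key
>>> gm_public_key(3, 7, 5)
(5, 21)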
"""
p, q = gm_private_key(p, q)
N = p * q
if a is None:
randrange = _randrange(seed)
while True:
a = randrange(N)
if _legendre(a, p) == _legendre(a, q) == -1:
break
else:
if _legendre(a, p) != -1 or _legendre(a, q) != -1:
return False
return (a, N)
def encipher_gm(i, key, seed=None):
"""
Encrypt integer ``i`` using the public key ``key``.
Note that gm uses random encryption.
Parameters
==========
i : int
The message to encrypt.
key : (a, N)
The public key.
Returns
=======
list : list of int
The randomized encrypted message.
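Examples
========
The ciphertext is randomized, but decryption always recovers the
original message:
>>> from sympy.crypto.crypto import (
... gm_private_key, gm_public_key, encipher_gm, decipher_gm)
>>> pri = gm_private_key(3, 7)
>>> pub = gm_public_key(3, 7, 5)
>>> decipher_gm(encipher_gm(12, pub), pri) == 12
True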
"""
if i < 0:
raise ValueError(
"message must be a non-negative "
"integer: got %d instead" % i)
a, N = key
bits = []
while i > 0:
bits.append(i % 2)
i //= 2
gen = _random_coprime_stream(N, seed)
rev = reversed(bits)
encode = lambda b: next(gen)**2*pow(a, b) % N
return [ encode(b) for b in rev ]
def decipher_gm(message, key):
"""
Decrypt message ``message`` using the private key ``key``.
Parameters
==========
message : list of int
The randomized encrypted message.
key : (p, q)
The private key.
Returns
=======
int
The decrypted message.
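Examples
========
A round trip with keys from ``gm_private_key`` and ``gm_public_key``:
>>> from sympy.crypto.crypto import (
... gm_private_key, gm_public_key, encipher_gm, decipher_gm)
>>> pri = gm_private_key(3, 7)
>>> pub = gm_public_key(3, 7, 5)
>>> decipher_gm(encipher_gm(12, pub), pri)
12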
"""
p, q = key
res = lambda m, p: _legendre(m, p) > 0
bits = [res(m, p) * res(m, q) for m in message]
m = 0
for b in bits:
m <<= 1
m += not b
return m
########### RailFence Cipher #############
def encipher_railfence(message,rails):
"""
Performs Railfence Encryption on plaintext and returns ciphertext
Examples
========
>>> from sympy.crypto.crypto import encipher_railfence
>>> message = "hello world"
>>> encipher_railfence(message,3)
'horel ollwd'
Parameters
==========
message : string, the message to encrypt.
rails : int, the number of rails.
Returns
=======
The encrypted string message.
References
==========
.. [1] https://en.wikipedia.org/wiki/Rail_fence_cipher
"""
r = list(range(rails))
p = cycle(r + r[-2:0:-1])
return ''.join(sorted(message, key=lambda i: next(p)))
def decipher_railfence(ciphertext,rails):
"""
Decrypt the message using the given rails
Examples
========
>>> from sympy.crypto.crypto import decipher_railfence
>>> decipher_railfence("horel ollwd",3)
'hello world'
Parameters
==========
ciphertext : string, the message to decrypt.
rails : int, the number of rails.
Returns
=======
The decrypted string message.
"""
r = list(range(rails))
p = cycle(r + r[-2:0:-1])
idx = sorted(range(len(ciphertext)), key=lambda i: next(p))
res = [''] * len(ciphertext)
for i, c in zip(idx, ciphertext):
res[i] = c
return ''.join(res)
################ Blum-Goldwasser cryptosystem #########################
def bg_private_key(p, q):
"""
Check if p and q can be used as private keys for
the Blum-Goldwasser cryptosystem.
Explanation
===========
The three necessary checks for p and q to pass
so that they can be used as private keys:
1. p and q must both be prime
2. p and q must be distinct
3. p and q must be congruent to 3 mod 4
Parameters
==========
p, q
The keys to be checked.
Returns
=======
p, q
Input values.
Raises
======
ValueError
If p and q do not pass the above conditions.
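Examples
========
Distinct primes that are both congruent to 3 mod 4 pass the checks:
>>> from sympy.crypto.crypto import bg_private_key
>>> bg_private_key(19, 23)
(19, 23)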
"""
if not isprime(p) or not isprime(q):
raise ValueError("the two arguments must be prime, "
"got %i and %i" %(p, q))
elif p == q:
raise ValueError("the two arguments must be distinct, "
"got two copies of %i. " %p)
elif (p - 3) % 4 != 0 or (q - 3) % 4 != 0:
raise ValueError("the two arguments must be congruent to 3 mod 4, "
"got %i and %i" %(p, q))
return p, q
def bg_public_key(p, q):
"""
Calculates public keys from private keys.
Explanation
===========
The function first checks the validity of
private keys passed as arguments and
then returns their product.
Parameters
==========
p, q
The private keys.
Returns
=======
N
The public key.
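Examples
========
The public key is the product of the two validated primes:
>>> from sympy.crypto.crypto import bg_public_key
>>> bg_public_key(19, 23)
437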
"""
p, q = bg_private_key(p, q)
N = p * q
return N
def encipher_bg(i, key, seed=None):
"""
Encrypts the message using public key and seed.
Explanation
===========
ALGORITHM:
1. Encodes i as a string of L bits, m.
2. Select a random element r, where 1 < r < key, and computes
x = r^2 mod key.
3. Use BBS pseudo-random number generator to generate L random bits, b,
using the initial seed as x.
4. Encrypted message, c_i = m_i XOR b_i, 1 <= i <= L.
5. x_L = x^(2^L) mod key.
6. Return (c, x_L)
Parameters
==========
i
Message, a non-negative integer
key
The public key
Returns
=======
Tuple
(encrypted_message, x_L)
Raises
======
ValueError
If i is negative.
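Examples
========
The ciphertext depends on a random ``r``, but decryption always
recovers the message:
>>> from sympy.crypto.crypto import bg_public_key, encipher_bg, decipher_bg
>>> p, q = 19, 23
>>> N = bg_public_key(p, q)
>>> decipher_bg(encipher_bg(101, N), (p, q)) == 101
True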
"""
if i < 0:
raise ValueError(
"message must be a non-negative "
"integer: got %d instead" % i)
enc_msg = []
while i > 0:
enc_msg.append(i % 2)
i //= 2
enc_msg.reverse()
L = len(enc_msg)
r = _randint(seed)(2, key - 1)
x = r**2 % key
x_L = pow(int(x), int(2**L), int(key))
rand_bits = []
for _ in range(L):
rand_bits.append(x % 2)
x = x**2 % key
encrypt_msg = [m ^ b for (m, b) in zip(enc_msg, rand_bits)]
return (encrypt_msg, x_L)
def decipher_bg(message, key):
"""
Decrypts the message using private keys.
Explanation
===========
ALGORITHM:
1. Let, c be the encrypted message, y the second number received,
and p and q be the private keys.
2. Compute, r_p = y^((p+1)/4 ^ L) mod p and
r_q = y^((q+1)/4 ^ L) mod q.
3. Compute x_0 = (q(q^-1 mod p)r_p + p(p^-1 mod q)r_q) mod N.
4. From x_0, recompute the bits using the BBS generator, as in the
encryption algorithm.
5. Compute original message by XORing c and b.
Parameters
==========
message
Tuple of encrypted message and a non-negative integer.
key
Tuple of private keys.
Returns
=======
orig_msg
The original message
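Examples
========
A round trip using the keys from ``bg_private_key``:
>>> from sympy.crypto.crypto import (
... bg_private_key, bg_public_key, encipher_bg, decipher_bg)
>>> p, q = bg_private_key(19, 23)
>>> N = bg_public_key(p, q)
>>> decipher_bg(encipher_bg(7, N), (p, q))
7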
"""
p, q = key
encrypt_msg, y = message
public_key = p * q
L = len(encrypt_msg)
# p and q are congruent to 3 mod 4, so (p + 1)//4 is exact; use integer
# arithmetic to avoid float imprecision for large keys
p_t = ((p + 1)//4)**L
q_t = ((q + 1)//4)**L
r_p = pow(int(y), int(p_t), int(p))
r_q = pow(int(y), int(q_t), int(q))
x = (q * mod_inverse(q, p) * r_p + p * mod_inverse(p, q) * r_q) % public_key
orig_bits = []
for _ in range(L):
orig_bits.append(x % 2)
x = x**2 % public_key
orig_msg = 0
for (m, b) in zip(encrypt_msg, orig_bits):
orig_msg = orig_msg * 2
orig_msg += (m ^ b)
return orig_msg
from sympy.core.add import Add
from sympy.core.compatibility import ordered
from sympy.core.function import expand_log
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.core.symbol import Dummy
from sympy.functions.elementary.exponential import (LambertW, exp, log)
from sympy.functions.elementary.miscellaneous import root
from sympy.polys.polyroots import roots
from sympy.polys.polytools import Poly, factor
from sympy.core.function import _mexpand
from sympy.simplify.simplify import separatevars
from sympy.simplify.radsimp import collect
from sympy.simplify.simplify import powsimp
from sympy.solvers.solvers import solve, _invert
from sympy.utilities.iterables import uniq
def _filtered_gens(poly, symbol):
"""process the generators of ``poly``, returning the set of generators that
have ``symbol``. If there are two generators that are inverses of each other,
prefer the one that has no denominator.
Examples
========
>>> from sympy.solvers.bivariate import _filtered_gens
>>> from sympy import Poly, exp
>>> from sympy.abc import x
>>> _filtered_gens(Poly(x + 1/x + exp(x)), x)
{x, exp(x)}
"""
gens = {g for g in poly.gens if symbol in g.free_symbols}
for g in list(gens):
ag = 1/g
if g in gens and ag in gens:
if ag.as_numer_denom()[1] is not S.One:
g = ag
gens.remove(g)
return gens
def _mostfunc(lhs, func, X=None):
"""Returns the term in lhs which contains the most of the
func-type things e.g. log(log(x)) wins over log(x) if both terms appear.
``func`` can be a function (exp, log, etc...) or any other SymPy object,
like Pow.
If ``X`` is not ``None``, then the function returns the term with the
most ``func`` applications that contains the specified variable.
Examples
========
>>> from sympy.solvers.bivariate import _mostfunc
>>> from sympy.functions.elementary.exponential import exp
>>> from sympy.abc import x, y
>>> _mostfunc(exp(x) + exp(exp(x) + 2), exp)
exp(exp(x) + 2)
>>> _mostfunc(exp(x) + exp(exp(y) + 2), exp)
exp(exp(y) + 2)
>>> _mostfunc(exp(x) + exp(exp(y) + 2), exp, x)
exp(x)
>>> _mostfunc(x, exp, x) is None
True
>>> _mostfunc(exp(x) + exp(x*y), exp, x)
exp(x)
"""
fterms = [tmp for tmp in lhs.atoms(func) if (not X or
X.is_Symbol and X in tmp.free_symbols or
not X.is_Symbol and tmp.has(X))]
if len(fterms) == 1:
return fterms[0]
elif fterms:
return max(list(ordered(fterms)), key=lambda x: x.count(func))
return None
def _linab(arg, symbol):
"""Return ``a, b, X`` assuming ``arg`` can be written as ``a*X + b``
where ``X`` is a symbol-dependent factor and ``a`` and ``b`` are
independent of ``symbol``.
Examples
========
>>> from sympy.functions.elementary.exponential import exp
>>> from sympy.solvers.bivariate import _linab
>>> from sympy.abc import x, y
>>> from sympy import S
>>> _linab(S(2), x)
(2, 0, 1)
>>> _linab(2*x, x)
(2, 0, x)
>>> _linab(y + y*x + 2*x, x)
(y + 2, y, x)
>>> _linab(3 + 2*exp(x), x)
(2, 3, exp(x))
"""
from sympy.core.exprtools import factor_terms
arg = factor_terms(arg.expand())
ind, dep = arg.as_independent(symbol)
if arg.is_Mul and dep.is_Add:
a, b, x = _linab(dep, symbol)
return ind*a, ind*b, x
if not arg.is_Add:
b = 0
a, x = ind, dep
else:
b = ind
a, x = separatevars(dep).as_independent(symbol, as_Add=False)
if x.could_extract_minus_sign():
a = -a
x = -x
return a, b, x
def _lambert(eq, x):
"""
Given an expression assumed to be in the form
``F(X, a..f) = a*log(b*X + c) + d*X + f = 0``
where X = g(x) and x = g^-1(X), return the Lambert solution,
``x = g^-1(-c/b + (a/d)*W(d/(a*b)*exp(c*d/a/b)*exp(-f/a)))``.
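Examples
========
For example, ``log(x) + x = 0`` (i.e. ``x*exp(x) = 1``) yields the
principal branch of the Lambert W function:
>>> from sympy.solvers.bivariate import _lambert
>>> from sympy import log
>>> from sympy.abc import x
>>> _lambert(log(x) + x, x)
[LambertW(1)]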
"""
eq = _mexpand(expand_log(eq))
mainlog = _mostfunc(eq, log, x)
if not mainlog:
return [] # violated assumptions
other = eq.subs(mainlog, 0)
if isinstance(-other, log):
eq = (eq - other).subs(mainlog, mainlog.args[0])
mainlog = mainlog.args[0]
if not isinstance(mainlog, log):
return [] # violated assumptions
other = -(-other).args[0]
eq += other
if not x in other.free_symbols:
return [] # violated assumptions
d, f, X2 = _linab(other, x)
logterm = collect(eq - other, mainlog)
a = logterm.as_coefficient(mainlog)
if a is None or x in a.free_symbols:
return [] # violated assumptions
logarg = mainlog.args[0]
b, c, X1 = _linab(logarg, x)
if X1 != X2:
return [] # violated assumptions
# invert the generator X1 so we have x(u)
u = Dummy('rhs')
xusolns = solve(X1 - u, x)
# There are infinitely many branches for LambertW
# but only branches for k = -1 and 0 might be real. The k = 0
# branch is real and the k = -1 branch is real if the LambertW argument
# is in the range [-1/e, 0]. Since `solve` does not return infinite
# solutions we will only include the -1 branch if it tests as real.
# Otherwise, inclusion of any LambertW in the solution indicates to
# the user that there are imaginary solutions corresponding to
# different k values.
lambert_real_branches = [-1, 0]
sol = []
# solution of the given Lambert equation is like
# sol = -c/b + (a/d)*LambertW(arg, k),
# where arg = d/(a*b)*exp((c*d-b*f)/a/b) and k in lambert_real_branches.
# Instead of considering the single arg, `d/(a*b)*exp((c*d-b*f)/a/b)`,
# the individual `p` roots obtained when writing `exp((c*d-b*f)/a/b)`
# as `exp(A/p) = exp(A)**(1/p)`, where `p` is an Integer, are used.
# calculating args for LambertW
num, den = ((c*d-b*f)/a/b).as_numer_denom()
p, den = den.as_coeff_Mul()
e = exp(num/den)
t = Dummy('t')
args = [d/(a*b)*t for t in roots(t**p - e, t).keys()]
# calculating solutions from args
for arg in args:
for k in lambert_real_branches:
w = LambertW(arg, k)
if k and not w.is_real:
continue
rhs = -c/b + (a/d)*w
for xu in xusolns:
sol.append(xu.subs(u, rhs))
return sol
def _solve_lambert(f, symbol, gens):
"""Return solution to ``f`` if it is a Lambert-type expression
else raise NotImplementedError.
For ``f(X, a..f) = a*log(b*X + c) + d*X - f = 0`` the solution
for ``X`` is ``X = -c/b + (a/d)*W(d/(a*b)*exp(c*d/a/b)*exp(f/a))``.
There are a variety of forms for `f(X, a..f)` as enumerated below:
1a1)
if B**B = R for R not in [0, 1] (since those cases would already
be solved before getting here) then log of both sides gives
log(B) + log(log(B)) = log(log(R)) and
X = log(B), a = 1, b = 1, c = 0, d = 1, f = log(log(R))
1a2)
if B*(b*log(B) + c)**a = R then log of both sides gives
log(B) + a*log(b*log(B) + c) = log(R) and
X = log(B), d=1, f=log(R)
1b)
if a*log(b*B + c) + d*B = R and
X = B, f = R
2a)
if (b*B + c)*exp(d*B + g) = R then log of both sides gives
log(b*B + c) + d*B + g = log(R) and
X = B, a = 1, f = log(R) - g
2b)
if g*exp(d*B + h) - b*B = c then the log form is
log(g) + d*B + h - log(b*B + c) = 0 and
X = B, a = -1, f = -h - log(g)
3)
if d*p**(a*B + g) - b*B = c then the log form is
log(d) + (a*B + g)*log(p) - log(b*B + c) = 0 and
X = B, a = -1, d = a*log(p), f = -log(d) - g*log(p)
"""
def _solve_even_degree_expr(expr, t, symbol):
"""Return the unique solutions of equations derived from
``expr`` by replacing ``t`` with ``+/- symbol``.
Parameters
==========
expr : Expr
The expression which includes a dummy variable t to be
replaced with +symbol and -symbol.
symbol : Symbol
The symbol for which a solution is being sought.
Returns
=======
List of unique solution of the two equations generated by
replacing ``t`` with positive and negative ``symbol``.
Notes
=====
If ``expr = 2*log(t) + x/2`` then solutions for
``2*log(x) + x/2 = 0`` and ``2*log(-x) + x/2 = 0`` are
returned by this function. Though this may seem
counter-intuitive, one must note that the ``expr`` being
solved here has been derived from a different expression. For
an expression like ``eq = x**2*g(x) = 1``, if we take the
log of both sides we obtain ``log(x**2) + log(g(x)) = 0``. If
x is positive then this simplifies to
``2*log(x) + log(g(x)) = 0``; the Lambert-solving routines will
return solutions for this, but we must also consider the
solutions for ``2*log(-x) + log(g(x))`` since those must also
be a solution of ``eq`` which has the same value when the ``x``
in ``x**2`` is negated. If `g(x)` does not have even powers of
symbol then we don't want to replace the ``x`` there with
``-x``. So the role of the ``t`` in the expression received by
this function is to mark where ``+/-x`` should be inserted
before obtaining the Lambert solutions.
"""
nlhs, plhs = [
expr.xreplace({t: sgn*symbol}) for sgn in (-1, 1)]
sols = _solve_lambert(nlhs, symbol, gens)
if plhs != nlhs:
sols.extend(_solve_lambert(plhs, symbol, gens))
# uniq is needed for a case like
# 2*log(t) - log(-z**2) + log(z + log(x) + log(z))
# where substituting t with +/-x gives all the same solution;
# uniq, rather than list(set()), is used to maintain canonical
# order
return list(uniq(sols))
nrhs, lhs = f.as_independent(symbol, as_Add=True)
rhs = -nrhs
lamcheck = [tmp for tmp in gens
if (tmp.func in [exp, log] or
(tmp.is_Pow and symbol in tmp.exp.free_symbols))]
if not lamcheck:
raise NotImplementedError()
if lhs.is_Add or lhs.is_Mul:
# replacing all even_degrees of symbol with dummy variable t
# since these will need special handling; non-Add/Mul do not
# need this handling
t = Dummy('t', **symbol.assumptions0)
lhs = lhs.replace(
lambda i: # find symbol**even
i.is_Pow and i.base == symbol and i.exp.is_even,
lambda i: # replace t**even
t**i.exp)
if lhs.is_Add and lhs.has(t):
t_indep = lhs.subs(t, 0)
t_term = lhs - t_indep
_rhs = rhs - t_indep
if not t_term.is_Add and _rhs and not (
t_term.has(S.ComplexInfinity, S.NaN)):
eq = expand_log(log(t_term) - log(_rhs))
return _solve_even_degree_expr(eq, t, symbol)
elif lhs.is_Mul and rhs:
# this needs to happen whether t is present or not
lhs = expand_log(log(lhs), force=True)
rhs = log(rhs)
if lhs.has(t) and lhs.is_Add:
# it expanded from Mul to Add
eq = lhs - rhs
return _solve_even_degree_expr(eq, t, symbol)
# restore symbol in lhs
lhs = lhs.xreplace({t: symbol})
lhs = powsimp(factor(lhs, deep=True))
# make sure we have inverted as completely as possible
r = Dummy()
i, lhs = _invert(lhs - r, symbol)
rhs = i.xreplace({r: rhs})
# For the first forms:
#
# 1a1) B**B = R will arrive here as B*log(B) = log(R)
# lhs is Mul so take log of both sides:
# log(B) + log(log(B)) = log(log(R))
# 1a2) B*(b*log(B) + c)**a = R will arrive unchanged so
# lhs is Mul, so take log of both sides:
# log(B) + a*log(b*log(B) + c) = log(R)
# 1b) d*log(a*B + b) + c*B = R will arrive unchanged so
# lhs is Add, so isolate c*B and expand log of both sides:
# log(c) + log(B) = log(R - d*log(a*B + b))
soln = []
if not soln:
mainlog = _mostfunc(lhs, log, symbol)
if mainlog:
if lhs.is_Mul and rhs != 0:
soln = _lambert(log(lhs) - log(rhs), symbol)
elif lhs.is_Add:
other = lhs.subs(mainlog, 0)
if other and not other.is_Add and [
tmp for tmp in other.atoms(Pow)
if symbol in tmp.free_symbols]:
if not rhs:
diff = log(other) - log(other - lhs)
else:
diff = log(lhs - other) - log(rhs - other)
soln = _lambert(expand_log(diff), symbol)
else:
#it's ready to go
soln = _lambert(lhs - rhs, symbol)
# For the next forms,
#
# collect on main exp
# 2a) (b*B + c)*exp(d*B + g) = R
# lhs is mul, so take log of both sides:
# log(b*B + c) + d*B = log(R) - g
# 2b) g*exp(d*B + h) - b*B = R
# lhs is add, so add b*B to both sides,
# take the log of both sides and rearrange to give
# log(R + b*B) - d*B = log(g) + h
if not soln:
mainexp = _mostfunc(lhs, exp, symbol)
if mainexp:
lhs = collect(lhs, mainexp)
if lhs.is_Mul and rhs != 0:
soln = _lambert(expand_log(log(lhs) - log(rhs)), symbol)
elif lhs.is_Add:
# move all but mainexp-containing term to rhs
other = lhs.subs(mainexp, 0)
mainterm = lhs - other
rhs = rhs - other
if (mainterm.could_extract_minus_sign() and
rhs.could_extract_minus_sign()):
mainterm *= -1
rhs *= -1
diff = log(mainterm) - log(rhs)
soln = _lambert(expand_log(diff), symbol)
# For the last form:
#
# 3) d*p**(a*B + g) - b*B = c
# collect on main pow, add b*B to both sides,
# take log of both sides and rearrange to give
# a*B*log(p) - log(b*B + c) = -log(d) - g*log(p)
if not soln:
mainpow = _mostfunc(lhs, Pow, symbol)
if mainpow and symbol in mainpow.exp.free_symbols:
lhs = collect(lhs, mainpow)
if lhs.is_Mul and rhs != 0:
# b*B = 0
soln = _lambert(expand_log(log(lhs) - log(rhs)), symbol)
elif lhs.is_Add:
# move all but mainpow-containing term to rhs
other = lhs.subs(mainpow, 0)
mainterm = lhs - other
rhs = rhs - other
diff = log(mainterm) - log(rhs)
soln = _lambert(expand_log(diff), symbol)
if not soln:
raise NotImplementedError('%s does not appear to have a solution in '
'terms of LambertW' % f)
return list(ordered(soln))
def bivariate_type(f, x, y, *, first=True):
"""Given an expression, f, 3 tests will be done to see what type
of composite bivariate it might be, options for u(x, y) are::
x*y
x+y
x*y+x
x*y+y
If it matches one of these types, ``u(x, y)``, ``P(u)`` and dummy
variable ``u`` will be returned. Solving ``P(u)`` for ``u`` and
equating the solutions to ``u(x, y)`` and then solving for ``x`` or
``y`` is equivalent to solving the original expression for ``x`` or
``y``. If ``x`` and ``y`` represent two functions in the same
variable, e.g. ``x = g(t)`` and ``y = h(t)``, then if ``u(x, y) - p``
can be solved for ``t`` then these represent the solutions to
``P(u) = 0`` when ``p`` are the solutions of ``P(u) = 0``.
Only positive values of ``u`` are considered.
Examples
========
>>> from sympy.solvers.solvers import solve
>>> from sympy.solvers.bivariate import bivariate_type
>>> from sympy.abc import x, y
>>> eq = (x**2 - 3).subs(x, x + y)
>>> bivariate_type(eq, x, y)
(x + y, _u**2 - 3, _u)
>>> uxy, pu, u = _
>>> usol = solve(pu, u); usol
[sqrt(3)]
>>> [solve(uxy - s) for s in solve(pu, u)]
[[{x: -y + sqrt(3)}]]
>>> all(eq.subs(s).equals(0) for sol in _ for s in sol)
True
"""
u = Dummy('u', positive=True)
if first:
p = Poly(f, x, y)
f = p.as_expr()
_x = Dummy()
_y = Dummy()
rv = bivariate_type(Poly(f.subs({x: _x, y: _y}), _x, _y), _x, _y, first=False)
if rv:
reps = {_x: x, _y: y}
return rv[0].xreplace(reps), rv[1].xreplace(reps), rv[2]
return
p = f
f = p.as_expr()
# f(x*y)
args = Add.make_args(p.as_expr())
new = []
for a in args:
a = _mexpand(a.subs(x, u/y))
free = a.free_symbols
if x in free or y in free:
break
new.append(a)
else:
return x*y, Add(*new), u
def ok(f, v, c):
new = _mexpand(f.subs(v, c))
free = new.free_symbols
return None if (x in free or y in free) else new
# f(a*x + b*y)
new = []
d = p.degree(x)
if p.degree(y) == d:
a = root(p.coeff_monomial(x**d), d)
b = root(p.coeff_monomial(y**d), d)
new = ok(f, x, (u - b*y)/a)
if new is not None:
return a*x + b*y, new, u
# f(a*x*y + b*y)
new = []
d = p.degree(x)
if p.degree(y) == d:
for itry in range(2):
a = root(p.coeff_monomial(x**d*y**d), d)
b = root(p.coeff_monomial(y**d), d)
new = ok(f, x, (u - b*y)/a/y)
if new is not None:
return a*x*y + b*y, new, u
x, y = y, x
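# Illustrative sketch only (not part of the module): the ``x*y`` branch above
# matches products directly, e.g.
#
#     >>> from sympy.solvers.bivariate import bivariate_type
#     >>> from sympy.abc import x, y
#     >>> bivariate_type(x*y - 1, x, y)
#     (x*y, _u - 1, _u)
#
# where ``_u`` is the positive Dummy created at the top of the function.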
6979c6ce6a257f2ae9f98793ac839436534e4ab9bddc03a0420acae110359871
r"""
This module is intended for solving recurrences or, in other words,
difference equations. Currently supported are linear, inhomogeneous
equations with polynomial or rational coefficients.
The solutions are obtained among polynomials, rational functions,
hypergeometric terms, or combinations of hypergeometric terms which
are pairwise dissimilar.
``rsolve_X`` functions were meant as a low level interface
for ``rsolve`` which would use Mathematica's syntax.
Given a recurrence relation:
.. math:: a_{k}(n) y(n+k) + a_{k-1}(n) y(n+k-1) +
\cdots + a_{0}(n) y(n) = f(n)
where `k > 0` and `a_{i}(n)` are polynomials in `n`. To use
``rsolve_X`` we need to put all coefficients into a list ``L`` of
`k+1` elements in the following way:
``L = [a_{0}(n), ..., a_{k-1}(n), a_{k}(n)]``
where ``L[i]``, for `i=0, \ldots, k`, maps to
`a_{i}(n) y(n+i)` (`y(n+i)` is implicit).
For example, if we would like to compute the `m`-th Bernoulli polynomial
up to a constant (this example is taken from the rsolve_poly docstring),
then we would use the recurrence `b(n+1) - b(n) = m n^{m-1}`, which
has the solution `b(n) = B_m(n) + C`.
Then ``L = [-1, 1]``, `f(n) = m n^{m-1}` and finally, for `m=4`:
>>> from sympy import Symbol, bernoulli, rsolve_poly
>>> n = Symbol('n', integer=True)
>>> rsolve_poly([-1, 1], 4*n**3, n)
C0 + n**4 - 2*n**3 + n**2
>>> bernoulli(4, n)
n**4 - 2*n**3 + n**2 - 1/30
For the sake of completeness, `f(n)` can be:
[1] a polynomial -> rsolve_poly
[2] a rational function -> rsolve_ratio
[3] a hypergeometric function -> rsolve_hyper
"""
from collections import defaultdict
from sympy.core.singleton import S
from sympy.core.numbers import Rational, I
from sympy.core.symbol import Symbol, Wild, Dummy
from sympy.core.relational import Equality
from sympy.core.add import Add
from sympy.core.mul import Mul
from sympy.core import sympify
from sympy.simplify import simplify, hypersimp, hypersimilar # type: ignore
from sympy.solvers import solve, solve_undetermined_coeffs
from sympy.polys import Poly, quo, gcd, lcm, roots, resultant
from sympy.functions import binomial, factorial, FallingFactorial, RisingFactorial
from sympy.matrices import Matrix, casoratian
from sympy.concrete import product
from sympy.core.compatibility import default_sort_key
from sympy.utilities.iterables import numbered_symbols
def rsolve_poly(coeffs, f, n, **hints):
r"""
Given linear recurrence operator `\operatorname{L}` of order
`k` with polynomial coefficients and inhomogeneous equation
`\operatorname{L} y = f`, where `f` is a polynomial, we seek
all polynomial solutions over a field `K` of characteristic zero.
The algorithm performs two basic steps:
(1) Compute degree `N` of the general polynomial solution.
(2) Find all polynomial solutions of `\operatorname{L} y = f`
of degree `N` or less.
There are two methods for computing the polynomial solutions.
If the degree bound is relatively small, i.e. it is smaller than
or equal to the order of the recurrence, then the naive method of
undetermined coefficients is used. This gives a system
of algebraic equations with `N+1` unknowns.
In the other case, the algorithm transforms the initial equation
into an equivalent one for which the system of
algebraic equations has only `r` indeterminates. This method is
quite sophisticated (in comparison with the naive one) and was
invented jointly by Abramov, Bronstein and Petkovsek.
It is possible to generalize the algorithm implemented here to
the case of linear q-difference and differential equations.
Let's say that we would like to compute the `m`-th Bernoulli polynomial
up to a constant. For this we can use the recurrence
`b(n+1) - b(n) = m n^{m-1}`, which has the solution `b(n) = B_m(n) + C`. For example:
>>> from sympy import Symbol, rsolve_poly
>>> n = Symbol('n', integer=True)
>>> rsolve_poly([-1, 1], 4*n**3, n)
C0 + n**4 - 2*n**3 + n**2
References
==========
.. [1] S. A. Abramov, M. Bronstein and M. Petkovsek, On polynomial
solutions of linear operator equations, in: T. Levelt, ed.,
Proc. ISSAC '95, ACM Press, New York, 1995, 290-296.
.. [2] M. Petkovsek, Hypergeometric solutions of linear recurrences
with polynomial coefficients, J. Symbolic Computation,
14 (1992), 243-264.
.. [3] M. Petkovsek, H. S. Wilf, D. Zeilberger, A = B, 1996.
"""
f = sympify(f)
if not f.is_polynomial(n):
return None
homogeneous = f.is_zero
r = len(coeffs) - 1
coeffs = [Poly(coeff, n) for coeff in coeffs]
polys = [Poly(0, n)]*(r + 1)
terms = [(S.Zero, S.NegativeInfinity)]*(r + 1)
for i in range(r + 1):
for j in range(i, r + 1):
polys[i] += coeffs[j]*(binomial(j, i).as_poly(n))
if not polys[i].is_zero:
(exp,), coeff = polys[i].LT()
terms[i] = (coeff, exp)
d = b = terms[0][1]
for i in range(1, r + 1):
if terms[i][1] > d:
d = terms[i][1]
if terms[i][1] - i > b:
b = terms[i][1] - i
d, b = int(d), int(b)
x = Dummy('x')
degree_poly = S.Zero
for i in range(r + 1):
if terms[i][1] - i == b:
degree_poly += terms[i][0]*FallingFactorial(x, i)
nni_roots = list(roots(degree_poly, x, filter='Z',
predicate=lambda r: r >= 0).keys())
if nni_roots:
N = [max(nni_roots)]
else:
N = []
if homogeneous:
N += [-b - 1]
else:
N += [f.as_poly(n).degree() - b, -b - 1]
N = int(max(N))
if N < 0:
if homogeneous:
if hints.get('symbols', False):
return (S.Zero, [])
else:
return S.Zero
else:
return None
if N <= r:
C = []
y = E = S.Zero
for i in range(N + 1):
C.append(Symbol('C' + str(i)))
y += C[i] * n**i
for i in range(r + 1):
E += coeffs[i].as_expr()*y.subs(n, n + i)
solutions = solve_undetermined_coeffs(E - f, C, n)
if solutions is not None:
C = [c for c in C if (c not in solutions)]
result = y.subs(solutions)
else:
return None # TBD
else:
A = r
U = N + A + b + 1
nni_roots = list(roots(polys[r], filter='Z',
predicate=lambda r: r >= 0).keys())
if nni_roots != []:
a = max(nni_roots) + 1
else:
a = S.Zero
def _zero_vector(k):
return [S.Zero] * k
def _one_vector(k):
return [S.One] * k
def _delta(p, k):
B = S.One
D = p.subs(n, a + k)
for i in range(1, k + 1):
B *= Rational(i - k - 1, i)
D += B * p.subs(n, a + k - i)
return D
alpha = {}
for i in range(-A, d + 1):
I = _one_vector(d + 1)
for k in range(1, d + 1):
I[k] = I[k - 1] * (x + i - k + 1)/k
alpha[i] = S.Zero
for j in range(A + 1):
for k in range(d + 1):
B = binomial(k, i + j)
D = _delta(polys[j].as_expr(), k)
alpha[i] += I[k]*B*D
V = Matrix(U, A, lambda i, j: int(i == j))
if homogeneous:
for i in range(A, U):
v = _zero_vector(A)
for k in range(1, A + b + 1):
if i - k < 0:
break
B = alpha[k - A].subs(x, i - k)
for j in range(A):
v[j] += B * V[i - k, j]
denom = alpha[-A].subs(x, i)
for j in range(A):
V[i, j] = -v[j] / denom
else:
G = _zero_vector(U)
for i in range(A, U):
v = _zero_vector(A)
g = S.Zero
for k in range(1, A + b + 1):
if i - k < 0:
break
B = alpha[k - A].subs(x, i - k)
for j in range(A):
v[j] += B * V[i - k, j]
g += B * G[i - k]
denom = alpha[-A].subs(x, i)
for j in range(A):
V[i, j] = -v[j] / denom
G[i] = (_delta(f, i - A) - g) / denom
P, Q = _one_vector(U), _zero_vector(A)
for i in range(1, U):
P[i] = (P[i - 1] * (n - a - i + 1)/i).expand()
for i in range(A):
Q[i] = Add(*[(v*p).expand() for v, p in zip(V[:, i], P)])
if not homogeneous:
h = Add(*[(g*p).expand() for g, p in zip(G, P)])
C = [Symbol('C' + str(i)) for i in range(A)]
g = lambda i: Add(*[c*_delta(q, i) for c, q in zip(C, Q)])
if homogeneous:
E = [g(i) for i in range(N + 1, U)]
else:
E = [g(i) + _delta(h, i) for i in range(N + 1, U)]
if E != []:
solutions = solve(E, *C)
if not solutions:
if homogeneous:
if hints.get('symbols', False):
return (S.Zero, [])
else:
return S.Zero
else:
return None
else:
solutions = {}
if homogeneous:
result = S.Zero
else:
result = h
for c, q in list(zip(C, Q)):
if c in solutions:
s = solutions[c]*q
C.remove(c)
else:
s = c*q
result += s.expand()
if hints.get('symbols', False):
return (result, C)
else:
return result
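# Illustrative sketch only (not part of the module): with the ``symbols=True``
# hint the remaining free constants are returned alongside the solution, e.g.
#
#     >>> from sympy import Symbol
#     >>> from sympy.solvers.recurr import rsolve_poly
#     >>> n = Symbol('n', integer=True)
#     >>> rsolve_poly([-1, 1], 4*n**3, n, symbols=True)
#
# is expected to return a pair (solution, constants): here the polynomial
# C0 + n**4 - 2*n**3 + n**2 together with the list of undetermined constants.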
def rsolve_ratio(coeffs, f, n, **hints):
r"""
Given linear recurrence operator `\operatorname{L}` of order `k`
with polynomial coefficients and inhomogeneous equation
`\operatorname{L} y = f`, where `f` is a polynomial, we seek
all rational solutions over a field `K` of characteristic zero.
This procedure accepts only polynomials; however, if you are
interested in solving a recurrence with rational coefficients,
then use ``rsolve``, which will pre-process the given equation
and run this procedure with polynomial arguments.
The algorithm performs two basic steps:
(1) Compute polynomial `v(n)` which can be used as universal
denominator of any rational solution of equation
`\operatorname{L} y = f`.
(2) Construct a new linear difference equation by the substitution
`y(n) = u(n)/v(n)` and solve it for `u(n)`, finding all its
polynomial solutions. Return ``None`` if none were found.
The algorithm implemented here is a revised version of Abramov's
original algorithm, developed in 1989. The new approach is much
simpler to implement and has better overall efficiency. This
method can be easily adapted to the q-difference equations case.
Besides finding rational solutions alone, this function is
an important part of the Hyper algorithm, where it is used to find
a particular solution for the inhomogeneous part of a recurrence.
Examples
========
>>> from sympy.abc import x
>>> from sympy.solvers.recurr import rsolve_ratio
>>> rsolve_ratio([-2*x**3 + x**2 + 2*x - 1, 2*x**3 + x**2 - 6*x,
... - 2*x**3 - 11*x**2 - 18*x - 9, 2*x**3 + 13*x**2 + 22*x + 8], 0, x)
C2*(2*x - 3)/(2*(x**2 - 1))
References
==========
.. [1] S. A. Abramov, Rational solutions of linear difference
and q-difference equations with polynomial coefficients,
in: T. Levelt, ed., Proc. ISSAC '95, ACM Press, New York,
1995, 285-289
See Also
========
rsolve_hyper
"""
f = sympify(f)
if not f.is_polynomial(n):
return None
coeffs = list(map(sympify, coeffs))
r = len(coeffs) - 1
A, B = coeffs[r], coeffs[0]
A = A.subs(n, n - r).expand()
h = Dummy('h')
res = resultant(A, B.subs(n, n + h), n)
if not res.is_polynomial(h):
p, q = res.as_numer_denom()
res = quo(p, q, h)
nni_roots = list(roots(res, h, filter='Z',
predicate=lambda r: r >= 0).keys())
if not nni_roots:
return rsolve_poly(coeffs, f, n, **hints)
else:
C, numers = S.One, [S.Zero]*(r + 1)
for i in range(int(max(nni_roots)), -1, -1):
d = gcd(A, B.subs(n, n + i), n)
A = quo(A, d, n)
B = quo(B, d.subs(n, n - i), n)
C *= Mul(*[d.subs(n, n - j) for j in range(i + 1)])
denoms = [C.subs(n, n + i) for i in range(r + 1)]
for i in range(r + 1):
g = gcd(coeffs[i], denoms[i], n)
numers[i] = quo(coeffs[i], g, n)
denoms[i] = quo(denoms[i], g, n)
for i in range(r + 1):
numers[i] *= Mul(*(denoms[:i] + denoms[i + 1:]))
result = rsolve_poly(numers, f * Mul(*denoms), n, **hints)
if result is not None:
if hints.get('symbols', False):
return (simplify(result[0] / C), result[1])
else:
return simplify(result / C)
else:
return None
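# Illustrative sketch only (not part of the module): a homogeneous recurrence
# with the non-polynomial solution y(x) = C/x is found by the universal
# denominator construction above, e.g.
#
#     >>> from sympy.abc import x
#     >>> from sympy.solvers.recurr import rsolve_ratio
#     >>> rsolve_ratio([-x, x + 1], 0, x)
#
# corresponds to (x + 1)*y(x + 1) - x*y(x) = 0 and is expected to return a
# constant multiple of 1/x.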
def rsolve_hyper(coeffs, f, n, **hints):
r"""
Given linear recurrence operator `\operatorname{L}` of order `k`
with polynomial coefficients and inhomogeneous equation
`\operatorname{L} y = f`, we seek all hypergeometric solutions
over a field `K` of characteristic zero.
The inhomogeneous part can be either hypergeometric or a sum
of a fixed number of pairwise dissimilar hypergeometric terms.
The algorithm performs three basic steps:
(1) Group together similar hypergeometric terms in the
inhomogeneous part of `\operatorname{L} y = f`, and find
particular solution using Abramov's algorithm.
(2) Compute a generating set of `\operatorname{L}` and find a basis
in it, so that all solutions are linearly independent.
(3) Form the final solution with the number of arbitrary
constants equal to the dimension of the basis of `\operatorname{L}`.
A term `a(n)` is hypergeometric if it is annihilated by a first order
linear difference equation with polynomial coefficients or, in
simpler words, if the consecutive term ratio is a rational function.
The output of this procedure is a linear combination of a fixed
number of hypergeometric terms. However, the underlying method
can generate a larger class of solutions - D'Alembertian terms.
Note also that this method not only computes the kernel of the
inhomogeneous equation, but also reduces it to a basis so that the
solutions generated by this procedure are linearly independent.
Examples
========
>>> from sympy.solvers import rsolve_hyper
>>> from sympy.abc import x
>>> rsolve_hyper([-1, -1, 1], 0, x)
C0*(1/2 - sqrt(5)/2)**x + C1*(1/2 + sqrt(5)/2)**x
>>> rsolve_hyper([-1, 1], 1 + x, x)
C0 + x*(x + 1)/2
References
==========
.. [1] M. Petkovsek, Hypergeometric solutions of linear recurrences
with polynomial coefficients, J. Symbolic Computation,
14 (1992), 243-264.
.. [2] M. Petkovsek, H. S. Wilf, D. Zeilberger, A = B, 1996.
"""
coeffs = list(map(sympify, coeffs))
f = sympify(f)
r, kernel, symbols = len(coeffs) - 1, [], set()
if not f.is_zero:
if f.is_Add:
similar = {}
for g in f.expand().args:
if not g.is_hypergeometric(n):
return None
for h in similar.keys():
if hypersimilar(g, h, n):
similar[h] += g
break
else:
similar[g] = S.Zero
inhomogeneous = []
for g, h in similar.items():
inhomogeneous.append(g + h)
elif f.is_hypergeometric(n):
inhomogeneous = [f]
else:
return None
for i, g in enumerate(inhomogeneous):
coeff, polys = S.One, coeffs[:]
denoms = [S.One]*(r + 1)
s = hypersimp(g, n)
for j in range(1, r + 1):
coeff *= s.subs(n, n + j - 1)
p, q = coeff.as_numer_denom()
polys[j] *= p
denoms[j] = q
for j in range(r + 1):
polys[j] *= Mul(*(denoms[:j] + denoms[j + 1:]))
R = rsolve_poly(polys, Mul(*denoms), n)
if not (R is None or R is S.Zero):
inhomogeneous[i] *= R
else:
return None
result = Add(*inhomogeneous)
else:
result = S.Zero
Z = Dummy('Z')
p, q = coeffs[0], coeffs[r].subs(n, n - r + 1)
p_factors = [z for z in roots(p, n).keys()]
q_factors = [z for z in roots(q, n).keys()]
factors = [(S.One, S.One)]
for p in p_factors:
for q in q_factors:
if p.is_integer and q.is_integer and p <= q:
continue
else:
factors += [(n - p, n - q)]
p = [(n - p, S.One) for p in p_factors]
q = [(S.One, n - q) for q in q_factors]
factors = p + factors + q
for A, B in factors:
polys, degrees = [], []
D = A*B.subs(n, n + r - 1)
for i in range(r + 1):
a = Mul(*[A.subs(n, n + j) for j in range(i)])
b = Mul(*[B.subs(n, n + j) for j in range(i, r)])
poly = quo(coeffs[i]*a*b, D, n)
polys.append(poly.as_poly(n))
if not poly.is_zero:
degrees.append(polys[i].degree())
if degrees:
d, poly = max(degrees), S.Zero
else:
return None
for i in range(r + 1):
coeff = polys[i].nth(d)
if coeff is not S.Zero:
poly += coeff * Z**i
for z in roots(poly, Z).keys():
if z.is_zero:
continue
(C, s) = rsolve_poly([polys[i].as_expr()*z**i for i in range(r + 1)], 0, n, symbols=True)
if C is not None and C is not S.Zero:
symbols |= set(s)
ratio = z * A * C.subs(n, n + 1) / B / C
ratio = simplify(ratio)
# If there is a nonnegative root in the denominator of the ratio,
# this indicates that the term y(n_root) is zero, and one should
# start the product with the term y(n_root + 1).
n0 = 0
for n_root in roots(ratio.as_numer_denom()[1], n).keys():
if n_root.has(I):
return None
elif (n0 < (n_root + 1)) == True:
n0 = n_root + 1
K = product(ratio, (n, n0, n - 1))
if K.has(factorial, FallingFactorial, RisingFactorial):
K = simplify(K)
if casoratian(kernel + [K], n, zero=False) != 0:
kernel.append(K)
kernel.sort(key=default_sort_key)
sk = list(zip(numbered_symbols('C'), kernel))
if sk:
for C, ker in sk:
result += C * ker
else:
return None
if hints.get('symbols', False):
# XXX: This returns the symbols in a non-deterministic order
symbols |= {s for s, k in sk}
return (result, list(symbols))
else:
return result
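# Illustrative sketch only (not part of the module): a purely geometric
# recurrence y(x + 1) - 2*y(x) = 0 is the simplest hypergeometric case, e.g.
#
#     >>> from sympy.solvers.recurr import rsolve_hyper
#     >>> from sympy.abc import x
#     >>> rsolve_hyper([-2, 1], 0, x)
#
# is expected to return C0*2**x, i.e. the single kernel element 2**x with one
# arbitrary constant.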
def rsolve(f, y, init=None):
r"""
Solve univariate recurrence with rational coefficients.
Given `k`-th order linear recurrence `\operatorname{L} y = f`,
or equivalently:
.. math:: a_{k}(n) y(n+k) + a_{k-1}(n) y(n+k-1) +
\cdots + a_{0}(n) y(n) = f(n)
where `a_{i}(n)`, for `i=0, \ldots, k`, are polynomials or rational
functions in `n`, and `f` is a hypergeometric function or a sum
of a fixed number of pairwise dissimilar hypergeometric terms in
`n`, finds all solutions or returns ``None``, if none were found.
Initial conditions can be given as a dictionary in two forms:
(1) ``{ n_0 : v_0, n_1 : v_1, ..., n_m : v_m}``
(2) ``{y(n_0) : v_0, y(n_1) : v_1, ..., y(n_m) : v_m}``
or as a list ``L`` of values:
``L = [v_0, v_1, ..., v_m]``
where ``L[i] = v_i``, for `i=0, \ldots, m`, maps to `y(n_i)`.
Examples
========
Let's consider the following recurrence:
.. math:: (n - 1) y(n + 2) - (n^2 + 3 n - 2) y(n + 1) +
2 n (n + 1) y(n) = 0
>>> from sympy import Function, rsolve
>>> from sympy.abc import n
>>> y = Function('y')
>>> f = (n - 1)*y(n + 2) - (n**2 + 3*n - 2)*y(n + 1) + 2*n*(n + 1)*y(n)
>>> rsolve(f, y(n))
2**n*C0 + C1*factorial(n)
>>> rsolve(f, y(n), {y(0):0, y(1):3})
3*2**n - 3*factorial(n)
See Also
========
rsolve_poly, rsolve_ratio, rsolve_hyper
"""
if isinstance(f, Equality):
f = f.lhs - f.rhs
n = y.args[0]
k = Wild('k', exclude=(n,))
# Preprocess user input to allow things like
# y(n) + a*(y(n + 1) + y(n - 1))/2
f = f.expand().collect(y.func(Wild('m', integer=True)))
h_part = defaultdict(list)
i_part = []
for g in Add.make_args(f):
coeff, dep = g.as_coeff_mul(y.func)
if not dep:
i_part.append(coeff)
continue
for h in dep:
if h.is_Function and h.func == y.func:
result = h.args[0].match(n + k)
if result is not None:
h_part[int(result[k])].append(coeff)
continue
raise ValueError(
"'%s(%s + k)' expected, got '%s'" % (y.func, n, h))
for k in h_part:
h_part[k] = Add(*h_part[k])
h_part.default_factory = lambda: 0
i_part = Add(*i_part)
for k, coeff in h_part.items():
h_part[k] = simplify(coeff)
common = S.One
if not i_part.is_zero and not i_part.is_hypergeometric(n) and \
not (i_part.is_Add and all(map(lambda x: x.is_hypergeometric(n), i_part.expand().args))):
raise ValueError("The independent term should be a sum of hypergeometric functions, got '%s'" % i_part)
for coeff in h_part.values():
if coeff.is_rational_function(n):
if not coeff.is_polynomial(n):
common = lcm(common, coeff.as_numer_denom()[1], n)
else:
raise ValueError(
"Polynomial or rational function expected, got '%s'" % coeff)
i_numer, i_denom = i_part.as_numer_denom()
if i_denom.is_polynomial(n):
common = lcm(common, i_denom, n)
if common is not S.One:
for k, coeff in h_part.items():
numer, denom = coeff.as_numer_denom()
h_part[k] = numer*quo(common, denom, n)
i_part = i_numer*quo(common, i_denom, n)
K_min = min(h_part.keys())
if K_min < 0:
K = abs(K_min)
H_part = defaultdict(lambda: S.Zero)
i_part = i_part.subs(n, n + K).expand()
common = common.subs(n, n + K).expand()
for k, coeff in h_part.items():
H_part[k + K] = coeff.subs(n, n + K).expand()
else:
H_part = h_part
K_max = max(H_part.keys())
coeffs = [H_part[i] for i in range(K_max + 1)]
result = rsolve_hyper(coeffs, -i_part, n, symbols=True)
if result is None:
return None
solution, symbols = result
if init == {} or init == []:
init = None
if symbols and init is not None:
if isinstance(init, list):
init = {i: init[i] for i in range(len(init))}
equations = []
for k, v in init.items():
try:
i = int(k)
except TypeError:
if k.is_Function and k.func == y.func:
i = int(k.args[0])
else:
raise ValueError("Integer or term expected, got '%s'" % k)
eq = solution.subs(n, i) - v
if eq.has(S.NaN):
eq = solution.limit(n, i) - v
equations.append(eq)
result = solve(equations, *symbols)
if not result:
return None
else:
solution = solution.subs(result)
return solution
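# Illustrative sketch only (not part of the module): initial conditions may
# also be given positionally as a list, e.g. for the Fibonacci recurrence
#
#     >>> from sympy import Function, rsolve
#     >>> from sympy.abc import n
#     >>> y = Function('y')
#     >>> rsolve(y(n + 2) - y(n + 1) - y(n), y(n), [0, 1])
#
# which is equivalent to passing {y(0): 0, y(1): 1} and is expected to yield
# Binet's closed form for the Fibonacci numbers.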
582902239582005a55a76cf702ba605404b6fb238f1258a08d9bd066c7e8a215
"""
This module contains functions to:
- solve a single equation for a single variable, in any domain either real or complex.
- solve a single transcendental equation for a single variable in any domain either real or complex.
(currently supports solving in real domain only)
- solve a system of linear equations with N variables and M equations.
- solve a system of non-linear equations with N variables and M equations.
"""
from sympy.core.sympify import sympify
from sympy.core import (S, Pow, Dummy, pi, Expr, Wild, Mul, Equality,
Add)
from sympy.core.containers import Tuple
from sympy.core.numbers import I, Number, Rational, oo
from sympy.core.function import (Lambda, expand_complex, AppliedUndef,
expand_log)
from sympy.core.mod import Mod
from sympy.core.numbers import igcd
from sympy.core.relational import Eq, Ne, Relational
from sympy.core.symbol import Symbol, _uniquely_named_symbol
from sympy.core.sympify import _sympify
from sympy.simplify.simplify import simplify, fraction, trigsimp
from sympy.simplify import powdenest, logcombine
from sympy.functions import (log, Abs, tan, cot, sin, cos, sec, csc, exp,
acos, asin, acsc, asec, arg,
piecewise_fold, Piecewise)
from sympy.functions.elementary.trigonometric import (TrigonometricFunction,
HyperbolicFunction)
from sympy.functions.elementary.miscellaneous import real_root
from sympy.logic.boolalg import And
from sympy.sets import (FiniteSet, EmptySet, imageset, Interval, Intersection,
Union, ConditionSet, ImageSet, Complement, Contains)
from sympy.sets.sets import Set, ProductSet
from sympy.matrices import Matrix, MatrixBase
from sympy.ntheory import totient
from sympy.ntheory.factor_ import divisors
from sympy.ntheory.residue_ntheory import discrete_log, nthroot_mod
from sympy.polys import (roots, Poly, degree, together, PolynomialError,
RootOf, factor, lcm, gcd)
from sympy.polys.polyerrors import CoercionFailed
from sympy.polys.polytools import invert
from sympy.polys.solvers import (sympy_eqs_to_ring, solve_lin_sys,
PolyNonlinearError)
from sympy.solvers.solvers import (checksol, denoms, unrad,
_simple_dens, recast_to_symbols)
from sympy.solvers.polysys import solve_poly_system
from sympy.solvers.inequalities import solve_univariate_inequality
from sympy.utilities import filldedent
from sympy.utilities.iterables import numbered_symbols, has_dups
from sympy.calculus.util import periodicity, continuous_domain
from sympy.core.compatibility import ordered, default_sort_key, is_sequence
from types import GeneratorType
from collections import defaultdict
class NonlinearError(ValueError):
"""Raised when unexpectedly encountering nonlinear equations"""
pass
_rc = Dummy("R", real=True), Dummy("C", complex=True)
def _masked(f, *atoms):
"""Return ``f``, with all objects given by ``atoms`` replaced with
Dummy symbols, ``d``, and the list of replacements, ``(d, e)``,
where ``e`` is an object of type given by ``atoms`` in which
any other instances of atoms have been recursively replaced with
Dummy symbols, too. The tuples are ordered so that if they are
applied in sequence, the original ``f`` will be restored.
Examples
========
>>> from sympy import cos
>>> from sympy.abc import x
>>> from sympy.solvers.solveset import _masked
>>> f = cos(cos(x) + 1)
>>> f, reps = _masked(cos(1 + cos(x)), cos)
>>> f
_a1
>>> reps
[(_a1, cos(_a0 + 1)), (_a0, cos(x))]
>>> for d, e in reps:
... f = f.xreplace({d: e})
>>> f
cos(cos(x) + 1)
"""
sym = numbered_symbols('a', cls=Dummy, real=True)
mask = []
for a in ordered(f.atoms(*atoms)):
for i in mask:
a = a.replace(*i)
mask.append((a, next(sym)))
for i, (o, n) in enumerate(mask):
f = f.replace(o, n)
mask[i] = (n, o)
mask = list(reversed(mask))
return f, mask
def _invert(f_x, y, x, domain=S.Complexes):
r"""
Reduce the complex valued equation ``f(x) = y`` to a set of equations
``{g(x) = h_1(y), g(x) = h_2(y), ..., g(x) = h_n(y) }`` where ``g(x)`` is
a simpler function than ``f(x)``. The return value is a tuple ``(g(x),
set_h)``, where ``g(x)`` is a function of ``x`` and ``set_h`` is
the set of functions ``{h_1(y), h_2(y), ..., h_n(y)}``.
Here, ``y`` is not necessarily a symbol.
The ``set_h`` contains the functions, along with the information
about the domain in which they are valid, through set
operations. For instance, if ``y = Abs(x) - n`` is inverted
in the real domain, then ``set_h`` is not simply
`{-n, n}` as the nature of `n` is unknown; rather, it is:
`Intersection([0, oo), {n}) U Intersection((-oo, 0], {-n})`
By default, the complex domain is used which means that inverting even
seemingly simple functions like ``exp(x)`` will give very different
results from those obtained in the real domain.
(In the case of ``exp(x)``, the inversion via ``log`` is multi-valued
in the complex domain, having infinitely many branches.)
If you are working with real values only (or you are not sure which
function to use) you should probably set the domain to
``S.Reals`` (or use `invert\_real` which does that automatically).
Examples
========
>>> from sympy.solvers.solveset import invert_complex, invert_real
>>> from sympy.abc import x, y
>>> from sympy import exp
When does exp(x) == y?
>>> invert_complex(exp(x), y, x)
(x, ImageSet(Lambda(_n, I*(2*_n*pi + arg(y)) + log(Abs(y))), Integers))
>>> invert_real(exp(x), y, x)
(x, Intersection(FiniteSet(log(y)), Reals))
When does exp(x) == 1?
>>> invert_complex(exp(x), 1, x)
(x, ImageSet(Lambda(_n, 2*_n*I*pi), Integers))
>>> invert_real(exp(x), 1, x)
(x, FiniteSet(0))
See Also
========
invert_real, invert_complex
"""
x = sympify(x)
if not x.is_Symbol:
raise ValueError("x must be a symbol")
f_x = sympify(f_x)
if x not in f_x.free_symbols:
raise ValueError("Inverse of constant function doesn't exist")
y = sympify(y)
if x in y.free_symbols:
raise ValueError("y should be independent of x ")
if domain.is_subset(S.Reals):
x1, s = _invert_real(f_x, FiniteSet(y), x)
else:
x1, s = _invert_complex(f_x, FiniteSet(y), x)
if not isinstance(s, FiniteSet) or x1 != x:
return x1, s
# Avoid adding gratuitous intersections with S.Complexes. Actual
# conditions should be handled by the respective inverters.
if domain is S.Complexes:
return x1, s
else:
return x1, s.intersection(domain)
invert_complex = _invert
def invert_real(f_x, y, x, domain=S.Reals):
"""
Inverts a real-valued function. Same as _invert, but sets
the domain to ``S.Reals`` before inverting.
"""
return _invert(f_x, y, x, domain)
def _invert_real(f, g_ys, symbol):
"""Helper function for _invert."""
if f == symbol:
return (f, g_ys)
n = Dummy('n', real=True)
if hasattr(f, 'inverse') and not isinstance(f, (
TrigonometricFunction,
HyperbolicFunction,
)):
if len(f.args) > 1:
raise ValueError("Only functions with one argument are supported.")
return _invert_real(f.args[0],
imageset(Lambda(n, f.inverse()(n)), g_ys),
symbol)
if isinstance(f, Abs):
return _invert_abs(f.args[0], g_ys, symbol)
if f.is_Add:
# f = g + h
g, h = f.as_independent(symbol)
if g is not S.Zero:
return _invert_real(h, imageset(Lambda(n, n - g), g_ys), symbol)
if f.is_Mul:
# f = g*h
g, h = f.as_independent(symbol)
if g is not S.One:
return _invert_real(h, imageset(Lambda(n, n/g), g_ys), symbol)
if f.is_Pow:
base, expo = f.args
base_has_sym = base.has(symbol)
expo_has_sym = expo.has(symbol)
if not expo_has_sym:
res = imageset(Lambda(n, real_root(n, expo)), g_ys)
if expo.is_rational:
numer, denom = expo.as_numer_denom()
if denom % 2 == 0:
base_positive = solveset(base >= 0, symbol, S.Reals)
res = imageset(Lambda(n, real_root(n, expo)
), g_ys.intersect(
Interval.Ropen(S.Zero, S.Infinity)))
_inv, _set = _invert_real(base, res, symbol)
return (_inv, _set.intersect(base_positive))
elif numer % 2 == 0:
n = Dummy('n')
neg_res = imageset(Lambda(n, -n), res)
return _invert_real(base, res + neg_res, symbol)
else:
return _invert_real(base, res, symbol)
else:
if not base.is_positive:
raise ValueError("x**w where w is irrational is not "
"defined for negative x")
return _invert_real(base, res, symbol)
if not base_has_sym:
rhs = g_ys.args[0]
if base.is_positive:
return _invert_real(expo,
imageset(Lambda(n, log(n, base, evaluate=False)), g_ys), symbol)
elif base.is_negative:
from sympy.core.power import integer_log
s, b = integer_log(rhs, base)
if b:
return _invert_real(expo, FiniteSet(s), symbol)
else:
return _invert_real(expo, S.EmptySet, symbol)
elif base.is_zero:
one = Eq(rhs, 1)
if one == S.true:
# special case: 0**x - 1
return _invert_real(expo, FiniteSet(0), symbol)
elif one == S.false:
return _invert_real(expo, S.EmptySet, symbol)
if isinstance(f, TrigonometricFunction):
if isinstance(g_ys, FiniteSet):
def inv(trig):
if isinstance(f, (sin, csc)):
F = asin if isinstance(f, sin) else acsc
return (lambda a: n*pi + (-1)**n*F(a),)
if isinstance(f, (cos, sec)):
F = acos if isinstance(f, cos) else asec
return (
lambda a: 2*n*pi + F(a),
lambda a: 2*n*pi - F(a),)
if isinstance(f, (tan, cot)):
return (lambda a: n*pi + f.inverse()(a),)
n = Dummy('n', integer=True)
invs = S.EmptySet
for L in inv(f):
invs += Union(*[imageset(Lambda(n, L(g)), S.Integers) for g in g_ys])
return _invert_real(f.args[0], invs, symbol)
return (f, g_ys)
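# Illustrative sketch only (not part of the module): the trigonometric branch
# above produces periodic image sets, e.g.
#
#     >>> from sympy import sin, S
#     >>> from sympy.solvers.solveset import invert_real
#     >>> from sympy.abc import x
#     >>> invert_real(sin(x), S(1)/2, x)
#
# is expected to return (x, ImageSet(Lambda(n, n*pi + (-1)**n*pi/6), Integers)),
# up to the exact printed form of the integer dummy.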
def _invert_complex(f, g_ys, symbol):
"""Helper function for _invert."""
if f == symbol:
return (f, g_ys)
n = Dummy('n')
if f.is_Add:
# f = g + h
g, h = f.as_independent(symbol)
if g is not S.Zero:
return _invert_complex(h, imageset(Lambda(n, n - g), g_ys), symbol)
if f.is_Mul:
# f = g*h
g, h = f.as_independent(symbol)
if g is not S.One:
if g in {S.NegativeInfinity, S.ComplexInfinity, S.Infinity}:
return (h, S.EmptySet)
return _invert_complex(h, imageset(Lambda(n, n/g), g_ys), symbol)
if hasattr(f, 'inverse') and \
not isinstance(f, TrigonometricFunction) and \
not isinstance(f, HyperbolicFunction) and \
not isinstance(f, exp):
if len(f.args) > 1:
raise ValueError("Only functions with one argument are supported.")
return _invert_complex(f.args[0],
imageset(Lambda(n, f.inverse()(n)), g_ys), symbol)
if isinstance(f, exp):
if isinstance(g_ys, FiniteSet):
exp_invs = Union(*[imageset(Lambda(n, I*(2*n*pi + arg(g_y)) +
log(Abs(g_y))), S.Integers)
for g_y in g_ys if g_y != 0])
return _invert_complex(f.args[0], exp_invs, symbol)
return (f, g_ys)
def _invert_abs(f, g_ys, symbol):
"""Helper function for inverting absolute value functions.
Returns the complete result of inverting an absolute value
function along with the conditions which must also be satisfied.
If it is certain that all these conditions are met, a `FiniteSet`
of all possible solutions is returned. If any condition cannot be
satisfied, an `EmptySet` is returned. Otherwise, a `ConditionSet`
of the solutions, with all the required conditions specified, is
returned.
"""
if not g_ys.is_FiniteSet:
# this could be used for FiniteSet, but the
# results are more compact if they aren't, e.g.
# ConditionSet(x, Contains(n, Interval(0, oo)), {-n, n}) vs
# Union(Intersection(Interval(0, oo), {n}), Intersection(Interval(-oo, 0), {-n}))
# for the solution of abs(x) - n
pos = Intersection(g_ys, Interval(0, S.Infinity))
parg = _invert_real(f, pos, symbol)
narg = _invert_real(-f, pos, symbol)
if parg[0] != narg[0]:
raise NotImplementedError
return parg[0], Union(narg[1], parg[1])
# check conditions: all these must be true. If any are unknown
# then return them as conditions which must be satisfied
unknown = []
for a in g_ys.args:
ok = a.is_nonnegative if a.is_Number else a.is_positive
if ok is None:
unknown.append(a)
elif not ok:
return symbol, S.EmptySet
if unknown:
conditions = And(*[Contains(i, Interval(0, oo))
for i in unknown])
else:
conditions = True
n = Dummy('n', real=True)
# this is slightly different than above: instead of solving
# +/-f on positive values, here we solve for f on +/- g_ys
g_x, values = _invert_real(f, Union(
imageset(Lambda(n, n), g_ys),
imageset(Lambda(n, -n), g_ys)), symbol)
return g_x, ConditionSet(g_x, conditions, values)
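# Illustrative sketch only (not part of the module): inverting Abs against a
# value of unknown sign produces the guarded set described in the comments
# above, e.g.
#
#     >>> from sympy import Abs
#     >>> from sympy.solvers.solveset import invert_real
#     >>> from sympy.abc import x, y
#     >>> invert_real(Abs(x), y, x)
#
# is expected to give (x, ConditionSet(x, Contains(y, Interval(0, oo)),
# FiniteSet(-y, y))), i.e. {-y, y} guarded by the condition y >= 0.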
def domain_check(f, symbol, p):
"""Returns False if point p is infinite or any subexpression of f
is infinite or becomes so after replacing symbol with p. If none of
these conditions is met then True will be returned.
Examples
========
>>> from sympy import Mul, oo
>>> from sympy.abc import x
>>> from sympy.solvers.solveset import domain_check
>>> g = 1/(1 + (1/(x + 1))**2)
>>> domain_check(g, x, -1)
False
>>> domain_check(x**2, x, 0)
True
>>> domain_check(1/x, x, oo)
False
* The function relies on the assumption that the original form
of the equation has not been changed by automatic simplification.
>>> domain_check(x/x, x, 0) # x/x is automatically simplified to 1
True
* To deal with automatic evaluations use evaluate=False:
>>> domain_check(Mul(x, 1/x, evaluate=False), x, 0)
False
"""
f, p = sympify(f), sympify(p)
if p.is_infinite:
return False
return _domain_check(f, symbol, p)
def _domain_check(f, symbol, p):
# helper for domain check
if f.is_Atom and f.is_finite:
return True
elif f.subs(symbol, p).is_infinite:
return False
else:
return all([_domain_check(g, symbol, p)
for g in f.args])
def _is_finite_with_finite_vars(f, domain=S.Complexes):
"""
Return True if the given expression is finite. For symbols that
do not have an assumption set for `complex` and/or `real`, the domain will
be used to assign one; symbols that do not have an assumption set
for `finite` will be made finite. All other assumptions are
left unmodified.
"""
def assumptions(s):
A = s.assumptions0
A.setdefault('finite', A.get('finite', True))
if domain.is_subset(S.Reals):
# if this gets set it will make complex=True, too
A.setdefault('real', True)
else:
# don't change 'real' because being complex implies
# nothing about being real
A.setdefault('complex', True)
return A
reps = {s: Dummy(**assumptions(s)) for s in f.free_symbols}
return f.xreplace(reps).is_finite
def _is_function_class_equation(func_class, f, symbol):
""" Tests whether the equation is an equation of the given function class.
The given equation belongs to the given function class if it is
comprised of functions of the function class which are multiplied by
or added to expressions independent of the symbol. In addition, the
arguments of all such functions must be linear in the symbol as well.
Examples
========
>>> from sympy.solvers.solveset import _is_function_class_equation
>>> from sympy import tan, sin, tanh, sinh, exp
>>> from sympy.abc import x
>>> from sympy.functions.elementary.trigonometric import (TrigonometricFunction,
... HyperbolicFunction)
>>> _is_function_class_equation(TrigonometricFunction, exp(x) + tan(x), x)
False
>>> _is_function_class_equation(TrigonometricFunction, tan(x) + sin(x), x)
True
>>> _is_function_class_equation(TrigonometricFunction, tan(x**2), x)
False
>>> _is_function_class_equation(TrigonometricFunction, tan(x + 2), x)
True
>>> _is_function_class_equation(HyperbolicFunction, tanh(x) + sinh(x), x)
True
"""
if f.is_Mul or f.is_Add:
return all(_is_function_class_equation(func_class, arg, symbol)
for arg in f.args)
if f.is_Pow:
if not f.exp.has(symbol):
return _is_function_class_equation(func_class, f.base, symbol)
else:
return False
if not f.has(symbol):
return True
if isinstance(f, func_class):
try:
g = Poly(f.args[0], symbol)
return g.degree() <= 1
except PolynomialError:
return False
else:
return False
def _solve_as_rational(f, symbol, domain):
""" solve rational functions"""
f = together(f, deep=True)
g, h = fraction(f)
if not h.has(symbol):
try:
return _solve_as_poly(g, symbol, domain)
except NotImplementedError:
# The polynomial formed from g could end up having
# coefficients in a ring over which finding roots
# isn't implemented yet, e.g. ZZ[a] for some symbol a
return ConditionSet(symbol, Eq(f, 0), domain)
except CoercionFailed:
# contained oo, zoo or nan
return S.EmptySet
else:
valid_solns = _solveset(g, symbol, domain)
invalid_solns = _solveset(h, symbol, domain)
return valid_solns - invalid_solns
class _SolveTrig1Error(Exception):
"""Raised when _solve_trig1 heuristics do not apply"""
def _solve_trig(f, symbol, domain):
"""Function to call other helpers to solve trigonometric equations """
sol = None
try:
sol = _solve_trig1(f, symbol, domain)
except _SolveTrig1Error:
try:
sol = _solve_trig2(f, symbol, domain)
except ValueError:
raise NotImplementedError(filldedent('''
Solution to this kind of trigonometric equation
is yet to be implemented'''))
return sol
def _solve_trig1(f, symbol, domain):
"""Primary solver for trigonometric and hyperbolic equations
Returns either the solution set as a ConditionSet (auto-evaluated to a
union of ImageSets if no variables besides 'symbol' are involved) or
raises _SolveTrig1Error if f == 0 can't be solved.
Notes
=====
Algorithm:
1. Do a change of variable x -> mu*x in arguments to trigonometric and
hyperbolic functions, in order to reduce them to small integers. (This
step is crucial to keep the degrees of the polynomials of step 4 low.)
2. Rewrite trigonometric/hyperbolic functions as exponentials.
3. Proceed to a 2nd change of variable, replacing exp(I*x) or exp(x) by y.
4. Solve the resulting rational equation.
5. Use invert_complex or invert_real to return to the original variable.
6. If the coefficients of 'symbol' were symbolic in nature, add the
necessary consistency conditions in a ConditionSet.
"""
# Prepare change of variable
x = Dummy('x')
if _is_function_class_equation(HyperbolicFunction, f, symbol):
cov = exp(x)
inverter = invert_real if domain.is_subset(S.Reals) else invert_complex
else:
cov = exp(I*x)
inverter = invert_complex
f = trigsimp(f)
f_original = f
trig_functions = f.atoms(TrigonometricFunction, HyperbolicFunction)
trig_arguments = [e.args[0] for e in trig_functions]
# trigsimp may have reduced the equation to an expression
# that is independent of 'symbol' (e.g. cos**2+sin**2)
if not any(a.has(symbol) for a in trig_arguments):
return solveset(f_original, symbol, domain)
denominators = []
numerators = []
for ar in trig_arguments:
try:
poly_ar = Poly(ar, symbol)
except PolynomialError:
raise _SolveTrig1Error("trig argument is not a polynomial")
if poly_ar.degree() > 1: # degree >1 still bad
raise _SolveTrig1Error("degree of variable must not exceed one")
if poly_ar.degree() == 0: # degree 0, don't care
continue
c = poly_ar.all_coeffs()[0] # got the coefficient of 'symbol'
numerators.append(fraction(c)[0])
denominators.append(fraction(c)[1])
mu = lcm(denominators)/gcd(numerators)
f = f.subs(symbol, mu*x)
f = f.rewrite(exp)
f = together(f)
g, h = fraction(f)
y = Dummy('y')
g, h = g.expand(), h.expand()
g, h = g.subs(cov, y), h.subs(cov, y)
if g.has(x) or h.has(x):
raise _SolveTrig1Error("change of variable not possible")
solns = solveset_complex(g, y) - solveset_complex(h, y)
if isinstance(solns, ConditionSet):
raise _SolveTrig1Error("polynomial has ConditionSet solution")
if isinstance(solns, FiniteSet):
if any(isinstance(s, RootOf) for s in solns):
raise _SolveTrig1Error("polynomial results in RootOf object")
# revert the change of variable
cov = cov.subs(x, symbol/mu)
result = Union(*[inverter(cov, s, symbol)[1] for s in solns])
# In case of symbolic coefficients, the solution set is only valid
# if numerator and denominator of mu are non-zero.
if mu.has(Symbol):
syms = (mu).atoms(Symbol)
munum, muden = fraction(mu)
condnum = munum.as_independent(*syms, as_Add=False)[1]
condden = muden.as_independent(*syms, as_Add=False)[1]
cond = And(Ne(condnum, 0), Ne(condden, 0))
else:
cond = True
# Actual conditions are returned as part of the ConditionSet. Adding an
# intersection with C would only complicate some solution sets due to
# current limitations of intersection code. (e.g. #19154)
if domain is S.Complexes:
# This is a slight abuse of ConditionSet. Ideally this should
# be some kind of "PiecewiseSet". (See #19507 discussion)
return ConditionSet(symbol, cond, result)
else:
return ConditionSet(symbol, cond, Intersection(result, domain))
elif solns is S.EmptySet:
return S.EmptySet
else:
raise _SolveTrig1Error("polynomial solutions must form FiniteSet")
def _solve_trig2(f, symbol, domain):
"""Secondary helper to solve trigonometric equations,
called when first helper fails """
from sympy import ilcm, expand_trig, degree
f = trigsimp(f)
f_original = f
trig_functions = f.atoms(sin, cos, tan, sec, cot, csc)
trig_arguments = [e.args[0] for e in trig_functions]
denominators = []
numerators = []
# todo: This solver can be extended to hyperbolics if the
# analogous change of variable to tanh (instead of tan)
# is used.
if not trig_functions:
return ConditionSet(symbol, Eq(f_original, 0), domain)
# todo: The pre-processing below (extraction of numerators, denominators,
# gcd, lcm, mu, etc.) should be updated to the enhanced version in
# _solve_trig1. (See #19507)
for ar in trig_arguments:
try:
poly_ar = Poly(ar, symbol)
except PolynomialError:
raise ValueError("give up, we can't solve if this is not a polynomial in x")
if poly_ar.degree() > 1: # degree >1 still bad
raise ValueError("degree of variable inside polynomial should not exceed one")
if poly_ar.degree() == 0: # degree 0, don't care
continue
c = poly_ar.all_coeffs()[0] # got the coefficient of 'symbol'
try:
numerators.append(Rational(c).p)
denominators.append(Rational(c).q)
except TypeError:
return ConditionSet(symbol, Eq(f_original, 0), domain)
x = Dummy('x')
# ilcm() and igcd() require more than one argument
if len(numerators) > 1:
mu = Rational(2)*ilcm(*denominators)/igcd(*numerators)
else:
assert len(numerators) == 1
mu = Rational(2)*denominators[0]/numerators[0]
f = f.subs(symbol, mu*x)
f = f.rewrite(tan)
f = expand_trig(f)
f = together(f)
g, h = fraction(f)
y = Dummy('y')
g, h = g.expand(), h.expand()
g, h = g.subs(tan(x), y), h.subs(tan(x), y)
if g.has(x) or h.has(x):
return ConditionSet(symbol, Eq(f_original, 0), domain)
solns = solveset(g, y, S.Reals) - solveset(h, y, S.Reals)
if isinstance(solns, FiniteSet):
result = Union(*[invert_real(tan(symbol/mu), s, symbol)[1]
for s in solns])
dsol = invert_real(tan(symbol/mu), oo, symbol)[1]
if degree(h) > degree(g): # If degree(denom)>degree(num) then there
result = Union(result, dsol) # would be another sol at Lim(denom-->oo)
return Intersection(result, domain)
elif solns is S.EmptySet:
return S.EmptySet
else:
return ConditionSet(symbol, Eq(f_original, 0), S.Reals)
def _solve_as_poly(f, symbol, domain=S.Complexes):
"""
Solve the equation using polynomial techniques if it already is a
polynomial equation or, with a change of variables, can be made so.
"""
result = None
if f.is_polynomial(symbol):
solns = roots(f, symbol, cubics=True, quartics=True,
quintics=True, domain='EX')
num_roots = sum(solns.values())
if degree(f, symbol) <= num_roots:
result = FiniteSet(*solns.keys())
else:
poly = Poly(f, symbol)
solns = poly.all_roots()
if poly.degree() <= len(solns):
result = FiniteSet(*solns)
else:
result = ConditionSet(symbol, Eq(f, 0), domain)
else:
poly = Poly(f)
if poly is None:
result = ConditionSet(symbol, Eq(f, 0), domain)
gens = [g for g in poly.gens if g.has(symbol)]
if len(gens) == 1:
poly = Poly(poly, gens[0])
gen = poly.gen
deg = poly.degree()
poly = Poly(poly.as_expr(), poly.gen, composite=True)
poly_solns = FiniteSet(*roots(poly, cubics=True, quartics=True,
quintics=True).keys())
if len(poly_solns) < deg:
result = ConditionSet(symbol, Eq(f, 0), domain)
if gen != symbol:
y = Dummy('y')
inverter = invert_real if domain.is_subset(S.Reals) else invert_complex
lhs, rhs_s = inverter(gen, y, symbol)
if lhs == symbol:
result = Union(*[rhs_s.subs(y, s) for s in poly_solns])
else:
result = ConditionSet(symbol, Eq(f, 0), domain)
else:
result = ConditionSet(symbol, Eq(f, 0), domain)
if result is not None:
if isinstance(result, FiniteSet):
# this is to simplify solutions like -sqrt(-I) to sqrt(2)/2
# - sqrt(2)*I/2. We are not expanding for solution with symbols
# or undefined functions because that makes the solution more complicated.
# For example, expand_complex(a) returns re(a) + I*im(a)
if all([s.atoms(Symbol, AppliedUndef) == set() and not isinstance(s, RootOf)
for s in result]):
s = Dummy('s')
result = imageset(Lambda(s, expand_complex(s)), result)
if isinstance(result, FiniteSet) and domain != S.Complexes:
# Avoid adding gratuitous intersections with S.Complexes. Actual
# conditions should be handled elsewhere.
result = result.intersection(domain)
return result
else:
return ConditionSet(symbol, Eq(f, 0), domain)
def _has_rational_power(expr, symbol):
"""
Returns (bool, den) where bool is True if the term has a
non-integer rational power and den is the denominator of the
expression's exponent.
Examples
========
>>> from sympy.solvers.solveset import _has_rational_power
>>> from sympy import sqrt
>>> from sympy.abc import x
>>> _has_rational_power(sqrt(x), x)
(True, 2)
>>> _has_rational_power(x**2, x)
(False, 1)
"""
a, p, q = Wild('a'), Wild('p'), Wild('q')
pattern_match = expr.match(a*p**q) or {}
if pattern_match.get(a, S.Zero).is_zero:
return (False, S.One)
elif p not in pattern_match.keys():
return (False, S.One)
elif isinstance(pattern_match[q], Rational) \
and pattern_match[p].has(symbol):
if not pattern_match[q].q == S.One:
return (True, pattern_match[q].q)
if not isinstance(pattern_match[a], Pow) \
or isinstance(pattern_match[a], Mul):
return (False, S.One)
else:
return _has_rational_power(pattern_match[a], symbol)
def _solve_radical(f, symbol, solveset_solver):
""" Helper function to solve equations with radicals """
res = unrad(f)
eq, cov = res if res else (f, [])
if not cov:
result = solveset_solver(eq, symbol) - \
Union(*[solveset_solver(g, symbol) for g in denoms(f, symbol)])
else:
y, yeq = cov
if not solveset_solver(y - I, y):
yreal = Dummy('yreal', real=True)
yeq = yeq.xreplace({y: yreal})
eq = eq.xreplace({y: yreal})
y = yreal
g_y_s = solveset_solver(yeq, symbol)
f_y_sols = solveset_solver(eq, y)
result = Union(*[imageset(Lambda(y, g_y), f_y_sols)
for g_y in g_y_s])
if isinstance(result, (Complement, ConditionSet)):
solution_set = result
else:
f_set = [] # solutions for FiniteSet
c_set = [] # solutions for ConditionSet
for s in result:
if checksol(f, symbol, s):
f_set.append(s)
else:
c_set.append(s)
solution_set = FiniteSet(*f_set) + ConditionSet(symbol, Eq(f, 0), FiniteSet(*c_set))
return solution_set
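# Illustrative sketch only (not part of the module): radical equations are
# derationalized with ``unrad`` and the candidates filtered with ``checksol``,
# e.g. (via the public entry point)
#
#     >>> from sympy import sqrt, solveset, S
#     >>> from sympy.abc import x
#     >>> solveset(sqrt(x) + x - 2, x, S.Reals)
#
# is expected to return {1}; the spurious candidate x = 4 introduced by
# squaring is rejected by the checksol filtering above.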
def _solve_abs(f, symbol, domain):
""" Helper function to solve equation involving absolute value function """
if not domain.is_subset(S.Reals):
raise ValueError(filldedent('''
Absolute values cannot be inverted in the
complex domain.'''))
p, q, r = Wild('p'), Wild('q'), Wild('r')
pattern_match = f.match(p*Abs(q) + r) or {}
f_p, f_q, f_r = [pattern_match.get(i, S.Zero) for i in (p, q, r)]
if not (f_p.is_zero or f_q.is_zero):
domain = continuous_domain(f_q, symbol, domain)
q_pos_cond = solve_univariate_inequality(f_q >= 0, symbol,
relational=False, domain=domain, continuous=True)
q_neg_cond = q_pos_cond.complement(domain)
sols_q_pos = solveset_real(f_p*f_q + f_r,
symbol).intersect(q_pos_cond)
sols_q_neg = solveset_real(f_p*(-f_q) + f_r,
symbol).intersect(q_neg_cond)
return Union(sols_q_pos, sols_q_neg)
else:
return ConditionSet(symbol, Eq(f, 0), domain)
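# Illustrative sketch only (not part of the module): the p*Abs(q) + r pattern
# above covers simple absolute-value equations, e.g.
#
#     >>> from sympy import Abs, solveset, S
#     >>> from sympy.abc import x
#     >>> solveset(Abs(x - 1) - 3, x, S.Reals)
#
# is expected to return the set {-2, 4}, obtained by splitting into the
# branches x - 1 >= 0 and x - 1 < 0 as in the code above.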
def solve_decomposition(f, symbol, domain):
"""
Function to solve equations via the principle of "Decomposition
and Rewriting".
Examples
========
>>> from sympy import exp, sin, Symbol, pprint, S
>>> from sympy.solvers.solveset import solve_decomposition as sd
>>> x = Symbol('x')
>>> f1 = exp(2*x) - 3*exp(x) + 2
>>> sd(f1, x, S.Reals)
FiniteSet(0, log(2))
>>> f2 = sin(x)**2 + 2*sin(x) + 1
>>> pprint(sd(f2, x, S.Reals), use_unicode=False)
3*pi
{2*n*pi + ---- | n in Integers}
2
>>> f3 = sin(x + 2)
>>> pprint(sd(f3, x, S.Reals), use_unicode=False)
{2*n*pi - 2 | n in Integers} U {2*n*pi - 2 + pi | n in Integers}
"""
from sympy.solvers.decompogen import decompogen
from sympy.calculus.util import function_range
# decompose the given function
g_s = decompogen(f, symbol)
# `y_s` represents the set of values for which the function `g` is to be
# solved.
# `solutions` represent the solutions of the equations `g = y_s` or
# `g = 0` depending on the type of `y_s`.
# As we are interested in solving the equation: f = 0
y_s = FiniteSet(0)
for g in g_s:
frange = function_range(g, symbol, domain)
y_s = Intersection(frange, y_s)
result = S.EmptySet
if isinstance(y_s, FiniteSet):
for y in y_s:
solutions = solveset(Eq(g, y), symbol, domain)
if not isinstance(solutions, ConditionSet):
result += solutions
else:
if isinstance(y_s, ImageSet):
iter_iset = (y_s,)
elif isinstance(y_s, Union):
iter_iset = y_s.args
elif y_s is EmptySet:
# y_s is not in the range of g in g_s, so no solution exists
# in the given domain
return EmptySet
for iset in iter_iset:
new_solutions = solveset(Eq(iset.lamda.expr, g), symbol, domain)
dummy_var = tuple(iset.lamda.expr.free_symbols)[0]
(base_set,) = iset.base_sets
if isinstance(new_solutions, FiniteSet):
new_exprs = new_solutions
elif isinstance(new_solutions, Intersection):
if isinstance(new_solutions.args[1], FiniteSet):
new_exprs = new_solutions.args[1]
for new_expr in new_exprs:
result += ImageSet(Lambda(dummy_var, new_expr), base_set)
if result is S.EmptySet:
return ConditionSet(symbol, Eq(f, 0), domain)
y_s = result
return y_s
def _solveset(f, symbol, domain, _check=False):
"""Helper for solveset to return a result from an expression
that has already been sympify'ed and is known to contain the
given symbol."""
# _check controls whether the answer is checked or not
from sympy.simplify.simplify import signsimp
from sympy.logic.boolalg import BooleanTrue
if isinstance(f, BooleanTrue):
return domain
orig_f = f
if f.is_Mul:
coeff, f = f.as_independent(symbol, as_Add=False)
if coeff in {S.ComplexInfinity, S.NegativeInfinity, S.Infinity}:
f = together(orig_f)
elif f.is_Add:
a, h = f.as_independent(symbol)
m, h = h.as_independent(symbol, as_Add=False)
if m not in {S.ComplexInfinity, S.Zero, S.Infinity,
S.NegativeInfinity}:
f = a/m + h # XXX condition `m != 0` should be added to soln
# assign the solvers to use
solver = lambda f, x, domain=domain: _solveset(f, x, domain)
inverter = lambda f, rhs, symbol: _invert(f, rhs, symbol, domain)
result = EmptySet
if f.expand().is_zero:
return domain
elif not f.has(symbol):
return EmptySet
elif f.is_Mul and all(_is_finite_with_finite_vars(m, domain)
for m in f.args):
# The claim that the solution of f(x)*g(x) == 0 is the same as
# Union(f(x) == 0, g(x) == 0) is not true in general: g(x) can grow
# infinitely large at the values where f(x) == 0. To be sure that we are
# not silently allowing any wrong solutions, this technique is used only
# if both f and g are finite for a finite input.
result = Union(*[solver(m, symbol) for m in f.args])
elif _is_function_class_equation(TrigonometricFunction, f, symbol) or \
_is_function_class_equation(HyperbolicFunction, f, symbol):
result = _solve_trig(f, symbol, domain)
elif isinstance(f, arg):
a = f.args[0]
result = solveset_real(a > 0, symbol)
elif f.is_Piecewise:
expr_set_pairs = f.as_expr_set_pairs(domain)
for (expr, in_set) in expr_set_pairs:
if in_set.is_Relational:
in_set = in_set.as_set()
solns = solver(expr, symbol, in_set)
result += solns
elif isinstance(f, Eq):
result = solver(Add(f.lhs, - f.rhs, evaluate=False), symbol, domain)
elif f.is_Relational:
if not domain.is_subset(S.Reals):
raise NotImplementedError(filldedent('''
Inequalities in the complex domain are
not supported. Try the real domain by
setting domain=S.Reals'''))
try:
result = solve_univariate_inequality(
f, symbol, domain=domain, relational=False)
except NotImplementedError:
result = ConditionSet(symbol, f, domain)
return result
elif _is_modular(f, symbol):
result = _solve_modular(f, symbol, domain)
else:
lhs, rhs_s = inverter(f, 0, symbol)
if lhs == symbol:
# do some very minimal simplification since
# repeated inversion may have left the result
# in a state that other solvers (e.g. poly)
# would have simplified; this is done here
# rather than in the inverter since here it
# is only done once whereas there it would
# be repeated for each step of the inversion
if isinstance(rhs_s, FiniteSet):
rhs_s = FiniteSet(*[Mul(*
signsimp(i).as_content_primitive())
for i in rhs_s])
result = rhs_s
elif isinstance(rhs_s, FiniteSet):
for equation in [lhs - rhs for rhs in rhs_s]:
if equation == f:
if any(_has_rational_power(g, symbol)[0]
for g in equation.args) or _has_rational_power(
equation, symbol)[0]:
result += _solve_radical(equation,
symbol,
solver)
elif equation.has(Abs):
result += _solve_abs(f, symbol, domain)
else:
result_rational = _solve_as_rational(equation, symbol, domain)
if isinstance(result_rational, ConditionSet):
# may be a transcendental type equation
result += _transolve(equation, symbol, domain)
else:
result += result_rational
else:
result += solver(equation, symbol)
elif rhs_s is not S.EmptySet:
result = ConditionSet(symbol, Eq(f, 0), domain)
if isinstance(result, ConditionSet):
if isinstance(f, Expr):
num, den = f.as_numer_denom()
else:
num, den = f, S.One
if den.has(symbol):
_result = _solveset(num, symbol, domain)
if not isinstance(_result, ConditionSet):
singularities = _solveset(den, symbol, domain)
result = _result - singularities
if _check:
if isinstance(result, ConditionSet):
# it wasn't solved or has enumerated all conditions
# -- leave it alone
return result
# whittle away all but the symbol-containing core
# to use this for testing
if isinstance(orig_f, Expr):
fx = orig_f.as_independent(symbol, as_Add=True)[1]
fx = fx.as_independent(symbol, as_Add=False)[1]
else:
fx = orig_f
if isinstance(result, FiniteSet):
# check the result for invalid solutions
result = FiniteSet(*[s for s in result
if isinstance(s, RootOf)
or domain_check(fx, symbol, s)])
return result
def _is_modular(f, symbol):
"""
Helper function to check for the type of modular equation mentioned below.
``A - Mod(B, C) = 0``
A -> This may or may not be a function of symbol.
B -> This is surely a function of symbol.
C -> It is an integer.
Parameters
==========
f : Expr
The equation to be checked.
symbol : Symbol
The concerned variable for which the equation is to be checked.
Examples
========
>>> from sympy import symbols, exp, Mod
>>> from sympy.solvers.solveset import _is_modular as check
>>> x, y = symbols('x y')
>>> check(Mod(x, 3) - 1, x)
True
>>> check(Mod(x, 3) - 1, y)
False
>>> check(Mod(x, 3)**2 - 5, x)
False
>>> check(Mod(x, 3)**2 - y, x)
False
>>> check(exp(Mod(x, 3)) - 1, x)
False
>>> check(Mod(3, y) - 1, y)
False
"""
if not f.has(Mod):
return False
# extract modterms from f.
modterms = list(f.atoms(Mod))
return (len(modterms) == 1 and # only one Mod should be present
modterms[0].args[0].has(symbol) and # B-> function of symbol
modterms[0].args[1].is_integer and # C-> to be an integer.
any(isinstance(term, Mod)
for term in list(_term_factors(f))) # free from other funcs
)
def _invert_modular(modterm, rhs, n, symbol):
"""
Helper function to invert modular equation.
``Mod(a, m) - rhs = 0``
Generally it is inverted as (a, ImageSet(Lambda(n, m*n + rhs), S.Integers)).
More simplified form will be returned if possible.
If it is not invertible then (modterm, rhs) is returned.
The following cases arise while inverting equation ``Mod(a, m) - rhs = 0``:
1. If a is symbol then m*n + rhs is the required solution.
2. If a is an instance of ``Add`` then we split a into its symbol dependent
and symbol independent parts; the symbol independent part gets transferred
to the other side and ``_invert_modular`` is called again on the symbol
dependent part.
3. If a is an instance of ``Mul`` then, just as for ``Add``, we separate
out the symbol dependent and symbol independent parts, transfer the
symbol independent part to the rhs with the help of invert, and call
``_invert_modular`` again on the symbol dependent part.
4. If a is an instance of ``Pow`` then two cases arise, as follows:
- If a is of type (symbol_indep)**(symbol_dep) then the remainder is
evaluated with the help of the discrete_log function and then the least
period is found with the help of the totient function.
period*n + remainder is the required solution in this case.
For reference: (https://en.wikipedia.org/wiki/Euler's_theorem)
- If a is of type (symbol_dep)**(symbol_indep) then we try to find the list
of primitive solutions with the help of the nthroot_mod function.
m*n + rem is the general solution, where rem belongs to the list of
solutions from the nthroot_mod function.
Parameters
==========
modterm, rhs : Expr
The modular equation to be inverted, ``modterm - rhs = 0``
symbol : Symbol
The variable in the equation to be inverted.
n : Dummy
Dummy variable for output g_n.
Returns
=======
A tuple (f_x, g_n) is returned, where f_x is a function of the symbol free
of Mod and g_n is the set of values f_x can take.
Examples
========
>>> from sympy import symbols, exp, Mod, Dummy, S
>>> from sympy.solvers.solveset import _invert_modular as invert_modular
>>> x, y = symbols('x y')
>>> n = Dummy('n')
>>> invert_modular(Mod(exp(x), 7), S(5), n, x)
(Mod(exp(x), 7), 5)
>>> invert_modular(Mod(x, 7), S(5), n, x)
(x, ImageSet(Lambda(_n, 7*_n + 5), Integers))
>>> invert_modular(Mod(3*x + 8, 7), S(5), n, x)
(x, ImageSet(Lambda(_n, 7*_n + 6), Integers))
>>> invert_modular(Mod(x**4, 7), S(5), n, x)
(x, EmptySet)
>>> invert_modular(Mod(2**(x**2 + x + 1), 7), S(2), n, x)
(x**2 + x + 1, ImageSet(Lambda(_n, 3*_n + 1), Naturals0))
"""
a, m = modterm.args
if rhs.is_real is False or any(term.is_real is False
for term in list(_term_factors(a))):
# Check for complex arguments
return modterm, rhs
if abs(rhs) >= abs(m):
# if the absolute value of rhs is not less than that of m, there is no solution.
return symbol, EmptySet
if a == symbol:
return symbol, ImageSet(Lambda(n, m*n + rhs), S.Integers)
if a.is_Add:
# g + h = a
g, h = a.as_independent(symbol)
if g is not S.Zero:
x_indep_term = rhs - Mod(g, m)
return _invert_modular(Mod(h, m), Mod(x_indep_term, m), n, symbol)
if a.is_Mul:
# g*h = a
g, h = a.as_independent(symbol)
if g is not S.One:
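# invert(g, m) gives the multiplicative inverse of g modulo m, so
# g*h = rhs (mod m) becomes h = rhs*invert(g, m) (mod m); the result is
# reduced with Mod again before recursing.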
x_indep_term = rhs*invert(g, m)
return _invert_modular(Mod(h, m), Mod(x_indep_term, m), n, symbol)
if a.is_Pow:
# base**expo = a
base, expo = a.args
if expo.has(symbol) and not base.has(symbol):
# remainder -> the n-independent part of the solution of the equation.
# m and rhs are made coprime by dividing out igcd(m, rhs)
try:
remainder = discrete_log(m / igcd(m, rhs), rhs, a.base)
except ValueError: # log does not exist
return modterm, rhs
# period -> coefficient of n in the solution; it is the least period
# after which expo repeats itself.
# (a**(totient(m)) - 1) divides m. Here is link of theorem:
# (https://en.wikipedia.org/wiki/Euler's_theorem)
period = totient(m)
for p in divisors(period):
# a period smaller than totient(m) might exist.
if pow(a.base, p, m / igcd(m, a.base)) == 1:
period = p
break
# recursion is not applied here since _invert_modular is currently
# not smart enough to handle an infinite rhs; here expo has the infinite
# rhs = ImageSet(Lambda(n, period*n + remainder), S.Naturals0).
return expo, ImageSet(Lambda(n, period*n + remainder), S.Naturals0)
elif base.has(symbol) and not expo.has(symbol):
try:
remainder_list = nthroot_mod(rhs, expo, m, all_roots=True)
if remainder_list == []:
return symbol, EmptySet
except (ValueError, NotImplementedError):
return modterm, rhs
g_n = EmptySet
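# collect the residue classes m*n + rem for every root rem returned by
# nthroot_mod; base can then take any value in their union.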
for rem in remainder_list:
g_n += ImageSet(Lambda(n, m*n + rem), S.Integers)
return base, g_n
return modterm, rhs
def _solve_modular(f, symbol, domain):
r"""
Helper function for solving modular equations of type ``A - Mod(B, C) = 0``,
where A may or may not be a function of symbol, B is surely a function of
symbol and C is an integer.
Currently ``_solve_modular`` is only able to solve cases
where A is not a function of symbol.
Parameters
==========
f : Expr
The modular equation to be solved, ``f = 0``
symbol : Symbol
The variable in the equation to be solved.
domain : Set
A set over which the equation is solved. It has to be a subset of
Integers.
Returns
=======
A set of integer solutions satisfying the given modular equation.
A ``ConditionSet`` if the equation is unsolvable.
Examples
========
>>> from sympy.solvers.solveset import _solve_modular as solve_modulo
>>> from sympy import S, Symbol, sin, Intersection, Interval
>>> from sympy.core.mod import Mod
>>> x = Symbol('x')
>>> solve_modulo(Mod(5*x - 8, 7) - 3, x, S.Integers)
ImageSet(Lambda(_n, 7*_n + 5), Integers)
>>> solve_modulo(Mod(5*x - 8, 7) - 3, x, S.Reals) # domain should be subset of integers.
ConditionSet(x, Eq(Mod(5*x + 6, 7) - 3, 0), Reals)
>>> solve_modulo(-7 + Mod(x, 5), x, S.Integers)
EmptySet
>>> solve_modulo(Mod(12**x, 21) - 18, x, S.Integers)
ImageSet(Lambda(_n, 6*_n + 2), Naturals0)
>>> solve_modulo(Mod(sin(x), 7) - 3, x, S.Integers) # not solvable
ConditionSet(x, Eq(Mod(sin(x), 7) - 3, 0), Integers)
>>> solve_modulo(3 - Mod(x, 5), x, Intersection(S.Integers, Interval(0, 100)))
Intersection(ImageSet(Lambda(_n, 5*_n + 3), Integers), Range(0, 101, 1))
"""
# extract modterm and rhs from f
unsolved_result = ConditionSet(symbol, Eq(f, 0), domain)
modterm = list(f.atoms(Mod))[0]
rhs = -S.One*(f.subs(modterm, S.Zero))
if f.as_coefficients_dict()[modterm].is_negative:
# checks if coefficient of modterm is negative in main equation.
rhs *= -S.One
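# e.g. for f = 3 - Mod(x, 5): substituting 0 for the modterm gives 3,
# which is negated to -3; since the coefficient of the modterm is
# negative the sign is flipped back, giving rhs = 3, i.e. the equation
# reads Mod(x, 5) = 3.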
if not domain.is_subset(S.Integers):
return unsolved_result
if rhs.has(symbol):
# TODO Case: A-> function of symbol, can be extended here
# in future.
return unsolved_result
n = Dummy('n', integer=True)
f_x, g_n = _invert_modular(modterm, rhs, n, symbol)
if f_x == modterm and g_n == rhs:
return unsolved_result
if f_x == symbol:
if domain is not S.Integers:
return domain.intersect(g_n)
return g_n
if isinstance(g_n, ImageSet):
lamda_expr = g_n.lamda.expr
lamda_vars = g_n.lamda.variables
base_sets = g_n.base_sets
sol_set = _solveset(f_x - lamda_expr, symbol, S.Integers)
if isinstance(sol_set, FiniteSet):
tmp_sol = EmptySet
for sol in sol_set:
tmp_sol += ImageSet(Lambda(lamda_vars, sol), *base_sets)
sol_set = tmp_sol
else:
sol_set = ImageSet(Lambda(lamda_vars, sol_set), *base_sets)
return domain.intersect(sol_set)
return unsolved_result
def _term_factors(f):
"""
Iterator to get the factors of all terms present
in the given equation.
Parameters
==========
f : Expr
Equation that needs to be addressed
Returns
=======
Factors of all terms present in the equation.
Examples
========
>>> from sympy import symbols
>>> from sympy.solvers.solveset import _term_factors
>>> x = symbols('x')
>>> list(_term_factors(-2 - x**2 + x*(x + 1)))
[-2, -1, x**2, x, x + 1]
"""
for add_arg in Add.make_args(f):
yield from Mul.make_args(add_arg)
def _solve_exponential(lhs, rhs, symbol, domain):
r"""
Helper function for solving (supported) exponential equations.
Exponential equations are the sum of (currently) at most
two terms with one or both of them having a power with a
symbol-dependent exponent.
For example
.. math:: 5^{2x + 3} - 5^{3x - 1}
.. math:: 4^{5 - 9x} - e^{2 - x}
Parameters
==========
lhs, rhs : Expr
The exponential equation to be solved, `lhs = rhs`
symbol : Symbol
The variable in which the equation is solved
domain : Set
A set over which the equation is solved.
Returns
=======
A set of solutions satisfying the given equation.
A ``ConditionSet`` if the equation is unsolvable or
if the assumptions are not properly defined; in the latter case
a different style of ``ConditionSet`` is returned, containing the
solution(s) of the equation together with the required assumptions.
Examples
========
>>> from sympy.solvers.solveset import _solve_exponential as solve_expo
>>> from sympy import symbols, S
>>> x = symbols('x', real=True)
>>> a, b = symbols('a b')
>>> solve_expo(2**x + 3**x - 5**x, 0, x, S.Reals) # not solvable
ConditionSet(x, Eq(2**x + 3**x - 5**x, 0), Reals)
>>> solve_expo(a**x - b**x, 0, x, S.Reals) # solvable but incorrect assumptions
ConditionSet(x, (a > 0) & (b > 0), FiniteSet(0))
>>> solve_expo(3**(2*x) - 2**(x + 3), 0, x, S.Reals)
FiniteSet(-3*log(2)/(-2*log(3) + log(2)))
>>> solve_expo(2**x - 4**x, 0, x, S.Reals)
FiniteSet(0)
* Proof of correctness of the method
The logarithm function is the inverse of the exponential function.
The defining relation between exponentiation and logarithm is:
.. math:: {\log_b x} = y \enspace if \enspace b^y = x
Therefore if we are given an equation with exponent terms, we can
convert every term to its corresponding logarithmic form. This is
achieved by taking logarithms and expanding the equation using
logarithmic identities so that it can easily be handled by ``solveset``.
For example:
.. math:: 3^{2x} = 2^{x + 3}
Taking log both sides will reduce the equation to
.. math:: (2x)\log(3) = (x + 3)\log(2)
This form can be easily handled by ``solveset``.
"""
unsolved_result = ConditionSet(symbol, Eq(lhs - rhs, 0), domain)
newlhs = powdenest(lhs)
if lhs != newlhs:
# it may also be advantageous to factor the new expr
return _solveset(factor(newlhs - rhs), symbol, domain) # try again with _solveset
if not (isinstance(lhs, Add) and len(lhs.args) == 2):
# solving for the sum of more than two powers is possible
# but not yet implemented
return unsolved_result
if rhs != 0:
return unsolved_result
a, b = list(ordered(lhs.args))
a_term = a.as_independent(symbol)[1]
b_term = b.as_independent(symbol)[1]
a_base, a_exp = a_term.base, a_term.exp
b_base, b_exp = b_term.base, b_term.exp
from sympy.functions.elementary.complexes import im
if domain.is_subset(S.Reals):
conditions = And(
a_base > 0,
b_base > 0,
Eq(im(a_exp), 0),
Eq(im(b_exp), 0))
else:
conditions = And(
Ne(a_base, 0),
Ne(b_base, 0))
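# since lhs = a + b = 0 means a = -b, take the log of a and of -b and
# expand (force=True); the resulting equation is linear in the
# symbol-dependent exponents, e.g. 2*x*log(3) = (x + 3)*log(2).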
L, R = map(lambda i: expand_log(log(i), force=True), (a, -b))
solutions = _solveset(L - R, symbol, domain)
return ConditionSet(symbol, conditions, solutions)
def _is_exponential(f, symbol):
r"""
Return ``True`` if one or more terms contain ``symbol`` only in
exponents, else ``False``.
Parameters
==========
f : Expr
The equation to be checked
symbol : Symbol
The variable in which the equation is checked
Examples
========
>>> from sympy import symbols, cos, exp
>>> from sympy.solvers.solveset import _is_exponential as check
>>> x, y = symbols('x y')
>>> check(y, y)
False
>>> check(x**y - 1, y)
True
>>> check(x**y*2**y - 1, y)
True
>>> check(exp(x + 3) + 3**x, x)
True
>>> check(cos(2**x), x)
False
* Philosophy behind the helper
The function extracts each term of the equation and checks if it is
of exponential form w.r.t ``symbol``.
"""
rv = False
for expr_arg in _term_factors(f):
if symbol not in expr_arg.free_symbols:
continue
if (isinstance(expr_arg, Pow) and
symbol not in expr_arg.base.free_symbols or
isinstance(expr_arg, exp)):
rv = True # symbol in exponent
else:
return False # dependent on symbol in non-exponential way
return rv
def _solve_logarithm(lhs, rhs, symbol, domain):
r"""
Helper to solve logarithmic equations which are reducible
to a single instance of `\log`.
Logarithmic equations are (currently) the equations that contains
`\log` terms which can be reduced to a single `\log` term or
a constant using various logarithmic identities.
For example:
.. math:: \log(x) + \log(x - 4)
can be reduced to:
.. math:: \log(x(x - 4))
Parameters
==========
lhs, rhs : Expr
The logarithmic equation to be solved, `lhs = rhs`
symbol : Symbol
The variable in which the equation is solved
domain : Set
A set over which the equation is solved.
Returns
=======
A set of solutions satisfying the given equation.
A ``ConditionSet`` if the equation is unsolvable.
Examples
========
>>> from sympy import symbols, log, S
>>> from sympy.solvers.solveset import _solve_logarithm as solve_log
>>> x = symbols('x')
>>> f = log(x - 3) + log(x + 3)
>>> solve_log(f, 0, x, S.Reals)
FiniteSet(sqrt(10), -sqrt(10))
* Proof of correctness
A logarithm is another way to write an exponent and is defined by
.. math:: {\log_b x} = y \enspace if \enspace b^y = x
When one side of the equation contains a single logarithm, the
equation can be solved by rewriting the equation as an equivalent
exponential equation as defined above. But if one side contains
more than one logarithm, we need to use the properties of logarithm
to condense it into a single logarithm.
Take for example
.. math:: \log(2x) - 15 = 0
contains single logarithm, therefore we can directly rewrite it to
exponential form as
.. math:: x = \frac{e^{15}}{2}
But if the equation has more than one logarithm as
.. math:: \log(x - 3) + \log(x + 3) = 0
we use logarithmic identities to convert it into a reduced form
Using,
.. math:: \log(a) + \log(b) = \log(ab)
the equation becomes,
.. math:: \log((x - 3)(x + 3))
This equation contains one logarithm and can be solved by rewriting
to exponents.
"""
new_lhs = logcombine(lhs, force=True)
new_f = new_lhs - rhs
return _solveset(new_f, symbol, domain)
def _is_logarithmic(f, symbol):
r"""
Return ``True`` if the equation is in the form
`a\log(f(x)) + b\log(g(x)) + ... + c` else ``False``.
Parameters
==========
f : Expr
The equation to be checked
symbol : Symbol
The variable in which the equation is checked
Returns
=======
``True`` if the equation is logarithmic otherwise ``False``.
Examples
========
>>> from sympy import symbols, tan, log
>>> from sympy.solvers.solveset import _is_logarithmic as check
>>> x, y = symbols('x y')
>>> check(log(x + 2) - log(x + 3), x)
True
>>> check(tan(log(2*x)), x)
False
>>> check(x*log(x), x)
False
>>> check(x + log(x), x)
False
>>> check(y + log(x), x)
True
* Philosophy behind the helper
The function extracts each term and checks whether it is
logarithmic w.r.t ``symbol``.
"""
rv = False
for term in Add.make_args(f):
saw_log = False
for term_arg in Mul.make_args(term):
if symbol not in term_arg.free_symbols:
continue
if isinstance(term_arg, log):
if saw_log:
return False # more than one log in term
saw_log = True
else:
return False # dependent on symbol in non-log way
if saw_log:
rv = True
return rv
def _transolve(f, symbol, domain):
r"""
Function to solve transcendental equations. It is a helper to
``solveset`` and should be used internally. ``_transolve``
currently supports the following class of equations:
- Exponential equations
- Logarithmic equations
Parameters
==========
f : Any transcendental equation that needs to be solved.
This needs to be an expression, which is assumed
to be equal to ``0``.
symbol : The variable for which the equation is solved.
This needs to be of class ``Symbol``.
domain : A set over which the equation is solved.
This needs to be of class ``Set``.
Returns
=======
Set
A set of values for ``symbol`` for which ``f`` is equal to
zero. An ``EmptySet`` is returned if ``f`` does not have solutions
in respective domain. A ``ConditionSet`` is returned as unsolved
object if algorithms to evaluate complete solution are not
yet implemented.
How to use ``_transolve``
=========================
``_transolve`` should not be used as an independent function, because
it assumes that the equation (``f``) and the ``symbol`` come from
``solveset`` and might have undergone a few modification(s).
To use ``_transolve`` as an independent function the equation (``f``)
and the ``symbol`` should be passed as they would have been by
``solveset``.
Examples
========
>>> from sympy.solvers.solveset import _transolve as transolve
>>> from sympy.solvers.solvers import _tsolve as tsolve
>>> from sympy import symbols, S, pprint
>>> x = symbols('x', real=True) # assumption added
>>> transolve(5**(x - 3) - 3**(2*x + 1), x, S.Reals)
FiniteSet(-(log(3) + 3*log(5))/(-log(5) + 2*log(3)))
How ``_transolve`` works
========================
``_transolve`` uses two types of helper functions to solve equations
of a particular class:
Identifying helpers: To determine whether a given equation
belongs to a certain class of equation or not. Returns either
``True`` or ``False``.
Solving helpers: Once an equation is identified, a corresponding
helper either solves the equation or returns a form of the equation
that ``solveset`` might better be able to handle.
* Philosophy behind the module
The purpose of ``_transolve`` is to take equations which are not
already polynomial in their generator(s) and to either recast them
as such through a valid transformation or to solve them outright.
A pair of helper functions for each class of supported
transcendental functions is employed for this purpose. One
identifies the transcendental form of an equation and the other
either solves it or recasts it into a tractable form that can be
solved by ``solveset``.
For example, an equation in the form `ab^{f(x)} - cd^{g(x)} = 0`
can be transformed to
`\log(a) + f(x)\log(b) - \log(c) - g(x)\log(d) = 0`
(under certain assumptions) and this can be solved with ``solveset``
if `f(x)` and `g(x)` are in polynomial form.
How ``_transolve`` is better than ``_tsolve``
=============================================
1) Better output
``_transolve`` provides expressions in a more simplified form.
Consider a simple exponential equation
>>> f = 3**(2*x) - 2**(x + 3)
>>> pprint(transolve(f, x, S.Reals), use_unicode=False)
-3*log(2)
{------------------}
-2*log(3) + log(2)
>>> pprint(tsolve(f, x), use_unicode=False)
/ 3 \
| --------|
| log(2/9)|
[-log\2 /]
2) Extensible
The API of ``_transolve`` is designed such that it is easily
extensible, i.e. the code that solves a given class of
equations is encapsulated in a helper and not mixed in with
the code of ``_transolve`` itself.
3) Modular
``_transolve`` is designed to be modular i.e, for every class of
equation a separate helper for identification and solving is
implemented. This makes it easy to change or modify any of the
methods implemented directly in the helpers without interfering
with the actual structure of the API.
4) Faster Computation
Solving an equation via ``_transolve`` is much faster than via
``_tsolve``. In ``solve``, attempts are made to compute every possibility
to get the solutions. This series of attempts makes solving a bit
slow. In ``_transolve``, computation begins only after a particular
type of equation is identified.
How to add new class of equations
=================================
Adding a new class of equation solver is a three-step procedure:
- Identify the type of the equations
Determine the type of the class of equations to which they belong:
it could be of ``Add``, ``Pow``, etc. types. Separate internal functions
are used for each type. Write identification and solving helpers
and use them from within the routine for the given type of equation
(after adding it, if necessary). Something like:
.. code-block:: python
def add_type(lhs, rhs, x):
....
if _is_exponential(lhs, x):
new_eq = _solve_exponential(lhs, rhs, x)
....
rhs, lhs = eq.as_independent(x)
if lhs.is_Add:
result = add_type(lhs, rhs, x)
- Define the identification helper.
- Define the solving helper.
Apart from this, a few other things need to be taken care of while
adding an equation solver:
- Naming conventions:
Name of the identification helper should be as
``_is_class`` where class will be the name or abbreviation
of the class of equation. The solving helper will be named as
``_solve_class``.
For example: for exponential equations it becomes
``_is_exponential`` and ``_solve_exponential``.
- The identifying helpers should take two input parameters,
the equation to be checked and the variable for which a solution
is being sought, while solving helpers would require an additional
domain parameter.
- Be sure to consider corner cases.
- Add tests for each helper.
- Add a docstring to your helper that describes the method
implemented.
The documentation of the helpers should identify:
- the purpose of the helper,
- the method used to identify and solve the equation,
- a proof of correctness
- the return values of the helpers
"""
def add_type(lhs, rhs, symbol, domain):
"""
Helper for ``_transolve`` to handle equations of
``Add`` type, i.e. equations of the form
``a*f(x) + b*g(x) + .... = c``.
For example: 4**x + 8**x = 0
"""
result = ConditionSet(symbol, Eq(lhs - rhs, 0), domain)
# check if it is exponential type equation
if _is_exponential(lhs, symbol):
result = _solve_exponential(lhs, rhs, symbol, domain)
# check if it is logarithmic type equation
elif _is_logarithmic(lhs, symbol):
result = _solve_logarithm(lhs, rhs, symbol, domain)
return result
result = ConditionSet(symbol, Eq(f, 0), domain)
# invert_complex handles the call to the desired inverter based
# on the domain specified.
lhs, rhs_s = invert_complex(f, 0, symbol, domain)
if isinstance(rhs_s, FiniteSet):
assert (len(rhs_s.args)) == 1
rhs = rhs_s.args[0]
if lhs.is_Add:
result = add_type(lhs, rhs, symbol, domain)
else:
result = rhs_s
return result
def solveset(f, symbol=None, domain=S.Complexes):
r"""Solves a given inequality or equation with set as output
Parameters
==========
f : Expr or a relational.
The target equation or inequality
symbol : Symbol
The variable for which the equation is solved
domain : Set
The domain over which the equation is solved
Returns
=======
Set
A set of values for `symbol` for which `f` is True or is equal to
zero. An `EmptySet` is returned if `f` is False or nonzero.
A `ConditionSet` is returned as unsolved object if algorithms
to evaluate complete solution are not yet implemented.
`solveset` claims to be complete in the solution set that it returns.
Raises
======
NotImplementedError
The algorithms to solve inequalities in complex domain are
not yet implemented.
ValueError
The input is not valid.
RuntimeError
It is a bug, please report to the github issue tracker.
Notes
=====
Python interprets 0 and 1 as False and True, respectively, but
in this function they refer to solutions of an expression. So 0 and 1
return the Domain and EmptySet, respectively, while True and False
return the opposite (as they are assumed to be solutions of relational
expressions).
See Also
========
solveset_real: solver for real domain
solveset_complex: solver for complex domain
Examples
========
>>> from sympy import exp, sin, Symbol, pprint, S, Eq
>>> from sympy.solvers.solveset import solveset, solveset_real
* The default domain is complex. Not specifying a domain will lead
to the solving of the equation in the complex domain (and this
is not affected by the assumptions on the symbol):
>>> x = Symbol('x')
>>> pprint(solveset(exp(x) - 1, x), use_unicode=False)
{2*n*I*pi | n in Integers}
>>> x = Symbol('x', real=True)
>>> pprint(solveset(exp(x) - 1, x), use_unicode=False)
{2*n*I*pi | n in Integers}
* If you want to use `solveset` to solve the equation in the
real domain, provide a real domain. (Using ``solveset_real``
does this automatically.)
>>> R = S.Reals
>>> x = Symbol('x')
>>> solveset(exp(x) - 1, x, R)
FiniteSet(0)
>>> solveset_real(exp(x) - 1, x)
FiniteSet(0)
The solution is unaffected by assumptions on the symbol:
>>> p = Symbol('p', positive=True)
>>> pprint(solveset(p**2 - 4))
{-2, 2}
When a ConditionSet is returned, symbols with assumptions that
would alter the set are replaced with more generic symbols:
>>> i = Symbol('i', imaginary=True)
>>> solveset(Eq(i**2 + i*sin(i), 1), i, domain=S.Reals)
ConditionSet(_R, Eq(_R**2 + _R*sin(_R) - 1, 0), Reals)
* Inequalities can be solved over the real domain only. Use of a complex
domain leads to a NotImplementedError.
>>> solveset(exp(x) > 1, x, R)
Interval.open(0, oo)
"""
f = sympify(f)
symbol = sympify(symbol)
if f is S.true:
return domain
if f is S.false:
return S.EmptySet
if not isinstance(f, (Expr, Relational, Number)):
raise ValueError("%s is not a valid SymPy expression" % f)
if not isinstance(symbol, (Expr, Relational)) and symbol is not None:
raise ValueError("%s is not a valid SymPy symbol" % symbol)
if not isinstance(domain, Set):
raise ValueError("%s is not a valid domain" %(domain))
free_symbols = f.free_symbols
if symbol is None and not free_symbols:
b = Eq(f, 0)
if b is S.true:
return domain
elif b is S.false:
return S.EmptySet
else:
raise NotImplementedError(filldedent('''
relationship between value and 0 is unknown: %s''' % b))
if symbol is None:
if len(free_symbols) == 1:
symbol = free_symbols.pop()
elif free_symbols:
raise ValueError(filldedent('''
The independent variable must be specified for a
multivariate equation.'''))
elif not isinstance(symbol, Symbol):
f, s, swap = recast_to_symbols([f], [symbol])
# the xreplace will be needed if a ConditionSet is returned
return solveset(f[0], s[0], domain).xreplace(swap)
# solveset should ignore assumptions on symbols
if symbol not in _rc:
x = _rc[0] if domain.is_subset(S.Reals) else _rc[1]
rv = solveset(f.xreplace({symbol: x}), x, domain)
# try to use the original symbol if possible
try:
_rv = rv.xreplace({x: symbol})
except TypeError:
_rv = rv
if rv.dummy_eq(_rv):
rv = _rv
return rv
# Abs has its own handling method which avoids the
# rewriting property that the first piece of abs(x)
# is for x >= 0 and the 2nd piece for x < 0 -- solutions
# can look better if the 2nd condition is x <= 0. Since
# the solution is a set, duplication of results is not
# an issue, e.g. {y, -y} when y is 0 will be {0}
f, mask = _masked(f, Abs)
f = f.rewrite(Piecewise) # everything that's not an Abs
for d, e in mask:
# everything *in* an Abs
e = e.func(e.args[0].rewrite(Piecewise))
f = f.xreplace({d: e})
f = piecewise_fold(f)
return _solveset(f, symbol, domain, _check=True)
def solveset_real(f, symbol):
return solveset(f, symbol, S.Reals)
def solveset_complex(f, symbol):
return solveset(f, symbol, S.Complexes)
def _solveset_multi(eqs, syms, domains):
'''Basic implementation of a multivariate solveset.
For internal use (not ready for public consumption)'''
rep = {}
for sym, dom in zip(syms, domains):
if dom is S.Reals:
rep[sym] = Symbol(sym.name, real=True)
eqs = [eq.subs(rep) for eq in eqs]
syms = [sym.subs(rep) for sym in syms]
syms = tuple(syms)
if len(eqs) == 0:
return ProductSet(*domains)
if len(syms) == 1:
sym = syms[0]
domain = domains[0]
solsets = [solveset(eq, sym, domain) for eq in eqs]
solset = Intersection(*solsets)
return ImageSet(Lambda((sym,), (sym,)), solset).doit()
eqs = sorted(eqs, key=lambda eq: len(eq.free_symbols & set(syms)))
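# Try each equation in turn: if it can be solved for one of the symbols
# as a FiniteSet, substitute each solution into the remaining equations
# and recurse on the reduced system; the partial solutions are
# recombined with ImageSets and their Union is returned.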
for n in range(len(eqs)):
sols = []
all_handled = True
for sym in syms:
if sym not in eqs[n].free_symbols:
continue
sol = solveset(eqs[n], sym, domains[syms.index(sym)])
if isinstance(sol, FiniteSet):
i = syms.index(sym)
symsp = syms[:i] + syms[i+1:]
domainsp = domains[:i] + domains[i+1:]
eqsp = eqs[:n] + eqs[n+1:]
for s in sol:
eqsp_sub = [eq.subs(sym, s) for eq in eqsp]
sol_others = _solveset_multi(eqsp_sub, symsp, domainsp)
fun = Lambda((symsp,), symsp[:i] + (s,) + symsp[i:])
sols.append(ImageSet(fun, sol_others).doit())
else:
all_handled = False
if all_handled:
return Union(*sols)
def solvify(f, symbol, domain):
"""Solves an equation using solveset and returns the solution in accordance
with the `solve` output API.
Returns
=======
We classify the output based on the type of solution returned by `solveset`.
Solution | Output
----------------------------------------
FiniteSet | list
ImageSet, | list (if `f` is periodic)
Union |
EmptySet | empty list
Others | None
Raises
======
NotImplementedError
A ConditionSet is the input.
Examples
========
>>> from sympy.solvers.solveset import solvify
>>> from sympy.abc import x
>>> from sympy import S, tan, sin, exp
>>> solvify(x**2 - 9, x, S.Reals)
[-3, 3]
>>> solvify(sin(x) - 1, x, S.Reals)
[pi/2]
>>> solvify(tan(x), x, S.Reals)
[0]
>>> solvify(exp(x) - 1, x, S.Complexes)
>>> solvify(exp(x) - 1, x, S.Reals)
[0]
"""
solution_set = solveset(f, symbol, domain)
result = None
if solution_set is S.EmptySet:
result = []
elif isinstance(solution_set, ConditionSet):
raise NotImplementedError('solveset is unable to solve this equation.')
elif isinstance(solution_set, FiniteSet):
result = list(solution_set)
else:
period = periodicity(f, symbol)
if period is not None:
solutions = S.EmptySet
iter_solutions = ()
if isinstance(solution_set, ImageSet):
iter_solutions = (solution_set,)
elif isinstance(solution_set, Union):
if all(isinstance(i, ImageSet) for i in solution_set.args):
iter_solutions = solution_set.args
for solution in iter_solutions:
solutions += solution.intersect(Interval(0, period, False, True))
if isinstance(solutions, FiniteSet):
result = list(solutions)
else:
solution = solution_set.intersect(domain)
if isinstance(solution, FiniteSet):
result += solution
return result
###############################################################################
################################ LINSOLVE #####################################
###############################################################################
def linear_coeffs(eq, *syms, **_kw):
"""Return a list whose elements are the coefficients of the
corresponding symbols in the sum of terms in ``eq``.
The additive constant is returned as the last element of the
list.
Raises
======
NonlinearError
The equation contains a nonlinear term
Examples
========
>>> from sympy.solvers.solveset import linear_coeffs
>>> from sympy.abc import x, y, z
>>> linear_coeffs(3*x + 2*y - 1, x, y)
[3, 2, -1]
It is not necessary to expand the expression:
>>> linear_coeffs(x + y*(z*(x*3 + 2) + 3), x)
[3*y*z + 1, y*(2*z + 3)]
But if there are nonlinear or cross terms -- even if they would
cancel after simplification -- an error is raised so the situation
does not pass silently past the caller's attention:
>>> eq = 1/x*(x - 1) + 1/x
>>> linear_coeffs(eq.expand(), x)
[0, 1]
>>> linear_coeffs(eq, x)
Traceback (most recent call last):
...
NonlinearError: nonlinear term encountered: 1/x
>>> linear_coeffs(x*(y + 1) - x*y, x, y)
Traceback (most recent call last):
...
NonlinearError: nonlinear term encountered: x*(y + 1)
"""
d = defaultdict(list)
eq = _sympify(eq)
symset = set(syms)
has = eq.free_symbols & symset
if not has:
return [S.Zero]*len(syms) + [eq]
c, terms = eq.as_coeff_add(*has)
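# c is the part of eq that is free of the target symbols; terms holds
# the symbol-dependent addends that are classified in the loop below.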
d[0].extend(Add.make_args(c))
for t in terms:
m, f = t.as_coeff_mul(*has)
if len(f) != 1:
break
f = f[0]
if f in symset:
d[f].append(m)
elif f.is_Add:
d1 = linear_coeffs(f, *has, **{'dict': True})
d[0].append(m*d1.pop(0))
for xf, vf in d1.items():
d[xf].append(m*vf)
else:
break
else:
for k, v in d.items():
d[k] = Add(*v)
if not _kw:
return [d.get(s, S.Zero) for s in syms] + [d[0]]
return d # default is still list but this won't matter
raise NonlinearError('nonlinear term encountered: %s' % t)
def linear_eq_to_matrix(equations, *symbols):
r"""
Converts a given System of Equations into Matrix form.
Here `equations` must be a linear system of equations in
`symbols`. Element M[i, j] corresponds to the coefficient
of the jth symbol in the ith equation.
The Matrix form corresponds to the augmented matrix form.
For example:
.. math:: 4x + 2y + 3z = 1
.. math:: 3x + y + z = -6
.. math:: 2x + 4y + 9z = 2
This system would return `A` & `b` as given below:
::
[ 4 2 3 ] [ 1 ]
A = [ 3 1 1 ] b = [-6 ]
[ 2 4 9 ] [ 2 ]
The only simplification performed is to convert
`Eq(a, b) -> a - b`.
Raises
======
NonlinearError
The equations contain a nonlinear term.
ValueError
The symbols are not given or are not unique.
Examples
========
>>> from sympy import linear_eq_to_matrix, symbols
>>> c, x, y, z = symbols('c, x, y, z')
The coefficients (numerical or symbolic) of the symbols will
be returned as matrices:
>>> eqns = [c*x + z - 1 - c, y + z, x - y]
>>> A, b = linear_eq_to_matrix(eqns, [x, y, z])
>>> A
Matrix([
[c, 0, 1],
[0, 1, 1],
[1, -1, 0]])
>>> b
Matrix([
[c + 1],
[ 0],
[ 0]])
This routine does not simplify expressions and will raise an error
if nonlinearity is encountered:
>>> eqns = [
... (x**2 - 3*x)/(x - 3) - 3,
... y**2 - 3*y - y*(y - 4) + x - 4]
>>> linear_eq_to_matrix(eqns, [x, y])
Traceback (most recent call last):
...
NonlinearError:
The term (x**2 - 3*x)/(x - 3) is nonlinear in {x, y}
Simplifying these equations will discard the removable singularity
in the first and reveal the linear structure of the second:
>>> [e.simplify() for e in eqns]
[x - 3, x + y - 4]
Any such simplification needed to eliminate nonlinear terms must
be done before calling this routine.
"""
if not symbols:
raise ValueError(filldedent('''
Symbols must be given, for which coefficients
are to be found.
'''))
if hasattr(symbols[0], '__iter__'):
symbols = symbols[0]
for i in symbols:
if not isinstance(i, Symbol):
raise ValueError(filldedent('''
Expecting a Symbol but got %s
''' % i))
if has_dups(symbols):
raise ValueError('Symbols must be unique')
equations = sympify(equations)
if isinstance(equations, MatrixBase):
equations = list(equations)
elif isinstance(equations, (Expr, Eq)):
equations = [equations]
elif not is_sequence(equations):
raise ValueError(filldedent('''
Equation(s) must be given as a sequence, Expr,
Eq or Matrix.
'''))
A, b = [], []
for i, f in enumerate(equations):
if isinstance(f, Equality):
f = f.rewrite(Add, evaluate=False)
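# Eq(a, b) becomes a - b; evaluate=False keeps the difference
# unevaluated so no further simplification happens at this step.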
coeff_list = linear_coeffs(f, *symbols)
b.append(-coeff_list.pop())
A.append(coeff_list)
A, b = map(Matrix, (A, b))
return A, b
def linsolve(system, *symbols):
r"""
Solve system of N linear equations with M variables; both
underdetermined and overdetermined systems are supported.
The possible number of solutions is zero, one or infinite.
If there are no solutions, EmptySet is returned; infinite
solutions are represented parametrically in terms of the given
symbols. For a unique solution a FiniteSet of ordered tuples
is returned.
All Standard input formats are supported:
For the given set of Equations, the respective input types
are given below:
.. math:: 3x + 2y - z = 1
.. math:: 2x - 2y + 4z = -2
.. math:: 2x - y + 2z = 0
* Augmented Matrix Form, `system` given below:
::
[3 2 -1 1]
system = [2 -2 4 -2]
[2 -1 2 0]
* List Of Equations Form
`system = [3x + 2y - z - 1, 2x - 2y + 4z + 2, 2x - y + 2z]`
* Input A & b Matrix Form (from Ax = b) are given as below:
::
[3 2 -1 ] [ 1 ]
A = [2 -2 4 ] b = [ -2 ]
[2 -1 2 ] [ 0 ]
`system = (A, b)`
Symbols can always be passed but are actually only needed
when 1) a system of equations is being passed and 2) the
system is passed as an underdetermined matrix and one wants
to control the name of the free variables in the result.
An error is raised if no symbols are used for case 1, but if
no symbols are provided for case 2, internally generated symbols
will be provided. When providing symbols for case 2, there should
be at least as many symbols as there are columns in matrix A.
The algorithm used here is Gauss-Jordan elimination, which
results, after elimination, in a reduced row echelon form matrix.
Returns
=======
A FiniteSet containing an ordered tuple of values for the
unknowns for which the `system` has a solution. (Wrapping
the tuple in FiniteSet is used to maintain a consistent
output format throughout solveset.)
Returns EmptySet, if the linear system is inconsistent.
Raises
======
ValueError
The input is not valid.
The symbols are not given.
Examples
========
>>> from sympy import Matrix, linsolve, symbols
>>> x, y, z = symbols("x, y, z")
>>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
>>> b = Matrix([3, 6, 9])
>>> A
Matrix([
[1, 2, 3],
[4, 5, 6],
[7, 8, 10]])
>>> b
Matrix([
[3],
[6],
[9]])
>>> linsolve((A, b), [x, y, z])
FiniteSet((-1, 2, 0))
* Parametric Solution: In case the system is underdetermined, the
function will return a parametric solution in terms of the given
symbols. Those that are free will be returned unchanged. e.g. in
the system below, `z` is returned as the solution for variable z;
it can take on any value.
>>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> b = Matrix([3, 6, 9])
>>> linsolve((A, b), x, y, z)
FiniteSet((z - 1, 2 - 2*z, z))
If no symbols are given, internally generated symbols will be used.
The `tau0` in the 3rd position indicates (as before) that the 3rd
variable -- whatever it's named -- can take on any value:
>>> linsolve((A, b))
FiniteSet((tau0 - 1, 2 - 2*tau0, tau0))
* List of Equations as input
>>> Eqns = [3*x + 2*y - z - 1, 2*x - 2*y + 4*z + 2, - x + y/2 - z]
>>> linsolve(Eqns, x, y, z)
FiniteSet((1, -2, -2))
* Augmented Matrix as input
>>> aug = Matrix([[2, 1, 3, 1], [2, 6, 8, 3], [6, 8, 18, 5]])
>>> aug
Matrix([
[2, 1, 3, 1],
[2, 6, 8, 3],
[6, 8, 18, 5]])
>>> linsolve(aug, x, y, z)
FiniteSet((3/10, 2/5, 0))
* Solve for symbolic coefficients
>>> a, b, c, d, e, f = symbols('a, b, c, d, e, f')
>>> eqns = [a*x + b*y - c, d*x + e*y - f]
>>> linsolve(eqns, x, y)
FiniteSet(((-b*f + c*e)/(a*e - b*d), (a*f - c*d)/(a*e - b*d)))
* A degenerate system returns the solution as a set of the given
symbols.
>>> system = Matrix(([0, 0, 0], [0, 0, 0], [0, 0, 0]))
>>> linsolve(system, x, y)
FiniteSet((x, y))
* For an empty system linsolve returns empty set
>>> linsolve([], x)
EmptySet
* An error is raised if, after expansion, any nonlinearity
is detected:
>>> linsolve([x*(1/x - 1), (y - 1)**2 - y**2 + 1], x, y)
FiniteSet((1, 1))
>>> linsolve([x**2 - 1], x)
Traceback (most recent call last):
...
NonlinearError:
nonlinear term encountered: x**2
"""
if not system:
return S.EmptySet
# If second argument is an iterable
if symbols and hasattr(symbols[0], '__iter__'):
symbols = symbols[0]
sym_gen = isinstance(symbols, GeneratorType)
b = None # if we don't get b the input was bad
syms_needed_msg = None
# unpack system
if hasattr(system, '__iter__'):
# 1). (A, b)
if len(system) == 2 and isinstance(system[0], MatrixBase):
A, b = system
# 2). (eq1, eq2, ...)
if not isinstance(system[0], MatrixBase):
if sym_gen or not symbols:
raise ValueError(filldedent('''
When passing a system of equations, the explicit
symbols for which a solution is being sought must
be given as a sequence, too.
'''))
eqs = system
try:
eqs, ring = sympy_eqs_to_ring(eqs, symbols)
except PolynomialError as exc:
# e.g. cos(x) contains an element of the set of generators
raise NonlinearError(str(exc))
try:
sol = solve_lin_sys(eqs, ring, _raw=False)
except PolyNonlinearError as exc:
raise NonlinearError(str(exc))
if sol is None:
return S.EmptySet
sol = FiniteSet(Tuple(*(sol.get(sym, sym) for sym in symbols)))
return sol
elif isinstance(system, MatrixBase) and not (
symbols and not isinstance(symbols, GeneratorType) and
isinstance(symbols[0], MatrixBase)):
# 3). A augmented with b
A, b = system[:, :-1], system[:, -1:]
if b is None:
raise ValueError("Invalid arguments")
syms_needed_msg = syms_needed_msg or 'columns of A'
if sym_gen:
symbols = [next(symbols) for i in range(A.cols)]
if any(set(symbols) & (A.free_symbols | b.free_symbols)):
raise ValueError(filldedent('''
At least one of the symbols provided
already appears in the system to be solved.
One way to avoid this is to use Dummy symbols in
the generator, e.g. numbered_symbols('%s', cls=Dummy)
''' % symbols[0].name.rstrip('1234567890')))
if not symbols:
symbols = [Dummy() for _ in range(A.cols)]
name = _uniquely_named_symbol('tau', (A, b),
compare=lambda i: str(i).rstrip('1234567890')).name
gen = numbered_symbols(name)
else:
gen = None
# This is just a wrapper for solve_lin_sys
eqs = []
rows = A.tolist()
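# form the linear expression rows[i] . symbols - b[i] for each row; these
# expressions are what sympy_eqs_to_ring and solve_lin_sys operate on.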
for rowi, bi in zip(rows, b):
terms = [elem * sym for elem, sym in zip(rowi, symbols) if elem]
terms.append(-bi)
eqs.append(Add(*terms))
eqs, ring = sympy_eqs_to_ring(eqs, symbols)
sol = solve_lin_sys(eqs, ring, _raw=False)
if sol is None:
return S.EmptySet
#sol = {sym:val for sym, val in sol.items() if sym != val}
sol = FiniteSet(Tuple(*(sol.get(sym, sym) for sym in symbols)))
if gen is not None:
solsym = sol.free_symbols
rep = {sym: next(gen) for sym in symbols if sym in solsym}
sol = sol.subs(rep)
return sol
##############################################################################
# ------------------------------nonlinsolve ---------------------------------#
##############################################################################
def _return_conditionset(eqs, symbols):
# return conditionset
eqs = (Eq(lhs, 0) for lhs in eqs)
condition_set = ConditionSet(
Tuple(*symbols), And(*eqs), S.Complexes**len(symbols))
return condition_set
def substitution(system, symbols, result=[{}], known_symbols=[],
exclude=[], all_symbols=None):
r"""
Solves the `system` using the substitution method. It is used in
`nonlinsolve`. This will be called from `nonlinsolve` when any of the
equations is a non-polynomial equation.
Parameters
==========
system : list of equations
The target system of equations
symbols : list of symbols to be solved.
The variable(s) for which the system is solved
known_symbols : list of solved symbols
Values are known for these variable(s)
result : An empty list or list of dicts
If no symbol values are known then an empty list, otherwise a
list of dicts with symbols as keys and the corresponding values.
exclude : Set of expressions.
Mostly denominator expression(s) of the equations of the system.
The final solution should not satisfy these expressions.
all_symbols : known_symbols + symbols(unsolved).
Returns
=======
A FiniteSet of ordered tuples of values of `all_symbols` for which the
`system` has a solution. The order of values in the tuple is the same as
that of the symbols in the parameter `all_symbols`. If the parameter
`all_symbols` is None, it is the same as the parameter `symbols`.
Please note that general FiniteSet is unordered, the solution returned
here is not simply a FiniteSet of solutions, rather it is a FiniteSet of
ordered tuple, i.e. the first & only argument to FiniteSet is a tuple of
solutions, which is ordered, & hence the returned solution is ordered.
Also note that solution could also have been returned as an ordered tuple,
FiniteSet is just a wrapper `{}` around the tuple. It has no other
significance except for the fact it is just used to maintain a consistent
output format throughout the solveset.
Raises
======
ValueError
The input is not valid.
The symbols are not given.
AttributeError
The input symbols are not `Symbol` type.
Examples
========
>>> from sympy.core.symbol import symbols
>>> x, y = symbols('x, y', real=True)
>>> from sympy.solvers.solveset import substitution
>>> substitution([x + y], [x], [{y: 1}], [y], set([]), [x, y])
FiniteSet((-1, 1))
* When you want the solution to not satisfy the equation `x + 1 = 0`:
>>> substitution([x + y], [x], [{y: 1}], [y], set([x + 1]), [y, x])
EmptySet
>>> substitution([x + y], [x], [{y: 1}], [y], set([x - 1]), [y, x])
FiniteSet((1, -1))
>>> substitution([x + y - 1, y - x**2 + 5], [x, y])
FiniteSet((-3, 4), (2, -1))
* Returns both real and complex solution
>>> x, y, z = symbols('x, y, z')
>>> from sympy import exp, sin
>>> substitution([exp(x) - sin(y), y**2 - 4], [x, y])
FiniteSet((ImageSet(Lambda(_n, 2*_n*I*pi + log(sin(2))), Integers), 2),
(ImageSet(Lambda(_n, I*(2*_n*pi + pi) + log(sin(2))), Integers), -2))
>>> eqs = [z**2 + exp(2*x) - sin(y), -3 + exp(-y)]
>>> substitution(eqs, [y, z])
FiniteSet((-log(3), sqrt(-exp(2*x) - sin(log(3)))),
(-log(3), -sqrt(-exp(2*x) - sin(log(3)))),
(ImageSet(Lambda(_n, 2*_n*I*pi - log(3)), Integers),
ImageSet(Lambda(_n, sqrt(-exp(2*x) + sin(2*_n*I*pi - log(3)))), Integers)),
(ImageSet(Lambda(_n, 2*_n*I*pi - log(3)), Integers),
ImageSet(Lambda(_n, -sqrt(-exp(2*x) + sin(2*_n*I*pi - log(3)))), Integers)))
"""
from sympy import Complement
from sympy.core.compatibility import is_sequence
if not system:
return S.EmptySet
if not symbols:
msg = ('Symbols must be given, for which solution of the '
'system is to be found.')
raise ValueError(filldedent(msg))
if not is_sequence(symbols):
msg = ('symbols should be given as a sequence, e.g. a list. '
'Not type %s: %s')
raise TypeError(filldedent(msg % (type(symbols), symbols)))
if not getattr(symbols[0], 'is_Symbol', False):
msg = ('Iterable of symbols must be given as '
'second argument, not type %s: %s')
raise ValueError(filldedent(msg % (type(symbols[0]), symbols[0])))
# By default `all_symbols` will be same as `symbols`
if all_symbols is None:
all_symbols = symbols
old_result = result
# storing complements and intersection for particular symbol
complements = {}
intersections = {}
# when total_solveset_call equals total_conditionset
# it means that solveset failed to solve all eqs.
total_conditionset = -1
total_solveset_call = -1
def _unsolved_syms(eq, sort=False):
"""Returns the unsolved symbol present
in the equation `eq`.
"""
free = eq.free_symbols
unsolved = (free - set(known_symbols)) & set(all_symbols)
if sort:
unsolved = list(unsolved)
unsolved.sort(key=default_sort_key)
return unsolved
# end of _unsolved_syms()
# sort such that the equation with the fewest potential symbols is first,
# i.e. the equation with the fewest variables comes first in the list.
eqs_in_better_order = list(
ordered(system, lambda _: len(_unsolved_syms(_))))
def add_intersection_complement(result, intersection_dict, complement_dict):
# If solveset has returned some intersection/complement
# for any symbol, it will be added in the final solution.
final_result = []
for res in result:
res_copy = res
for key_res, value_res in res.items():
intersect_set, complement_set = None, None
for key_sym, value_sym in intersection_dict.items():
if key_sym == key_res:
intersect_set = value_sym
for key_sym, value_sym in complement_dict.items():
if key_sym == key_res:
complement_set = value_sym
if intersect_set or complement_set:
new_value = FiniteSet(value_res)
if intersect_set and intersect_set != S.Complexes:
new_value = Intersection(new_value, intersect_set)
if complement_set:
new_value = Complement(new_value, complement_set)
if new_value is S.EmptySet:
res_copy = None
break
elif new_value.is_FiniteSet and len(new_value) == 1:
res_copy[key_res] = set(new_value).pop()
else:
res_copy[key_res] = new_value
if res_copy is not None:
final_result.append(res_copy)
return final_result
# end of def add_intersection_complement()
def _extract_main_soln(sym, sol, soln_imageset):
"""Separate the Complements, Intersections, ImageSet lambda expr
and its base_set.
"""
# if there is union, then need to check
# Complement, Intersection, Imageset.
# Order should not be changed.
if isinstance(sol, Complement):
# extract solution and complement
complements[sym] = sol.args[1]
sol = sol.args[0]
# complement will be added at the end
# using `add_intersection_complement` method
if isinstance(sol, Intersection):
# Interval/Set will be at 0th index always
if sol.args[0] not in (S.Reals, S.Complexes):
# Sometimes solveset returns soln with intersection
# S.Reals or S.Complexes. We don't consider that
# intersection.
intersections[sym] = sol.args[0]
sol = sol.args[1]
# after intersection and complement Imageset should
# be checked.
if isinstance(sol, ImageSet):
soln_imagest = sol
expr2 = sol.lamda.expr
sol = FiniteSet(expr2)
soln_imageset[expr2] = soln_imagest
# if there is union of Imageset or other in soln.
# no testcase is written for this if block
if isinstance(sol, Union):
sol_args = sol.args
sol = S.EmptySet
# We need them in sequence, so append the FiniteSet elements
# first and then the ImageSet or other sets.
for sol_arg2 in sol_args:
if isinstance(sol_arg2, FiniteSet):
sol += sol_arg2
else:
# ImageSet, Intersection, complement then
# append them directly
sol += FiniteSet(sol_arg2)
if not isinstance(sol, FiniteSet):
sol = FiniteSet(sol)
return sol, soln_imageset
# end of def _extract_main_soln()
# helper function for _append_new_soln
def _check_exclude(rnew, imgset_yes):
rnew_ = rnew
if imgset_yes:
# replace all dummy variables (ImageSet lambda variables)
# with zero before `checksol`, i.e. consider the fundamental soln
# for `checksol`.
rnew_copy = rnew.copy()
dummy_n = imgset_yes[0]
for key_res, value_res in rnew_copy.items():
rnew_copy[key_res] = value_res.subs(dummy_n, 0)
rnew_ = rnew_copy
# satisfy_exclude == true if it satisfies the expr of `exclude` list.
try:
# something like : `Mod(-log(3), 2*I*pi)` can't be
# simplified right now, so `checksol` returns `TypeError`.
# when this issue is fixed this try block should be
# removed. Mod(-log(3), 2*I*pi) == -log(3)
satisfy_exclude = any(
checksol(d, rnew_) for d in exclude)
except TypeError:
satisfy_exclude = None
return satisfy_exclude
# end of def _check_exclude()
# helper function for _append_new_soln
def _restore_imgset(rnew, original_imageset, newresult):
restore_sym = set(rnew.keys()) & \
set(original_imageset.keys())
for key_sym in restore_sym:
img = original_imageset[key_sym]
rnew[key_sym] = img
if rnew not in newresult:
newresult.append(rnew)
# end of def _restore_imgset()
def _append_eq(eq, result, res, delete_soln, n=None):
u = Dummy('u')
if n:
eq = eq.subs(n, 0)
satisfy = checksol(u, u, eq, minimal=True)
if satisfy is False:
delete_soln = True
res = {}
else:
result.append(res)
return result, res, delete_soln
def _append_new_soln(rnew, sym, sol, imgset_yes, soln_imageset,
original_imageset, newresult, eq=None):
"""If `rnew` (A dict <symbol: soln>) contains valid soln
append it to `newresult` list.
`imgset_yes` is (base, dummy_var) if there was imageset in previously
calculated result(otherwise empty tuple). `original_imageset` is dict
of imageset expr and imageset from this result.
`soln_imageset` dict of imageset expr and imageset of new soln.
"""
satisfy_exclude = _check_exclude(rnew, imgset_yes)
delete_soln = False
# soln should not satisfy expr present in `exclude` list.
if not satisfy_exclude:
local_n = None
# if it is imageset
if imgset_yes:
local_n = imgset_yes[0]
base = imgset_yes[1]
if sym and sol:
# when `sym` and `sol` are `None` it means there is no new
# soln. In that case we will append rnew directly after
# substituting original imagesets in rnew values if present
# (second last line of this function using _restore_imgset)
dummy_list = list(sol.atoms(Dummy))
# use one dummy `n` which is in
# previous imageset
local_n_list = [
local_n for i in range(
0, len(dummy_list))]
dummy_zip = zip(dummy_list, local_n_list)
lam = Lambda(local_n, sol.subs(dummy_zip))
rnew[sym] = ImageSet(lam, base)
if eq is not None:
newresult, rnew, delete_soln = _append_eq(
eq, newresult, rnew, delete_soln, local_n)
elif eq is not None:
newresult, rnew, delete_soln = _append_eq(
eq, newresult, rnew, delete_soln)
elif soln_imageset:
rnew[sym] = soln_imageset[sol]
# restore original imageset
_restore_imgset(rnew, original_imageset, newresult)
else:
newresult.append(rnew)
elif satisfy_exclude:
delete_soln = True
rnew = {}
_restore_imgset(rnew, original_imageset, newresult)
return newresult, delete_soln
# end of def _append_new_soln()
def _new_order_result(result, eq):
# separate into first and second priority: a `res` that makes the value
# of `eq` equal to zero should be used first, then the other results
# (second priority). If this is not done we may miss some soln.
first_priority = []
second_priority = []
for res in result:
if not any(isinstance(val, ImageSet) for val in res.values()):
if eq.subs(res) == 0:
first_priority.append(res)
else:
second_priority.append(res)
if first_priority or second_priority:
return first_priority + second_priority
return result
def _solve_using_known_values(result, solver):
"""Solves the system using already known solution
(result contains the dict <symbol: value>).
solver is `solveset_complex` or `solveset_real`.
"""
# stores imageset <expr: imageset(Lambda(n, expr), base)>.
soln_imageset = {}
total_solvest_call = 0
total_conditionst = 0
# iterate over the equations, which are already ordered so that the
# equation with the fewest potential symbols (variables) comes first
for index, eq in enumerate(eqs_in_better_order):
newresult = []
original_imageset = {}
# if imageset expr is used to solve other symbol
imgset_yes = False
result = _new_order_result(result, eq)
for res in result:
got_symbol = set() # symbols solved in one iteration
if soln_imageset:
# find the imageset and use its expr.
for key_res, value_res in res.items():
if isinstance(value_res, ImageSet):
res[key_res] = value_res.lamda.expr
original_imageset[key_res] = value_res
dummy_n = value_res.lamda.expr.atoms(Dummy).pop()
(base,) = value_res.base_sets
imgset_yes = (dummy_n, base)
# update eq with everything that is known so far
eq2 = eq.subs(res).expand()
unsolved_syms = _unsolved_syms(eq2, sort=True)
if not unsolved_syms:
if res:
newresult, delete_res = _append_new_soln(
res, None, None, imgset_yes, soln_imageset,
original_imageset, newresult, eq2)
if delete_res:
# `delete_res` is true, i.e. substituting `res` in
# eq2 doesn't give zero, or the `res` (a soln) is to be
# deleted since it satisfies an expr of the `exclude`
# list.
result.remove(res)
continue # skip as it's independent of desired symbols
depen1, depen2 = (eq2.rewrite(Add)).as_independent(*unsolved_syms)
if (depen1.has(Abs) or depen2.has(Abs)) and solver == solveset_complex:
# Absolute values cannot be inverted in the
# complex domain
continue
soln_imageset = {}
for sym in unsolved_syms:
not_solvable = False
try:
soln = solver(eq2, sym)
total_solvest_call += 1
soln_new = S.EmptySet
if isinstance(soln, Complement):
# separate solution and complement
complements[sym] = soln.args[1]
soln = soln.args[0]
# complement will be added at the end
if isinstance(soln, Intersection):
# Interval will be at 0th index always
if soln.args[0] != Interval(-oo, oo):
# sometimes solveset returns soln
# with intersection S.Reals, to confirm that
# soln is in domain=S.Reals
intersections[sym] = soln.args[0]
soln_new += soln.args[1]
soln = soln_new if soln_new else soln
if index > 0 and solver == solveset_real:
# one symbol may have a real soln while another symbol
# has a corresponding complex soln.
if not isinstance(soln, (ImageSet, ConditionSet)):
soln += solveset_complex(eq2, sym)
except NotImplementedError:
# If solveset is not able to solve equation `eq2`, next
# time we may get a soln using the next equation `eq2`
continue
if isinstance(soln, ConditionSet):
soln = S.EmptySet
# don't do `continue` we may get soln
# in terms of other symbol(s)
not_solvable = True
total_conditionst += 1
if soln is not S.EmptySet:
soln, soln_imageset = _extract_main_soln(
sym, soln, soln_imageset)
for sol in soln:
# sol is not a `Union` since we checked it
# before this loop
sol, soln_imageset = _extract_main_soln(
sym, sol, soln_imageset)
sol = set(sol).pop()
free = sol.free_symbols
if got_symbol and any([
ss in free for ss in got_symbol
]):
# sol depends on previously solved symbols
# then continue
continue
rnew = res.copy()
# put each solution in res and append the new result
# in the new result list (solution for symbol `s`)
# along with old results.
for k, v in res.items():
if isinstance(v, Expr):
# if any unsolved symbol is present
# Then subs known value
rnew[k] = v.subs(sym, sol)
# and add this new solution
if soln_imageset:
# replace all lambda variables with 0.
imgst = soln_imageset[sol]
rnew[sym] = imgst.lamda(
*[0 for i in range(0, len(
imgst.lamda.variables))])
else:
rnew[sym] = sol
newresult, delete_res = _append_new_soln(
rnew, sym, sol, imgset_yes, soln_imageset,
original_imageset, newresult)
if delete_res:
# deleting the `res` (a soln) since it satisfies
# an eq of the `exclude` list
result.remove(res)
# solution got for sym
if not not_solvable:
got_symbol.add(sym)
# next time use this new soln
if newresult:
result = newresult
return result, total_solvest_call, total_conditionst
# end of def _solve_using_known_values()
new_result_real, solve_call1, cnd_call1 = _solve_using_known_values(
old_result, solveset_real)
new_result_complex, solve_call2, cnd_call2 = _solve_using_known_values(
old_result, solveset_complex)
# when `total_solveset_call` equals `total_conditionset`
# it means solveset failed to solve all the equations;
# return a conditionset in that case
total_conditionset += (cnd_call1 + cnd_call2)
total_solveset_call += (solve_call1 + solve_call2)
if total_conditionset == total_solveset_call and total_solveset_call != -1:
return _return_conditionset(eqs_in_better_order, all_symbols)
# overall result
result = new_result_real + new_result_complex
result_all_variables = []
result_infinite = []
for res in result:
if not res:
# means res is an empty dict
continue
# If length < len(all_symbols) it means an infinite soln:
# some or all of the soln depends on at least one symbol.
# e.g. {x: y+2}; then the final soln is {x: y+2, y: y}
if len(res) < len(all_symbols):
solved_symbols = res.keys()
unsolved = list(filter(
lambda x: x not in solved_symbols, all_symbols))
for unsolved_sym in unsolved:
res[unsolved_sym] = unsolved_sym
result_infinite.append(res)
if res not in result_all_variables:
result_all_variables.append(res)
if result_infinite:
# we have general soln
# eg : [{x: -1, y : 1}, {x : -y , y: y}] then
# return [{x : -y, y : y}]
result_all_variables = result_infinite
if intersections or complements:
result_all_variables = add_intersection_complement(
result_all_variables, intersections, complements)
# convert to ordered tuple
result = S.EmptySet
for r in result_all_variables:
temp = [r[symb] for symb in all_symbols]
result += FiniteSet(tuple(temp))
return result
# end of def substitution()
def _solveset_work(system, symbols):
soln = solveset(system[0], symbols[0])
if isinstance(soln, FiniteSet):
_soln = FiniteSet(*[tuple((s,)) for s in soln])
return _soln
else:
return FiniteSet(tuple(FiniteSet(soln)))
def _handle_positive_dimensional(polys, symbols, denominators):
from sympy.polys.polytools import groebner
# substitution method where the new system is the Groebner basis of the
# original system
_symbols = list(symbols)
_symbols.sort(key=default_sort_key)
basis = groebner(polys, _symbols, polys=True)
new_system = []
for poly_eq in basis:
new_system.append(poly_eq.as_expr())
result = [{}]
result = substitution(
new_system, symbols, result, [],
denominators)
return result
# end of def _handle_positive_dimensional()
def _handle_zero_dimensional(polys, symbols, system):
# solve 0 dimensional poly system using `solve_poly_system`
result = solve_poly_system(polys, *symbols)
# Some extra solutions may have been added because
# `unrad` was used in `_separate_poly_nonpoly`, so
# check each one and remove it if it is not a soln.
result_update = S.EmptySet
for res in result:
dict_sym_value = dict(list(zip(symbols, res)))
if all(checksol(eq, dict_sym_value) for eq in system):
result_update += FiniteSet(res)
return result_update
# end of def _handle_zero_dimensional()
def _separate_poly_nonpoly(system, symbols):
polys = []
polys_expr = []
nonpolys = []
denominators = set()
poly = None
for eq in system:
# Store denom expression if it contains symbol
denominators.update(_simple_dens(eq, symbols))
# try to remove sqrt and rational power
without_radicals = unrad(simplify(eq))
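# unrad returns (expr, cov) when it can remove radicals; a nonempty cov
# records a change of variables that is not undone here, so the
# unradicalized form is only used when cov is empty.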
if without_radicals:
eq_unrad, cov = without_radicals
if not cov:
eq = eq_unrad
if isinstance(eq, Expr):
eq = eq.as_numer_denom()[0]
poly = eq.as_poly(*symbols, extension=True)
elif simplify(eq).is_number:
continue
if poly is not None:
polys.append(poly)
polys_expr.append(poly.as_expr())
else:
nonpolys.append(eq)
return polys, polys_expr, nonpolys, denominators
# end of def _separate_poly_nonpoly()
def nonlinsolve(system, *symbols):
r"""
Solve a system of N nonlinear equations with M variables, which means both
under- and overdetermined systems are supported. Positive dimensional
systems are also supported (a system with infinitely many solutions is said
to be positive-dimensional). In a positive dimensional system the solution
will depend on at least one symbol. Returns both real and complex solutions
(if the system has any). The possible number of solutions is zero, one or
infinitely many.
Parameters
==========
system : list of equations
The target system of equations
symbols : list of Symbols
symbols should be given as a sequence eg. list
Returns
=======
A FiniteSet of ordered tuples of values of `symbols` for which the `system`
has a solution. The order of values in each tuple matches the order of the
symbols given in the parameter `symbols`.
Please note that a general FiniteSet is unordered; the solution returned
here is not simply a FiniteSet of solutions, rather it is a FiniteSet of
ordered tuples, i.e. the first and only argument to FiniteSet is a tuple of
solutions, which is ordered, and hence the returned solution is ordered.
Also note that the solution could have been returned as just an ordered
tuple; FiniteSet is only a wrapper `{}` around the tuple. It has no other
significance except that it is used to maintain a consistent output format
throughout solveset.
For the given set of Equations, the respective input types
are given below:
.. math:: x*y - 1 = 0
.. math:: 4*x**2 + y**2 - 5 = 0
`system = [x*y - 1, 4*x**2 + y**2 - 5]`
`symbols = [x, y]`
Raises
======
ValueError
The input is not valid.
The symbols are not given.
AttributeError
The input symbols are not `Symbol` type.
Examples
========
>>> from sympy.core.symbol import symbols
>>> from sympy.solvers.solveset import nonlinsolve
>>> x, y, z = symbols('x, y, z', real=True)
>>> nonlinsolve([x*y - 1, 4*x**2 + y**2 - 5], [x, y])
FiniteSet((-1, -1), (-1/2, -2), (1/2, 2), (1, 1))
1. Positive dimensional system and complements:
>>> from sympy import pprint
>>> from sympy.polys.polytools import is_zero_dimensional
>>> a, b, c, d = symbols('a, b, c, d', extended_real=True)
>>> eq1 = a + b + c + d
>>> eq2 = a*b + b*c + c*d + d*a
>>> eq3 = a*b*c + b*c*d + c*d*a + d*a*b
>>> eq4 = a*b*c*d - 1
>>> system = [eq1, eq2, eq3, eq4]
>>> is_zero_dimensional(system)
False
>>> pprint(nonlinsolve(system, [a, b, c, d]), use_unicode=False)
-1 1 1 -1
{(---, -d, -, {d} \ {0}), (-, -d, ---, {d} \ {0})}
d d d d
>>> nonlinsolve([(x+y)**2 - 4, x + y - 2], [x, y])
FiniteSet((2 - y, y))
2. If some of the equations are non-polynomial then `nonlinsolve`
will call the `substitution` function and return real and complex solutions,
if present.
>>> from sympy import exp, sin
>>> nonlinsolve([exp(x) - sin(y), y**2 - 4], [x, y])
FiniteSet((ImageSet(Lambda(_n, 2*_n*I*pi + log(sin(2))), Integers), 2),
(ImageSet(Lambda(_n, I*(2*_n*pi + pi) + log(sin(2))), Integers), -2))
3. If the system is nonlinear, polynomial and zero-dimensional, then it
returns both real and complex solutions (if present) using
`solve_poly_system`:
>>> from sympy import sqrt
>>> nonlinsolve([x**2 - 2*y**2 -2, x*y - 2], [x, y])
FiniteSet((-2, -1), (2, 1), (-sqrt(2)*I, sqrt(2)*I), (sqrt(2)*I, -sqrt(2)*I))
4. `nonlinsolve` can solve some linear (zero or positive dimensional)
systems (because it uses the `groebner` function to get the Groebner
basis and then uses the `substitution` function with that basis as the
new `system`). But it is not recommended to solve linear systems with
`nonlinsolve`, because `linsolve` is better for general linear systems.
>>> nonlinsolve([x + 2*y -z - 3, x - y - 4*z + 9 , y + z - 4], [x, y, z])
FiniteSet((3*z - 5, 4 - z, z))
5. A system of polynomial equations having only real solutions is
solved using `solve_poly_system`:
>>> e1 = sqrt(x**2 + y**2) - 10
>>> e2 = sqrt(y**2 + (-x + 10)**2) - 3
>>> nonlinsolve((e1, e2), (x, y))
FiniteSet((191/20, -3*sqrt(391)/20), (191/20, 3*sqrt(391)/20))
>>> nonlinsolve([x**2 + 2/y - 2, x + y - 3], [x, y])
FiniteSet((1, 2), (1 - sqrt(5), 2 + sqrt(5)), (1 + sqrt(5), 2 - sqrt(5)))
>>> nonlinsolve([x**2 + 2/y - 2, x + y - 3], [y, x])
FiniteSet((2, 1), (2 - sqrt(5), 1 + sqrt(5)), (2 + sqrt(5), 1 - sqrt(5)))
6. It is better to use symbols instead of trigonometric functions or
undefined functions (e.g. replace `sin(x)` with a symbol, replace `f(x)`
with a symbol and so on). Get the solution from `nonlinsolve` and then use
`solveset` to recover the value of `x`; a minimal sketch of this workflow
is shown below.
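(The auxiliary symbol `t` standing in for `sin(x)` below is only an
illustrative assumption, not part of the public API.)::
    t = Symbol('t', real=True)
    nonlinsolve([t**2 + y - 3, t - y + 1], [t, y])
    # expected {(-2, -1), (1, 2)}; afterwards solve sin(x) - t0 with
    # solveset for each value t0 of t to recover x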
How nonlinsolve is better than old solver `_solve_system` :
===========================================================
1. A positive dimensional system solver: nonlinsolve can return a
solution for a positive dimensional system. It finds the Groebner basis of
the positive dimensional system (calling it `basis`), then starts solving
the equation of the basis with the fewest variables using solveset, and
substitutes the already solved values into the other equations of the
basis to obtain a solution in terms of a minimal set of variables. Here
the important points are which known values are substituted and into
which equations; a rough sketch of this idea is shown below.
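A rough sketch of this idea with the public `groebner` function
(illustrative only; `nonlinsolve` performs these steps internally)::
    from sympy import groebner
    basis = groebner([x*y - 1, 4*x**2 + y**2 - 5], [x, y], order='lex')
    # solve the basis element with the fewest variables first, then
    # substitute each solution into the remaining basis elements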
2. Both real and complex solutions: nonlinsolve returns both real
and complex solutions. If all the equations in the system are polynomial,
then both real and complex solutions are returned using `solve_poly_system`.
If not all the equations are polynomial, the `substitution` method is used
with the polynomial and non-polynomial equation(s) to solve for the
unsolved variables. Here, solveset_real and solveset_complex are used to
solve for a particular variable; for both real and complex solutions the
function `_solve_using_known_values` is used inside the `substitution`
function (`substitution` is called whenever any non-polynomial equation is
present). When a solution is valid, its general solution is added to the
final result.
3. Complements and Intersections are added if any: nonlinsolve maintains
dicts for complements and intersections. If solveset finds a complement
and/or an intersection with any interval or set during the execution of the
`substitution` function, then the complement and/or intersection for that
variable is added before the final solution is returned.
"""
from sympy.polys.polytools import is_zero_dimensional
if not system:
return S.EmptySet
if not symbols:
msg = ('Symbols must be given, for which solution of the '
'system is to be found.')
raise ValueError(filldedent(msg))
if hasattr(symbols[0], '__iter__'):
symbols = symbols[0]
if not is_sequence(symbols) or not symbols:
msg = ('Symbols must be given, for which solution of the '
'system is to be found.')
raise IndexError(filldedent(msg))
system, symbols, swap = recast_to_symbols(system, symbols)
if swap:
soln = nonlinsolve(system, symbols)
return FiniteSet(*[tuple(i.xreplace(swap) for i in s) for s in soln])
if len(system) == 1 and len(symbols) == 1:
return _solveset_work(system, symbols)
# main code of def nonlinsolve() starts from here
polys, polys_expr, nonpolys, denominators = _separate_poly_nonpoly(
system, symbols)
if len(symbols) == len(polys):
# If all the equations in the system are poly
if is_zero_dimensional(polys, symbols):
# finite number of soln (Zero dimensional system)
try:
return _handle_zero_dimensional(polys, symbols, system)
except NotImplementedError:
# Right now it doesn't fail for any polynomial system of
# equations. If `solve_poly_system` fails then the `substitution`
# method will handle it.
result = substitution(
polys_expr, symbols, exclude=denominators)
return result
# positive dimensional system
res = _handle_positive_dimensional(polys, symbols, denominators)
if res is EmptySet and any(not p.domain.is_Exact for p in polys):
raise NotImplementedError("Equation not in exact domain. Try converting to rational")
else:
return res
else:
# If all the equations are not polynomial.
# Use `substitution` method for the system
result = substitution(
polys_expr + nonpolys, symbols, exclude=denominators)
return result
|
e550a4452e5c95ebe56244e1588b39c16741db74e8a21a9287ce29fd39236fcb | """
This module contains pdsolve() and different helper functions that it
uses. It is heavily inspired by the ode module and hence the basic
infrastructure remains the same.
**Functions in this module**
These are the user functions in this module:
- pdsolve() - Solves PDE's
- classify_pde() - Classifies PDEs into possible hints for pdsolve().
- pde_separate() - Separate variables in partial differential equation either by
additive or multiplicative separation approach.
These are the helper functions in this module:
- pde_separate_add() - Helper function for searching additive separable solutions.
- pde_separate_mul() - Helper function for searching multiplicative
separable solutions.
**Currently implemented solver methods**
The following methods are implemented for solving partial differential
equations. See the docstrings of the various pde_hint() functions for
more information on each (run help(pde)):
- 1st order linear homogeneous partial differential equations
with constant coefficients.
- 1st order linear general partial differential equations
with constant coefficients.
- 1st order linear partial differential equations with
variable coefficients.
"""
from itertools import combinations_with_replacement
from sympy.simplify import simplify # type: ignore
from sympy.core import Add, S
from sympy.core.compatibility import reduce, is_sequence
from sympy.core.function import Function, expand, AppliedUndef, Subs
from sympy.core.relational import Equality, Eq
from sympy.core.symbol import Symbol, Wild, symbols
from sympy.functions import exp
from sympy.integrals.integrals import Integral
from sympy.utilities.iterables import has_dups
from sympy.utilities.misc import filldedent
from sympy.solvers.deutils import _preprocess, ode_order, _desolve
from sympy.solvers.solvers import solve
from sympy.simplify.radsimp import collect
import operator
allhints = (
"1st_linear_constant_coeff_homogeneous",
"1st_linear_constant_coeff",
"1st_linear_constant_coeff_Integral",
"1st_linear_variable_coeff"
)
def pdsolve(eq, func=None, hint='default', dict=False, solvefun=None, **kwargs):
"""
Solves any (supported) kind of partial differential equation.
**Usage**
pdsolve(eq, f(x,y), hint) -> Solve partial differential equation
eq for function f(x,y), using method hint.
**Details**
``eq`` can be any supported partial differential equation (see
the pde docstring for supported methods). This can either
be an Equality, or an expression, which is assumed to be
equal to 0.
``f(x,y)`` is a function of two variables whose derivatives in that
variable make up the partial differential equation. In many
cases it is not necessary to provide this; it will be autodetected
(and an error raised if it couldn't be detected).
``hint`` is the solving method that you want pdsolve to use. Use
classify_pde(eq, f(x,y)) to get all of the possible hints for
a PDE. The default hint, 'default', will use whatever hint
is returned first by classify_pde(). See Hints below for
more options that you can use for hint.
``solvefun`` is the convention used for arbitrary functions returned
by the PDE solver. If not set by the user, it is set by default
to be F.
**Hints**
Aside from the various solving methods, there are also some
meta-hints that you can pass to pdsolve():
"default":
This uses whatever hint is returned first by
classify_pde(). This is the default argument to
pdsolve().
"all":
To make pdsolve apply all relevant classification hints,
use pdsolve(PDE, func, hint="all"). This will return a
dictionary of hint:solution terms. If a hint causes
pdsolve to raise the NotImplementedError, value of that
hint's key will be the exception object raised. The
dictionary will also include some special keys:
- order: The order of the PDE. See also ode_order() in
deutils.py
- default: The solution that would be returned by
default. This is the one produced by the hint that
appears first in the tuple returned by classify_pde().
"all_Integral":
This is the same as "all", except if a hint also has a
corresponding "_Integral" hint, it only returns the
"_Integral" hint. This is useful if "all" causes
pdsolve() to hang because of a difficult or impossible
integral. This meta-hint will also be much faster than
"all", because integrate() is an expensive routine.
See also the classify_pde() docstring for more info on hints,
and the pde docstring for a list of all supported hints.
**Tips**
- You can declare the derivative of an unknown function this way:
>>> from sympy import Function, Derivative
>>> from sympy.abc import x, y # x and y are the independent variables
>>> f = Function("f")(x, y) # f is a function of x and y
>>> # fx will be the partial derivative of f with respect to x
>>> fx = Derivative(f, x)
>>> # fy will be the partial derivative of f with respect to y
>>> fy = Derivative(f, y)
- See test_pde.py for many tests, which serves also as a set of
examples for how to use pdsolve().
- pdsolve always returns an Equality class (except for the case
when the hint is "all" or "all_Integral"). Note that it is not possible
to get an explicit solution for f(x, y) as in the case of ODE's
- Do help(pde.pde_hintname) to get more information on a
specific hint
Examples
========
>>> from sympy.solvers.pde import pdsolve
>>> from sympy import Function, Eq
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> u = f(x, y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)), 0)
>>> pdsolve(eq)
Eq(f(x, y), F(3*x - 2*y)*exp(-2*x/13 - 3*y/13))
"""
if not solvefun:
solvefun = Function('F')
# See the docstring of _desolve for more details.
hints = _desolve(eq, func=func, hint=hint, simplify=True,
type='pde', **kwargs)
eq = hints.pop('eq', False)
all_ = hints.pop('all', False)
if all_:
# TODO : 'best' hint should be implemented when adequate
# number of hints are added.
pdedict = {}
failed_hints = {}
gethints = classify_pde(eq, dict=True)
pdedict.update({'order': gethints['order'],
'default': gethints['default']})
for hint in hints:
try:
rv = _helper_simplify(eq, hint, hints[hint]['func'],
hints[hint]['order'], hints[hint][hint], solvefun)
except NotImplementedError as detail:
failed_hints[hint] = detail
else:
pdedict[hint] = rv
pdedict.update(failed_hints)
return pdedict
else:
return _helper_simplify(eq, hints['hint'], hints['func'],
hints['order'], hints[hints['hint']], solvefun)
def _helper_simplify(eq, hint, func, order, match, solvefun):
"""Helper function of pdsolve that calls the respective
pde functions to solve for the partial differential
equations. This minimizes the computation in
calling _desolve multiple times.
"""
if hint.endswith("_Integral"):
solvefunc = globals()[
"pde_" + hint[:-len("_Integral")]]
else:
solvefunc = globals()["pde_" + hint]
return _handle_Integral(solvefunc(eq, func, order,
match, solvefun), func, order, hint)
def _handle_Integral(expr, func, order, hint):
r"""
Converts a solution with integrals in it into an actual solution.
Simplifies the integral mainly using doit()
"""
if hint.endswith("_Integral"):
return expr
elif hint == "1st_linear_constant_coeff":
return simplify(expr.doit())
else:
return expr
def classify_pde(eq, func=None, dict=False, *, prep=True, **kwargs):
"""
Returns a tuple of possible pdsolve() classifications for a PDE.
The tuple is ordered so that first item is the classification that
pdsolve() uses to solve the PDE by default. In general,
classifications near the beginning of the list will produce
better solutions faster than those near the end, though there are
always exceptions. To make pdsolve use a different classification,
use pdsolve(PDE, func, hint=<classification>). See also the pdsolve()
docstring for different meta-hints you can use.
If ``dict`` is true, classify_pde() will return a dictionary of
hint:match expression terms. This is intended for internal use by
pdsolve(). Note that because dictionaries are ordered arbitrarily,
this will most likely not be in the same order as the tuple.
You can get help on different hints by doing help(pde.pde_hintname),
where hintname is the name of the hint without "_Integral".
See sympy.pde.allhints or the sympy.pde docstring for a list of all
supported hints that can be returned from classify_pde.
Examples
========
>>> from sympy.solvers.pde import classify_pde
>>> from sympy import Function, Eq
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> u = f(x, y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)), 0)
>>> classify_pde(eq)
('1st_linear_constant_coeff_homogeneous',)
"""
if func and len(func.args) != 2:
raise NotImplementedError("Right now only partial "
"differential equations of two variables are supported")
if prep or func is None:
prep, func_ = _preprocess(eq, func)
if func is None:
func = func_
if isinstance(eq, Equality):
if eq.rhs != 0:
return classify_pde(eq.lhs - eq.rhs, func)
eq = eq.lhs
f = func.func
x = func.args[0]
y = func.args[1]
fx = f(x,y).diff(x)
fy = f(x,y).diff(y)
# TODO : For now pde.py uses support offered by the ode_order function
# to find the order with respect to a multi-variable function. An
# improvement could be to classify the order of the PDE on the basis of
# individual variables.
order = ode_order(eq, f(x,y))
# hint:matchdict or hint:(tuple of matchdicts)
# Also will contain "default":<default hint> and "order":order items.
matching_hints = {'order': order}
if not order:
if dict:
matching_hints["default"] = None
return matching_hints
else:
return ()
eq = expand(eq)
a = Wild('a', exclude = [f(x,y)])
b = Wild('b', exclude = [f(x,y), fx, fy, x, y])
c = Wild('c', exclude = [f(x,y), fx, fy, x, y])
d = Wild('d', exclude = [f(x,y), fx, fy, x, y])
e = Wild('e', exclude = [f(x,y), fx, fy])
n = Wild('n', exclude = [x, y])
# Try removing the smallest power of f(x,y)
# from the highest partial derivatives of f(x,y)
reduced_eq = None
if eq.is_Add:
var = set(combinations_with_replacement((x,y), order))
dummyvar = var.copy()
power = None
for i in var:
coeff = eq.coeff(f(x,y).diff(*i))
if coeff != 1:
match = coeff.match(a*f(x,y)**n)
if match and match[a]:
power = match[n]
dummyvar.remove(i)
break
dummyvar.remove(i)
for i in dummyvar:
coeff = eq.coeff(f(x,y).diff(*i))
if coeff != 1:
match = coeff.match(a*f(x,y)**n)
if match and match[a] and match[n] < power:
power = match[n]
if power:
den = f(x,y)**power
reduced_eq = Add(*[arg/den for arg in eq.args])
if not reduced_eq:
reduced_eq = eq
if order == 1:
reduced_eq = collect(reduced_eq, f(x, y))
r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
if r:
if not r[e]:
## Linear first-order homogeneous partial-differential
## equation with constant coefficients
r.update({'b': b, 'c': c, 'd': d})
matching_hints["1st_linear_constant_coeff_homogeneous"] = r
else:
if r[b]**2 + r[c]**2 != 0:
## Linear first-order general partial-differential
## equation with constant coefficients
r.update({'b': b, 'c': c, 'd': d, 'e': e})
matching_hints["1st_linear_constant_coeff"] = r
matching_hints[
"1st_linear_constant_coeff_Integral"] = r
else:
b = Wild('b', exclude=[f(x, y), fx, fy])
c = Wild('c', exclude=[f(x, y), fx, fy])
d = Wild('d', exclude=[f(x, y), fx, fy])
r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
if r:
r.update({'b': b, 'c': c, 'd': d, 'e': e})
matching_hints["1st_linear_variable_coeff"] = r
# Order keys based on allhints.
retlist = []
for i in allhints:
if i in matching_hints:
retlist.append(i)
if dict:
# Dictionaries are ordered arbitrarily, so make note of which
# hint would come first for pdsolve(). Use an ordered dict in Py 3.
matching_hints["default"] = None
matching_hints["ordered_hints"] = tuple(retlist)
for i in allhints:
if i in matching_hints:
matching_hints["default"] = i
break
return matching_hints
else:
return tuple(retlist)
def checkpdesol(pde, sol, func=None, solve_for_func=True):
"""
Checks if the given solution satisfies the partial differential
equation.
pde is the partial differential equation which can be given in the
form of an equation or an expression. sol is the solution for which
the pde is to be checked. This can also be given in an equation or
an expression form. If the function is not provided, the helper
function _preprocess from deutils is used to identify the function.
If a sequence of solutions is passed, the same sort of container will be
used to return the result for each solution.
The following methods are currently being implemented to check if the
solution satisfies the PDE:
1. Directly substitute the solution in the PDE and check. If the
solution hasn't been solved for f, then it will solve for f
provided solve_for_func hasn't been set to False.
If the solution satisfies the PDE, then a tuple (True, 0) is returned.
Otherwise a tuple (False, expr) where expr is the value obtained
after substituting the solution in the PDE. However if a known solution
returns False, it may be due to the inability of doit() to simplify it to zero.
Examples
========
>>> from sympy import Function, symbols
>>> from sympy.solvers.pde import checkpdesol, pdsolve
>>> x, y = symbols('x y')
>>> f = Function('f')
>>> eq = 2*f(x,y) + 3*f(x,y).diff(x) + 4*f(x,y).diff(y)
>>> sol = pdsolve(eq)
>>> assert checkpdesol(eq, sol)[0]
>>> eq = x*f(x,y) + f(x,y).diff(x)
>>> checkpdesol(eq, sol)
(False, (x*F(4*x - 3*y) - 6*F(4*x - 3*y)/25 + 4*Subs(Derivative(F(_xi_1), _xi_1), _xi_1, 4*x - 3*y))*exp(-6*x/25 - 8*y/25))
"""
# Converting the pde into an equation
if not isinstance(pde, Equality):
pde = Eq(pde, 0)
# If no function is given, try finding the function present.
if func is None:
try:
_, func = _preprocess(pde.lhs)
except ValueError:
funcs = [s.atoms(AppliedUndef) for s in (
sol if is_sequence(sol, set) else [sol])]
funcs = set().union(funcs)
if len(funcs) != 1:
raise ValueError(
'must pass func arg to checkpdesol for this case.')
func = funcs.pop()
# If the given solution is in the form of a list or a set
# then return a list or set of tuples.
if is_sequence(sol, set):
return type(sol)([checkpdesol(
pde, i, func=func,
solve_for_func=solve_for_func) for i in sol])
# Convert solution into an equation
if not isinstance(sol, Equality):
sol = Eq(func, sol)
elif sol.rhs == func:
sol = sol.reversed
# Try solving for the function
solved = sol.lhs == func and not sol.rhs.has(func)
if solve_for_func and not solved:
solved = solve(sol, func)
if solved:
if len(solved) == 1:
return checkpdesol(pde, Eq(func, solved[0]),
func=func, solve_for_func=False)
else:
return checkpdesol(pde, [Eq(func, t) for t in solved],
func=func, solve_for_func=False)
# try direct substitution of the solution into the PDE and simplify
if sol.lhs == func:
pde = pde.lhs - pde.rhs
s = simplify(pde.subs(func, sol.rhs).doit())
return s is S.Zero, s
raise NotImplementedError(filldedent('''
Unable to test if %s is a solution to %s.''' % (sol, pde)))
def pde_1st_linear_constant_coeff_homogeneous(eq, func, order, match, solvefun):
r"""
Solves a first order linear homogeneous
partial differential equation with constant coefficients.
The general form of this partial differential equation is
.. math:: a \frac{\partial f(x,y)}{\partial x}
+ b \frac{\partial f(x,y)}{\partial y} + c f(x,y) = 0
where `a`, `b` and `c` are constants.
The general solution is of the form:
.. math::
f(x, y) = F(- a y + b x ) e^{- \frac{c (a x + b y)}{a^2 + b^2}}
and can be found in SymPy with ``pdsolve``::
>>> from sympy.solvers import pdsolve
>>> from sympy.abc import x, y, a, b, c
>>> from sympy import Function, pprint
>>> f = Function('f')
>>> u = f(x,y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> genform = a*ux + b*uy + c*u
>>> pprint(genform)
d d
a*--(f(x, y)) + b*--(f(x, y)) + c*f(x, y)
dx dy
>>> pprint(pdsolve(genform))
-c*(a*x + b*y)
---------------
2 2
a + b
f(x, y) = F(-a*y + b*x)*e
Examples
========
>>> from sympy import pdsolve
>>> from sympy import Function, pprint
>>> from sympy.abc import x,y
>>> f = Function('f')
>>> pdsolve(f(x,y) + f(x,y).diff(x) + f(x,y).diff(y))
Eq(f(x, y), F(x - y)*exp(-x/2 - y/2))
>>> pprint(pdsolve(f(x,y) + f(x,y).diff(x) + f(x,y).diff(y)))
x y
- - - -
2 2
f(x, y) = F(x - y)*e
References
==========
- Viktor Grigoryan, "Partial Differential Equations"
Math 124A - Fall 2010, pp.7
"""
# TODO : For now homogeneous first order linear PDE's having
# two variables are implemented. Once there is support for
# solving systems of ODE's, this can be extended to n variables.
f = func.func
x = func.args[0]
y = func.args[1]
b = match[match['b']]
c = match[match['c']]
d = match[match['d']]
return Eq(f(x,y), exp(-S(d)/(b**2 + c**2)*(b*x + c*y))*solvefun(c*x - b*y))
def pde_1st_linear_constant_coeff(eq, func, order, match, solvefun):
r"""
Solves a first order linear partial differential equation
with constant coefficients.
The general form of this partial differential equation is
.. math:: a \frac{\partial f(x,y)}{\partial x}
+ b \frac{\partial f(x,y)}{\partial y}
+ c f(x,y) = G(x,y)
where `a`, `b` and `c` are constants and `G(x, y)` can be an arbitrary
function in `x` and `y`.
The general solution of the PDE is:
.. math::
f(x, y) = \left. \left[F(\eta) + \frac{1}{a^2 + b^2}
\int\limits^{a x + b y} G\left(\frac{a \xi + b \eta}{a^2 + b^2},
\frac{- a \eta + b \xi}{a^2 + b^2} \right)
e^{\frac{c \xi}{a^2 + b^2}}\, d\xi\right]
e^{- \frac{c \xi}{a^2 + b^2}}
\right|_{\substack{\eta=- a y + b x\\ \xi=a x + b y }}\, ,
where `F(\eta)` is an arbitrary single-valued function. The solution
can be found in SymPy with ``pdsolve``::
>>> from sympy.solvers import pdsolve
>>> from sympy.abc import x, y, a, b, c
>>> from sympy import Function, pprint
>>> f = Function('f')
>>> G = Function('G')
>>> u = f(x,y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> genform = a*ux + b*uy + c*u - G(x,y)
>>> pprint(genform)
d d
a*--(f(x, y)) + b*--(f(x, y)) + c*f(x, y) - G(x, y)
dx dy
>>> pprint(pdsolve(genform, hint='1st_linear_constant_coeff_Integral'))
// a*x + b*y \
|| / |
|| | |
|| | c*xi |
|| | ------- |
|| | 2 2 |
|| | /a*xi + b*eta -a*eta + b*xi\ a + b |
|| | G|------------, -------------|*e d(xi)|
|| | | 2 2 2 2 | |
|| | \ a + b a + b / |
|| | |
|| / |
|| |
f(x, y) = ||F(eta) + -------------------------------------------------------|*
|| 2 2 |
\\ a + b /
<BLANKLINE>
\|
||
||
||
||
||
||
||
||
-c*xi ||
-------||
2 2||
a + b ||
e ||
||
/|eta=-a*y + b*x, xi=a*x + b*y
Examples
========
>>> from sympy.solvers.pde import pdsolve
>>> from sympy import Function, pprint, exp
>>> from sympy.abc import x,y
>>> f = Function('f')
>>> eq = -2*f(x,y).diff(x) + 4*f(x,y).diff(y) + 5*f(x,y) - exp(x + 3*y)
>>> pdsolve(eq)
Eq(f(x, y), (F(4*x + 2*y) + exp(x/2 + 4*y)/15)*exp(x/2 - y))
References
==========
- Viktor Grigoryan, "Partial Differential Equations"
Math 124A - Fall 2010, pp.7
"""
# TODO : For now homogeneous first order linear PDE's having
# two variables are implemented. Once there is support for
# solving systems of ODE's, this can be extended to n variables.
xi, eta = symbols("xi eta")
f = func.func
x = func.args[0]
y = func.args[1]
b = match[match['b']]
c = match[match['c']]
d = match[match['d']]
e = -match[match['e']]
expterm = exp(-S(d)/(b**2 + c**2)*xi)
functerm = solvefun(eta)
solvedict = solve((b*x + c*y - xi, c*x - b*y - eta), x, y)
# Integral should remain as it is in terms of xi,
# doit() should be done in _handle_Integral.
genterm = (1/S(b**2 + c**2))*Integral(
(1/expterm*e).subs(solvedict), (xi, b*x + c*y))
return Eq(f(x,y), Subs(expterm*(functerm + genterm),
(eta, xi), (c*x - b*y, b*x + c*y)))
def pde_1st_linear_variable_coeff(eq, func, order, match, solvefun):
r"""
Solves a first order linear partial differential equation
with variable coefficients. The general form of this partial
differential equation is
.. math:: a(x, y) \frac{\partial f(x, y)}{\partial x}
+ b(x, y) \frac{\partial f(x, y)}{\partial y}
+ c(x, y) f(x, y) = G(x, y)
where `a(x, y)`, `b(x, y)`, `c(x, y)` and `G(x, y)` are arbitrary
functions in `x` and `y`. This PDE is converted into an ODE by
making the following transformation:
1. `\xi` as `x`
2. `\eta` as the constant in the solution to the differential
equation `\frac{dy}{dx} = \frac{b}{a}`
Making the previous substitutions reduces it to the linear ODE
.. math:: a(\xi, \eta)\frac{du}{d\xi} + c(\xi, \eta)u - G(\xi, \eta) = 0
which can be solved using ``dsolve``.
>>> from sympy.abc import x, y
>>> from sympy import Function, pprint
>>> a, b, c, G, f= [Function(i) for i in ['a', 'b', 'c', 'G', 'f']]
>>> u = f(x,y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> genform = a(x, y)*u + b(x, y)*ux + c(x, y)*uy - G(x,y)
>>> pprint(genform)
d d
-G(x, y) + a(x, y)*f(x, y) + b(x, y)*--(f(x, y)) + c(x, y)*--(f(x, y))
dx dy
Examples
========
>>> from sympy.solvers.pde import pdsolve
>>> from sympy import Function, pprint
>>> from sympy.abc import x,y
>>> f = Function('f')
>>> u = f(x, y)
>>> eq = x*(u.diff(x)) - y*(u.diff(y)) + y**2*u - y**2
>>> pdsolve(eq)
Eq(f(x, y), F(x*y)*exp(y**2/2) + 1)
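In this example the characteristic ODE is `dy/dx = -y/x` (the ratio of the
y- and x-derivative coefficients), whose general solution `y = C/x` gives
`eta = x*y`; this is why the arbitrary function appears as `F(x*y)` in the
result.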
References
==========
- Viktor Grigoryan, "Partial Differential Equations"
Math 124A - Fall 2010, pp.7
"""
from sympy.integrals.integrals import integrate
from sympy.solvers.ode import dsolve
xi, eta = symbols("xi eta")
f = func.func
x = func.args[0]
y = func.args[1]
b = match[match['b']]
c = match[match['c']]
d = match[match['d']]
e = -match[match['e']]
if not d:
# To deal with cases like b*ux = e or c*uy = e
if not (b and c):
if c:
try:
tsol = integrate(e/c, y)
except NotImplementedError:
raise NotImplementedError("Unable to find a solution"
" due to inability of integrate")
else:
return Eq(f(x,y), solvefun(x) + tsol)
if b:
try:
tsol = integrate(e/b, x)
except NotImplementedError:
raise NotImplementedError("Unable to find a solution"
" due to inability of integrate")
else:
return Eq(f(x,y), solvefun(y) + tsol)
if not c:
# To deal with cases when c is 0, a simpler method is used.
# The PDE reduces to b*(u.diff(x)) + d*u = e, which is a linear ODE in x
plode = f(x).diff(x)*b + d*f(x) - e
sol = dsolve(plode, f(x))
syms = sol.free_symbols - plode.free_symbols - {x, y}
rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, y)
return Eq(f(x, y), rhs)
if not b:
# To deal with cases when b is 0, a simpler method is used.
# The PDE reduces to c*(u.diff(y)) + d*u = e, which is a linear ODE in y
plode = f(y).diff(y)*c + d*f(y) - e
sol = dsolve(plode, f(y))
syms = sol.free_symbols - plode.free_symbols - {x, y}
rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, x)
return Eq(f(x, y), rhs)
dummy = Function('d')
h = (c/b).subs(y, dummy(x))
sol = dsolve(dummy(x).diff(x) - h, dummy(x))
if isinstance(sol, list):
sol = sol[0]
solsym = sol.free_symbols - h.free_symbols - {x, y}
if len(solsym) == 1:
solsym = solsym.pop()
etat = (solve(sol, solsym)[0]).subs(dummy(x), y)
ysub = solve(eta - etat, y)[0]
deq = (b*(f(x).diff(x)) + d*f(x) - e).subs(y, ysub)
final = (dsolve(deq, f(x), hint='1st_linear')).rhs
if isinstance(final, list):
final = final[0]
finsyms = final.free_symbols - deq.free_symbols - {x, y}
rhs = _simplify_variable_coeff(final, finsyms, solvefun, etat)
return Eq(f(x, y), rhs)
else:
raise NotImplementedError("Cannot solve the partial differential equation due"
" to inability of constantsimp")
def _simplify_variable_coeff(sol, syms, func, funcarg):
r"""
Helper function to replace constants by functions in 1st_linear_variable_coeff
"""
eta = Symbol("eta")
if len(syms) == 1:
sym = syms.pop()
final = sol.subs(sym, func(funcarg))
else:
for key, sym in enumerate(syms):
final = sol.subs(sym, func(funcarg))
return simplify(final.subs(eta, funcarg))
def pde_separate(eq, fun, sep, strategy='mul'):
"""Separate variables in partial differential equation either by additive
or multiplicative separation approach. It tries to rewrite an equation so
that one of the specified variables occurs on a different side of the
equation than the others.
:param eq: Partial differential equation
:param fun: Original function F(x, y, z)
:param sep: List of separated functions [X(x), u(y, z)]
:param strategy: Separation strategy. You can choose between additive
separation ('add') and multiplicative separation ('mul') which is
default.
Examples
========
>>> from sympy import E, Eq, Function, pde_separate, Derivative as D
>>> from sympy.abc import x, t
>>> u, X, T = map(Function, 'uXT')
>>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t))
>>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='add')
[exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)]
>>> eq = Eq(D(u(x, t), x, 2), D(u(x, t), t, 2))
>>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='mul')
[Derivative(X(x), (x, 2))/X(x), Derivative(T(t), (t, 2))/T(t)]
See Also
========
pde_separate_add, pde_separate_mul
"""
do_add = False
if strategy == 'add':
do_add = True
elif strategy == 'mul':
do_add = False
else:
raise ValueError('Unknown strategy: %s' % strategy)
if isinstance(eq, Equality):
if eq.rhs != 0:
return pde_separate(Eq(eq.lhs - eq.rhs, 0), fun, sep, strategy)
else:
return pde_separate(Eq(eq, 0), fun, sep, strategy)
if eq.rhs != 0:
raise ValueError("Value should be 0")
# Handle arguments
orig_args = list(fun.args)
subs_args = []
for s in sep:
for j in range(0, len(s.args)):
subs_args.append(s.args[j])
if do_add:
functions = reduce(operator.add, sep)
else:
functions = reduce(operator.mul, sep)
# Check whether variables match
if len(subs_args) != len(orig_args):
raise ValueError("Variable counts do not match")
# Check for duplicate arguments like [X(x), u(x, y)]
if has_dups(subs_args):
raise ValueError("Duplicate substitution arguments detected")
# Check whether the variables match
if set(orig_args) != set(subs_args):
raise ValueError("Arguments do not match")
# Substitute original function with separated...
result = eq.lhs.subs(fun, functions).doit()
# Divide by terms when doing multiplicative separation
if not do_add:
eq = 0
for i in result.args:
eq += i/functions
result = eq
svar = subs_args[0]
dvar = subs_args[1:]
return _separate(result, svar, dvar)
def pde_separate_add(eq, fun, sep):
"""
Helper function for searching additive separable solutions.
Consider an equation with independent variables x, y, z and a dependent
variable w; we look for the sum of two functions depending on different
arguments:
`w(x, y, z) = X(x) + u(y, z)`
Examples
========
>>> from sympy import E, Eq, Function, pde_separate_add, Derivative as D
>>> from sympy.abc import x, t
>>> u, X, T = map(Function, 'uXT')
>>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t))
>>> pde_separate_add(eq, u(x, t), [X(x), T(t)])
[exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)]
"""
return pde_separate(eq, fun, sep, strategy='add')
def pde_separate_mul(eq, fun, sep):
"""
Helper function for searching multiplicative separable solutions.
Consider an equation with independent variables x, y, z and a dependent
variable w; we look for the product of two functions depending on different
arguments:
`w(x, y, z) = X(x)*u(y, z)`
Examples
========
>>> from sympy import Function, Eq, pde_separate_mul, Derivative as D
>>> from sympy.abc import x, y
>>> u, X, Y = map(Function, 'uXY')
>>> eq = Eq(D(u(x, y), x, 2), D(u(x, y), y, 2))
>>> pde_separate_mul(eq, u(x, y), [X(x), Y(y)])
[Derivative(X(x), (x, 2))/X(x), Derivative(Y(y), (y, 2))/Y(y)]
"""
return pde_separate(eq, fun, sep, strategy='mul')
def _separate(eq, dep, others):
"""Separate expression into two parts based on dependencies of variables."""
# FIRST PASS
# Extract derivatives depending our separable variable...
terms = set()
for term in eq.args:
if term.is_Mul:
for i in term.args:
if i.is_Derivative and not i.has(*others):
terms.add(term)
continue
elif term.is_Derivative and not term.has(*others):
terms.add(term)
# Find the factor that we need to divide by
div = set()
for term in terms:
ext, sep = term.expand().as_independent(dep)
# Failed?
if sep.has(*others):
return None
div.add(ext)
# FIXME: Find lcm() of all the divisors and divide with it, instead of
# current hack :(
# https://github.com/sympy/sympy/issues/4597
if len(div) > 0:
final = 0
for term in eq.args:
eqn = 0
for i in div:
eqn += term / i
final += simplify(eqn)
eq = final
# SECOND PASS - separate the derivatives
div = set()
lhs = rhs = 0
for term in eq.args:
# Check, whether we have already term with independent variable...
if not term.has(*others):
lhs += term
continue
# ...otherwise, try to separate
temp, sep = term.expand().as_independent(dep)
# Failed?
if sep.has(*others):
return None
# Extract the divisors
div.add(sep)
rhs -= term.expand()
# Do the division
fulldiv = reduce(operator.add, div)
lhs = simplify(lhs/fulldiv).expand()
rhs = simplify(rhs/fulldiv).expand()
# ...and check whether we were successful :)
if lhs.has(*others) or rhs.has(dep):
return None
return [lhs, rhs]
|
41628ff36fc8874595494c5e1678fcbb8aae32012e79e69320be404666a9dd9e | """Utility functions for classifying and solving
ordinary and partial differential equations.
Contains
========
_preprocess
ode_order
_desolve
"""
from sympy.core import Pow
from sympy.core.function import Derivative, AppliedUndef
from sympy.core.relational import Equality
from sympy.core.symbol import Wild
def _preprocess(expr, func=None, hint='_Integral'):
"""Prepare expr for solving by making sure that differentiation
is done so that only func remains in unevaluated derivatives and
(if hint doesn't end with _Integral) that doit is applied to all
other derivatives. If hint is None, don't do any differentiation.
(Currently this may cause some simple differential equations to
fail.)
In case func is None, an attempt will be made to autodetect the
function to be solved for.
>>> from sympy.solvers.deutils import _preprocess
>>> from sympy import Derivative, Function
>>> from sympy.abc import x, y, z
>>> f, g = map(Function, 'fg')
If f(x)**p == 0 and p>0 then we can solve for f(x)=0
>>> _preprocess((f(x).diff(x)-4)**5, f(x))
(Derivative(f(x), x) - 4, f(x))
Apply doit to derivatives that contain more than the function
of interest:
>>> _preprocess(Derivative(f(x) + x, x))
(Derivative(f(x), x) + 1, f(x))
Do others if the differentiation variable(s) intersect with those
of the function of interest or contain the function of interest:
>>> _preprocess(Derivative(g(x), y, z), f(y))
(0, f(y))
>>> _preprocess(Derivative(f(y), z), f(y))
(0, f(y))
Do others if the hint doesn't end in '_Integral' (the default
assumes that it does):
>>> _preprocess(Derivative(g(x), y), f(x))
(Derivative(g(x), y), f(x))
>>> _preprocess(Derivative(f(x), y), f(x), hint='')
(0, f(x))
Don't do any derivatives if hint is None:
>>> eq = Derivative(f(x) + 1, x) + Derivative(f(x), y)
>>> _preprocess(eq, f(x), hint=None)
(Derivative(f(x) + 1, x) + Derivative(f(x), y), f(x))
If it's not clear what the function of interest is, it must be given:
>>> eq = Derivative(f(x) + g(x), x)
>>> _preprocess(eq, g(x))
(Derivative(f(x), x) + Derivative(g(x), x), g(x))
>>> try: _preprocess(eq)
... except ValueError: print("A ValueError was raised.")
A ValueError was raised.
"""
if isinstance(expr, Pow):
# if f(x)**p=0 then f(x)=0 (p>0)
if (expr.exp).is_positive:
expr = expr.base
derivs = expr.atoms(Derivative)
if not func:
funcs = set().union(*[d.atoms(AppliedUndef) for d in derivs])
if len(funcs) != 1:
raise ValueError('The function cannot be '
'automatically detected for %s.' % expr)
func = funcs.pop()
fvars = set(func.args)
if hint is None:
return expr, func
reps = [(d, d.doit()) for d in derivs if not hint.endswith('_Integral') or
d.has(func) or set(d.variables) & fvars]
eq = expr.subs(reps)
return eq, func
def ode_order(expr, func):
"""
Returns the order of a given differential
equation with respect to func.
This function is implemented recursively.
Examples
========
>>> from sympy import Function
>>> from sympy.solvers.deutils import ode_order
>>> from sympy.abc import x
>>> f, g = map(Function, ['f', 'g'])
>>> ode_order(f(x).diff(x, 2) + f(x).diff(x)**2 +
... f(x).diff(x), f(x))
2
>>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), f(x))
2
>>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), g(x))
3
"""
a = Wild('a', exclude=[func])
if expr.match(a):
return 0
if isinstance(expr, Derivative):
if expr.args[0] == func:
return len(expr.variables)
else:
order = 0
for arg in expr.args[0].args:
order = max(order, ode_order(arg, func) + len(expr.variables))
return order
else:
order = 0
for arg in expr.args:
order = max(order, ode_order(arg, func))
return order
def _desolve(eq, func=None, hint="default", ics=None, simplify=True, *, prep=True, **kwargs):
"""This is a helper function to dsolve and pdsolve in the ode
and pde modules.
If the hint provided to the function is "default", then a dict with
the following keys are returned
'func' - It provides the function for which the differential equation
has to be solved. This is useful when the expression has
more than one function in it.
'default' - The default key as returned by classifier functions in ode
and pde.py
'hint' - The hint given by the user for which the differential equation
is to be solved. If the hint given by the user is 'default',
then the value of 'hint' and 'default' is the same.
'order' - The order of the differential equation, as returned by ode_order
'match' - It returns the match as given by the classifier functions, for
the default hint.
If the hint provided to the function is not "default" and is not in
('all', 'all_Integral', 'best'), then a dict with the above mentioned keys
is returned along with the keys which are returned when dict in
classify_ode or classify_pde is set True
If the hint given is in ('all', 'all_Integral', 'best'), then this function
returns a nested dict, with the keys, being the set of classified hints
returned by classifier functions, and the values being the dict of form
as mentioned above.
Key 'eq' is a common key to all the above mentioned hints which returns an
expression if eq given by user is an Equality.
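A minimal sketch of how the ode/pde front ends call this helper
(illustrative only; the particular keyword values are assumptions)::
    hints = _desolve(eq, func=f(x, y), hint='default', type='pde')
    # hints['hint'], hints['func'], hints['order'] and
    # hints[hints['hint']] are then handed to the individual solvers.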
See Also
========
classify_ode(ode.py)
classify_pde(pde.py)
"""
if isinstance(eq, Equality):
eq = eq.lhs - eq.rhs
# preprocess the equation and find func if not given
if prep or func is None:
eq, func = _preprocess(eq, func)
prep = False
# type is an argument passed by the solve functions in ode and pde.py
# that identifies whether the function caller is an ordinary
# or partial differential equation. Accordingly corresponding
# changes are made in the function.
type = kwargs.get('type', None)
xi = kwargs.get('xi')
eta = kwargs.get('eta')
x0 = kwargs.get('x0', 0)
terms = kwargs.get('n')
if type == 'ode':
from sympy.solvers.ode import classify_ode, allhints
classifier = classify_ode
string = 'ODE '
dummy = ''
elif type == 'pde':
from sympy.solvers.pde import classify_pde, allhints
classifier = classify_pde
string = 'PDE '
dummy = 'p'
# Magic that should only be used internally. Prevents classify_ode from
# being called more than it needs to be by passing its results through
# recursive calls.
if kwargs.get('classify', True):
hints = classifier(eq, func, dict=True, ics=ics, xi=xi, eta=eta,
n=terms, x0=x0, prep=prep)
else:
# Here is what all this means:
#
# hint: The hint method given to _desolve() by the user.
# hints: The dictionary of hints that match the DE, along with other
# information (including the internal pass-through magic).
# default: The default hint to return, the first hint from allhints
# that matches the hint; obtained from classify_ode().
# match: Dictionary containing the match dictionary for each hint
# (the parts of the DE for solving). When going through the
# hints in "all", this holds the match string for the current
# hint.
# order: The order of the DE, as determined by ode_order().
hints = kwargs.get('hint',
{'default': hint,
hint: kwargs['match'],
'order': kwargs['order']})
if not hints['default']:
# classify_ode will set hints['default'] to None if no hints match.
if hint not in allhints and hint != 'default':
raise ValueError("Hint not recognized: " + hint)
elif hint not in hints['ordered_hints'] and hint != 'default':
raise ValueError(string + str(eq) + " does not match hint " + hint)
# If dsolve can't solve the purely algebraic equation then dsolve will raise
# ValueError
elif hints['order'] == 0:
raise ValueError(
str(eq) + " is not a solvable differential equation in " + str(func))
else:
raise NotImplementedError(dummy + "solve" + ": Cannot solve " + str(eq))
if hint == 'default':
return _desolve(eq, func, ics=ics, hint=hints['default'], simplify=simplify,
prep=prep, x0=x0, classify=False, order=hints['order'],
match=hints[hints['default']], xi=xi, eta=eta, n=terms, type=type)
elif hint in ('all', 'all_Integral', 'best'):
retdict = {}
gethints = set(hints) - {'order', 'default', 'ordered_hints'}
if hint == 'all_Integral':
for i in hints:
if i.endswith('_Integral'):
gethints.remove(i[:-len('_Integral')])
# special cases
for k in ["1st_homogeneous_coeff_best", "1st_power_series",
"lie_group", "2nd_power_series_ordinary", "2nd_power_series_regular"]:
if k in gethints:
gethints.remove(k)
for i in gethints:
sol = _desolve(eq, func, ics=ics, hint=i, x0=x0, simplify=simplify, prep=prep,
classify=False, n=terms, order=hints['order'], match=hints[i], type=type)
retdict[i] = sol
retdict['all'] = True
retdict['eq'] = eq
return retdict
elif hint not in allhints: # and hint not in ('default', 'ordered_hints'):
raise ValueError("Hint not recognized: " + hint)
elif hint not in hints:
raise ValueError(string + str(eq) + " does not match hint " + hint)
else:
# Key added to identify the hint needed to solve the equation
hints['hint'] = hint
hints.update({'func': func, 'eq': eq})
return hints
|
fa495579c64fa1f211572a6625d7654afad8c46be569e839f71d86bd4b7b5811 | """Solvers of systems of polynomial equations. """
from sympy.core import S
from sympy.polys import Poly, groebner, roots
from sympy.polys.polytools import parallel_poly_from_expr
from sympy.polys.polyerrors import (ComputationFailed,
PolificationFailed, CoercionFailed)
from sympy.simplify import rcollect
from sympy.utilities import default_sort_key, postfixes
from sympy.utilities.misc import filldedent
class SolveFailed(Exception):
"""Raised when solver's conditions weren't met. """
def solve_poly_system(seq, *gens, **args):
"""
Solve a system of polynomial equations.
Parameters
==========
seq: a list/tuple/set
Listing all the equations that are needed to be solved
gens: generators
generators of the equations in seq for which we want the
solutions
args: Keyword arguments
Special options for solving the equations
Returns
=======
List[Tuple]
A List of tuples. Solutions for symbols that satisfy the
equations listed in seq
Examples
========
>>> from sympy import solve_poly_system
>>> from sympy.abc import x, y
>>> solve_poly_system([x*y - 2*y, 2*y**2 - x**2], x, y)
[(0, 0), (2, -sqrt(2)), (2, sqrt(2))]
"""
try:
polys, opt = parallel_poly_from_expr(seq, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('solve_poly_system', len(seq), exc)
if len(polys) == len(opt.gens) == 2:
f, g = polys
if all(i <= 2 for i in f.degree_list() + g.degree_list()):
try:
return solve_biquadratic(f, g, opt)
except SolveFailed:
pass
return solve_generic(polys, opt)
def solve_biquadratic(f, g, opt):
"""Solve a system of two bivariate quadratic polynomial equations.
Parameters
==========
f: a single Expr or Poly
First equation
g: a single Expr or Poly
Second Equation
opt: an Options object
For specifying keyword arguments and generators
Returns
=======
List[Tuple]
A List of tuples. Solutions for symbols that satisfy the
equations listed in seq.
Examples
========
>>> from sympy.polys import Options, Poly
>>> from sympy.abc import x, y
>>> from sympy.solvers.polysys import solve_biquadratic
>>> NewOption = Options((x, y), {'domain': 'ZZ'})
>>> a = Poly(y**2 - 4 + x, y, x, domain='ZZ')
>>> b = Poly(y*2 + 3*x - 7, y, x, domain='ZZ')
>>> solve_biquadratic(a, b, NewOption)
[(1/3, 3), (41/27, 11/9)]
>>> a = Poly(y + x**2 - 3, y, x, domain='ZZ')
>>> b = Poly(-y + x - 4, y, x, domain='ZZ')
>>> solve_biquadratic(a, b, NewOption)
[(7/2 - sqrt(29)/2, -sqrt(29)/2 - 1/2), (sqrt(29)/2 + 7/2, -1/2 + \
sqrt(29)/2)]
"""
G = groebner([f, g])
if len(G) == 1 and G[0].is_ground:
return None
if len(G) != 2:
raise SolveFailed
x, y = opt.gens
p, q = G
if not p.gcd(q).is_ground:
# not 0-dimensional
raise SolveFailed
p = Poly(p, x, expand=False)
p_roots = [rcollect(expr, y) for expr in roots(p).keys()]
q = q.ltrim(-1)
q_roots = list(roots(q).keys())
solutions = []
for q_root in q_roots:
for p_root in p_roots:
solution = (p_root.subs(y, q_root), q_root)
solutions.append(solution)
return sorted(solutions, key=default_sort_key)
def solve_generic(polys, opt):
"""
Solve a generic system of polynomial equations.
Returns all possible solutions over C[x_1, x_2, ..., x_m] of a
set F = { f_1, f_2, ..., f_n } of polynomial equations, using
Groebner basis approach. For now only zero-dimensional systems
are supported, which means F can have at most a finite number
of solutions.
The algorithm relies on the fact that, if G is the basis of F with
respect to an elimination order (here lexicographic order is used),
then G and F generate the same ideal and therefore have the same set
of solutions. By the elimination property, if G is a reduced,
zero-dimensional Groebner basis, then it contains a univariate
polynomial (in its last variable), which can be solved by computing
its roots. Substituting each computed root for the last (eliminated)
variable into the other elements of G generates a new polynomial
system. Applying this procedure recursively, a finite number of
solutions can be found.
The ability to find all solutions by this procedure depends on the
root finding algorithms. If no solutions were found, it may only mean
that roots() failed, even though the system is solvable. To overcome
this difficulty, use numerical algorithms instead.
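For example, for F = {x**2 + y - 3, y - 1} the reduced lexicographic
basis is G = {x**2 - 2, y - 1}: the univariate element y - 1 gives
y = 1, substituting it back leaves x**2 - 2, and its roots complete
the two solutions (-sqrt(2), 1) and (sqrt(2), 1).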
Parameters
==========
polys: a list/tuple/set
Listing all the polynomial equations that are needed to be solved
opt: an Options object
For specifying keyword arguments and generators
Returns
=======
List[Tuple]
A List of tuples. Solutions for symbols that satisfy the
equations listed in seq
References
==========
.. [Buchberger01] B. Buchberger, Groebner Bases: A Short
Introduction for Systems Theorists, In: R. Moreno-Diaz,
B. Buchberger, J.L. Freire, Proceedings of EUROCAST'01,
February, 2001
.. [Cox97] D. Cox, J. Little, D. O'Shea, Ideals, Varieties
and Algorithms, Springer, Second Edition, 1997, pp. 112
Examples
========
>>> from sympy.polys import Poly, Options
>>> from sympy.solvers.polysys import solve_generic
>>> from sympy.abc import x, y
>>> NewOption = Options((x, y), {'domain': 'ZZ'})
>>> a = Poly(x - y + 5, x, y, domain='ZZ')
>>> b = Poly(x + y - 3, x, y, domain='ZZ')
>>> solve_generic([a, b], NewOption)
[(-1, 4)]
>>> a = Poly(x - 2*y + 5, x, y, domain='ZZ')
>>> b = Poly(2*x - y - 3, x, y, domain='ZZ')
>>> solve_generic([a, b], NewOption)
[(11/3, 13/3)]
>>> a = Poly(x**2 + y, x, y, domain='ZZ')
>>> b = Poly(x + y*4, x, y, domain='ZZ')
>>> solve_generic([a, b], NewOption)
[(0, 0), (1/4, -1/16)]
"""
def _is_univariate(f):
"""Returns True if 'f' is univariate in its last variable. """
for monom in f.monoms():
if any(monom[:-1]):
return False
return True
def _subs_root(f, gen, zero):
"""Replace generator with a root so that the result is nice. """
p = f.as_expr({gen: zero})
if f.degree(gen) >= 2:
p = p.expand(deep=False)
return p
def _solve_reduced_system(system, gens, entry=False):
"""Recursively solves reduced polynomial systems. """
if len(system) == len(gens) == 1:
zeros = list(roots(system[0], gens[-1]).keys())
return [(zero,) for zero in zeros]
basis = groebner(system, gens, polys=True)
if len(basis) == 1 and basis[0].is_ground:
if not entry:
return []
else:
return None
univariate = list(filter(_is_univariate, basis))
if len(univariate) == 1:
f = univariate.pop()
else:
raise NotImplementedError(filldedent('''
only zero-dimensional systems supported
(finite number of solutions)
'''))
gens = f.gens
gen = gens[-1]
zeros = list(roots(f.ltrim(gen)).keys())
if not zeros:
return []
if len(basis) == 1:
return [(zero,) for zero in zeros]
solutions = []
for zero in zeros:
new_system = []
new_gens = gens[:-1]
for b in basis[:-1]:
eq = _subs_root(b, gen, zero)
if eq is not S.Zero:
new_system.append(eq)
for solution in _solve_reduced_system(new_system, new_gens):
solutions.append(solution + (zero,))
if solutions and len(solutions[0]) != len(gens):
raise NotImplementedError(filldedent('''
only zero-dimensional systems supported
(finite number of solutions)
'''))
return solutions
try:
result = _solve_reduced_system(polys, opt.gens, entry=True)
except CoercionFailed:
raise NotImplementedError
if result is not None:
return sorted(result, key=default_sort_key)
else:
return None
def solve_triangulated(polys, *gens, **args):
"""
Solve a polynomial system using Gianni-Kalkbrenner algorithm.
The algorithm proceeds by computing one Groebner basis in the ground
domain and then by iteratively computing polynomial factorizations in
appropriately constructed algebraic extensions of the ground domain.
Parameters
==========
polys: a list/tuple/set
Listing all the equations that are needed to be solved
gens: generators
generators of the equations in polys for which we want the
solutions
args: Keyword arguments
Special options for solving the equations
Returns
=======
List[Tuple]
A List of tuples. Solutions for symbols that satisfy the
equations listed in polys
Examples
========
>>> from sympy.solvers.polysys import solve_triangulated
>>> from sympy.abc import x, y, z
>>> F = [x**2 + y + z - 1, x + y**2 + z - 1, x + y + z**2 - 1]
>>> solve_triangulated(F, x, y, z)
[(0, 0, 1), (0, 1, 0), (1, 0, 0)]
References
==========
1. Patrizia Gianni, Teo Mora, Algebraic Solution of System of
Polynomial Equations using Groebner Bases, AAECC-5 on Applied Algebra,
Algebraic Algorithms and Error-Correcting Codes, LNCS 356 247--257, 1989
"""
G = groebner(polys, gens, polys=True)
G = list(reversed(G))
domain = args.get('domain')
if domain is not None:
for i, g in enumerate(G):
G[i] = g.set_domain(domain)
f, G = G[0].ltrim(-1), G[1:]
dom = f.get_domain()
zeros = f.ground_roots()
solutions = set()
for zero in zeros:
solutions.add(((zero,), dom))
var_seq = reversed(gens[:-1])
vars_seq = postfixes(gens[1:])
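    # Work upwards one generator at a time: evaluate the basis elements at
    # the coordinate values found so far, keep the lowest-degree univariate
    # polynomial in the new variable, and adjoin its roots (extending the
    # ground domain with an algebraic extension whenever a root is
    # irrational).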
for var, vars in zip(var_seq, vars_seq):
_solutions = set()
for values, dom in solutions:
H, mapping = [], list(zip(vars, values))
for g in G:
_vars = (var,) + vars
if g.has_only_gens(*_vars) and g.degree(var) != 0:
h = g.ltrim(var).eval(dict(mapping))
if g.degree(var) == h.degree():
H.append(h)
p = min(H, key=lambda h: h.degree())
zeros = p.ground_roots()
for zero in zeros:
if not zero.is_Rational:
dom_zero = dom.algebraic_field(zero)
else:
dom_zero = dom
_solutions.add(((zero,) + values, dom_zero))
solutions = _solutions
solutions = list(solutions)
for i, (solution, _) in enumerate(solutions):
solutions[i] = solution
return sorted(solutions, key=default_sort_key)
|
2e1fd921dca5bc48d487b2c42feb036a6b6763ddafd9cc48952cb3e3e966f952 | """Tools for solving inequalities and systems of inequalities. """
from sympy.core import Symbol, Dummy, sympify
from sympy.core.compatibility import iterable
from sympy.core.exprtools import factor_terms
from sympy.core.relational import Relational, Eq, Ge, Lt
from sympy.sets import Interval
from sympy.sets.sets import FiniteSet, Union, EmptySet, Intersection
from sympy.core.singleton import S
from sympy.core.function import expand_mul
from sympy.functions import Abs
from sympy.logic import And
from sympy.polys import Poly, PolynomialError, parallel_poly_from_expr
from sympy.polys.polyutils import _nsort
from sympy.utilities.iterables import sift
from sympy.utilities.misc import filldedent
def solve_poly_inequality(poly, rel):
"""Solve a polynomial inequality with rational coefficients.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> from sympy.solvers.inequalities import solve_poly_inequality
>>> solve_poly_inequality(Poly(x, x, domain='ZZ'), '==')
[FiniteSet(0)]
>>> solve_poly_inequality(Poly(x**2 - 1, x, domain='ZZ'), '!=')
[Interval.open(-oo, -1), Interval.open(-1, 1), Interval.open(1, oo)]
>>> solve_poly_inequality(Poly(x**2 - 1, x, domain='ZZ'), '==')
[FiniteSet(-1), FiniteSet(1)]
See Also
========
solve_poly_inequalities
"""
if not isinstance(poly, Poly):
raise ValueError(
'For efficiency reasons, `poly` should be a Poly instance')
if poly.as_expr().is_number:
t = Relational(poly.as_expr(), 0, rel)
if t is S.true:
return [S.Reals]
elif t is S.false:
return [S.EmptySet]
else:
raise NotImplementedError(
"could not determine truth value of %s" % t)
reals, intervals = poly.real_roots(multiple=False), []
if rel == '==':
for root, _ in reals:
interval = Interval(root, root)
intervals.append(interval)
elif rel == '!=':
left = S.NegativeInfinity
for right, _ in reals + [(S.Infinity, 1)]:
interval = Interval(left, right, True, True)
intervals.append(interval)
left = right
else:
if poly.LC() > 0:
sign = +1
else:
sign = -1
eq_sign, equal = None, False
if rel == '>':
eq_sign = +1
elif rel == '<':
eq_sign = -1
elif rel == '>=':
eq_sign, equal = +1, True
elif rel == '<=':
eq_sign, equal = -1, True
else:
raise ValueError("'%s' is not a valid relation" % rel)
right, right_open = S.Infinity, True
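        # Sweep from the largest real root towards -oo: the sign of the
        # polynomial flips at each root of odd multiplicity, and the intervals
        # on which the sign matches ``eq_sign`` are collected; roots of even
        # multiplicity are kept as isolated points or excluded from an
        # interval depending on whether equality is allowed.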
for left, multiplicity in reversed(reals):
if multiplicity % 2:
if sign == eq_sign:
intervals.insert(
0, Interval(left, right, not equal, right_open))
sign, right, right_open = -sign, left, not equal
else:
if sign == eq_sign and not equal:
intervals.insert(
0, Interval(left, right, True, right_open))
right, right_open = left, True
elif sign != eq_sign and equal:
intervals.insert(0, Interval(left, left))
if sign == eq_sign:
intervals.insert(
0, Interval(S.NegativeInfinity, right, True, right_open))
return intervals
def solve_poly_inequalities(polys):
"""Solve polynomial inequalities with rational coefficients.
Examples
========
>>> from sympy.solvers.inequalities import solve_poly_inequalities
>>> from sympy.polys import Poly
>>> from sympy.abc import x
>>> solve_poly_inequalities(((
... Poly(x**2 - 3), ">"), (
... Poly(-x**2 + 1), ">")))
Union(Interval.open(-oo, -sqrt(3)), Interval.open(-1, 1), Interval.open(sqrt(3), oo))
"""
from sympy import Union
return Union(*[s for p in polys for s in solve_poly_inequality(*p)])
def solve_rational_inequalities(eqs):
"""Solve a system of rational inequalities with rational coefficients.
Examples
========
>>> from sympy.abc import x
>>> from sympy import Poly
>>> from sympy.solvers.inequalities import solve_rational_inequalities
>>> solve_rational_inequalities([[
... ((Poly(-x + 1), Poly(1, x)), '>='),
... ((Poly(-x + 1), Poly(1, x)), '<=')]])
FiniteSet(1)
>>> solve_rational_inequalities([[
... ((Poly(x), Poly(1, x)), '!='),
... ((Poly(-x + 1), Poly(1, x)), '>=')]])
Union(Interval.open(-oo, 0), Interval.Lopen(0, 1))
See Also
========
solve_poly_inequality
"""
result = S.EmptySet
for _eqs in eqs:
if not _eqs:
continue
global_intervals = [Interval(S.NegativeInfinity, S.Infinity)]
for (numer, denom), rel in _eqs:
numer_intervals = solve_poly_inequality(numer*denom, rel)
denom_intervals = solve_poly_inequality(denom, '==')
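            # numer/denom has the same sign as numer*denom wherever denom != 0,
            # so the intervals found above for numer*denom are intersected with
            # the running feasible set and the denominator's zeros are then
            # carved out below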
intervals = []
for numer_interval in numer_intervals:
for global_interval in global_intervals:
interval = numer_interval.intersect(global_interval)
if interval is not S.EmptySet:
intervals.append(interval)
global_intervals = intervals
intervals = []
for global_interval in global_intervals:
for denom_interval in denom_intervals:
global_interval -= denom_interval
if global_interval is not S.EmptySet:
intervals.append(global_interval)
global_intervals = intervals
if not global_intervals:
break
for interval in global_intervals:
result = result.union(interval)
return result
def reduce_rational_inequalities(exprs, gen, relational=True):
"""Reduce a system of rational inequalities with rational coefficients.
Examples
========
>>> from sympy import Symbol
>>> from sympy.solvers.inequalities import reduce_rational_inequalities
>>> x = Symbol('x', real=True)
>>> reduce_rational_inequalities([[x**2 <= 0]], x)
Eq(x, 0)
>>> reduce_rational_inequalities([[x + 2 > 0]], x)
-2 < x
>>> reduce_rational_inequalities([[(x + 2, ">")]], x)
-2 < x
>>> reduce_rational_inequalities([[x + 2]], x)
Eq(x, -2)
    This function finds the non-infinite solution set, so if the unknown symbol
is declared as extended real rather than real then the result may include
finiteness conditions:
>>> y = Symbol('y', extended_real=True)
>>> reduce_rational_inequalities([[y + 2 > 0]], y)
(-2 < y) & (y < oo)
"""
exact = True
eqs = []
solution = S.Reals if exprs else S.EmptySet
for _exprs in exprs:
_eqs = []
for expr in _exprs:
if isinstance(expr, tuple):
expr, rel = expr
else:
if expr.is_Relational:
expr, rel = expr.lhs - expr.rhs, expr.rel_op
else:
expr, rel = expr, '=='
if expr is S.true:
numer, denom, rel = S.Zero, S.One, '=='
elif expr is S.false:
numer, denom, rel = S.One, S.One, '=='
else:
numer, denom = expr.together().as_numer_denom()
try:
(numer, denom), opt = parallel_poly_from_expr(
(numer, denom), gen)
except PolynomialError:
raise PolynomialError(filldedent('''
only polynomials and rational functions are
supported in this context.
'''))
if not opt.domain.is_Exact:
numer, denom, exact = numer.to_exact(), denom.to_exact(), False
domain = opt.domain.get_exact()
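                # coefficients outside ZZ/QQ (e.g. symbolic or algebraic ones)
                # are not handled by the rational-inequality machinery, so this
                # piece falls back to the generic univariate solver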
if not (domain.is_ZZ or domain.is_QQ):
expr = numer/denom
expr = Relational(expr, 0, rel)
solution &= solve_univariate_inequality(expr, gen, relational=False)
else:
_eqs.append(((numer, denom), rel))
if _eqs:
eqs.append(_eqs)
if eqs:
solution &= solve_rational_inequalities(eqs)
exclude = solve_rational_inequalities([[((d, d.one), '==')
for i in eqs for ((n, d), _) in i if d.has(gen)]])
solution -= exclude
if not exact and solution:
solution = solution.evalf()
if relational:
solution = solution.as_relational(gen)
return solution
def reduce_abs_inequality(expr, rel, gen):
"""Reduce an inequality with nested absolute values.
Examples
========
>>> from sympy import Abs, Symbol
>>> from sympy.solvers.inequalities import reduce_abs_inequality
>>> x = Symbol('x', real=True)
>>> reduce_abs_inequality(Abs(x - 5) - 3, '<', x)
(2 < x) & (x < 8)
>>> reduce_abs_inequality(Abs(x + 2)*3 - 13, '<', x)
(-19/3 < x) & (x < 7/3)
See Also
========
reduce_abs_inequalities
"""
if gen.is_extended_real is False:
raise TypeError(filldedent('''
can't solve inequalities with absolute values containing
non-real variables.
'''))
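    # _bottom_up_scan splits every Abs into its two sign cases and returns
    # (expression, conditions) pairs; each case is collected below as a
    # conjunction and the cases are then combined (as a union) by
    # reduce_rational_inequalities.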
def _bottom_up_scan(expr):
exprs = []
if expr.is_Add or expr.is_Mul:
op = expr.func
for arg in expr.args:
_exprs = _bottom_up_scan(arg)
if not exprs:
exprs = _exprs
else:
args = []
for expr, conds in exprs:
for _expr, _conds in _exprs:
args.append((op(expr, _expr), conds + _conds))
exprs = args
elif expr.is_Pow:
n = expr.exp
if not n.is_Integer:
raise ValueError("Only Integer Powers are allowed on Abs.")
_exprs = _bottom_up_scan(expr.base)
for expr, conds in _exprs:
exprs.append((expr**n, conds))
elif isinstance(expr, Abs):
_exprs = _bottom_up_scan(expr.args[0])
for expr, conds in _exprs:
exprs.append(( expr, conds + [Ge(expr, 0)]))
exprs.append((-expr, conds + [Lt(expr, 0)]))
else:
exprs = [(expr, [])]
return exprs
exprs = _bottom_up_scan(expr)
mapping = {'<': '>', '<=': '>='}
inequalities = []
for expr, conds in exprs:
if rel not in mapping.keys():
expr = Relational( expr, 0, rel)
else:
expr = Relational(-expr, 0, mapping[rel])
inequalities.append([expr] + conds)
return reduce_rational_inequalities(inequalities, gen)
def reduce_abs_inequalities(exprs, gen):
"""Reduce a system of inequalities with nested absolute values.
Examples
========
>>> from sympy import Abs, Symbol
>>> from sympy.solvers.inequalities import reduce_abs_inequalities
>>> x = Symbol('x', extended_real=True)
>>> reduce_abs_inequalities([(Abs(3*x - 5) - 7, '<'),
... (Abs(x + 25) - 13, '>')], x)
(-2/3 < x) & (x < 4) & (((-oo < x) & (x < -38)) | ((-12 < x) & (x < oo)))
>>> reduce_abs_inequalities([(Abs(x - 4) + Abs(3*x - 5) - 7, '<')], x)
(1/2 < x) & (x < 4)
See Also
========
reduce_abs_inequality
"""
return And(*[ reduce_abs_inequality(expr, rel, gen)
for expr, rel in exprs ])
def solve_univariate_inequality(expr, gen, relational=True, domain=S.Reals, continuous=False):
"""Solves a real univariate inequality.
Parameters
==========
expr : Relational
The target inequality
gen : Symbol
The variable for which the inequality is solved
relational : bool
        Whether to return the solution as a Relational (default) or as a Set
domain : Set
The domain over which the equation is solved
    continuous : bool
True if expr is known to be continuous over the given domain
(and so continuous_domain() doesn't need to be called on it)
Raises
======
NotImplementedError
        The solution of the inequality cannot be determined due to limitations
in :func:`sympy.solvers.solveset.solvify`.
Notes
=====
Currently, we cannot solve all the inequalities due to limitations in
    :func:`sympy.solvers.solveset.solvify`. Also, the solutions returned for trigonometric inequalities
    are restricted to their periodic interval.
See Also
========
sympy.solvers.solveset.solvify: solver returning solveset solutions with solve's output API
Examples
========
>>> from sympy.solvers.inequalities import solve_univariate_inequality
>>> from sympy import Symbol, sin, Interval, S
>>> x = Symbol('x')
>>> solve_univariate_inequality(x**2 >= 4, x)
((2 <= x) & (x < oo)) | ((x <= -2) & (-oo < x))
>>> solve_univariate_inequality(x**2 >= 4, x, relational=False)
Union(Interval(-oo, -2), Interval(2, oo))
>>> domain = Interval(0, S.Infinity)
>>> solve_univariate_inequality(x**2 >= 4, x, False, domain)
Interval(2, oo)
>>> solve_univariate_inequality(sin(x) > 0, x, relational=False)
Interval.open(0, pi)
"""
from sympy import im
from sympy.calculus.util import (continuous_domain, periodicity,
function_range)
from sympy.solvers.solvers import denoms
from sympy.solvers.solveset import solvify, solveset
# This keeps the function independent of the assumptions about `gen`.
# `solveset` makes sure this function is called only when the domain is
# real.
_gen = gen
_domain = domain
if gen.is_extended_real is False:
rv = S.EmptySet
return rv if not relational else rv.as_relational(_gen)
elif gen.is_extended_real is None:
gen = Dummy('gen', extended_real=True)
try:
expr = expr.xreplace({_gen: gen})
except TypeError:
raise TypeError(filldedent('''
When gen is real, the relational has a complex part
which leads to an invalid comparison like I < 0.
'''))
rv = None
if expr is S.true:
rv = domain
elif expr is S.false:
rv = S.EmptySet
else:
e = expr.lhs - expr.rhs
period = periodicity(e, gen)
if period == S.Zero:
e = expand_mul(e)
const = expr.func(e, 0)
if const is S.true:
rv = domain
elif const is S.false:
rv = S.EmptySet
elif period is not None:
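            # a periodic expression only needs to be examined over one period:
            # the range of the function may already decide the relation
            # everywhere, otherwise an unbounded domain is restricted to
            # [0, period) below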
frange = function_range(e, gen, domain)
rel = expr.rel_op
if rel == '<' or rel == '<=':
if expr.func(frange.sup, 0):
rv = domain
elif not expr.func(frange.inf, 0):
rv = S.EmptySet
elif rel == '>' or rel == '>=':
if expr.func(frange.inf, 0):
rv = domain
elif not expr.func(frange.sup, 0):
rv = S.EmptySet
inf, sup = domain.inf, domain.sup
if sup - inf is S.Infinity:
domain = Interval(0, period, False, True).intersect(_domain)
_domain = domain
if rv is None:
n, d = e.as_numer_denom()
try:
if gen not in n.free_symbols and len(e.free_symbols) > 1:
raise ValueError
# this might raise ValueError on its own
# or it might give None...
solns = solvify(e, gen, domain)
if solns is None:
# in which case we raise ValueError
raise ValueError
except (ValueError, NotImplementedError):
# replace gen with generic x since it's
# univariate anyway
raise NotImplementedError(filldedent('''
The inequality, %s, cannot be solved using
solve_univariate_inequality.
''' % expr.subs(gen, Symbol('x'))))
expanded_e = expand_mul(e)
def valid(x):
# this is used to see if gen=x satisfies the
# relational by substituting it into the
# expanded form and testing against 0, e.g.
# if expr = x*(x + 1) < 2 then e = x*(x + 1) - 2
# and expanded_e = x**2 + x - 2; the test is
# whether a given value of x satisfies
# x**2 + x - 2 < 0
#
# expanded_e, expr and gen used from enclosing scope
v = expanded_e.subs(gen, expand_mul(x))
try:
r = expr.func(v, 0)
except TypeError:
r = S.false
if r in (S.true, S.false):
return r
if v.is_extended_real is False:
return S.false
else:
v = v.n(2)
if v.is_comparable:
return expr.func(v, 0)
# not comparable or couldn't be evaluated
raise NotImplementedError(
'relationship did not evaluate: %s' % r)
singularities = []
for d in denoms(expr, gen):
singularities.extend(solvify(d, gen, domain))
if not continuous:
domain = continuous_domain(expanded_e, gen, domain)
include_x = '=' in expr.rel_op and expr.rel_op != '!='
try:
discontinuities = set(domain.boundary -
FiniteSet(domain.inf, domain.sup))
# remove points that are not between inf and sup of domain
critical_points = FiniteSet(*(solns + singularities + list(
discontinuities))).intersection(
Interval(domain.inf, domain.sup,
domain.inf not in domain, domain.sup not in domain))
if all(r.is_number for r in critical_points):
reals = _nsort(critical_points, separated=True)[0]
else:
sifted = sift(critical_points, lambda x: x.is_extended_real)
if sifted[None]:
# there were some roots that weren't known
# to be real
raise NotImplementedError
try:
reals = sifted[True]
if len(reals) > 1:
reals = list(sorted(reals))
except TypeError:
raise NotImplementedError
except NotImplementedError:
raise NotImplementedError('sorting of these roots is not supported')
# If expr contains imaginary coefficients, only take real
# values of x for which the imaginary part is 0
make_real = S.Reals
if im(expanded_e) != S.Zero:
check = True
im_sol = FiniteSet()
try:
a = solveset(im(expanded_e), gen, domain)
if not isinstance(a, Interval):
for z in a:
if z not in singularities and valid(z) and z.is_extended_real:
im_sol += FiniteSet(z)
else:
start, end = a.inf, a.sup
for z in _nsort(critical_points + FiniteSet(end)):
valid_start = valid(start)
if start != end:
valid_z = valid(z)
pt = _pt(start, z)
if pt not in singularities and pt.is_extended_real and valid(pt):
if valid_start and valid_z:
im_sol += Interval(start, z)
elif valid_start:
im_sol += Interval.Ropen(start, z)
elif valid_z:
im_sol += Interval.Lopen(start, z)
else:
im_sol += Interval.open(start, z)
start = z
for s in singularities:
im_sol -= FiniteSet(s)
                except TypeError:
im_sol = S.Reals
check = False
if isinstance(im_sol, EmptySet):
raise ValueError(filldedent('''
%s contains imaginary parts which cannot be
made 0 for any value of %s satisfying the
inequality, leading to relations like I < 0.
''' % (expr.subs(gen, _gen), _gen)))
make_real = make_real.intersect(im_sol)
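            # walk through the sorted critical points: a sample point inside
            # each open interval between consecutive points is tested with
            # valid(), and a critical point itself is kept when it is a
            # discontinuity that satisfies the relation or a root and the
            # relation is non-strict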
sol_sets = [S.EmptySet]
start = domain.inf
if start in domain and valid(start) and start.is_finite:
sol_sets.append(FiniteSet(start))
for x in reals:
end = x
if valid(_pt(start, end)):
sol_sets.append(Interval(start, end, True, True))
if x in singularities:
singularities.remove(x)
else:
if x in discontinuities:
discontinuities.remove(x)
_valid = valid(x)
else: # it's a solution
_valid = include_x
if _valid:
sol_sets.append(FiniteSet(x))
start = end
end = domain.sup
if end in domain and valid(end) and end.is_finite:
sol_sets.append(FiniteSet(end))
if valid(_pt(start, end)):
sol_sets.append(Interval.open(start, end))
if im(expanded_e) != S.Zero and check:
rv = (make_real).intersect(_domain)
else:
rv = Intersection(
(Union(*sol_sets)), make_real, _domain).subs(gen, _gen)
return rv if not relational else rv.as_relational(_gen)
def _pt(start, end):
"""Return a point between start and end"""
if not start.is_infinite and not end.is_infinite:
pt = (start + end)/2
elif start.is_infinite and end.is_infinite:
pt = S.Zero
else:
if (start.is_infinite and start.is_extended_positive is None or
end.is_infinite and end.is_extended_positive is None):
raise ValueError('cannot proceed with unsigned infinite values')
if (end.is_infinite and end.is_extended_negative or
start.is_infinite and start.is_extended_positive):
start, end = end, start
# if possible, use a multiple of self which has
# better behavior when checking assumptions than
# an expression obtained by adding or subtracting 1
if end.is_infinite:
if start.is_extended_positive:
pt = start*2
elif start.is_extended_negative:
pt = start*S.Half
else:
pt = start + 1
elif start.is_infinite:
if end.is_extended_positive:
pt = end*S.Half
elif end.is_extended_negative:
pt = end*2
else:
pt = end - 1
return pt
def _solve_inequality(ie, s, linear=False):
"""Return the inequality with s isolated on the left, if possible.
If the relationship is non-linear, a solution involving And or Or
may be returned. False or True are returned if the relationship
is never True or always True, respectively.
    If `linear` is True (default is False), an `s`-dependent expression
    will be isolated on the left, if possible,
but it will not be solved for `s` unless the expression is linear
in `s`. Furthermore, only "safe" operations which don't change the
sense of the relationship are applied: no division by an unsigned
value is attempted unless the relationship involves Eq or Ne and
no division by a value not known to be nonzero is ever attempted.
Examples
========
>>> from sympy import Eq, Symbol
>>> from sympy.solvers.inequalities import _solve_inequality as f
>>> from sympy.abc import x, y
For linear expressions, the symbol can be isolated:
>>> f(x - 2 < 0, x)
x < 2
>>> f(-x - 6 < x, x)
x > -3
Sometimes nonlinear relationships will be False
>>> f(x**2 + 4 < 0, x)
False
Or they may involve more than one region of values:
>>> f(x**2 - 4 < 0, x)
(-2 < x) & (x < 2)
To restrict the solution to a relational, set linear=True
and only the x-dependent portion will be isolated on the left:
>>> f(x**2 - 4 < 0, x, linear=True)
x**2 < 4
Division of only nonzero quantities is allowed, so x cannot
be isolated by dividing by y:
>>> y.is_nonzero is None # it is unknown whether it is 0 or not
True
>>> f(x*y < 1, x)
x*y < 1
And while an equality (or inequality) still holds after dividing by a
non-zero quantity
>>> nz = Symbol('nz', nonzero=True)
>>> f(Eq(x*nz, 1), x)
Eq(x, 1/nz)
the sign must be known for other inequalities involving > or <:
>>> f(x*nz <= 1, x)
nz*x <= 1
>>> p = Symbol('p', positive=True)
>>> f(x*p <= 1, x)
x <= 1/p
When there are denominators in the original expression that
are removed by expansion, conditions for them will be returned
as part of the result:
>>> f(x < x*(2/x - 1), x)
(x < 1) & Ne(x, 0)
"""
from sympy.solvers.solvers import denoms
if s not in ie.free_symbols:
return ie
if ie.rhs == s:
ie = ie.reversed
if ie.lhs == s and s not in ie.rhs.free_symbols:
return ie
def classify(ie, s, i):
# return True or False if ie evaluates when substituting s with
# i else None (if unevaluated) or NaN (when there is an error
# in evaluating)
try:
v = ie.subs(s, i)
if v is S.NaN:
return v
elif v not in (True, False):
return
return v
except TypeError:
return S.NaN
rv = None
oo = S.Infinity
expr = ie.lhs - ie.rhs
try:
p = Poly(expr, s)
if p.degree() == 0:
rv = ie.func(p.as_expr(), 0)
elif not linear and p.degree() > 1:
# handle in except clause
raise NotImplementedError
except (PolynomialError, NotImplementedError):
if not linear:
try:
rv = reduce_rational_inequalities([[ie]], s)
except PolynomialError:
rv = solve_univariate_inequality(ie, s)
# remove restrictions wrt +/-oo that may have been
# applied when using sets to simplify the relationship
okoo = classify(ie, s, oo)
if okoo is S.true and classify(rv, s, oo) is S.false:
rv = rv.subs(s < oo, True)
oknoo = classify(ie, s, -oo)
if (oknoo is S.true and
classify(rv, s, -oo) is S.false):
rv = rv.subs(-oo < s, True)
rv = rv.subs(s > -oo, True)
if rv is S.true:
rv = (s <= oo) if okoo is S.true else (s < oo)
if oknoo is not S.true:
rv = And(-oo < s, rv)
else:
p = Poly(expr)
conds = []
if rv is None:
e = p.as_expr() # this is in expanded form
# Do a safe inversion of e, moving non-s terms
# to the rhs and dividing by a nonzero factor if
# the relational is Eq/Ne; for other relationals
# the sign must also be positive or negative
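        # e.g. (illustrative) with p positive, p*x + b < 0 becomes x < -b/p,
        # while n*x + b < 0 with n of unknown sign is only rearranged to
        # n*x < -b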
rhs = 0
b, ax = e.as_independent(s, as_Add=True)
e -= b
rhs -= b
ef = factor_terms(e)
a, e = ef.as_independent(s, as_Add=False)
if (a.is_zero != False or # don't divide by potential 0
a.is_negative ==
a.is_positive is None and # if sign is not known then
ie.rel_op not in ('!=', '==')): # reject if not Eq/Ne
e = ef
a = S.One
rhs /= a
if a.is_positive:
rv = ie.func(e, rhs)
else:
rv = ie.reversed.func(e, rhs)
# return conditions under which the value is
# valid, too.
beginning_denoms = denoms(ie.lhs) | denoms(ie.rhs)
current_denoms = denoms(rv)
for d in beginning_denoms - current_denoms:
c = _solve_inequality(Eq(d, 0), s, linear=linear)
if isinstance(c, Eq) and c.lhs == s:
if classify(rv, s, c.rhs) is S.true:
# rv is permitting this value but it shouldn't
conds.append(~c)
for i in (-oo, oo):
if (classify(rv, s, i) is S.true and
classify(ie, s, i) is not S.true):
conds.append(s < i if i is oo else i < s)
conds.append(rv)
return And(*conds)
def _reduce_inequalities(inequalities, symbols):
# helper for reduce_inequalities
poly_part, abs_part = {}, {}
other = []
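    # partition the inequalities by the single symbol each one involves:
    # polynomial ones go through reduce_rational_inequalities, nested-Abs ones
    # through reduce_abs_inequalities, and anything else is handled one at a
    # time by _solve_inequality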
for inequality in inequalities:
expr, rel = inequality.lhs, inequality.rel_op # rhs is 0
# check for gens using atoms which is more strict than free_symbols to
# guard against EX domain which won't be handled by
# reduce_rational_inequalities
gens = expr.atoms(Symbol)
if len(gens) == 1:
gen = gens.pop()
else:
common = expr.free_symbols & symbols
if len(common) == 1:
gen = common.pop()
other.append(_solve_inequality(Relational(expr, 0, rel), gen))
continue
else:
raise NotImplementedError(filldedent('''
inequality has more than one symbol of interest.
'''))
if expr.is_polynomial(gen):
poly_part.setdefault(gen, []).append((expr, rel))
else:
components = expr.find(lambda u:
u.has(gen) and (
u.is_Function or u.is_Pow and not u.exp.is_Integer))
if components and all(isinstance(i, Abs) for i in components):
abs_part.setdefault(gen, []).append((expr, rel))
else:
other.append(_solve_inequality(Relational(expr, 0, rel), gen))
poly_reduced = []
abs_reduced = []
for gen, exprs in poly_part.items():
poly_reduced.append(reduce_rational_inequalities([exprs], gen))
for gen, exprs in abs_part.items():
abs_reduced.append(reduce_abs_inequalities(exprs, gen))
return And(*(poly_reduced + abs_reduced + other))
def reduce_inequalities(inequalities, symbols=[]):
"""Reduce a system of inequalities with rational coefficients.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.solvers.inequalities import reduce_inequalities
>>> reduce_inequalities(0 <= x + 3, [])
(-3 <= x) & (x < oo)
>>> reduce_inequalities(0 <= x + y*2 - 1, [x])
(x < oo) & (x >= 1 - 2*y)
"""
if not iterable(inequalities):
inequalities = [inequalities]
inequalities = [sympify(i) for i in inequalities]
gens = set().union(*[i.free_symbols for i in inequalities])
if not iterable(symbols):
symbols = [symbols]
symbols = (set(symbols) or gens) & gens
if any(i.is_extended_real is False for i in symbols):
raise TypeError(filldedent('''
inequalities cannot contain symbols that are not real.
'''))
# make vanilla symbol real
recast = {i: Dummy(i.name, extended_real=True)
for i in gens if i.is_extended_real is None}
inequalities = [i.xreplace(recast) for i in inequalities]
symbols = {i.xreplace(recast) for i in symbols}
# prefilter
keep = []
for i in inequalities:
if isinstance(i, Relational):
i = i.func(i.lhs.as_expr() - i.rhs.as_expr(), 0)
elif i not in (True, False):
i = Eq(i, 0)
if i == True:
continue
elif i == False:
return S.false
if i.lhs.is_number:
raise NotImplementedError(
"could not determine truth value of %s" % i)
keep.append(i)
inequalities = keep
del keep
# solve system
rv = _reduce_inequalities(inequalities, symbols)
# restore original symbols and return
return rv.xreplace({v: k for k, v in recast.items()})
|
27d45b731ee4f7e2f96b62eaaf80d520e37cfd204dc9a7fec1446d6a2e0455ef | """
This module contains solvers for all kinds of equations:
- algebraic or transcendental, use solve()
- recurrence, use rsolve()
- differential, use dsolve()
- nonlinear (numerically), use nsolve()
(you will need a good starting point)
"""
from sympy import divisors, binomial, expand_func
from sympy.core.assumptions import check_assumptions
from sympy.core.compatibility import (iterable, is_sequence, ordered,
default_sort_key)
from sympy.core.sympify import sympify
from sympy.core import (S, Add, Symbol, Equality, Dummy, Expr, Mul,
Pow, Unequality, Wild)
from sympy.core.exprtools import factor_terms
from sympy.core.function import (expand_mul, expand_log,
Derivative, AppliedUndef, UndefinedFunction, nfloat,
Function, expand_power_exp, _mexpand, expand)
from sympy.integrals.integrals import Integral
from sympy.core.numbers import ilcm, Float, Rational
from sympy.core.relational import Relational
from sympy.core.logic import fuzzy_not
from sympy.core.power import integer_log
from sympy.logic.boolalg import And, Or, BooleanAtom
from sympy.core.basic import preorder_traversal
from sympy.functions import (log, exp, LambertW, cos, sin, tan, acos, asin, atan,
Abs, re, im, arg, sqrt, atan2)
from sympy.functions.elementary.trigonometric import (TrigonometricFunction,
HyperbolicFunction)
from sympy.simplify import (simplify, collect, powsimp, posify, # type: ignore
powdenest, nsimplify, denom, logcombine, sqrtdenest, fraction,
separatevars)
from sympy.simplify.sqrtdenest import sqrt_depth
from sympy.simplify.fu import TR1, TR2i
from sympy.matrices.common import NonInvertibleMatrixError
from sympy.matrices import Matrix, zeros
from sympy.polys import roots, cancel, factor, Poly, degree
from sympy.polys.polyerrors import GeneratorsNeeded, PolynomialError
from sympy.polys.solvers import sympy_eqs_to_ring, solve_lin_sys
from sympy.functions.elementary.piecewise import piecewise_fold, Piecewise
from sympy.utilities.lambdify import lambdify
from sympy.utilities.misc import filldedent
from sympy.utilities.iterables import (cartes, connected_components, flatten,
generate_bell, uniq, sift)
from sympy.utilities.decorator import conserve_mpmath_dps
from mpmath import findroot
from sympy.solvers.polysys import solve_poly_system
from sympy.solvers.inequalities import reduce_inequalities
from types import GeneratorType
from collections import defaultdict
import warnings
def recast_to_symbols(eqs, symbols):
"""
Return (e, s, d) where e and s are versions of *eqs* and
*symbols* in which any non-Symbol objects in *symbols* have
been replaced with generic Dummy symbols and d is a dictionary
that can be used to restore the original expressions.
Examples
========
>>> from sympy.solvers.solvers import recast_to_symbols
>>> from sympy import symbols, Function
>>> x, y = symbols('x y')
>>> fx = Function('f')(x)
>>> eqs, syms = [fx + 1, x, y], [fx, y]
>>> e, s, d = recast_to_symbols(eqs, syms); (e, s, d)
([_X0 + 1, x, y], [_X0, y], {_X0: f(x)})
The original equations and symbols can be restored using d:
>>> assert [i.xreplace(d) for i in eqs] == eqs
>>> assert [d.get(i, i) for i in s] == syms
"""
    if not iterable(eqs) or not iterable(symbols):
raise ValueError('Both eqs and symbols must be iterable')
new_symbols = list(symbols)
swap_sym = {}
for i, s in enumerate(symbols):
if not isinstance(s, Symbol) and s not in swap_sym:
swap_sym[s] = Dummy('X%d' % i)
new_symbols[i] = swap_sym[s]
new_f = []
for i in eqs:
isubs = getattr(i, 'subs', None)
if isubs is not None:
new_f.append(isubs(swap_sym))
else:
new_f.append(i)
swap_sym = {v: k for k, v in swap_sym.items()}
return new_f, new_symbols, swap_sym
def _ispow(e):
"""Return True if e is a Pow or is exp."""
return isinstance(e, Expr) and (e.is_Pow or isinstance(e, exp))
def _simple_dens(f, symbols):
# when checking if a denominator is zero, we can just check the
# base of powers with nonzero exponents since if the base is zero
# the power will be zero, too. To keep it simple and fast, we
# limit simplification to exponents that are Numbers
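    # e.g. for f = 1/(x - 1)**2 + 1/y this returns {x - 1, y} rather than
    # {(x - 1)**2, y} (illustrative, with x and y among the given symbols)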
dens = set()
for d in denoms(f, symbols):
if d.is_Pow and d.exp.is_Number:
if d.exp.is_zero:
continue # foo**0 is never 0
d = d.base
dens.add(d)
return dens
def denoms(eq, *symbols):
"""
Return (recursively) set of all denominators that appear in *eq*
that contain any symbol in *symbols*; if *symbols* are not
provided then all denominators will be returned.
Examples
========
>>> from sympy.solvers.solvers import denoms
>>> from sympy.abc import x, y, z
>>> denoms(x/y)
{y}
>>> denoms(x/(y*z))
{y, z}
>>> denoms(3/x + y/z)
{x, z}
>>> denoms(x/2 + y/z)
{2, z}
If *symbols* are provided then only denominators containing
those symbols will be returned:
>>> denoms(1/x + 1/y + 1/z, y, z)
{y, z}
"""
pot = preorder_traversal(eq)
dens = set()
for p in pot:
# Here p might be Tuple or Relational
# Expr subtrees (e.g. lhs and rhs) will be traversed after by pot
if not isinstance(p, Expr):
continue
den = denom(p)
if den is S.One:
continue
for d in Mul.make_args(den):
dens.add(d)
if not symbols:
return dens
elif len(symbols) == 1:
if iterable(symbols[0]):
symbols = symbols[0]
rv = []
for d in dens:
free = d.free_symbols
if any(s in free for s in symbols):
rv.append(d)
return set(rv)
def checksol(f, symbol, sol=None, **flags):
"""
Checks whether sol is a solution of equation f == 0.
Explanation
===========
Input can be either a single symbol and corresponding value
or a dictionary of symbols and values. When given as a dictionary
and flag ``simplify=True``, the values in the dictionary will be
simplified. *f* can be a single equation or an iterable of equations.
A solution must satisfy all equations in *f* to be considered valid;
    if a solution fails to satisfy any of the equations, False is returned; if one or
more checks are inconclusive (and none are False) then None is returned.
Examples
========
>>> from sympy import symbols
>>> from sympy.solvers import checksol
>>> x, y = symbols('x,y')
>>> checksol(x**4 - 1, x, 1)
True
>>> checksol(x**4 - 1, x, 0)
False
>>> checksol(x**2 + y**2 - 5**2, {x: 3, y: 4})
True
To check if an expression is zero using ``checksol()``, pass it
as *f* and send an empty dictionary for *symbol*:
>>> checksol(x**2 + x - x*(x + 1), {})
True
None is returned if ``checksol()`` could not conclude.
flags:
'numerical=True (default)'
do a fast numerical check if ``f`` has only one symbol.
'minimal=True (default is False)'
a very fast, minimal testing.
'warn=True (default is False)'
show a warning if checksol() could not conclude.
'simplify=True (default)'
simplify solution before substituting into function and
simplify the function before trying specific simplifications
'force=True (default is False)'
make positive all symbols without assumptions regarding sign.
"""
from sympy.physics.units import Unit
minimal = flags.get('minimal', False)
if sol is not None:
sol = {symbol: sol}
elif isinstance(symbol, dict):
sol = symbol
else:
msg = 'Expecting (sym, val) or ({sym: val}, None) but got (%s, %s)'
raise ValueError(msg % (symbol, sol))
if iterable(f):
if not f:
raise ValueError('no functions to check')
rv = True
for fi in f:
check = checksol(fi, sol, **flags)
if check:
continue
if check is False:
return False
rv = None # don't return, wait to see if there's a False
return rv
if isinstance(f, Poly):
f = f.as_expr()
elif isinstance(f, (Equality, Unequality)):
if f.rhs in (S.true, S.false):
f = f.reversed
B, E = f.args
if isinstance(B, BooleanAtom):
f = f.subs(sol)
if not f.is_Boolean:
return
else:
f = f.rewrite(Add, evaluate=False)
if isinstance(f, BooleanAtom):
return bool(f)
elif not f.is_Relational and not f:
return True
if sol and not f.free_symbols & set(sol.keys()):
        # if f(y) == 0, substituting x=3 does not make f(y) zero, nor does it show it is nonzero
return None
illegal = {S.NaN,
S.ComplexInfinity,
S.Infinity,
S.NegativeInfinity}
if any(sympify(v).atoms() & illegal for k, v in sol.items()):
return False
was = f
attempt = -1
numerical = flags.get('numerical', True)
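    # progressively more expensive checks: direct substitution first, then
    # expansion of the numerator of the substituted value, then
    # simplification (and, with force=True, posification) of the solution
    # and expression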
while 1:
attempt += 1
if attempt == 0:
val = f.subs(sol)
if isinstance(val, Mul):
val = val.as_independent(Unit)[0]
if val.atoms() & illegal:
return False
elif attempt == 1:
if not val.is_number:
if not val.is_constant(*list(sol.keys()), simplify=not minimal):
return False
# there are free symbols -- simple expansion might work
_, val = val.as_content_primitive()
val = _mexpand(val.as_numer_denom()[0], recursive=True)
elif attempt == 2:
if minimal:
return
if flags.get('simplify', True):
for k in sol:
sol[k] = simplify(sol[k])
# start over without the failed expanded form, possibly
# with a simplified solution
val = simplify(f.subs(sol))
if flags.get('force', True):
val, reps = posify(val)
# expansion may work now, so try again and check
exval = _mexpand(val, recursive=True)
if exval.is_number:
# we can decide now
val = exval
else:
# if there are no radicals and no functions then this can't be
# zero anymore -- can it?
pot = preorder_traversal(expand_mul(val))
seen = set()
saw_pow_func = False
for p in pot:
if p in seen:
continue
seen.add(p)
if p.is_Pow and not p.exp.is_Integer:
saw_pow_func = True
elif p.is_Function:
saw_pow_func = True
elif isinstance(p, UndefinedFunction):
saw_pow_func = True
if saw_pow_func:
break
if saw_pow_func is False:
return False
if flags.get('force', True):
# don't do a zero check with the positive assumptions in place
val = val.subs(reps)
nz = fuzzy_not(val.is_zero)
if nz is not None:
                # issue 5673: nz may come back True even though the value is actually zero
# so these are just hacks to keep a false positive
# from being returned
# HACK 1: LambertW (issue 5673)
if val.is_number and val.has(LambertW):
# don't eval this to verify solution since if we got here,
# numerical must be False
return None
# add other HACKs here if necessary, otherwise we assume
# the nz value is correct
return not nz
break
if val == was:
continue
elif val.is_Rational:
return val == 0
if numerical and val.is_number:
return (abs(val.n(18).n(12, chop=True)) < 1e-9) is S.true
was = val
if flags.get('warn', False):
warnings.warn("\n\tWarning: could not verify solution %s." % sol)
# returns None if it can't conclude
# TODO: improve solution testing
def solve(f, *symbols, **flags):
r"""
Algebraically solves equations and systems of equations.
Explanation
===========
Currently supported:
- polynomial
- transcendental
- piecewise combinations of the above
- systems of linear and polynomial equations
- systems containing relational expressions
Examples
========
The output varies according to the input and can be seen by example:
>>> from sympy import solve, Poly, Eq, Function, exp
>>> from sympy.abc import x, y, z, a, b
>>> f = Function('f')
Boolean or univariate Relational:
>>> solve(x < 3)
(-oo < x) & (x < 3)
To always get a list of solution mappings, use flag dict=True:
>>> solve(x - 3, dict=True)
[{x: 3}]
>>> sol = solve([x - 3, y - 1], dict=True)
>>> sol
[{x: 3, y: 1}]
>>> sol[0][x]
3
>>> sol[0][y]
1
To get a list of *symbols* and set of solution(s) use flag set=True:
>>> solve([x**2 - 3, y - 1], set=True)
([x, y], {(-sqrt(3), 1), (sqrt(3), 1)})
Single expression and single symbol that is in the expression:
>>> solve(x - y, x)
[y]
>>> solve(x - 3, x)
[3]
>>> solve(Eq(x, 3), x)
[3]
>>> solve(Poly(x - 3), x)
[3]
>>> solve(x**2 - y**2, x, set=True)
([x], {(-y,), (y,)})
>>> solve(x**4 - 1, x, set=True)
([x], {(-1,), (1,), (-I,), (I,)})
Single expression with no symbol that is in the expression:
>>> solve(3, x)
[]
>>> solve(x - 3, y)
[]
Single expression with no symbol given. In this case, all free *symbols*
will be selected as potential *symbols* to solve for. If the equation is
univariate then a list of solutions is returned; otherwise - as is the case
when *symbols* are given as an iterable of length greater than 1 - a list of
mappings will be returned:
>>> solve(x - 3)
[3]
>>> solve(x**2 - y**2)
[{x: -y}, {x: y}]
>>> solve(z**2*x**2 - z**2*y**2)
[{x: -y}, {x: y}, {z: 0}]
>>> solve(z**2*x - z**2*y**2)
[{x: y**2}, {z: 0}]
When an object other than a Symbol is given as a symbol, it is
isolated algebraically and an implicit solution may be obtained.
This is mostly provided as a convenience to save you from replacing
the object with a Symbol and solving for that Symbol. It will only
work if the specified object can be replaced with a Symbol using the
subs method:
>>> solve(f(x) - x, f(x))
[x]
>>> solve(f(x).diff(x) - f(x) - x, f(x).diff(x))
[x + f(x)]
>>> solve(f(x).diff(x) - f(x) - x, f(x))
[-x + Derivative(f(x), x)]
>>> solve(x + exp(x)**2, exp(x), set=True)
([exp(x)], {(-sqrt(-x),), (sqrt(-x),)})
>>> from sympy import Indexed, IndexedBase, Tuple, sqrt
>>> A = IndexedBase('A')
>>> eqs = Tuple(A[1] + A[2] - 3, A[1] - A[2] + 1)
>>> solve(eqs, eqs.atoms(Indexed))
{A[1]: 1, A[2]: 2}
* To solve for a symbol implicitly, use implicit=True:
>>> solve(x + exp(x), x)
[-LambertW(1)]
>>> solve(x + exp(x), x, implicit=True)
[-exp(x)]
* It is possible to solve for anything that can be targeted with
subs:
>>> solve(x + 2 + sqrt(3), x + 2)
[-sqrt(3)]
>>> solve((x + 2 + sqrt(3), x + 4 + y), y, x + 2)
{y: -2 + sqrt(3), x + 2: -sqrt(3)}
* Nothing heroic is done in this implicit solving so you may end up
with a symbol still in the solution:
>>> eqs = (x*y + 3*y + sqrt(3), x + 4 + y)
>>> solve(eqs, y, x + 2)
{y: -sqrt(3)/(x + 3), x + 2: -2*x/(x + 3) - 6/(x + 3) + sqrt(3)/(x + 3)}
>>> solve(eqs, y*x, x)
{x: -y - 4, x*y: -3*y - sqrt(3)}
        * If you attempt to solve for a number, remember that obtaining
          a value for it does not mean that the number and that value are
          equivalent:
>>> solve(sqrt(2) - 1, 1)
[sqrt(2)]
>>> solve(x - y + 1, 1) # /!\ -1 is targeted, too
[x/(y - 1)]
>>> [_.subs(z, -1) for _ in solve((x - y + 1).subs(-1, z), 1)]
[-x + y]
* To solve for a function within a derivative, use ``dsolve``.
Single expression and more than one symbol:
* When there is a linear solution:
>>> solve(x - y**2, x, y)
[(y**2, y)]
>>> solve(x**2 - y, x, y)
[(x, x**2)]
>>> solve(x**2 - y, x, y, dict=True)
[{y: x**2}]
* When undetermined coefficients are identified:
* That are linear:
>>> solve((a + b)*x - b + 2, a, b)
{a: -2, b: 2}
* That are nonlinear:
>>> solve((a + b)*x - b**2 + 2, a, b, set=True)
([a, b], {(-sqrt(2), sqrt(2)), (sqrt(2), -sqrt(2))})
* If there is no linear solution, then the first successful
attempt for a nonlinear solution will be returned:
>>> solve(x**2 - y**2, x, y, dict=True)
[{x: -y}, {x: y}]
>>> solve(x**2 - y**2/exp(x), x, y, dict=True)
[{x: 2*LambertW(-y/2)}, {x: 2*LambertW(y/2)}]
>>> solve(x**2 - y**2/exp(x), y, x)
[(-x*sqrt(exp(x)), x), (x*sqrt(exp(x)), x)]
Iterable of one or more of the above:
* Involving relationals or bools:
>>> solve([x < 3, x - 2])
Eq(x, 2)
>>> solve([x > 3, x - 2])
False
* When the system is linear:
* With a solution:
>>> solve([x - 3], x)
{x: 3}
>>> solve((x + 5*y - 2, -3*x + 6*y - 15), x, y)
{x: -3, y: 1}
>>> solve((x + 5*y - 2, -3*x + 6*y - 15), x, y, z)
{x: -3, y: 1}
>>> solve((x + 5*y - 2, -3*x + 6*y - z), z, x, y)
{x: 2 - 5*y, z: 21*y - 6}
* Without a solution:
>>> solve([x + 3, x - 3])
[]
* When the system is not linear:
>>> solve([x**2 + y -2, y**2 - 4], x, y, set=True)
([x, y], {(-2, -2), (0, 2), (2, -2)})
* If no *symbols* are given, all free *symbols* will be selected and a
list of mappings returned:
>>> solve([x - 2, x**2 + y])
[{x: 2, y: -4}]
>>> solve([x - 2, x**2 + f(x)], {f(x), x})
[{x: 2, f(x): -4}]
* If any equation does not depend on the symbol(s) given, it will be
eliminated from the equation set and an answer may be given
implicitly in terms of variables that were not of interest:
>>> solve([x - y, y - 3], x)
{x: y}
**Additional Examples**
``solve()`` with check=True (default) will run through the symbol tags to
    eliminate unwanted solutions. If no assumptions are included, all possible
solutions will be returned:
>>> from sympy import Symbol, solve
>>> x = Symbol("x")
>>> solve(x**2 - 1)
[-1, 1]
By using the positive tag, only one solution will be returned:
>>> pos = Symbol("pos", positive=True)
>>> solve(pos**2 - 1)
[1]
Assumptions are not checked when ``solve()`` input involves
relationals or bools.
When the solutions are checked, those that make any denominator zero
are automatically excluded. If you do not want to exclude such solutions,
then use the check=False option:
>>> from sympy import sin, limit
>>> solve(sin(x)/x) # 0 is excluded
[pi]
If check=False, then a solution to the numerator being zero is found: x = 0.
    This is a spurious solution since $\sin(x)/x$ has the well-known
    limit (without discontinuity) of 1 at x = 0:
>>> solve(sin(x)/x, check=False)
[0, pi]
In the following case, however, the limit exists and is equal to the
value of x = 0 that is excluded when check=True:
>>> eq = x**2*(1/x - z**2/x)
>>> solve(eq, x)
[]
>>> solve(eq, x, check=False)
[0]
>>> limit(eq, x, 0, '-')
0
>>> limit(eq, x, 0, '+')
0
**Disabling High-Order Explicit Solutions**
When solving polynomial expressions, you might not want explicit solutions
(which can be quite long). If the expression is univariate, ``CRootOf``
instances will be returned instead:
>>> solve(x**3 - x + 1)
[-1/((-1/2 - sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)) - (-1/2 -
sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3, -(-1/2 +
sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3 - 1/((-1/2 +
sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)), -(3*sqrt(69)/2 +
27/2)**(1/3)/3 - 1/(3*sqrt(69)/2 + 27/2)**(1/3)]
>>> solve(x**3 - x + 1, cubics=False)
[CRootOf(x**3 - x + 1, 0),
CRootOf(x**3 - x + 1, 1),
CRootOf(x**3 - x + 1, 2)]
If the expression is multivariate, no solution might be returned:
>>> solve(x**3 - x + a, x, cubics=False)
[]
Sometimes solutions will be obtained even when a flag is False because the
expression could be factored. In the following example, the equation can
be factored as the product of a linear and a quadratic factor so explicit
solutions (which did not require solving a cubic expression) are obtained:
>>> eq = x**3 + 3*x**2 + x - 1
>>> solve(eq, cubics=False)
[-1, -1 + sqrt(2), -sqrt(2) - 1]
**Solving Equations Involving Radicals**
Because of SymPy's use of the principle root, some solutions
to radical equations will be missed unless check=False:
>>> from sympy import root
>>> eq = root(x**3 - 3*x**2, 3) + 1 - x
>>> solve(eq)
[]
>>> solve(eq, check=False)
[1/3]
In the above example, there is only a single solution to the
equation. Other expressions will yield spurious roots which
must be checked manually; roots which give a negative argument
to odd-powered radicals will also need special checking:
>>> from sympy import real_root, S
>>> eq = root(x, 3) - root(x, 5) + S(1)/7
>>> solve(eq) # this gives 2 solutions but misses a 3rd
[CRootOf(7*x**5 - 7*x**3 + 1, 1)**15,
CRootOf(7*x**5 - 7*x**3 + 1, 2)**15]
>>> sol = solve(eq, check=False)
>>> [abs(eq.subs(x,i).n(2)) for i in sol]
[0.48, 0.e-110, 0.e-110, 0.052, 0.052]
The first solution is negative so ``real_root`` must be used to see that it
satisfies the expression:
>>> abs(real_root(eq.subs(x, sol[0])).n(2))
0.e-110
If the roots of the equation are not real then more care will be
necessary to find the roots, especially for higher order equations.
Consider the following expression:
>>> expr = root(x, 3) - root(x, 5)
We will construct a known value for this expression at x = 3 by selecting
the 1-th root for each radical:
>>> expr1 = root(x, 3, 1) - root(x, 5, 1)
>>> v = expr1.subs(x, -3)
The ``solve`` function is unable to find any exact roots to this equation:
>>> eq = Eq(expr, v); eq1 = Eq(expr1, v)
>>> solve(eq, check=False), solve(eq1, check=False)
([], [])
The function ``unrad``, however, can be used to get a form of the equation
for which numerical roots can be found:
>>> from sympy.solvers.solvers import unrad
>>> from sympy import nroots
>>> e, (p, cov) = unrad(eq)
>>> pvals = nroots(e)
>>> inversion = solve(cov, x)[0]
>>> xvals = [inversion.subs(p, i) for i in pvals]
Although ``eq`` or ``eq1`` could have been used to find ``xvals``, the
solution can only be verified with ``expr1``:
>>> z = expr - v
>>> [xi.n(chop=1e-9) for xi in xvals if abs(z.subs(x, xi).n()) < 1e-9]
[]
>>> z1 = expr1 - v
>>> [xi.n(chop=1e-9) for xi in xvals if abs(z1.subs(x, xi).n()) < 1e-9]
[-3.0]
Parameters
==========
f :
- a single Expr or Poly that must be zero
- an Equality
- a Relational expression
- a Boolean
- iterable of one or more of the above
symbols : (object(s) to solve for) specified as
- none given (other non-numeric objects will be used)
- single symbol
- denested list of symbols
(e.g., ``solve(f, x, y)``)
- ordered iterable of symbols
(e.g., ``solve(f, [x, y])``)
flags :
dict=True (default is False)
Return list (perhaps empty) of solution mappings.
set=True (default is False)
Return list of symbols and set of tuple(s) of solution(s).
exclude=[] (default)
Do not try to solve for any of the free symbols in exclude;
if expressions are given, the free symbols in them will
be extracted automatically.
check=True (default)
If False, do not do any testing of solutions. This can be
useful if you want to include solutions that make any
denominator zero.
numerical=True (default)
Do a fast numerical check if *f* has only one symbol.
minimal=True (default is False)
A very fast, minimal testing.
warn=True (default is False)
Show a warning if ``checksol()`` could not conclude.
simplify=True (default)
Simplify all but polynomials of order 3 or greater before
returning them and (if check is not False) use the
general simplify function on the solutions and the
expression obtained when they are substituted into the
function which should be zero.
force=True (default is False)
Make positive all symbols without assumptions regarding sign.
rational=True (default)
Recast Floats as Rational; if this option is not used, the
system containing Floats may fail to solve because of issues
with polys. If rational=None, Floats will be recast as
rationals but the answer will be recast as Floats. If the
flag is False then nothing will be done to the Floats.
manual=True (default is False)
Do not use the polys/matrix method to solve a system of
equations, solve them one at a time as you might "manually."
implicit=True (default is False)
Allows ``solve`` to return a solution for a pattern in terms of
other functions that contain that pattern; this is only
needed if the pattern is inside of some invertible function
            like cos, exp, etc.
particular=True (default is False)
Instructs ``solve`` to try to find a particular solution to a linear
system with as many zeros as possible; this is very expensive.
quick=True (default is False)
When using particular=True, use a fast heuristic to find a
solution with many zeros (instead of using the very slow method
guaranteed to find the largest number of zeros possible).
cubics=True (default)
Return explicit solutions when cubic expressions are encountered.
quartics=True (default)
Return explicit solutions when quartic expressions are encountered.
quintics=True (default)
Return explicit solutions (if possible) when quintic expressions
are encountered.
See Also
========
rsolve: For solving recurrence relationships
dsolve: For solving differential equations
"""
# keeping track of how f was passed since if it is a list
# a dictionary of results will be returned.
###########################################################################
def _sympified_list(w):
return list(map(sympify, w if iterable(w) else [w]))
bare_f = not iterable(f)
ordered_symbols = (symbols and
symbols[0] and
(isinstance(symbols[0], Symbol) or
is_sequence(symbols[0],
include=GeneratorType)
)
)
f, symbols = (_sympified_list(w) for w in [f, symbols])
if isinstance(f, list):
f = [s for s in f if s is not S.true and s is not True]
implicit = flags.get('implicit', False)
# preprocess symbol(s)
###########################################################################
if not symbols:
# get symbols from equations
symbols = set().union(*[fi.free_symbols for fi in f])
if len(symbols) < len(f):
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if isinstance(p, AppliedUndef):
flags['dict'] = True # better show symbols
symbols.add(p)
pot.skip() # don't go any deeper
symbols = list(symbols)
ordered_symbols = False
elif len(symbols) == 1 and iterable(symbols[0]):
symbols = symbols[0]
# remove symbols the user is not interested in
exclude = flags.pop('exclude', set())
if exclude:
if isinstance(exclude, Expr):
exclude = [exclude]
exclude = set().union(*[e.free_symbols for e in sympify(exclude)])
symbols = [s for s in symbols if s not in exclude]
# preprocess equation(s)
###########################################################################
for i, fi in enumerate(f):
if isinstance(fi, (Equality, Unequality)):
if 'ImmutableDenseMatrix' in [type(a).__name__ for a in fi.args]:
fi = fi.lhs - fi.rhs
else:
L, R = fi.args
if isinstance(R, BooleanAtom):
L, R = R, L
if isinstance(L, BooleanAtom):
if isinstance(fi, Unequality):
L = ~L
if R.is_Relational:
fi = ~R if L is S.false else R
elif R.is_Symbol:
return L
elif R.is_Boolean and (~R).is_Symbol:
return ~L
else:
raise NotImplementedError(filldedent('''
Unanticipated argument of Eq when other arg
is True or False.
'''))
else:
fi = fi.rewrite(Add, evaluate=False)
f[i] = fi
if fi.is_Relational:
return reduce_inequalities(f, symbols=symbols)
if isinstance(fi, Poly):
f[i] = fi.as_expr()
# rewrite hyperbolics in terms of exp
f[i] = f[i].replace(lambda w: isinstance(w, HyperbolicFunction),
lambda w: w.rewrite(exp))
# if we have a Matrix, we need to iterate over its elements again
if f[i].is_Matrix:
bare_f = False
f.extend(list(f[i]))
f[i] = S.Zero
# if we can split it into real and imaginary parts then do so
freei = f[i].free_symbols
if freei and all(s.is_extended_real or s.is_imaginary for s in freei):
fr, fi = f[i].as_real_imag()
# accept as long as new re, im, arg or atan2 are not introduced
had = f[i].atoms(re, im, arg, atan2)
if fr and fi and fr != fi and not any(
i.atoms(re, im, arg, atan2) - had for i in (fr, fi)):
if bare_f:
bare_f = False
f[i: i + 1] = [fr, fi]
# real/imag handling -----------------------------
if any(isinstance(fi, (bool, BooleanAtom)) for fi in f):
if flags.get('set', False):
return [], set()
return []
for i, fi in enumerate(f):
# Abs
while True:
was = fi
fi = fi.replace(Abs, lambda arg:
separatevars(Abs(arg)).rewrite(Piecewise) if arg.has(*symbols)
else Abs(arg))
if was == fi:
break
for e in fi.find(Abs):
if e.has(*symbols):
raise NotImplementedError('solving %s when the argument '
'is not real or imaginary.' % e)
# arg
fi = fi.replace(arg, lambda a: arg(a).rewrite(atan2).rewrite(atan))
# save changes
f[i] = fi
# see if re(s) or im(s) appear
freim = [fi for fi in f if fi.has(re, im)]
if freim:
irf = []
for s in symbols:
if s.is_real or s.is_imaginary:
continue # neither re(x) nor im(x) will appear
# if re(s) or im(s) appear, the auxiliary equation must be present
if any(fi.has(re(s), im(s)) for fi in freim):
irf.append((s, re(s) + S.ImaginaryUnit*im(s)))
if irf:
for s, rhs in irf:
for i, fi in enumerate(f):
f[i] = fi.xreplace({s: rhs})
f.append(s - rhs)
symbols.extend([re(s), im(s)])
if bare_f:
bare_f = False
flags['dict'] = True
# end of real/imag handling -----------------------------
symbols = list(uniq(symbols))
if not ordered_symbols:
# we do this to make the results returned canonical in case f
# contains a system of nonlinear equations; all other cases should
# be unambiguous
symbols = sorted(symbols, key=default_sort_key)
# we can solve for non-symbol entities by replacing them with Dummy symbols
f, symbols, swap_sym = recast_to_symbols(f, symbols)
# this is needed in the next two events
symset = set(symbols)
# get rid of equations that have no symbols of interest; we don't
# try to solve them because the user didn't ask and they might be
# hard to solve; this means that solutions may be given in terms
# of the eliminated equations e.g. solve((x-y, y-3), x) -> {x: y}
newf = []
for fi in f:
# let the solver handle equations that..
# - have no symbols but are expressions
# - have symbols of interest
# - have no symbols of interest but are constant
# but when an expression is not constant and has no symbols of
# interest, it can't change what we obtain for a solution from
# the remaining equations so we don't include it; and if it's
# zero it can be removed and if it's not zero, there is no
# solution for the equation set as a whole
#
# The reason for doing this filtering is to allow an answer
# to be obtained to queries like solve((x - y, y), x); without
# this mod the return value is []
ok = False
if fi.free_symbols & symset:
ok = True
else:
if fi.is_number:
if fi.is_Number:
if fi.is_zero:
continue
return []
ok = True
else:
if fi.is_constant():
ok = True
if ok:
newf.append(fi)
if not newf:
return []
f = newf
del newf
# mask off any Object that we aren't going to invert: Derivative,
# Integral, etc... so that solving for anything that they contain will
# give an implicit solution
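    # e.g. when solving f(x).diff(x) - f(x) - x for f(x), the Derivative is
    # replaced by a Dummy so the result is returned in terms of
    # Derivative(f(x), x) (see the corresponding example in the docstring)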
seen = set()
non_inverts = set()
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if not isinstance(p, Expr) or isinstance(p, Piecewise):
pass
elif (isinstance(p, bool) or
not p.args or
p in symset or
p.is_Add or p.is_Mul or
p.is_Pow and not implicit or
p.is_Function and not implicit) and p.func not in (re, im):
continue
elif not p in seen:
seen.add(p)
if p.free_symbols & symset:
non_inverts.add(p)
else:
continue
pot.skip()
del seen
non_inverts = dict(list(zip(non_inverts, [Dummy() for _ in non_inverts])))
f = [fi.subs(non_inverts) for fi in f]
# Both xreplace and subs are needed below: xreplace to force substitution
# inside Derivative, subs to handle non-straightforward substitutions
non_inverts = [(v, k.xreplace(swap_sym).subs(swap_sym)) for k, v in non_inverts.items()]
# rationalize Floats
floats = False
if flags.get('rational', True) is not False:
for i, fi in enumerate(f):
if fi.has(Float):
floats = True
f[i] = nsimplify(fi, rational=True)
# capture any denominators before rewriting since
# they may disappear after the rewrite, e.g. issue 14779
flags['_denominators'] = _simple_dens(f[0], symbols)
# Any embedded piecewise functions need to be brought out to the
# top level so that the appropriate strategy gets selected.
# However, this is necessary only if one of the piecewise
# functions depends on one of the symbols we are solving for.
def _has_piecewise(e):
if e.is_Piecewise:
return e.has(*symbols)
return any([_has_piecewise(a) for a in e.args])
for i, fi in enumerate(f):
if _has_piecewise(fi):
f[i] = piecewise_fold(fi)
#
# try to get a solution
###########################################################################
if bare_f:
solution = _solve(f[0], *symbols, **flags)
else:
solution = _solve_system(f, symbols, **flags)
#
# postprocessing
###########################################################################
# Restore masked-off objects
if non_inverts:
def _do_dict(solution):
return {k: v.subs(non_inverts) for k, v in
solution.items()}
for i in range(1):
if isinstance(solution, dict):
solution = _do_dict(solution)
break
elif solution and isinstance(solution, list):
if isinstance(solution[0], dict):
solution = [_do_dict(s) for s in solution]
break
elif isinstance(solution[0], tuple):
solution = [tuple([v.subs(non_inverts) for v in s]) for s
in solution]
break
else:
solution = [v.subs(non_inverts) for v in solution]
break
elif not solution:
break
else:
raise NotImplementedError(filldedent('''
no handling of %s was implemented''' % solution))
# Restore original "symbols" if a dictionary is returned.
# This is not necessary for
# - the single univariate equation case
# since the symbol will have been removed from the solution;
# - the nonlinear poly_system since that only supports zero-dimensional
# systems and those results come back as a list
#
# ** unless there were Derivatives with the symbols, but those were handled
# above.
if swap_sym:
symbols = [swap_sym.get(k, k) for k in symbols]
if isinstance(solution, dict):
solution = {swap_sym.get(k, k): v.subs(swap_sym)
for k, v in solution.items()}
elif solution and isinstance(solution, list) and isinstance(solution[0], dict):
for i, sol in enumerate(solution):
solution[i] = {swap_sym.get(k, k): v.subs(swap_sym)
for k, v in sol.items()}
# undo the dictionary solutions returned when the system was only partially
# solved with poly-system if all symbols are present
if (
not flags.get('dict', False) and
solution and
ordered_symbols and
not isinstance(solution, dict) and
all(isinstance(sol, dict) for sol in solution)
):
solution = [tuple([r.get(s, s) for s in symbols]) for r in solution]
# Get assumptions about symbols, to filter solutions.
# Note that if assumptions about a solution can't be verified, it is still
# returned.
check = flags.get('check', True)
# restore floats
if floats and solution and flags.get('rational', None) is None:
solution = nfloat(solution, exponent=False)
if check and solution: # assumption checking
warn = flags.get('warn', False)
got_None = [] # solutions for which one or more symbols gave None
no_False = [] # solutions for which no symbols gave False
if isinstance(solution, tuple):
# this has already been checked and is in as_set form
return solution
elif isinstance(solution, list):
if isinstance(solution[0], tuple):
for sol in solution:
for symb, val in zip(symbols, sol):
test = check_assumptions(val, **symb.assumptions0)
if test is False:
break
if test is None:
got_None.append(sol)
else:
no_False.append(sol)
elif isinstance(solution[0], dict):
for sol in solution:
a_None = False
for symb, val in sol.items():
test = check_assumptions(val, **symb.assumptions0)
if test:
continue
if test is False:
break
a_None = True
else:
no_False.append(sol)
if a_None:
got_None.append(sol)
else: # list of expressions
for sol in solution:
test = check_assumptions(sol, **symbols[0].assumptions0)
if test is False:
continue
no_False.append(sol)
if test is None:
got_None.append(sol)
elif isinstance(solution, dict):
a_None = False
for symb, val in solution.items():
test = check_assumptions(val, **symb.assumptions0)
if test:
continue
if test is False:
no_False = None
break
a_None = True
else:
no_False = solution
if a_None:
got_None.append(solution)
elif isinstance(solution, (Relational, And, Or)):
if len(symbols) != 1:
raise ValueError("Length should be 1")
if warn and symbols[0].assumptions0:
warnings.warn(filldedent("""
\tWarning: assumptions about variable '%s' are
not handled currently.""" % symbols[0]))
# TODO: check also variable assumptions for inequalities
else:
raise TypeError('Unrecognized solution') # improve the checker
solution = no_False
if warn and got_None:
warnings.warn(filldedent("""
\tWarning: assumptions concerning following solution(s)
can't be checked:""" + '\n\t' +
', '.join(str(s) for s in got_None)))
#
# done
###########################################################################
as_dict = flags.get('dict', False)
as_set = flags.get('set', False)
if not as_set and isinstance(solution, list):
# Make sure that a list of solutions is ordered in a canonical way.
solution.sort(key=default_sort_key)
if not as_dict and not as_set:
return solution or []
# return a list of mappings or []
if not solution:
solution = []
else:
if isinstance(solution, dict):
solution = [solution]
elif iterable(solution[0]):
solution = [dict(list(zip(symbols, s))) for s in solution]
elif isinstance(solution[0], dict):
pass
else:
if len(symbols) != 1:
raise ValueError("Length should be 1")
solution = [{symbols[0]: s} for s in solution]
if as_dict:
return solution
assert as_set
if not solution:
return [], set()
k = list(ordered(solution[0].keys()))
return k, {tuple([s[ki] for ki in k]) for s in solution}
def _solve(f, *symbols, **flags):
"""
Return a checked solution for *f* in terms of one or more of the
symbols. A list should be returned except for the case when a linear
undetermined-coefficients equation is encountered (in which case
a dictionary is returned).
If no method is implemented to solve the equation, a NotImplementedError
will be raised. In the case that conversion of an expression to a Poly
gives None a ValueError will be raised.
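    Examples
    ========
    The result is sorted here only to keep the doctest output stable; the
    ordering of raw results is normally handled by ``solve``:
    >>> from sympy.abc import x
    >>> from sympy.solvers.solvers import _solve
    >>> sorted(_solve(x**2 - 4, x))
    [-2, 2]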
"""
not_impl_msg = "No algorithms are implemented to solve equation %s"
if len(symbols) != 1:
soln = None
free = f.free_symbols
ex = free - set(symbols)
if len(ex) != 1:
ind, dep = f.as_independent(*symbols)
ex = ind.free_symbols & dep.free_symbols
if len(ex) == 1:
ex = ex.pop()
try:
# soln may come back as dict, list of dicts or tuples, or
# tuple of symbol list and set of solution tuples
soln = solve_undetermined_coeffs(f, symbols, ex, **flags)
except NotImplementedError:
pass
if soln:
if flags.get('simplify', True):
if isinstance(soln, dict):
for k in soln:
soln[k] = simplify(soln[k])
elif isinstance(soln, list):
if isinstance(soln[0], dict):
for d in soln:
for k in d:
d[k] = simplify(d[k])
elif isinstance(soln[0], tuple):
soln = [tuple(simplify(i) for i in j) for j in soln]
else:
raise TypeError('unrecognized args in list')
elif isinstance(soln, tuple):
sym, sols = soln
soln = sym, {tuple(simplify(i) for i in j) for j in sols}
else:
raise TypeError('unrecognized solution type')
return soln
# find first successful solution
failed = []
got_s = set()
result = []
for s in symbols:
xi, v = solve_linear(f, symbols=[s])
if xi == s:
# no need to check but we should simplify if desired
if flags.get('simplify', True):
v = simplify(v)
vfree = v.free_symbols
                if got_s and any(ss in vfree for ss in got_s):
# sol depends on previously solved symbols: discard it
continue
got_s.add(xi)
result.append({xi: v})
elif xi: # there might be a non-linear solution if xi is not 0
failed.append(s)
if not failed:
return result
for s in failed:
try:
soln = _solve(f, s, **flags)
for sol in soln:
                    if got_s and any(ss in sol.free_symbols for ss in got_s):
# sol depends on previously solved symbols: discard it
continue
got_s.add(s)
result.append({s: sol})
except NotImplementedError:
continue
if got_s:
return result
else:
raise NotImplementedError(not_impl_msg % f)
symbol = symbols[0]
    # expand binomials only if they contain the unknown symbol
f = f.replace(lambda e: isinstance(e, binomial) and e.has(symbol),
lambda e: expand_func(e))
# /!\ capture this flag then set it to False so that no checking in
# recursive calls will be done; only the final answer is checked
flags['check'] = checkdens = check = flags.pop('check', True)
# build up solutions if f is a Mul
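    # e.g. (x - 1)*(x + 2) is handled factor by factor and the factor
    # solutions are combined into a single set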
if f.is_Mul:
result = set()
for m in f.args:
if m in {S.NegativeInfinity, S.ComplexInfinity, S.Infinity}:
result = set()
break
soln = _solve(m, symbol, **flags)
result.update(set(soln))
result = list(result)
if check:
# all solutions have been checked but now we must
# check that the solutions do not set denominators
# in any factor to zero
dens = flags.get('_denominators', _simple_dens(f, symbols))
result = [s for s in result if
all(not checksol(den, {symbol: s}, **flags) for den in
dens)]
# set flags for quick exit at end; solutions for each
# factor were already checked and simplified
check = False
flags['simplify'] = False
elif f.is_Piecewise:
result = set()
for i, (expr, cond) in enumerate(f.args):
if expr.is_zero:
raise NotImplementedError(
'solve cannot represent interval solutions')
candidates = _solve(expr, symbol, **flags)
# the explicit condition for this expr is the current cond
# and none of the previous conditions
args = [~c for _, c in f.args[:i]] + [cond]
cond = And(*args)
for candidate in candidates:
if candidate in result:
# an unconditional value was already there
continue
try:
v = cond.subs(symbol, candidate)
_eval_simplify = getattr(v, '_eval_simplify', None)
if _eval_simplify is not None:
                        # unconditionally take the simplification of v
v = _eval_simplify(ratio=2, measure=lambda x: 1)
except TypeError:
# incompatible type with condition(s)
continue
if v == False:
continue
if v == True:
result.add(candidate)
else:
result.add(Piecewise(
(candidate, v),
(S.NaN, True)))
# set flags for quick exit at end; solutions for each
# piece were already checked and simplified
check = False
flags['simplify'] = False
else:
# first see if it really depends on symbol and whether there
# is only a linear solution
f_num, sol = solve_linear(f, symbols=symbols)
if f_num.is_zero or sol is S.NaN:
return []
elif f_num.is_Symbol:
# no need to check but simplify if desired
if flags.get('simplify', True):
sol = simplify(sol)
return [sol]
poly = None
# check for a single non-symbol generator
dums = f_num.atoms(Dummy)
D = f_num.replace(
lambda i: isinstance(i, Add) and symbol in i.free_symbols,
lambda i: Dummy())
if not D.is_Dummy:
dgen = D.atoms(Dummy) - dums
if len(dgen) == 1:
d = dgen.pop()
w = Wild('g')
gen = f_num.match(D.xreplace({d: w}))[w]
spart = gen.as_independent(symbol)[1].as_base_exp()[0]
if spart == symbol:
try:
poly = Poly(f_num, spart)
except PolynomialError:
pass
result = False # no solution was obtained
msg = '' # there is no failure message
# Poly is generally robust enough to convert anything to
# a polynomial and tell us the different generators that it
# contains, so we will inspect the generators identified by
# polys to figure out what to do.
# try to identify a single generator that will allow us to solve this
# as a polynomial, followed (perhaps) by a change of variables if the
# generator is not a symbol
try:
if poly is None:
poly = Poly(f_num)
if poly is None:
raise ValueError('could not convert %s to Poly' % f_num)
except GeneratorsNeeded:
simplified_f = simplify(f_num)
if simplified_f != f_num:
return _solve(simplified_f, symbol, **flags)
raise ValueError('expression appears to be a constant')
gens = [g for g in poly.gens if g.has(symbol)]
def _as_base_q(x):
"""Return (b**e, q) for x = b**(p*e/q) where p/q is the leading
Rational of the exponent of x, e.g. exp(-2*x/3) -> (exp(x), 3)
"""
b, e = x.as_base_exp()
if e.is_Rational:
return b, e.q
if not e.is_Mul:
return x, 1
c, ee = e.as_coeff_Mul()
if c.is_Rational and c is not S.One: # c could be a Float
return b**ee, c.q
return x, 1
if len(gens) > 1:
# If there is more than one generator, it could be that the
# generators have the same base but different powers, e.g.
# >>> Poly(exp(x) + 1/exp(x))
# Poly(exp(-x) + exp(x), exp(-x), exp(x), domain='ZZ')
#
# If unrad was not disabled then there should be no rational
# exponents appearing as in
# >>> Poly(sqrt(x) + sqrt(sqrt(x)))
# Poly(sqrt(x) + x**(1/4), sqrt(x), x**(1/4), domain='ZZ')
bases, qs = list(zip(*[_as_base_q(g) for g in gens]))
bases = set(bases)
if len(bases) > 1 or not all(q == 1 for q in qs):
funcs = {b for b in bases if b.is_Function}
trig = {_ for _ in funcs if
isinstance(_, TrigonometricFunction)}
other = funcs - trig
if not other and len(funcs.intersection(trig)) > 1:
newf = None
if f_num.is_Add and len(f_num.args) == 2:
# check for sin(x)**p = cos(x)**p
_args = f_num.args
t = a, b = [i.atoms(Function).intersection(
trig) for i in _args]
if all(len(i) == 1 for i in t):
a, b = [i.pop() for i in t]
if isinstance(a, cos):
a, b = b, a
_args = _args[::-1]
if isinstance(a, sin) and isinstance(b, cos
) and a.args[0] == b.args[0]:
# sin(x) + cos(x) = 0 -> tan(x) + 1 = 0
newf, _d = (TR2i(_args[0]/_args[1]) + 1
).as_numer_denom()
if not _d.is_Number:
newf = None
if newf is None:
newf = TR1(f_num).rewrite(tan)
if newf != f_num:
                        # don't check the rewritten form -- check
# solutions in the un-rewritten form below
flags['check'] = False
result = _solve(newf, symbol, **flags)
flags['check'] = check
# just a simple case - see if replacement of single function
# clears all symbol-dependent functions, e.g.
# log(x) - log(log(x) - 1) - 3 can be solved even though it has
# two generators.
if result is False and funcs:
funcs = list(ordered(funcs)) # put shallowest function first
f1 = funcs[0]
t = Dummy('t')
# perform the substitution
ftry = f_num.subs(f1, t)
# if no Functions left, we can proceed with usual solve
if not ftry.has(symbol):
cv_sols = _solve(ftry, t, **flags)
cv_inv = _solve(t - f1, symbol, **flags)[0]
sols = list()
for sol in cv_sols:
sols.append(cv_inv.subs(t, sol))
result = list(ordered(sols))
if result is False:
msg = 'multiple generators %s' % gens
else:
# e.g. case where gens are exp(x), exp(-x)
u = bases.pop()
t = Dummy('t')
inv = _solve(u - t, symbol, **flags)
if isinstance(u, (Pow, exp)):
# this will be resolved by factor in _tsolve but we might
# as well try a simple expansion here to get things in
# order so something like the following will work now without
# having to factor:
#
# >>> eq = (exp(I*(-x-2))+exp(I*(x+2)))
# >>> eq.subs(exp(x),y) # fails
# exp(I*(-x - 2)) + exp(I*(x + 2))
# >>> eq.expand().subs(exp(x),y) # works
# y**I*exp(2*I) + y**(-I)*exp(-2*I)
def _expand(p):
b, e = p.as_base_exp()
e = expand_mul(e)
return expand_power_exp(b**e)
ftry = f_num.replace(
lambda w: w.is_Pow or isinstance(w, exp),
_expand).subs(u, t)
if not ftry.has(symbol):
soln = _solve(ftry, t, **flags)
sols = list()
for sol in soln:
for i in inv:
sols.append(i.subs(t, sol))
result = list(ordered(sols))
elif len(gens) == 1:
# There is only one generator that we are interested in, but
# there may have been more than one generator identified by
# polys (e.g. for symbols other than the one we are interested
# in) so recast the poly in terms of our generator of interest.
# Also use composite=True with f_num since Poly won't update
# poly as documented in issue 8810.
poly = Poly(f_num, gens[0], composite=True)
# if we aren't on the tsolve-pass, use roots
if not flags.pop('tsolve', False):
soln = None
deg = poly.degree()
flags['tsolve'] = True
solvers = {k: flags.get(k, True) for k in
('cubics', 'quartics', 'quintics')}
soln = roots(poly, **solvers)
if sum(soln.values()) < deg:
# e.g. roots(32*x**5 + 400*x**4 + 2032*x**3 +
# 5000*x**2 + 6250*x + 3189) -> {}
# so all_roots is used and RootOf instances are
# returned *unless* the system is multivariate
# or high-order EX domain.
try:
soln = poly.all_roots()
except NotImplementedError:
if not flags.get('incomplete', True):
raise NotImplementedError(
filldedent('''
Neither high-order multivariate polynomials
nor sorting of EX-domain polynomials is supported.
If you want to see any results, pass keyword incomplete=True to
solve; to see numerical values of roots
for univariate expressions, use nroots.
'''))
else:
pass
else:
soln = list(soln.keys())
if soln is not None:
u = poly.gen
if u != symbol:
try:
t = Dummy('t')
iv = _solve(u - t, symbol, **flags)
soln = list(ordered({i.subs(t, s) for i in iv for s in soln}))
except NotImplementedError:
# perhaps _tsolve can handle f_num
soln = None
else:
check = False # only dens need to be checked
if soln is not None:
if len(soln) > 2:
# if the flag wasn't set then unset it since high-order
# results are quite long. Perhaps one could base this
# decision on a certain critical length of the
# roots. In addition, wester test M2 has an expression
# whose roots can be shown to be real with the
# unsimplified form of the solution whereas only one of
# the simplified forms appears to be real.
flags['simplify'] = flags.get('simplify', False)
result = soln
# fallback if above fails
# -----------------------
if result is False:
# try unrad
if flags.pop('_unrad', True):
try:
u = unrad(f_num, symbol)
except (ValueError, NotImplementedError):
u = False
if u:
eq, cov = u
if cov:
isym, ieq = cov
inv = _solve(ieq, symbol, **flags)[0]
rv = {inv.subs(isym, xi) for xi in _solve(eq, isym, **flags)}
else:
try:
rv = set(_solve(eq, symbol, **flags))
except NotImplementedError:
rv = None
if rv is not None:
result = list(ordered(rv))
# if the flag wasn't set then unset it since unrad results
# can be quite long or of very high order
flags['simplify'] = flags.get('simplify', False)
else:
pass # for coverage
# try _tsolve
if result is False:
flags.pop('tsolve', None) # allow tsolve to be used on next pass
try:
soln = _tsolve(f_num, symbol, **flags)
if soln is not None:
result = soln
except PolynomialError:
pass
# ----------- end of fallback ----------------------------
if result is False:
raise NotImplementedError('\n'.join([msg, not_impl_msg % f]))
if flags.get('simplify', True):
result = list(map(simplify, result))
# we just simplified the solution so we now set the flag to
# False so the simplification doesn't happen again in checksol()
flags['simplify'] = False
if checkdens:
# reject any result that makes any denom. affirmatively 0;
# if in doubt, keep it
dens = _simple_dens(f, symbols)
result = [s for s in result if
all(not checksol(d, {symbol: s}, **flags)
for d in dens)]
if check:
# keep only results if the check is not False
result = [r for r in result if
checksol(f_num, {symbol: r}, **flags) is not False]
return result
def _solve_system(exprs, symbols, **flags):
if not exprs:
return []
if flags.pop('_split', True):
# Split the system into connected components
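        # e.g. [x + y - 1, z - 3] with symbols (x, y, z) has no edge between
        # the two equations (they share no symbol), so each connected piece is
        # solved on its own below and the partial solutions are recombined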
V = exprs
symsset = set(symbols)
exprsyms = {e: e.free_symbols & symsset for e in exprs}
E = []
for n, e1 in enumerate(exprs):
for e2 in exprs[:n]:
# Equations are connected if they share a symbol
if exprsyms[e1] & exprsyms[e2]:
E.append((e1, e2))
G = V, E
subexprs = connected_components(G)
if len(subexprs) > 1:
subsols = []
for subexpr in subexprs:
subsyms = set()
for e in subexpr:
subsyms |= exprsyms[e]
subsyms = list(ordered(subsyms))
# use canonical subset to solve these equations
# since there may be redundant equations in the set:
# take the first equation of several that may have the
# same sub-maximal free symbols of interest; the
# other equations that weren't used should be checked
# to see that they did not fail -- does the solver
# take care of that?
choices = sift(subexpr, lambda x: tuple(ordered(exprsyms[x])))
subexpr = choices.pop(tuple(ordered(subsyms)), [])
for k in choices:
subexpr.append(next(ordered(choices[k])))
flags['_split'] = False # skip split step
subsol = _solve_system(subexpr, subsyms, **flags)
if not isinstance(subsol, list):
subsol = [subsol]
subsols.append(subsol)
            # Full solution is the Cartesian product of the subsystem solutions
sols = []
for soldicts in cartes(*subsols):
sols.append(dict(item for sd in soldicts
for item in sd.items()))
            # A list containing a single dict is returned as just that dict
if len(sols) == 1:
return sols[0]
return sols
polys = []
dens = set()
failed = []
result = False
linear = False
manual = flags.get('manual', False)
checkdens = check = flags.get('check', True)
for j, g in enumerate(exprs):
dens.update(_simple_dens(g, symbols))
i, d = _invert(g, *symbols)
g = d - i
g = g.as_numer_denom()[0]
if manual:
failed.append(g)
continue
poly = g.as_poly(*symbols, extension=True)
if poly is not None:
polys.append(poly)
else:
failed.append(g)
if not polys:
solved_syms = []
else:
if all(p.is_linear for p in polys):
n, m = len(polys), len(symbols)
matrix = zeros(n, m + 1)
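            # fill the augmented matrix: a monomial with a single 1 in slot j
            # supplies the coefficient of symbols[j]; the constant monomial
            # (all zeros, so ``index`` fails) goes, negated, into column m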
for i, poly in enumerate(polys):
for monom, coeff in poly.terms():
try:
j = monom.index(1)
matrix[i, j] = coeff
except ValueError:
matrix[i, m] = -coeff
# returns a dictionary ({symbols: values}) or None
if flags.pop('particular', False):
result = minsolve_linear_system(matrix, *symbols, **flags)
else:
result = solve_linear_system(matrix, *symbols, **flags)
if failed:
if result:
solved_syms = list(result.keys())
else:
solved_syms = []
else:
linear = True
else:
if len(symbols) > len(polys):
from sympy.utilities.iterables import subsets
free = set().union(*[p.free_symbols for p in polys])
free = list(ordered(free.intersection(symbols)))
got_s = set()
result = []
for syms in subsets(free, len(polys)):
try:
# returns [] or list of tuples of solutions for syms
res = solve_poly_system(polys, *syms)
if res:
for r in res:
skip = False
for r1 in r:
                                    if got_s and any(ss in r1.free_symbols
                                            for ss in got_s):
# sol depends on previously
# solved symbols: discard it
skip = True
if not skip:
got_s.update(syms)
result.extend([dict(list(zip(syms, r)))])
except NotImplementedError:
pass
if got_s:
solved_syms = list(got_s)
else:
raise NotImplementedError('no valid subset found')
else:
try:
result = solve_poly_system(polys, *symbols)
if result:
solved_syms = symbols
# we don't know here if the symbols provided
# were given or not, so let solve resolve that.
# A list of dictionaries is going to always be
# returned from here.
result = [dict(list(zip(solved_syms, r))) for r in result]
except NotImplementedError:
failed.extend([g.as_expr() for g in polys])
solved_syms = []
result = None
if result:
if isinstance(result, dict):
result = [result]
else:
result = [{}]
if failed:
# For each failed equation, see if we can solve for one of the
# remaining symbols from that equation. If so, we update the
# solution set and continue with the next failed equation,
# repeating until we are done or we get an equation that can't
# be solved.
def _ok_syms(e, sort=False):
rv = (e.free_symbols - solved_syms) & legal
if sort:
rv = list(rv)
rv.sort(key=default_sort_key)
return rv
solved_syms = set(solved_syms) # set of symbols we have solved for
legal = set(symbols) # what we are interested in
# sort so equation with the fewest potential symbols is first
u = Dummy() # used in solution checking
for eq in ordered(failed, lambda _: len(_ok_syms(_))):
newresult = []
bad_results = []
got_s = set()
hit = False
for r in result:
# update eq with everything that is known so far
eq2 = eq.subs(r)
# if check is True then we see if it satisfies this
# equation, otherwise we just accept it
if check and r:
b = checksol(u, u, eq2, minimal=True)
if b is not None:
# this solution is sufficient to know whether
# it is valid or not so we either accept or
# reject it, then continue
if b:
newresult.append(r)
else:
bad_results.append(r)
continue
# search for a symbol amongst those available that
# can be solved for
ok_syms = _ok_syms(eq2, sort=True)
if not ok_syms:
if r:
newresult.append(r)
break # skip as it's independent of desired symbols
for s in ok_syms:
try:
soln = _solve(eq2, s, **flags)
except NotImplementedError:
continue
# put each solution in r and append the now-expanded
# result in the new result list; use copy since the
                    # solution for s is being added in-place
for sol in soln:
                        if got_s and any(ss in sol.free_symbols for ss in got_s):
# sol depends on previously solved symbols: discard it
continue
rnew = r.copy()
for k, v in r.items():
rnew[k] = v.subs(s, sol)
# and add this new solution
rnew[s] = sol
newresult.append(rnew)
hit = True
got_s.add(s)
if not hit:
raise NotImplementedError('could not solve %s' % eq2)
else:
result = newresult
for b in bad_results:
if b in result:
result.remove(b)
default_simplify = bool(failed) # rely on system-solvers to simplify
if flags.get('simplify', default_simplify):
for r in result:
for k in r:
r[k] = simplify(r[k])
flags['simplify'] = False # don't need to do so in checksol now
if checkdens:
result = [r for r in result
if not any(checksol(d, r, **flags) for d in dens)]
if check and not linear:
result = [r for r in result
if not any(checksol(e, r, **flags) is False for e in exprs)]
result = [r for r in result if r]
if linear and result:
result = result[0]
return result
def solve_linear(lhs, rhs=0, symbols=[], exclude=[]):
r"""
Return a tuple derived from ``f = lhs - rhs`` that is one of
the following: ``(0, 1)``, ``(0, 0)``, ``(symbol, solution)``, ``(n, d)``.
Explanation
===========
``(0, 1)`` meaning that ``f`` is independent of the symbols in *symbols*
that are not in *exclude*.
``(0, 0)`` meaning that there is no solution to the equation amongst the
symbols given. If the first element of the tuple is not zero, then the
function is guaranteed to be dependent on a symbol in *symbols*.
``(symbol, solution)`` where symbol appears linearly in the numerator of
``f``, is in *symbols* (if given), and is not in *exclude* (if given). No
simplification is done to ``f`` other than a ``mul=True`` expansion, so the
solution will correspond strictly to a unique solution.
``(n, d)`` where ``n`` and ``d`` are the numerator and denominator of ``f``
when the numerator was not linear in any symbol of interest; ``n`` will
never be a symbol unless a solution for that symbol was found (in which case
the second element is the solution, not the denominator).
Examples
========
>>> from sympy.core.power import Pow
>>> from sympy.polys.polytools import cancel
``f`` is independent of the symbols in *symbols* that are not in
*exclude*:
>>> from sympy.solvers.solvers import solve_linear
>>> from sympy.abc import x, y, z
>>> from sympy import cos, sin
>>> eq = y*cos(x)**2 + y*sin(x)**2 - y # = y*(1 - 1) = 0
>>> solve_linear(eq)
(0, 1)
>>> eq = cos(x)**2 + sin(x)**2 # = 1
>>> solve_linear(eq)
(0, 1)
>>> solve_linear(x, exclude=[x])
(0, 1)
The variable ``x`` appears as a linear variable in each of the
following:
>>> solve_linear(x + y**2)
(x, -y**2)
>>> solve_linear(1/x - y**2)
(x, y**(-2))
When not linear in ``x`` or ``y`` then the numerator and denominator are
returned:
>>> solve_linear(x**2/y**2 - 3)
(x**2 - 3*y**2, y**2)
If the numerator of the expression is a symbol, then ``(0, 0)`` is
returned if the solution for that symbol would have set any
denominator to 0:
>>> eq = 1/(1/x - 2)
>>> eq.as_numer_denom()
(x, 1 - 2*x)
>>> solve_linear(eq)
(0, 0)
But automatic rewriting may cause a symbol in the denominator to
appear in the numerator so a solution will be returned:
>>> (1/x)**-1
x
>>> solve_linear((1/x)**-1)
(x, 0)
Use an unevaluated expression to avoid this:
>>> solve_linear(Pow(1/x, -1, evaluate=False))
(0, 0)
If ``x`` is allowed to cancel in the following expression, then it
appears to be linear in ``x``, but this sort of cancellation is not
done by ``solve_linear`` so the solution will always satisfy the
original expression without causing a division by zero error.
>>> eq = x**2*(1/x - z**2/x)
>>> solve_linear(cancel(eq))
(x, 0)
>>> solve_linear(eq)
(x**2*(1 - z**2), x)
A list of symbols for which a solution is desired may be given:
>>> solve_linear(x + y + z, symbols=[y])
(y, -x - z)
A list of symbols to ignore may also be given:
>>> solve_linear(x + y + z, exclude=[x])
(y, -x - z)
(A solution for ``y`` is obtained because it is the first variable
from the canonically sorted list of symbols that had a linear
solution.)
"""
if isinstance(lhs, Equality):
if rhs:
raise ValueError(filldedent('''
If lhs is an Equality, rhs must be 0 but was %s''' % rhs))
rhs = lhs.rhs
lhs = lhs.lhs
dens = None
eq = lhs - rhs
n, d = eq.as_numer_denom()
if not n:
return S.Zero, S.One
free = n.free_symbols
if not symbols:
symbols = free
else:
bad = [s for s in symbols if not s.is_Symbol]
if bad:
if len(bad) == 1:
bad = bad[0]
if len(symbols) == 1:
eg = 'solve(%s, %s)' % (eq, symbols[0])
else:
eg = 'solve(%s, *%s)' % (eq, list(symbols))
raise ValueError(filldedent('''
solve_linear only handles symbols, not %s. To isolate
non-symbols use solve, e.g. >>> %s <<<.
''' % (bad, eg)))
symbols = free.intersection(symbols)
symbols = symbols.difference(exclude)
if not symbols:
return S.Zero, S.One
# derivatives are easy to do but tricky to analyze to see if they
# are going to disallow a linear solution, so for simplicity we
# just evaluate the ones that have the symbols of interest
derivs = defaultdict(list)
for der in n.atoms(Derivative):
csym = der.free_symbols & symbols
for c in csym:
derivs[c].append(der)
all_zero = True
for xi in sorted(symbols, key=default_sort_key): # canonical order
# if there are derivatives in this var, calculate them now
if isinstance(derivs[xi], list):
derivs[xi] = {der: der.doit() for der in derivs[xi]}
newn = n.subs(derivs[xi])
dnewn_dxi = newn.diff(xi)
        # dnewn_dxi can be nonzero if it survives differentiation by any
# of its free symbols
free = dnewn_dxi.free_symbols
if dnewn_dxi and (not free or any(dnewn_dxi.diff(s) for s in free)):
all_zero = False
if dnewn_dxi is S.NaN:
break
if xi not in dnewn_dxi.free_symbols:
vi = -1/dnewn_dxi*(newn.subs(xi, 0))
if dens is None:
dens = _simple_dens(eq, symbols)
if not any(checksol(di, {xi: vi}, minimal=True) is True
for di in dens):
# simplify any trivial integral
irep = [(i, i.doit()) for i in vi.atoms(Integral) if
i.function.is_number]
# do a slight bit of simplification
vi = expand_mul(vi.subs(irep))
return xi, vi
if all_zero:
return S.Zero, S.One
if n.is_Symbol: # no solution for this symbol was found
return S.Zero, S.Zero
return n, d
def minsolve_linear_system(system, *symbols, **flags):
r"""
Find a particular solution to a linear system.
Explanation
===========
In particular, try to find a solution with the minimal possible number
of non-zero variables using a naive algorithm with exponential complexity.
If ``quick=True``, a heuristic is used.
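    For example, for the single equation ``x + y + z = 1`` a solution with a
    single non-zero value (such as ``{x: 1, y: 0, z: 0}``) is preferred over a
    fully parametric one.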
"""
quick = flags.get('quick', False)
# Check if there are any non-zero solutions at all
s0 = solve_linear_system(system, *symbols, **flags)
if not s0 or all(v == 0 for v in s0.values()):
return s0
if quick:
# We just solve the system and try to heuristically find a nice
# solution.
s = solve_linear_system(system, *symbols)
def update(determined, solution):
delete = []
for k, v in solution.items():
solution[k] = v.subs(determined)
if not solution[k].free_symbols:
delete.append(k)
determined[k] = solution[k]
for k in delete:
del solution[k]
determined = {}
update(determined, s)
while s:
# NOTE sort by default_sort_key to get deterministic result
k = max((k for k in s.values()),
key=lambda x: (len(x.free_symbols), default_sort_key(x)))
x = max(k.free_symbols, key=default_sort_key)
if len(k.free_symbols) != 1:
determined[x] = S.Zero
else:
val = solve(k)[0]
if val == 0 and all(v.subs(x, val) == 0 for v in s.values()):
determined[x] = S.One
else:
determined[x] = val
update(determined, s)
return determined
else:
# We try to select n variables which we want to be non-zero.
# All others will be assumed zero. We try to solve the modified system.
# If there is a non-trivial solution, just set the free variables to
# one. If we do this for increasing n, trying all combinations of
# variables, we will find an optimal solution.
# We speed up slightly by starting at one less than the number of
# variables the quick method manages.
from itertools import combinations
from sympy.utilities.misc import debug
N = len(symbols)
bestsol = minsolve_linear_system(system, *symbols, quick=True)
n0 = len([x for x in bestsol.values() if x != 0])
for n in range(n0 - 1, 1, -1):
debug('minsolve: %s' % n)
thissol = None
for nonzeros in combinations(list(range(N)), n):
subm = Matrix([system.col(i).T for i in nonzeros] + [system.col(-1).T]).T
s = solve_linear_system(subm, *[symbols[i] for i in nonzeros])
if s and not all(v == 0 for v in s.values()):
subs = [(symbols[v], S.One) for v in nonzeros]
for k, v in s.items():
s[k] = v.subs(subs)
for sym in symbols:
if sym not in s:
if symbols.index(sym) in nonzeros:
s[sym] = S.One
else:
s[sym] = S.Zero
thissol = s
break
if thissol is None:
break
bestsol = thissol
return bestsol
def solve_linear_system(system, *symbols, **flags):
r"""
Solve system of $N$ linear equations with $M$ variables, which means
both under- and overdetermined systems are supported.
Explanation
===========
The possible number of solutions is zero, one, or infinite. Respectively,
this procedure will return None or a dictionary with solutions. In the
case of underdetermined systems, all arbitrary parameters are skipped.
This may cause a situation in which an empty dictionary is returned.
In that case, all symbols can be assigned arbitrary values.
Input to this function is a $N\times M + 1$ matrix, which means it has
to be in augmented form. If you prefer to enter $N$ equations and $M$
unknowns then use ``solve(Neqs, *Msymbols)`` instead. Note: a local
copy of the matrix is made by this routine so the matrix that is
passed will not be modified.
The algorithm used here is fraction-free Gaussian elimination,
which results, after elimination, in an upper-triangular matrix.
Then solutions are found using back-substitution. This approach
is more efficient and compact than the Gauss-Jordan method.
Examples
========
>>> from sympy import Matrix, solve_linear_system
>>> from sympy.abc import x, y
Solve the following system::
x + 4 y == 2
-2 x + y == 14
>>> system = Matrix(( (1, 4, 2), (-2, 1, 14)))
>>> solve_linear_system(system, x, y)
{x: -6, y: 2}
A degenerate system returns an empty dictionary:
>>> system = Matrix(( (0,0,0), (0,0,0) ))
>>> solve_linear_system(system, x, y)
{}
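    An underdetermined system gives the pivot variables in terms of the free
    ones; here ``y`` and ``z`` remain free and are omitted from the mapping:
    >>> from sympy.abc import z
    >>> system = Matrix(( (1, 1, 1, 2), ))
    >>> solve_linear_system(system, x, y, z)
    {x: -y - z + 2}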
"""
assert system.shape[1] == len(symbols) + 1
# This is just a wrapper for solve_lin_sys
eqs = list(system * Matrix(symbols + (-1,)))
eqs, ring = sympy_eqs_to_ring(eqs, symbols)
sol = solve_lin_sys(eqs, ring, _raw=False)
if sol is not None:
sol = {sym:val for sym, val in sol.items() if sym != val}
return sol
def solve_undetermined_coeffs(equ, coeffs, sym, **flags):
r"""
Solve equation of a type $p(x; a_1, \ldots, a_k) = q(x)$ where both
$p$ and $q$ are univariate polynomials that depend on $k$ parameters.
Explanation
===========
The result of this function is a dictionary with symbolic values of those
parameters with respect to coefficients in $q$.
This function accepts both equations class instances and ordinary
SymPy expressions. Specification of parameters and variables is
obligatory for efficiency and simplicity reasons.
Examples
========
>>> from sympy import Eq
>>> from sympy.abc import a, b, c, x
>>> from sympy.solvers import solve_undetermined_coeffs
>>> solve_undetermined_coeffs(Eq(2*a*x + a+b, x), [a, b], x)
{a: 1/2, b: -1/2}
>>> solve_undetermined_coeffs(Eq(a*c*x + a+b, x), [a, b], x)
{a: 1/c, b: -1/c}
"""
if isinstance(equ, Equality):
# got equation, so move all the
# terms to the left hand side
equ = equ.lhs - equ.rhs
equ = cancel(equ).as_numer_denom()[0]
system = list(collect(equ.expand(), sym, evaluate=False).values())
if not any(equ.has(sym) for equ in system):
# consecutive powers in the input expressions have
# been successfully collected, so solve remaining
# system using Gaussian elimination algorithm
return solve(system, *coeffs, **flags)
else:
return None # no solutions
def solve_linear_system_LU(matrix, syms):
"""
Solves the augmented matrix system using ``LUsolve`` and returns a
dictionary in which solutions are keyed to the symbols of *syms* as ordered.
Explanation
===========
The matrix must be invertible.
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x, y, z
>>> from sympy.solvers.solvers import solve_linear_system_LU
>>> solve_linear_system_LU(Matrix([
... [1, 2, 0, 1],
... [3, 2, 2, 1],
... [2, 0, 0, 1]]), [x, y, z])
{x: 1/2, y: 1/4, z: -1/2}
See Also
========
LUsolve
"""
if matrix.rows != matrix.cols - 1:
raise ValueError("Rows should be equal to columns - 1")
A = matrix[:matrix.rows, :matrix.rows]
b = matrix[:, matrix.cols - 1:]
soln = A.LUsolve(b)
solutions = {}
for i in range(soln.rows):
solutions[syms[i]] = soln[i, 0]
return solutions
def det_perm(M):
"""
Return the determinant of *M* by using permutations to select factors.
Explanation
===========
For sizes larger than 8 the number of permutations becomes prohibitively
large, or if there are no symbols in the matrix, it is better to use the
standard determinant routines (e.g., ``M.det()``.)
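    Examples
    ========
    A fully symbolic matrix exercises the permutation expansion:
    >>> from sympy import Matrix
    >>> from sympy.abc import a, b, c, d
    >>> from sympy.solvers.solvers import det_perm
    >>> det_perm(Matrix([[a, b], [c, d]]))
    a*d - b*c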
See Also
========
det_minor
det_quick
"""
args = []
s = True
n = M.rows
list_ = getattr(M, '_mat', None)
if list_ is None:
list_ = flatten(M.tolist())
for perm in generate_bell(n):
fac = []
idx = 0
for j in perm:
fac.append(list_[idx + j])
idx += n
term = Mul(*fac) # disaster with unevaluated Mul -- takes forever for n=7
args.append(term if s else -term)
s = not s
return Add(*args)
def det_minor(M):
"""
Return the ``det(M)`` computed from minors without
introducing new nesting in products.
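    Examples
    ========
    >>> from sympy import Matrix
    >>> from sympy.abc import a, b, c, d
    >>> from sympy.solvers.solvers import det_minor
    >>> det_minor(Matrix([[a, b], [c, d]]))
    a*d - b*c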
See Also
========
det_perm
det_quick
"""
n = M.rows
if n == 2:
return M[0, 0]*M[1, 1] - M[1, 0]*M[0, 1]
else:
return sum([(1, -1)[i % 2]*Add(*[M[0, i]*d for d in
Add.make_args(det_minor(M.minor_submatrix(0, i)))])
if M[0, i] else S.Zero for i in range(n)])
def det_quick(M, method=None):
"""
Return ``det(M)`` assuming that either
there are lots of zeros or the size of the matrix
is small. If this assumption is not met, then the normal
Matrix.det function will be used with method = ``method``.
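    Examples
    ========
    A matrix with a mix of symbolic and numeric entries is expanded by minors:
    >>> from sympy import Matrix
    >>> from sympy.abc import x
    >>> from sympy.solvers.solvers import det_quick
    >>> det_quick(Matrix([[x, 2], [3, 4]]))
    4*x - 6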
See Also
========
det_minor
det_perm
"""
if any(i.has(Symbol) for i in M):
if M.rows < 8 and all(i.has(Symbol) for i in M):
return det_perm(M)
return det_minor(M)
else:
return M.det(method=method) if method else M.det()
def inv_quick(M):
"""Return the inverse of ``M``, assuming that either
there are lots of zeros or the size of the matrix
is small.
"""
from sympy.matrices import zeros
if not all(i.is_Number for i in M):
if not any(i.is_Number for i in M):
det = lambda _: det_perm(_)
else:
det = lambda _: det_minor(_)
else:
return M.inv()
n = M.rows
d = det(M)
if d == S.Zero:
raise NonInvertibleMatrixError("Matrix det == 0; not invertible")
ret = zeros(n)
s1 = -1
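    # build the classical adjugate: entry (j, i) of the result is the signed
    # (i, j) minor divided by the determinant, with the sign alternating in a
    # checkerboard pattern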
for i in range(n):
s = s1 = -s1
for j in range(n):
di = det(M.minor_submatrix(i, j))
ret[j, i] = s*di/d
s = -s
return ret
# these are functions that have multiple inverse values per period
multi_inverses = {
sin: lambda x: (asin(x), S.Pi - asin(x)),
cos: lambda x: (acos(x), 2*S.Pi - acos(x)),
}
def _tsolve(eq, sym, **flags):
"""
Helper for ``_solve`` that solves a transcendental equation with respect
to the given symbol. Various equations containing powers and logarithms,
can be solved.
There is currently no guarantee that all solutions will be returned or
that a real solution will be favored over a complex one.
Either a list of potential solutions will be returned or None will be
returned (in the case that no method was known to get a solution
for the equation). All other errors (like the inability to cast an
expression as a Poly) are unhandled.
Examples
========
>>> from sympy import log
>>> from sympy.solvers.solvers import _tsolve as tsolve
>>> from sympy.abc import x
>>> tsolve(3**(2*x + 5) - 4, x)
[-5/2 + log(2)/log(3), (-5*log(3)/2 + log(2) + I*pi)/log(3)]
>>> tsolve(log(x) + 2*x, x)
[LambertW(2)/2]
"""
if 'tsolve_saw' not in flags:
flags['tsolve_saw'] = []
if eq in flags['tsolve_saw']:
return None
else:
flags['tsolve_saw'].append(eq)
rhs, lhs = _invert(eq, sym)
if lhs == sym:
return [rhs]
try:
if lhs.is_Add:
# it's time to try factoring; powdenest is used
# to try get powers in standard form for better factoring
f = factor(powdenest(lhs - rhs))
if f.is_Mul:
return _solve(f, sym, **flags)
if rhs:
f = logcombine(lhs, force=flags.get('force', True))
if f.count(log) != lhs.count(log):
if isinstance(f, log):
return _solve(f.args[0] - exp(rhs), sym, **flags)
return _tsolve(f - rhs, sym, **flags)
elif lhs.is_Pow:
if lhs.exp.is_Integer:
if lhs - rhs != eq:
return _solve(lhs - rhs, sym, **flags)
if sym not in lhs.exp.free_symbols:
return _solve(lhs.base - rhs**(1/lhs.exp), sym, **flags)
# _tsolve calls this with Dummy before passing the actual number in.
if any(t.is_Dummy for t in rhs.free_symbols):
raise NotImplementedError # _tsolve will call here again...
# a ** g(x) == 0
if not rhs:
# f(x)**g(x) only has solutions where f(x) == 0 and g(x) != 0 at
# the same place
sol_base = _solve(lhs.base, sym, **flags)
return [s for s in sol_base if lhs.exp.subs(sym, s) != 0]
# a ** g(x) == b
if not lhs.base.has(sym):
if lhs.base == 0:
return _solve(lhs.exp, sym, **flags) if rhs != 0 else []
# Gets most solutions...
if lhs.base == rhs.as_base_exp()[0]:
# handles case when bases are equal
sol = _solve(lhs.exp - rhs.as_base_exp()[1], sym, **flags)
else:
# handles cases when bases are not equal and exp
# may or may not be equal
sol = _solve(exp(log(lhs.base)*lhs.exp)-exp(log(rhs)), sym, **flags)
# Check for duplicate solutions
def equal(expr1, expr2):
_ = Dummy()
eq = checksol(expr1 - _, _, expr2)
if eq is None:
if nsimplify(expr1) != nsimplify(expr2):
return False
# they might be coincidentally the same
# so check more rigorously
eq = expr1.equals(expr2)
return eq
# Guess a rational exponent
e_rat = nsimplify(log(abs(rhs))/log(abs(lhs.base)))
e_rat = simplify(posify(e_rat)[0])
n, d = fraction(e_rat)
if expand(lhs.base**n - rhs**d) == 0:
sol = [s for s in sol if not equal(lhs.exp.subs(sym, s), e_rat)]
sol.extend(_solve(lhs.exp - e_rat, sym, **flags))
return list(ordered(set(sol)))
# f(x) ** g(x) == c
else:
sol = []
logform = lhs.exp*log(lhs.base) - log(rhs)
if logform != lhs - rhs:
try:
sol.extend(_solve(logform, sym, **flags))
except NotImplementedError:
pass
# Collect possible solutions and check with substitution later.
check = []
if rhs == 1:
# f(x) ** g(x) = 1 -- g(x)=0 or f(x)=+-1
check.extend(_solve(lhs.exp, sym, **flags))
check.extend(_solve(lhs.base - 1, sym, **flags))
check.extend(_solve(lhs.base + 1, sym, **flags))
elif rhs.is_Rational:
for d in (i for i in divisors(abs(rhs.p)) if i != 1):
e, t = integer_log(rhs.p, d)
if not t:
continue # rhs.p != d**b
for s in divisors(abs(rhs.q)):
                            if s**e == rhs.q:
r = Rational(d, s)
check.extend(_solve(lhs.base - r, sym, **flags))
check.extend(_solve(lhs.base + r, sym, **flags))
check.extend(_solve(lhs.exp - e, sym, **flags))
elif rhs.is_irrational:
b_l, e_l = lhs.base.as_base_exp()
n, d = (e_l*lhs.exp).as_numer_denom()
b, e = sqrtdenest(rhs).as_base_exp()
check = [sqrtdenest(i) for i in (_solve(lhs.base - b, sym, **flags))]
check.extend([sqrtdenest(i) for i in (_solve(lhs.exp - e, sym, **flags))])
if e_l*d != 1:
check.extend(_solve(b_l**n - rhs**(e_l*d), sym, **flags))
for s in check:
ok = checksol(eq, sym, s)
if ok is None:
ok = eq.subs(sym, s).equals(0)
if ok:
sol.append(s)
return list(ordered(set(sol)))
elif lhs.is_Function and len(lhs.args) == 1:
if lhs.func in multi_inverses:
# sin(x) = 1/3 -> x - asin(1/3) & x - (pi - asin(1/3))
soln = []
for i in multi_inverses[lhs.func](rhs):
soln.extend(_solve(lhs.args[0] - i, sym, **flags))
return list(ordered(soln))
elif lhs.func == LambertW:
return _solve(lhs.args[0] - rhs*exp(rhs), sym, **flags)
rewrite = lhs.rewrite(exp)
if rewrite != lhs:
return _solve(rewrite - rhs, sym, **flags)
except NotImplementedError:
pass
# maybe it is a lambert pattern
if flags.pop('bivariate', True):
# lambert forms may need some help being recognized, e.g. changing
# 2**(3*x) + x**3*log(2)**3 + 3*x**2*log(2)**2 + 3*x*log(2) + 1
# to 2**(3*x) + (x*log(2) + 1)**3
g = _filtered_gens(eq.as_poly(), sym)
up_or_log = set()
for gi in g:
if isinstance(gi, exp) or isinstance(gi, log):
up_or_log.add(gi)
elif gi.is_Pow:
gisimp = powdenest(expand_power_exp(gi))
if gisimp.is_Pow and sym in gisimp.exp.free_symbols:
up_or_log.add(gi)
eq_down = expand_log(expand_power_exp(eq)).subs(
dict(list(zip(up_or_log, [0]*len(up_or_log)))))
eq = expand_power_exp(factor(eq_down, deep=True) + (eq - eq_down))
rhs, lhs = _invert(eq, sym)
if lhs.has(sym):
try:
poly = lhs.as_poly()
g = _filtered_gens(poly, sym)
_eq = lhs - rhs
sols = _solve_lambert(_eq, sym, g)
# use a simplified form if it satisfies eq
# and has fewer operations
for n, s in enumerate(sols):
ns = nsimplify(s)
if ns != s and ns.count_ops() <= s.count_ops():
ok = checksol(_eq, sym, ns)
if ok is None:
ok = _eq.subs(sym, ns).equals(0)
if ok:
sols[n] = ns
return sols
except NotImplementedError:
# maybe it's a convoluted function
if len(g) == 2:
try:
gpu = bivariate_type(lhs - rhs, *g)
if gpu is None:
raise NotImplementedError
g, p, u = gpu
flags['bivariate'] = False
inversion = _tsolve(g - u, sym, **flags)
if inversion:
sol = _solve(p, u, **flags)
return list(ordered({i.subs(u, s)
for i in inversion for s in sol}))
except NotImplementedError:
pass
else:
pass
if flags.pop('force', True):
flags['force'] = False
pos, reps = posify(lhs - rhs)
if rhs == S.ComplexInfinity:
return []
for u, s in reps.items():
if s == sym:
break
else:
u = sym
if pos.has(u):
try:
soln = _solve(pos, u, **flags)
return list(ordered([s.subs(reps) for s in soln]))
except NotImplementedError:
pass
else:
pass # here for coverage
return # here for coverage
# TODO: option for calculating J numerically
@conserve_mpmath_dps
def nsolve(*args, dict=False, **kwargs):
r"""
Solve a nonlinear equation system numerically: ``nsolve(f, [args,] x0,
modules=['mpmath'], **kwargs)``.
Explanation
===========
``f`` is a vector function of symbolic expressions representing the system.
*args* are the variables. If there is only one variable, this argument can
be omitted. ``x0`` is a starting vector close to a solution.
Use the modules keyword to specify which modules should be used to
evaluate the function and the Jacobian matrix. Make sure to use a module
that supports matrices. For more information on the syntax, please see the
docstring of ``lambdify``.
    If the keyword arguments contain ``dict=True`` (default is False), ``nsolve``
will return a list (perhaps empty) of solution mappings. This might be
especially useful if you want to use ``nsolve`` as a fallback to solve since
using the dict argument for both methods produces return values of
consistent type structure. Please note: to keep this consistent with
``solve``, the solution will be returned in a list even though ``nsolve``
(currently at least) only finds one solution at a time.
Overdetermined systems are supported.
Examples
========
>>> from sympy import Symbol, nsolve
>>> import mpmath
>>> mpmath.mp.dps = 15
>>> x1 = Symbol('x1')
>>> x2 = Symbol('x2')
>>> f1 = 3 * x1**2 - 2 * x2**2 - 1
>>> f2 = x1**2 - 2 * x1 + x2**2 + 2 * x2 - 8
>>> print(nsolve((f1, f2), (x1, x2), (-1, 1)))
Matrix([[-1.19287309935246], [1.27844411169911]])
For one-dimensional functions the syntax is simplified:
>>> from sympy import sin, nsolve
>>> from sympy.abc import x
>>> nsolve(sin(x), x, 2)
3.14159265358979
>>> nsolve(sin(x), 2)
3.14159265358979
To solve with higher precision than the default, use the prec argument:
>>> from sympy import cos
>>> nsolve(cos(x) - x, 1)
0.739085133215161
>>> nsolve(cos(x) - x, 1, prec=50)
0.73908513321516064165531208767387340401341175890076
>>> cos(_)
0.73908513321516064165531208767387340401341175890076
To solve for complex roots of real functions, a nonreal initial point
must be specified:
>>> from sympy import I
>>> nsolve(x**2 + 2, I)
1.4142135623731*I
    ``mpmath.findroot`` is used; refer to its more extensive documentation,
    especially concerning keyword parameters and available solvers. Note,
    however, that for functions which are very steep near the root, the
    verification of the solution may fail. In this case you should use the
    flag ``verify=False`` and independently verify the solution.
>>> from sympy import cos, cosh
>>> f = cos(x)*cosh(x) - 1
>>> nsolve(f, 3.14*100)
Traceback (most recent call last):
...
ValueError: Could not find root within given tolerance. (1.39267e+230 > 2.1684e-19)
>>> ans = nsolve(f, 3.14*100, verify=False); ans
312.588469032184
>>> f.subs(x, ans).n(2)
2.1e+121
>>> (f/f.diff(x)).subs(x, ans).n(2)
7.4e-15
One might safely skip the verification if bounds of the root are known
and a bisection method is used:
>>> bounds = lambda i: (3.14*i, 3.14*(i + 1))
>>> nsolve(f, bounds(100), solver='bisect', verify=False)
315.730061685774
Alternatively, a function may be better behaved when the
denominator is ignored. Since this is not always the case, however,
the decision of what function to use is left to the discretion of
the user.
>>> eq = x**2/(1 - x)/(1 - 2*x)**2 - 100
>>> nsolve(eq, 0.46)
Traceback (most recent call last):
...
ValueError: Could not find root within given tolerance. (10000 > 2.1684e-19)
Try another starting point or tweak arguments.
>>> nsolve(eq.as_numer_denom()[0], 0.46)
0.46792545969349058
"""
# there are several other SymPy functions that use method= so
# guard against that here
if 'method' in kwargs:
raise ValueError(filldedent('''
Keyword "method" should not be used in this context. When using
some mpmath solvers directly, the keyword "method" is
used, but when using nsolve (and findroot) the keyword to use is
"solver".'''))
if 'prec' in kwargs:
prec = kwargs.pop('prec')
import mpmath
mpmath.mp.dps = prec
else:
prec = None
# keyword argument to return result as a dictionary
as_dict = dict
from builtins import dict # to unhide the builtin
# interpret arguments
if len(args) == 3:
f = args[0]
fargs = args[1]
x0 = args[2]
if iterable(fargs) and iterable(x0):
if len(x0) != len(fargs):
raise TypeError('nsolve expected exactly %i guess vectors, got %i'
% (len(fargs), len(x0)))
elif len(args) == 2:
f = args[0]
fargs = None
x0 = args[1]
if iterable(f):
raise TypeError('nsolve expected 3 arguments, got 2')
elif len(args) < 2:
raise TypeError('nsolve expected at least 2 arguments, got %i'
% len(args))
else:
raise TypeError('nsolve expected at most 3 arguments, got %i'
% len(args))
modules = kwargs.get('modules', ['mpmath'])
if iterable(f):
f = list(f)
for i, fi in enumerate(f):
if isinstance(fi, Equality):
f[i] = fi.lhs - fi.rhs
f = Matrix(f).T
if iterable(x0):
x0 = list(x0)
if not isinstance(f, Matrix):
# assume it's a sympy expression
if isinstance(f, Equality):
f = f.lhs - f.rhs
syms = f.free_symbols
if fargs is None:
fargs = syms.copy().pop()
if not (len(syms) == 1 and (fargs in syms or fargs[0] in syms)):
raise ValueError(filldedent('''
expected a one-dimensional and numerical function'''))
# the function is much better behaved if there is no denominator
# but sending the numerator is left to the user since sometimes
# the function is better behaved when the denominator is present
# e.g., issue 11768
f = lambdify(fargs, f, modules)
x = sympify(findroot(f, x0, **kwargs))
if as_dict:
return [{fargs: x}]
return x
if len(fargs) > f.cols:
raise NotImplementedError(filldedent('''
need at least as many equations as variables'''))
verbose = kwargs.get('verbose', False)
if verbose:
print('f(x):')
print(f)
# derive Jacobian
J = f.jacobian(fargs)
if verbose:
print('J(x):')
print(J)
# create functions
f = lambdify(fargs, f.T, modules)
J = lambdify(fargs, J, modules)
# solve the system numerically
x = findroot(f, x0, J=J, **kwargs)
if as_dict:
return [dict(zip(fargs, [sympify(xi) for xi in x]))]
return Matrix(x)
def _invert(eq, *symbols, **kwargs):
"""
Return tuple (i, d) where ``i`` is independent of *symbols* and ``d``
contains symbols.
Explanation
===========
``i`` and ``d`` are obtained after recursively using algebraic inversion
until an uninvertible ``d`` remains. If there are no free symbols then
``d`` will be zero. Some (but not necessarily all) solutions to the
expression ``i - d`` will be related to the solutions of the original
expression.
Examples
========
>>> from sympy.solvers.solvers import _invert as invert
>>> from sympy import sqrt, cos
>>> from sympy.abc import x, y
>>> invert(x - 3)
(3, x)
>>> invert(3)
(3, 0)
>>> invert(2*cos(x) - 1)
(1/2, cos(x))
>>> invert(sqrt(x) - 3)
(3, sqrt(x))
>>> invert(sqrt(x) + y, x)
(-y, sqrt(x))
>>> invert(sqrt(x) + y, y)
(-sqrt(x), y)
>>> invert(sqrt(x) + y, x, y)
(0, sqrt(x) + y)
If there is more than one symbol in a power's base and the exponent
is not an Integer, then the principal root will be used for the
inversion:
>>> invert(sqrt(x + y) - 2)
(4, x + y)
If the exponent is an Integer, setting ``integer_power`` to True
will force the principal root to be selected:
>>> invert(x**2 - 4, integer_power=True)
(2, x)
"""
eq = sympify(eq)
if eq.args:
# make sure we are working with flat eq
eq = eq.func(*eq.args)
free = eq.free_symbols
if not symbols:
symbols = free
if not free & set(symbols):
return eq, S.Zero
dointpow = bool(kwargs.get('integer_power', False))
lhs = eq
rhs = S.Zero
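    # repeatedly strip symbol-free additive and multiplicative parts from lhs
    # onto rhs and undo the outermost invertible operation, stopping once lhs
    # no longer changes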
while True:
was = lhs
while True:
indep, dep = lhs.as_independent(*symbols)
# dep + indep == rhs
if lhs.is_Add:
# this indicates we have done it all
if indep.is_zero:
break
lhs = dep
rhs -= indep
# dep * indep == rhs
else:
# this indicates we have done it all
if indep is S.One:
break
lhs = dep
rhs /= indep
# collect like-terms in symbols
if lhs.is_Add:
terms = {}
for a in lhs.args:
i, d = a.as_independent(*symbols)
terms.setdefault(d, []).append(i)
if any(len(v) > 1 for v in terms.values()):
args = []
for d, i in terms.items():
if len(i) > 1:
args.append(Add(*i)*d)
else:
args.append(i[0]*d)
lhs = Add(*args)
# if it's a two-term Add with rhs = 0 and two powers we can get the
# dependent terms together, e.g. 3*f(x) + 2*g(x) -> f(x)/g(x) = -2/3
if lhs.is_Add and not rhs and len(lhs.args) == 2 and \
not lhs.is_polynomial(*symbols):
a, b = ordered(lhs.args)
ai, ad = a.as_independent(*symbols)
bi, bd = b.as_independent(*symbols)
if any(_ispow(i) for i in (ad, bd)):
a_base, a_exp = ad.as_base_exp()
b_base, b_exp = bd.as_base_exp()
if a_base == b_base:
# a = -b
lhs = powsimp(powdenest(ad/bd))
rhs = -bi/ai
else:
rat = ad/bd
_lhs = powsimp(ad/bd)
if _lhs != rat:
lhs = _lhs
rhs = -bi/ai
elif ai == -bi:
if isinstance(ad, Function) and ad.func == bd.func:
if len(ad.args) == len(bd.args) == 1:
lhs = ad.args[0] - bd.args[0]
elif len(ad.args) == len(bd.args):
# should be able to solve
# f(x, y) - f(2 - x, 0) == 0 -> x == 1
raise NotImplementedError(
'equal function with more than 1 argument')
else:
raise ValueError(
'function with different numbers of args')
elif lhs.is_Mul and any(_ispow(a) for a in lhs.args):
lhs = powsimp(powdenest(lhs))
if lhs.is_Function:
if hasattr(lhs, 'inverse') and len(lhs.args) == 1:
# -1
# f(x) = g -> x = f (g)
#
# /!\ inverse should not be defined if there are multiple values
# for the function -- these are handled in _tsolve
#
rhs = lhs.inverse()(rhs)
lhs = lhs.args[0]
elif isinstance(lhs, atan2):
y, x = lhs.args
lhs = 2*atan(y/(sqrt(x**2 + y**2) + x))
elif lhs.func == rhs.func:
if len(lhs.args) == len(rhs.args) == 1:
lhs = lhs.args[0]
rhs = rhs.args[0]
elif len(lhs.args) == len(rhs.args):
# should be able to solve
# f(x, y) == f(2, 3) -> x == 2
# f(x, x + y) == f(2, 3) -> x == 2
raise NotImplementedError(
'equal function with more than 1 argument')
else:
raise ValueError(
'function with different numbers of args')
if rhs and lhs.is_Pow and lhs.exp.is_Integer and lhs.exp < 0:
lhs = 1/lhs
rhs = 1/rhs
# base**a = b -> base = b**(1/a) if
# a is an Integer and dointpow=True (this gives real branch of root)
# a is not an Integer and the equation is multivariate and the
# base has more than 1 symbol in it
# The rationale for this is that right now the multi-system solvers
# doesn't try to resolve generators to see, for example, if the whole
# system is written in terms of sqrt(x + y) so it will just fail, so we
# do that step here.
if lhs.is_Pow and (
lhs.exp.is_Integer and dointpow or not lhs.exp.is_Integer and
len(symbols) > 1 and len(lhs.base.free_symbols & set(symbols)) > 1):
rhs = rhs**(1/lhs.exp)
lhs = lhs.base
if lhs == was:
break
return rhs, lhs
def unrad(eq, *syms, **flags):
"""
Remove radicals with symbolic arguments and return (eq, cov),
None, or raise an error.
Explanation
===========
None is returned if there are no radicals to remove.
NotImplementedError is raised if there are radicals and they cannot be
removed or if the relationship between the original symbols and the
change of variable needed to rewrite the system as a polynomial cannot
be solved.
Otherwise the tuple, ``(eq, cov)``, is returned where:
*eq*, ``cov``
*eq* is an equation without radicals (in the symbol(s) of
interest) whose solutions are a superset of the solutions to the
original expression. *eq* might be rewritten in terms of a new
variable; the relationship to the original variables is given by
``cov`` which is a list containing ``v`` and ``v**p - b`` where
``p`` is the power needed to clear the radical and ``b`` is the
radical now expressed as a polynomial in the symbols of interest.
For example, for sqrt(2 - x) the tuple would be
``(c, c**2 - 2 + x)``. The solutions of *eq* will contain
solutions to the original equation (if there are any).
*syms*
An iterable of symbols which, if provided, will limit the focus of
radical removal: only radicals with one or more of the symbols of
interest will be cleared. All free symbols are used if *syms* is not
set.
*flags* are used internally for communication during recursive calls.
Two options are also recognized:
``take``, when defined, is interpreted as a single-argument function
that returns True if a given Pow should be handled.
Radicals can be removed from an expression if:
* All bases of the radicals are the same; a change of variables is
done in this case.
* All radicals appear in one term of the expression.
* There are only four terms with sqrt() factors or there are fewer than
  four terms having sqrt() factors.
* There are only two terms with radicals.
Examples
========
>>> from sympy.solvers.solvers import unrad
>>> from sympy.abc import x
>>> from sympy import sqrt, Rational, root
>>> unrad(sqrt(x)*x**Rational(1, 3) + 2)
(x**5 - 64, [])
>>> unrad(sqrt(x) + root(x + 1, 3))
(x**3 - x**2 - 2*x - 1, [])
>>> eq = sqrt(x) + root(x, 3) - 2
>>> unrad(eq)
(_p**3 + _p**2 - 2, [_p, _p**6 - x])
"""
uflags = dict(check=False, simplify=False)
def _cov(p, e):
if cov:
# XXX - uncovered
oldp, olde = cov
if Poly(e, p).degree(p) in (1, 2):
cov[:] = [p, olde.subs(oldp, _solve(e, p, **uflags)[0])]
else:
raise NotImplementedError
else:
cov[:] = [p, e]
def _canonical(eq, cov):
if cov:
# change symbol to vanilla so no solutions are eliminated
p, e = cov
rep = {p: Dummy(p.name)}
eq = eq.xreplace(rep)
cov = [p.xreplace(rep), e.xreplace(rep)]
# remove constants and powers of factors since these don't change
# the location of the root; XXX should factor or factor_terms be used?
eq = factor_terms(_mexpand(eq.as_numer_denom()[0], recursive=True), clear=True)
if eq.is_Mul:
args = []
for f in eq.args:
if f.is_number:
continue
if f.is_Pow and _take(f, True):
args.append(f.base)
else:
args.append(f)
eq = Mul(*args) # leave as Mul for more efficient solving
# make the sign canonical
free = eq.free_symbols
if len(free) == 1:
if eq.coeff(free.pop()**degree(eq)).could_extract_minus_sign():
eq = -eq
elif eq.could_extract_minus_sign():
eq = -eq
return eq, cov
def _Q(pow):
        # return the denominator of the leading Rational of the Pow's exponent
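        # e.g. _Q(x**Rational(3, 2)) -> 2 and _Q(x**2) -> 1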
c = pow.as_base_exp()[1].as_coeff_Mul()[0]
if not c.is_Rational:
return S.One
return c.q
# define the _take method that will determine whether a term is of interest
def _take(d, take_int_pow):
# return True if coefficient of any factor's exponent's den is not 1
for pow in Mul.make_args(d):
if not (pow.is_Symbol or pow.is_Pow):
continue
b, e = pow.as_base_exp()
if not b.has(*syms):
continue
if not take_int_pow and _Q(pow) == 1:
continue
free = pow.free_symbols
if free.intersection(syms):
return True
return False
_take = flags.setdefault('_take', _take)
cov, nwas, rpt = [flags.setdefault(k, v) for k, v in
sorted(dict(cov=[], n=None, rpt=0).items())]
# preconditioning
eq = powdenest(factor_terms(eq, radical=True, clear=True))
if isinstance(eq, Relational):
eq, d = eq, 1
else:
eq, d = eq.as_numer_denom()
eq = _mexpand(eq, recursive=True)
if eq.is_number:
return
syms = set(syms) or eq.free_symbols
poly = eq.as_poly()
gens = [g for g in poly.gens if _take(g, True)]
if not gens:
return
# check for trivial case
# - already a polynomial in integer powers
if all(_Q(g) == 1 for g in gens):
        if (len(gens) == len(poly.gens) and d != 1):
return eq, []
else:
return
# - an exponent has a symbol of interest (don't handle)
if any(g.as_base_exp()[1].has(*syms) for g in gens):
return
def _rads_bases_lcm(poly):
# if all the bases are the same or all the radicals are in one
# term, `lcm` will be the lcm of the denominators of the
# exponents of the radicals
lcm = 1
rads = set()
bases = set()
for g in poly.gens:
if not _take(g, False):
continue
q = _Q(g)
if q != 1:
rads.add(g)
lcm = ilcm(lcm, q)
bases.add(g.base)
return rads, bases, lcm
rads, bases, lcm = _rads_bases_lcm(poly)
if not rads:
return
covsym = Dummy('p', nonnegative=True)
# only keep in syms symbols that actually appear in radicals;
# and update gens
newsyms = set()
for r in rads:
newsyms.update(syms & r.free_symbols)
if newsyms != syms:
syms = newsyms
gens = [g for g in gens if g.free_symbols & syms]
# get terms together that have common generators
drad = dict(list(zip(rads, list(range(len(rads))))))
rterms = {(): []}
args = Add.make_args(poly.as_expr())
for t in args:
if _take(t, False):
common = set(t.as_poly().gens).intersection(rads)
key = tuple(sorted([drad[i] for i in common]))
else:
key = ()
rterms.setdefault(key, []).append(t)
others = Add(*rterms.pop(()))
rterms = [Add(*rterms[k]) for k in rterms.keys()]
# the output will depend on the order terms are processed, so
# make it canonical quickly
rterms = list(reversed(list(ordered(rterms))))
ok = False # we don't have a solution yet
depth = sqrt_depth(eq)
if len(rterms) == 1 and not (rterms[0].is_Add and lcm > 2):
eq = rterms[0]**lcm - ((-others)**lcm)
ok = True
else:
if len(rterms) == 1 and rterms[0].is_Add:
rterms = list(rterms[0].args)
if len(bases) == 1:
b = bases.pop()
if len(syms) > 1:
free = b.free_symbols
x = {g for g in gens if g.is_Symbol} & free
if not x:
x = free
x = ordered(x)
else:
x = syms
x = list(x)[0]
try:
inv = _solve(covsym**lcm - b, x, **uflags)
if not inv:
raise NotImplementedError
eq = poly.as_expr().subs(b, covsym**lcm).subs(x, inv[0])
_cov(covsym, covsym**lcm - b)
return _canonical(eq, cov)
except NotImplementedError:
pass
else:
# no longer consider integer powers as generators
gens = [g for g in gens if _Q(g) != 1]
if len(rterms) == 2:
if not others:
eq = rterms[0]**lcm - (-rterms[1])**lcm
ok = True
elif not log(lcm, 2).is_Integer:
# the lcm-is-power-of-two case is handled below
r0, r1 = rterms
if flags.get('_reverse', False):
r1, r0 = r0, r1
i0 = _rads0, _bases0, lcm0 = _rads_bases_lcm(r0.as_poly())
i1 = _rads1, _bases1, lcm1 = _rads_bases_lcm(r1.as_poly())
for reverse in range(2):
if reverse:
i0, i1 = i1, i0
r0, r1 = r1, r0
_rads1, _, lcm1 = i1
_rads1 = Mul(*_rads1)
t1 = _rads1**lcm1
c = covsym**lcm1 - t1
for x in syms:
try:
sol = _solve(c, x, **uflags)
if not sol:
raise NotImplementedError
neweq = r0.subs(x, sol[0]) + covsym*r1/_rads1 + \
others
tmp = unrad(neweq, covsym)
if tmp:
eq, newcov = tmp
if newcov:
newp, newc = newcov
_cov(newp, c.subs(covsym,
_solve(newc, covsym, **uflags)[0]))
else:
_cov(covsym, c)
else:
eq = neweq
_cov(covsym, c)
ok = True
break
except NotImplementedError:
if reverse:
raise NotImplementedError(
'no successful change of variable found')
else:
pass
if ok:
break
elif len(rterms) == 3:
# two cube roots and another with order less than 5
# (so an analytical solution can be found) or a base
# that matches one of the cube root bases
info = [_rads_bases_lcm(i.as_poly()) for i in rterms]
RAD = 0
BASES = 1
LCM = 2
if info[0][LCM] != 3:
info.append(info.pop(0))
rterms.append(rterms.pop(0))
elif info[1][LCM] != 3:
info.append(info.pop(1))
rterms.append(rterms.pop(1))
if info[0][LCM] == info[1][LCM] == 3:
if info[1][BASES] != info[2][BASES]:
info[0], info[1] = info[1], info[0]
rterms[0], rterms[1] = rterms[1], rterms[0]
if info[1][BASES] == info[2][BASES]:
eq = rterms[0]**3 + (rterms[1] + rterms[2] + others)**3
ok = True
elif info[2][LCM] < 5:
# a*root(A, 3) + b*root(B, 3) + others = c
a, b, c, d, A, B = [Dummy(i) for i in 'abcdAB']
                    # zz represents the radical-free (unrad-ed) expression into
                    # which the specifics for this case are substituted
zz = (c - d)*(A**3*a**9 + 3*A**2*B*a**6*b**3 -
3*A**2*a**6*c**3 + 9*A**2*a**6*c**2*d - 9*A**2*a**6*c*d**2 +
3*A**2*a**6*d**3 + 3*A*B**2*a**3*b**6 + 21*A*B*a**3*b**3*c**3 -
63*A*B*a**3*b**3*c**2*d + 63*A*B*a**3*b**3*c*d**2 -
21*A*B*a**3*b**3*d**3 + 3*A*a**3*c**6 - 18*A*a**3*c**5*d +
45*A*a**3*c**4*d**2 - 60*A*a**3*c**3*d**3 + 45*A*a**3*c**2*d**4 -
18*A*a**3*c*d**5 + 3*A*a**3*d**6 + B**3*b**9 - 3*B**2*b**6*c**3 +
9*B**2*b**6*c**2*d - 9*B**2*b**6*c*d**2 + 3*B**2*b**6*d**3 +
3*B*b**3*c**6 - 18*B*b**3*c**5*d + 45*B*b**3*c**4*d**2 -
60*B*b**3*c**3*d**3 + 45*B*b**3*c**2*d**4 - 18*B*b**3*c*d**5 +
3*B*b**3*d**6 - c**9 + 9*c**8*d - 36*c**7*d**2 + 84*c**6*d**3 -
126*c**5*d**4 + 126*c**4*d**5 - 84*c**3*d**6 + 36*c**2*d**7 -
9*c*d**8 + d**9)
def _t(i):
b = Mul(*info[i][RAD])
return cancel(rterms[i]/b), Mul(*info[i][BASES])
aa, AA = _t(0)
bb, BB = _t(1)
cc = -rterms[2]
dd = others
eq = zz.xreplace(dict(zip(
(a, A, b, B, c, d),
(aa, AA, bb, BB, cc, dd))))
ok = True
# handle power-of-2 cases
if not ok:
if log(lcm, 2).is_Integer and (not others and
len(rterms) == 4 or len(rterms) < 4):
def _norm2(a, b):
return a**2 + b**2 + 2*a*b
if len(rterms) == 4:
# (r0+r1)**2 - (r2+r3)**2
r0, r1, r2, r3 = rterms
eq = _norm2(r0, r1) - _norm2(r2, r3)
ok = True
elif len(rterms) == 3:
# (r1+r2)**2 - (r0+others)**2
r0, r1, r2 = rterms
eq = _norm2(r1, r2) - _norm2(r0, others)
ok = True
elif len(rterms) == 2:
# r0**2 - (r1+others)**2
r0, r1 = rterms
eq = r0**2 - _norm2(r1, others)
ok = True
new_depth = sqrt_depth(eq) if ok else depth
rpt += 1 # XXX how many repeats with others unchanging is enough?
if not ok or (
nwas is not None and len(rterms) == nwas and
new_depth is not None and new_depth == depth and
rpt > 3):
raise NotImplementedError('Cannot remove all radicals')
flags.update(dict(cov=cov, n=len(rterms), rpt=rpt))
neq = unrad(eq, *syms, **flags)
if neq:
eq, cov = neq
eq, cov = _canonical(eq, cov)
return eq, cov
from sympy.solvers.bivariate import (
bivariate_type, _solve_lambert, _filtered_gens)
|
125b7cab1477fa0901681cd031d9cd8b30882465b3e85a56ba9585313f42c370 | from sympy import Order, S, log, limit, lcm_list, im, re, Dummy
from sympy.core import Add, Mul, Pow
from sympy.core.basic import Basic
from sympy.core.compatibility import iterable
from sympy.core.expr import AtomicExpr, Expr
from sympy.core.function import expand_mul
from sympy.core.numbers import _sympifyit, oo
from sympy.core.relational import is_le, is_lt, is_ge, is_gt
from sympy.core.sympify import _sympify
from sympy.functions.elementary.miscellaneous import Min, Max
from sympy.logic.boolalg import And
from sympy.sets.sets import (Interval, Intersection, FiniteSet, Union,
Complement, EmptySet)
from sympy.sets.fancysets import ImageSet
from sympy.solvers.inequalities import solve_univariate_inequality
from sympy.utilities import filldedent
from sympy.multipledispatch import dispatch
def continuous_domain(f, symbol, domain):
"""
Returns the intervals in the given domain for which the function
is continuous.
This method is limited by the ability to determine the various
singularities and discontinuities of the given function.
Parameters
==========
f : Expr
The concerned function.
symbol : Symbol
The variable for which the intervals are to be determined.
domain : Interval
        The domain over which the continuity of the function has to be checked.
Examples
========
>>> from sympy import Symbol, S, tan, log, pi, sqrt
>>> from sympy.sets import Interval
>>> from sympy.calculus.util import continuous_domain
>>> x = Symbol('x')
>>> continuous_domain(1/x, x, S.Reals)
Union(Interval.open(-oo, 0), Interval.open(0, oo))
>>> continuous_domain(tan(x), x, Interval(0, pi))
Union(Interval.Ropen(0, pi/2), Interval.Lopen(pi/2, pi))
>>> continuous_domain(sqrt(x - 2), x, Interval(-5, 5))
Interval(2, 5)
>>> continuous_domain(log(2*x - 1), x, S.Reals)
Interval.open(1/2, oo)
Returns
=======
Interval
Union of all intervals where the function is continuous.
Raises
======
NotImplementedError
If the method to determine continuity of such a function
has not yet been developed.
"""
from sympy.solvers.inequalities import solve_univariate_inequality
from sympy.solvers.solveset import _has_rational_power
from sympy.calculus.singularities import singularities
if domain.is_subset(S.Reals):
constrained_interval = domain
for atom in f.atoms(Pow):
predicate, denomin = _has_rational_power(atom, symbol)
if predicate and denomin == 2:
constraint = solve_univariate_inequality(atom.base >= 0,
symbol).as_set()
constrained_interval = Intersection(constraint,
constrained_interval)
for atom in f.atoms(log):
constraint = solve_univariate_inequality(atom.args[0] > 0,
symbol).as_set()
constrained_interval = Intersection(constraint,
constrained_interval)
return constrained_interval - singularities(f, symbol, domain)
def function_range(f, symbol, domain):
"""
Finds the range of a function in a given domain.
This method is limited by the ability to determine the singularities and
determine limits.
Parameters
==========
f : Expr
The concerned function.
symbol : Symbol
        The variable for which the range of the function is to be determined.
domain : Interval
The domain under which the range of the function has to be found.
Examples
========
>>> from sympy import Symbol, S, exp, log, pi, sqrt, sin, tan
>>> from sympy.sets import Interval
>>> from sympy.calculus.util import function_range
>>> x = Symbol('x')
>>> function_range(sin(x), x, Interval(0, 2*pi))
Interval(-1, 1)
>>> function_range(tan(x), x, Interval(-pi/2, pi/2))
Interval(-oo, oo)
>>> function_range(1/x, x, S.Reals)
Union(Interval.open(-oo, 0), Interval.open(0, oo))
>>> function_range(exp(x), x, S.Reals)
Interval.open(0, oo)
>>> function_range(log(x), x, S.Reals)
Interval(-oo, oo)
>>> function_range(sqrt(x), x , Interval(-5, 9))
Interval(0, 3)
Returns
=======
Interval
Union of all ranges for all intervals under domain where function is
continuous.
Raises
======
NotImplementedError
If any of the intervals, in the given domain, for which function
is continuous are not finite or real,
OR if the critical points of the function on the domain can't be found.
"""
from sympy.solvers.solveset import solveset
if isinstance(domain, EmptySet):
return S.EmptySet
period = periodicity(f, symbol)
if period == S.Zero:
# the expression is constant wrt symbol
return FiniteSet(f.expand())
if period is not None:
if isinstance(domain, Interval):
if (domain.inf - domain.sup).is_infinite:
domain = Interval(0, period)
elif isinstance(domain, Union):
for sub_dom in domain.args:
if isinstance(sub_dom, Interval) and \
((sub_dom.inf - sub_dom.sup).is_infinite):
domain = Interval(0, period)
intervals = continuous_domain(f, symbol, domain)
range_int = S.EmptySet
if isinstance(intervals,(Interval, FiniteSet)):
interval_iter = (intervals,)
elif isinstance(intervals, Union):
interval_iter = intervals.args
else:
raise NotImplementedError(filldedent('''
Unable to find range for the given domain.
'''))
for interval in interval_iter:
if isinstance(interval, FiniteSet):
for singleton in interval:
if singleton in domain:
range_int += FiniteSet(f.subs(symbol, singleton))
elif isinstance(interval, Interval):
vals = S.EmptySet
critical_points = S.EmptySet
critical_values = S.EmptySet
bounds = ((interval.left_open, interval.inf, '+'),
(interval.right_open, interval.sup, '-'))
for is_open, limit_point, direction in bounds:
if is_open:
critical_values += FiniteSet(limit(f, symbol, limit_point, direction))
vals += critical_values
else:
vals += FiniteSet(f.subs(symbol, limit_point))
solution = solveset(f.diff(symbol), symbol, interval)
if not iterable(solution):
raise NotImplementedError(
'Unable to find critical points for {}'.format(f))
if isinstance(solution, ImageSet):
raise NotImplementedError(
'Infinite number of critical points for {}'.format(f))
critical_points += solution
for critical_point in critical_points:
vals += FiniteSet(f.subs(symbol, critical_point))
left_open, right_open = False, False
if critical_values is not S.EmptySet:
if critical_values.inf == vals.inf:
left_open = True
if critical_values.sup == vals.sup:
right_open = True
range_int += Interval(vals.inf, vals.sup, left_open, right_open)
else:
raise NotImplementedError(filldedent('''
Unable to find range for the given domain.
'''))
return range_int
def not_empty_in(finset_intersection, *syms):
"""
    Finds the domain of the functions in `finite_set` for which the
    `finite_set` is not empty
Parameters
==========
finset_intersection : The unevaluated intersection of FiniteSet containing
real-valued functions with Union of Sets
syms : Tuple of symbols
Symbol for which domain is to be found
Raises
======
NotImplementedError
The algorithms to find the non-emptiness of the given FiniteSet are
not yet implemented.
ValueError
The input is not valid.
RuntimeError
It is a bug, please report it to the github issue tracker
(https://github.com/sympy/sympy/issues).
Examples
========
>>> from sympy import FiniteSet, Interval, not_empty_in, oo
>>> from sympy.abc import x
>>> not_empty_in(FiniteSet(x/2).intersect(Interval(0, 1)), x)
Interval(0, 2)
>>> not_empty_in(FiniteSet(x, x**2).intersect(Interval(1, 2)), x)
Union(Interval(1, 2), Interval(-sqrt(2), -1))
>>> not_empty_in(FiniteSet(x**2/(x + 2)).intersect(Interval(1, oo)), x)
Union(Interval.Lopen(-2, -1), Interval(2, oo))
"""
# TODO: handle piecewise defined functions
# TODO: handle transcendental functions
# TODO: handle multivariate functions
if len(syms) == 0:
raise ValueError("One or more symbols must be given in syms.")
if finset_intersection is S.EmptySet:
return S.EmptySet
if isinstance(finset_intersection, Union):
elm_in_sets = finset_intersection.args[0]
return Union(not_empty_in(finset_intersection.args[1], *syms),
elm_in_sets)
if isinstance(finset_intersection, FiniteSet):
finite_set = finset_intersection
_sets = S.Reals
else:
finite_set = finset_intersection.args[1]
_sets = finset_intersection.args[0]
if not isinstance(finite_set, FiniteSet):
raise ValueError('A FiniteSet must be given, not %s: %s' %
(type(finite_set), finite_set))
if len(syms) == 1:
symb = syms[0]
else:
        raise NotImplementedError('more than one variable %s not handled' %
(syms,))
def elm_domain(expr, intrvl):
""" Finds the domain of an expression in any given interval """
from sympy.solvers.solveset import solveset
_start = intrvl.start
_end = intrvl.end
_singularities = solveset(expr.as_numer_denom()[1], symb,
domain=S.Reals)
if intrvl.right_open:
if _end is S.Infinity:
_domain1 = S.Reals
else:
_domain1 = solveset(expr < _end, symb, domain=S.Reals)
else:
_domain1 = solveset(expr <= _end, symb, domain=S.Reals)
if intrvl.left_open:
if _start is S.NegativeInfinity:
_domain2 = S.Reals
else:
_domain2 = solveset(expr > _start, symb, domain=S.Reals)
else:
_domain2 = solveset(expr >= _start, symb, domain=S.Reals)
# domain in the interval
expr_with_sing = Intersection(_domain1, _domain2)
expr_domain = Complement(expr_with_sing, _singularities)
return expr_domain
if isinstance(_sets, Interval):
return Union(*[elm_domain(element, _sets) for element in finite_set])
if isinstance(_sets, Union):
_domain = S.EmptySet
for intrvl in _sets.args:
_domain_element = Union(*[elm_domain(element, intrvl)
for element in finite_set])
_domain = Union(_domain, _domain_element)
return _domain
def periodicity(f, symbol, check=False):
"""
Tests the given function for periodicity in the given symbol.
Parameters
==========
f : Expr.
The concerned function.
symbol : Symbol
The variable for which the period is to be determined.
check : Boolean, optional
The flag to verify whether the value being returned is a period or not.
Returns
=======
period
The period of the function is returned.
`None` is returned when the function is aperiodic or has a complex period.
The value of `0` is returned as the period of a constant function.
Raises
======
NotImplementedError
The value of the period computed cannot be verified.
Notes
=====
Currently, we do not support functions with a complex period.
The period of functions having complex periodic values such
as `exp`, `sinh` is evaluated to `None`.
    The value returned might not be the "fundamental" period of the given
    function, i.e. it may not be the smallest period of the function.
The verification of the period through the `check` flag is not reliable
due to internal simplification of the given expression. Hence, it is set
to `False` by default.
Examples
========
>>> from sympy import Symbol, sin, cos, tan, exp
>>> from sympy.calculus.util import periodicity
>>> x = Symbol('x')
>>> f = sin(x) + sin(2*x) + sin(3*x)
>>> periodicity(f, x)
2*pi
>>> periodicity(sin(x)*cos(x), x)
pi
>>> periodicity(exp(tan(2*x) - 1), x)
pi/2
>>> periodicity(sin(4*x)**cos(2*x), x)
pi
>>> periodicity(exp(x), x)
"""
from sympy.core.mod import Mod
from sympy.core.relational import Relational
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.complexes import Abs
from sympy.functions.elementary.trigonometric import (
TrigonometricFunction, sin, cos, csc, sec)
from sympy.simplify.simplify import simplify
from sympy.solvers.decompogen import decompogen
from sympy.polys.polytools import degree
temp = Dummy('x', real=True)
f = f.subs(symbol, temp)
symbol = temp
def _check(orig_f, period):
'''Return the checked period or raise an error.'''
new_f = orig_f.subs(symbol, symbol + period)
if new_f.equals(orig_f):
return period
else:
raise NotImplementedError(filldedent('''
The period of the given function cannot be verified.
When `%s` was replaced with `%s + %s` in `%s`, the result
was `%s` which was not recognized as being the same as
the original function.
So either the period was wrong or the two forms were
not recognized as being equal.
Set check=False to obtain the value.''' %
(symbol, symbol, period, orig_f, new_f)))
orig_f = f
period = None
if isinstance(f, Relational):
f = f.lhs - f.rhs
f = simplify(f)
if symbol not in f.free_symbols:
return S.Zero
if isinstance(f, TrigonometricFunction):
try:
period = f.period(symbol)
except NotImplementedError:
pass
if isinstance(f, Abs):
arg = f.args[0]
if isinstance(arg, (sec, csc, cos)):
            # all but tan and cot might have a
            # period that is half as large
# so recast as sin
arg = sin(arg.args[0])
period = periodicity(arg, symbol)
if period is not None and isinstance(arg, sin):
# the argument of Abs was a trigonometric other than
# cot or tan; test to see if the half-period
# is valid. Abs(arg) has behaviour equivalent to
# orig_f, so use that for test:
orig_f = Abs(arg)
try:
return _check(orig_f, period/2)
except NotImplementedError as err:
if check:
raise NotImplementedError(err)
# else let new orig_f and period be
# checked below
if isinstance(f, exp):
f = f.func(expand_mul(f.args[0]))
if im(f) != 0:
period_real = periodicity(re(f), symbol)
period_imag = periodicity(im(f), symbol)
if period_real is not None and period_imag is not None:
period = lcim([period_real, period_imag])
if f.is_Pow:
base, expo = f.args
base_has_sym = base.has(symbol)
expo_has_sym = expo.has(symbol)
if base_has_sym and not expo_has_sym:
period = periodicity(base, symbol)
elif expo_has_sym and not base_has_sym:
period = periodicity(expo, symbol)
else:
period = _periodicity(f.args, symbol)
elif f.is_Mul:
coeff, g = f.as_independent(symbol, as_Add=False)
if isinstance(g, TrigonometricFunction) or coeff is not S.One:
period = periodicity(g, symbol)
else:
period = _periodicity(g.args, symbol)
elif f.is_Add:
k, g = f.as_independent(symbol)
if k is not S.Zero:
return periodicity(g, symbol)
period = _periodicity(g.args, symbol)
elif isinstance(f, Mod):
a, n = f.args
if a == symbol:
period = n
elif isinstance(a, TrigonometricFunction):
period = periodicity(a, symbol)
        # check if 'f' is linear in 'symbol'
elif (a.is_polynomial(symbol) and degree(a, symbol) == 1 and
symbol not in n.free_symbols):
period = Abs(n / a.diff(symbol))
elif period is None:
from sympy.solvers.decompogen import compogen
g_s = decompogen(f, symbol)
num_of_gs = len(g_s)
if num_of_gs > 1:
for index, g in enumerate(reversed(g_s)):
start_index = num_of_gs - 1 - index
g = compogen(g_s[start_index:], symbol)
if g != orig_f and g != f: # Fix for issue 12620
period = periodicity(g, symbol)
if period is not None:
break
if period is not None:
if check:
return _check(orig_f, period)
return period
return None
def _periodicity(args, symbol):
"""
Helper for `periodicity` to find the period of a list of simpler
functions.
It uses the `lcim` method to find the least common period of
all the functions.
Parameters
==========
    args : tuple of Expr
        The sub-expressions of the function whose periods are to be combined.
symbol : Symbol
The symbol over which the function is to be evaluated.
Returns
=======
period
        The least common period of all the argument sub-expressions with
        respect to the given symbol.
        None if at least one of the sub-expressions is aperiodic in the symbol.
"""
periods = []
for f in args:
period = periodicity(f, symbol)
if period is None:
return None
if period is not S.Zero:
periods.append(period)
if len(periods) > 1:
return lcim(periods)
if periods:
return periods[0]
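# For example (illustrative), _periodicity((sin(x), cos(2*x)), x) computes the
# individual periods 2*pi and pi and returns lcim([2*pi, pi]), i.e. 2*pi.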
def lcim(numbers):
"""Returns the least common integral multiple of a list of numbers.
The numbers can be rational or irrational or a mixture of both.
`None` is returned for incommensurable numbers.
Parameters
==========
numbers : list
Numbers (rational and/or irrational) for which lcim is to be found.
Returns
=======
number
lcim if it exists, otherwise `None` for incommensurable numbers.
Examples
========
>>> from sympy import S, pi
>>> from sympy.calculus.util import lcim
>>> lcim([S(1)/2, S(3)/4, S(5)/6])
15/2
>>> lcim([2*pi, 3*pi, pi, pi/2])
6*pi
>>> lcim([S(1), 2*pi])
"""
result = None
if all(num.is_irrational for num in numbers):
factorized_nums = list(map(lambda num: num.factor(), numbers))
factors_num = list(
map(lambda num: num.as_coeff_Mul(),
factorized_nums))
term = factors_num[0][1]
if all(factor == term for coeff, factor in factors_num):
common_term = term
coeffs = [coeff for coeff, factor in factors_num]
result = lcm_list(coeffs) * common_term
elif all(num.is_rational for num in numbers):
result = lcm_list(numbers)
else:
pass
return result
def is_convex(f, *syms, domain=S.Reals):
"""Determines the convexity of the function passed in the argument.
Parameters
==========
f : Expr
The concerned function.
syms : Tuple of symbols
The variables with respect to which the convexity is to be determined.
domain : Interval, optional
The domain over which the convexity of the function has to be checked.
If unspecified, S.Reals will be the default domain.
Returns
=======
Boolean
        The method returns `True` if the function is convex, otherwise it
        returns `False`.
Raises
======
NotImplementedError
The check for the convexity of multivariate functions is not implemented yet.
Notes
=====
    To determine concavity of a function pass `-f` as the concerned function.
    To determine logarithmic convexity of a function pass `log(f)` as the
    concerned function.
    To determine logarithmic concavity of a function pass `-log(f)` as the
    concerned function.
Currently, convexity check of multivariate functions is not handled.
Examples
========
>>> from sympy import symbols, exp, oo, Interval
>>> from sympy.calculus.util import is_convex
>>> x = symbols('x')
>>> is_convex(exp(x), x)
True
>>> is_convex(x**3, x, domain = Interval(-1, oo))
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Convex_function
.. [2] http://www.ifp.illinois.edu/~angelia/L3_convfunc.pdf
.. [3] https://en.wikipedia.org/wiki/Logarithmically_convex_function
.. [4] https://en.wikipedia.org/wiki/Logarithmically_concave_function
.. [5] https://en.wikipedia.org/wiki/Concave_function
"""
if len(syms) > 1:
raise NotImplementedError(
"The check for the convexity of multivariate functions is not implemented yet.")
f = _sympify(f)
var = syms[0]
condition = f.diff(var, 2) < 0
if solve_univariate_inequality(condition, var, False, domain):
return False
return True
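# For example (illustrative), is_convex(1/x, x, domain=Interval.open(0, oo))
# should give True: the second derivative 2/x**3 is positive on that domain,
# so the condition 2/x**3 < 0 has no solution there.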
def stationary_points(f, symbol, domain=S.Reals):
"""
Returns the stationary points of a function (where derivative of the
function is 0) in the given domain.
Parameters
==========
f : Expr
The concerned function.
symbol : Symbol
The variable for which the stationary points are to be determined.
domain : Interval
The domain over which the stationary points have to be checked.
If unspecified, S.Reals will be the default domain.
Returns
=======
Set
        A set of stationary points for the function. If there are no
        stationary points, an EmptySet is returned.
Examples
========
>>> from sympy import Symbol, S, sin, pi, pprint, stationary_points
>>> from sympy.sets import Interval
>>> x = Symbol('x')
>>> stationary_points(1/x, x, S.Reals)
EmptySet
>>> pprint(stationary_points(sin(x), x), use_unicode=False)
pi 3*pi
{2*n*pi + -- | n in Integers} U {2*n*pi + ---- | n in Integers}
2 2
>>> stationary_points(sin(x),x, Interval(0, 4*pi))
FiniteSet(pi/2, 3*pi/2, 5*pi/2, 7*pi/2)
"""
from sympy import solveset, diff
if isinstance(domain, EmptySet):
return S.EmptySet
domain = continuous_domain(f, symbol, domain)
set = solveset(diff(f, symbol), symbol, domain)
return set
def maximum(f, symbol, domain=S.Reals):
"""
Returns the maximum value of a function in the given domain.
Parameters
==========
f : Expr
The concerned function.
symbol : Symbol
        The variable for which the maximum value is to be determined.
    domain : Interval
        The domain over which the maximum has to be checked.
        If unspecified, then the global maximum is returned.
Returns
=======
number
Maximum value of the function in given domain.
Examples
========
>>> from sympy import Symbol, S, sin, cos, pi, maximum
>>> from sympy.sets import Interval
>>> x = Symbol('x')
>>> f = -x**2 + 2*x + 5
>>> maximum(f, x, S.Reals)
6
>>> maximum(sin(x), x, Interval(-pi, pi/4))
sqrt(2)/2
>>> maximum(sin(x)*cos(x), x)
1/2
"""
from sympy import Symbol
if isinstance(symbol, Symbol):
if isinstance(domain, EmptySet):
raise ValueError("Maximum value not defined for empty domain.")
return function_range(f, symbol, domain).sup
else:
raise ValueError("%s is not a valid symbol." % symbol)
def minimum(f, symbol, domain=S.Reals):
"""
Returns the minimum value of a function in the given domain.
Parameters
==========
f : Expr
The concerned function.
symbol : Symbol
        The variable for which the minimum value is to be determined.
    domain : Interval
        The domain over which the minimum has to be checked.
        If unspecified, then the global minimum is returned.
Returns
=======
number
Minimum value of the function in the given domain.
Examples
========
>>> from sympy import Symbol, S, sin, cos, minimum
>>> from sympy.sets import Interval
>>> x = Symbol('x')
>>> f = x**2 + 2*x + 5
>>> minimum(f, x, S.Reals)
4
>>> minimum(sin(x), x, Interval(2, 3))
sin(3)
>>> minimum(sin(x)*cos(x), x)
-1/2
"""
from sympy import Symbol
if isinstance(symbol, Symbol):
if isinstance(domain, EmptySet):
raise ValueError("Minimum value not defined for empty domain.")
return function_range(f, symbol, domain).inf
else:
raise ValueError("%s is not a valid symbol." % symbol)
class AccumulationBounds(AtomicExpr):
r"""
# Note AccumulationBounds has an alias: AccumBounds
AccumulationBounds represent an interval `[a, b]`, which is always closed
at the ends. Here `a` and `b` can be any value from extended real numbers.
    The intended meaning of AccumulationBounds is to give an approximate
location of the accumulation points of a real function at a limit point.
Let `a` and `b` be reals such that a <= b.
`\left\langle a, b\right\rangle = \{x \in \mathbb{R} \mid a \le x \le b\}`
`\left\langle -\infty, b\right\rangle = \{x \in \mathbb{R} \mid x \le b\} \cup \{-\infty, \infty\}`
`\left\langle a, \infty \right\rangle = \{x \in \mathbb{R} \mid a \le x\} \cup \{-\infty, \infty\}`
`\left\langle -\infty, \infty \right\rangle = \mathbb{R} \cup \{-\infty, \infty\}`
`oo` and `-oo` are added to the second and third definition respectively,
since if either `-oo` or `oo` is an argument, then the other one should
be included (though not as an end point). This is forced, since we have,
for example, `1/AccumBounds(0, 1) = AccumBounds(1, oo)`, and the limit at
`0` is not one-sided. As x tends to `0-`, then `1/x -> -oo`, so `-oo`
should be interpreted as belonging to `AccumBounds(1, oo)` though it need
not appear explicitly.
In many cases it suffices to know that the limit set is bounded.
However, in some other cases more exact information could be useful.
For example, all accumulation values of cos(x) + 1 are non-negative.
(AccumBounds(-1, 1) + 1 = AccumBounds(0, 2))
    An AccumulationBounds object is defined to be a real AccumulationBounds
    if its end points are finite reals.
Let `X`, `Y` be real AccumulationBounds, then their sum, difference,
product are defined to be the following sets:
`X + Y = \{ x+y \mid x \in X \cap y \in Y\}`
`X - Y = \{ x-y \mid x \in X \cap y \in Y\}`
`X * Y = \{ x*y \mid x \in X \cap y \in Y\}`
There is, however, no consensus on Interval division.
`X / Y = \{ z \mid \exists x \in X, y \in Y \mid y \neq 0, z = x/y\}`
Note: According to this definition the quotient of two AccumulationBounds
    may not be an AccumulationBounds object but rather a union of
AccumulationBounds.
Note
====
The main focus in the interval arithmetic is on the simplest way to
calculate upper and lower endpoints for the range of values of a
function in one or more variables. These barriers are not necessarily
the supremum or infimum, since the precise calculation of those values
can be difficult or impossible.
Examples
========
>>> from sympy import AccumBounds, sin, exp, log, pi, E, S, oo
>>> from sympy.abc import x
>>> AccumBounds(0, 1) + AccumBounds(1, 2)
AccumBounds(1, 3)
>>> AccumBounds(0, 1) - AccumBounds(0, 2)
AccumBounds(-2, 1)
>>> AccumBounds(-2, 3)*AccumBounds(-1, 1)
AccumBounds(-3, 3)
>>> AccumBounds(1, 2)*AccumBounds(3, 5)
AccumBounds(3, 10)
The exponentiation of AccumulationBounds is defined
as follows:
If 0 does not belong to `X` or `n > 0` then
`X^n = \{ x^n \mid x \in X\}`
otherwise
`X^n = \{ x^n \mid x \neq 0, x \in X\} \cup \{-\infty, \infty\}`
Here for fractional `n`, the part of `X` resulting in a complex
AccumulationBounds object is neglected.
>>> AccumBounds(-1, 4)**(S(1)/2)
AccumBounds(0, 2)
>>> AccumBounds(1, 2)**2
AccumBounds(1, 4)
>>> AccumBounds(-1, oo)**(-1)
AccumBounds(-oo, oo)
    Note: `<a, b>^2` is not the same as `<a, b>*<a, b>`
>>> AccumBounds(-1, 1)**2
AccumBounds(0, 1)
>>> AccumBounds(1, 3) < 4
True
>>> AccumBounds(1, 3) < -1
False
Some elementary functions can also take AccumulationBounds as input.
A function `f` evaluated for some real AccumulationBounds `<a, b>`
is defined as `f(\left\langle a, b\right\rangle) = \{ f(x) \mid a \le x \le b \}`
>>> sin(AccumBounds(pi/6, pi/3))
AccumBounds(1/2, sqrt(3)/2)
>>> exp(AccumBounds(0, 1))
AccumBounds(1, E)
>>> log(AccumBounds(1, E))
AccumBounds(0, 1)
Some symbol in an expression can be substituted for a AccumulationBounds
object. But it doesn't necessarily evaluate the AccumulationBounds for
that expression.
Same expression can be evaluated to different values depending upon
the form it is used for substitution. For example:
>>> (x**2 + 2*x + 1).subs(x, AccumBounds(-1, 1))
AccumBounds(-1, 4)
>>> ((x + 1)**2).subs(x, AccumBounds(-1, 1))
AccumBounds(0, 4)
References
==========
.. [1] https://en.wikipedia.org/wiki/Interval_arithmetic
.. [2] http://fab.cba.mit.edu/classes/S62.12/docs/Hickey_interval.pdf
Notes
=====
Do not use ``AccumulationBounds`` for floating point interval arithmetic
calculations, use ``mpmath.iv`` instead.
"""
is_extended_real = True
def __new__(cls, min, max):
min = _sympify(min)
max = _sympify(max)
# Only allow real intervals (use symbols with 'is_extended_real=True').
if not min.is_extended_real or not max.is_extended_real:
raise ValueError("Only real AccumulationBounds are supported")
# Make sure that the created AccumBounds object will be valid.
if max.is_comparable and min.is_comparable:
if max < min:
raise ValueError(
"Lower limit should be smaller than upper limit")
if max == min:
return max
return Basic.__new__(cls, min, max)
# setting the operation priority
_op_priority = 11.0
def _eval_is_real(self):
if self.min.is_real and self.max.is_real:
return True
@property
def min(self):
"""
Returns the minimum possible value attained by AccumulationBounds
object.
Examples
========
>>> from sympy import AccumBounds
>>> AccumBounds(1, 3).min
1
"""
return self.args[0]
@property
def max(self):
"""
Returns the maximum possible value attained by AccumulationBounds
object.
Examples
========
>>> from sympy import AccumBounds
>>> AccumBounds(1, 3).max
3
"""
return self.args[1]
@property
def delta(self):
"""
Returns the difference of maximum possible value attained by
AccumulationBounds object and minimum possible value attained
by AccumulationBounds object.
Examples
========
>>> from sympy import AccumBounds
>>> AccumBounds(1, 3).delta
2
"""
return self.max - self.min
@property
def mid(self):
"""
Returns the mean of maximum possible value attained by
AccumulationBounds object and minimum possible value
attained by AccumulationBounds object.
Examples
========
>>> from sympy import AccumBounds
>>> AccumBounds(1, 3).mid
2
"""
return (self.min + self.max) / 2
@_sympifyit('other', NotImplemented)
def _eval_power(self, other):
return self.__pow__(other)
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Expr):
if isinstance(other, AccumBounds):
return AccumBounds(
Add(self.min, other.min),
Add(self.max, other.max))
if other is S.Infinity and self.min is S.NegativeInfinity or \
other is S.NegativeInfinity and self.max is S.Infinity:
return AccumBounds(-oo, oo)
elif other.is_extended_real:
if self.min is S.NegativeInfinity and self.max is S.Infinity:
return AccumBounds(-oo, oo)
elif self.min is S.NegativeInfinity:
return AccumBounds(-oo, self.max + other)
elif self.max is S.Infinity:
return AccumBounds(self.min + other, oo)
else:
return AccumBounds(Add(self.min, other), Add(self.max, other))
return Add(self, other, evaluate=False)
return NotImplemented
__radd__ = __add__
def __neg__(self):
return AccumBounds(-self.max, -self.min)
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Expr):
if isinstance(other, AccumBounds):
return AccumBounds(
Add(self.min, -other.max),
Add(self.max, -other.min))
if other is S.NegativeInfinity and self.min is S.NegativeInfinity or \
other is S.Infinity and self.max is S.Infinity:
return AccumBounds(-oo, oo)
elif other.is_extended_real:
if self.min is S.NegativeInfinity and self.max is S.Infinity:
return AccumBounds(-oo, oo)
elif self.min is S.NegativeInfinity:
return AccumBounds(-oo, self.max - other)
elif self.max is S.Infinity:
return AccumBounds(self.min - other, oo)
else:
return AccumBounds(
Add(self.min, -other),
Add(self.max, -other))
return Add(self, -other, evaluate=False)
return NotImplemented
@_sympifyit('other', NotImplemented)
def __rsub__(self, other):
return self.__neg__() + other
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Expr):
if isinstance(other, AccumBounds):
return AccumBounds(Min(Mul(self.min, other.min),
Mul(self.min, other.max),
Mul(self.max, other.min),
Mul(self.max, other.max)),
Max(Mul(self.min, other.min),
Mul(self.min, other.max),
Mul(self.max, other.min),
Mul(self.max, other.max)))
if other is S.Infinity:
if self.min.is_zero:
return AccumBounds(0, oo)
if self.max.is_zero:
return AccumBounds(-oo, 0)
if other is S.NegativeInfinity:
if self.min.is_zero:
return AccumBounds(-oo, 0)
if self.max.is_zero:
return AccumBounds(0, oo)
if other.is_extended_real:
if other.is_zero:
if self == AccumBounds(-oo, oo):
return AccumBounds(-oo, oo)
if self.max is S.Infinity:
return AccumBounds(0, oo)
if self.min is S.NegativeInfinity:
return AccumBounds(-oo, 0)
return S.Zero
if other.is_extended_positive:
return AccumBounds(
Mul(self.min, other),
Mul(self.max, other))
elif other.is_extended_negative:
return AccumBounds(
Mul(self.max, other),
Mul(self.min, other))
if isinstance(other, Order):
return other
return Mul(self, other, evaluate=False)
return NotImplemented
__rmul__ = __mul__
@_sympifyit('other', NotImplemented)
def __truediv__(self, other):
if isinstance(other, Expr):
if isinstance(other, AccumBounds):
if other.min.is_positive or other.max.is_negative:
return self * AccumBounds(1/other.max, 1/other.min)
if (self.min.is_extended_nonpositive and self.max.is_extended_nonnegative and
other.min.is_extended_nonpositive and other.max.is_extended_nonnegative):
if self.min.is_zero and other.min.is_zero:
return AccumBounds(0, oo)
if self.max.is_zero and other.min.is_zero:
return AccumBounds(-oo, 0)
return AccumBounds(-oo, oo)
if self.max.is_extended_negative:
if other.min.is_extended_negative:
if other.max.is_zero:
return AccumBounds(self.max / other.min, oo)
if other.max.is_extended_positive:
# the actual answer is a Union of AccumBounds,
# Union(AccumBounds(-oo, self.max/other.max),
# AccumBounds(self.max/other.min, oo))
return AccumBounds(-oo, oo)
if other.min.is_zero and other.max.is_extended_positive:
return AccumBounds(-oo, self.max / other.max)
if self.min.is_extended_positive:
if other.min.is_extended_negative:
if other.max.is_zero:
return AccumBounds(-oo, self.min / other.min)
if other.max.is_extended_positive:
# the actual answer is a Union of AccumBounds,
# Union(AccumBounds(-oo, self.min/other.min),
# AccumBounds(self.min/other.max, oo))
return AccumBounds(-oo, oo)
if other.min.is_zero and other.max.is_extended_positive:
return AccumBounds(self.min / other.max, oo)
elif other.is_extended_real:
if other is S.Infinity or other is S.NegativeInfinity:
if self == AccumBounds(-oo, oo):
return AccumBounds(-oo, oo)
if self.max is S.Infinity:
return AccumBounds(Min(0, other), Max(0, other))
if self.min is S.NegativeInfinity:
return AccumBounds(Min(0, -other), Max(0, -other))
if other.is_extended_positive:
return AccumBounds(self.min / other, self.max / other)
elif other.is_extended_negative:
return AccumBounds(self.max / other, self.min / other)
if (1 / other) is S.ComplexInfinity:
return Mul(self, 1 / other, evaluate=False)
else:
return Mul(self, 1 / other)
return NotImplemented
@_sympifyit('other', NotImplemented)
def __rtruediv__(self, other):
if isinstance(other, Expr):
if other.is_extended_real:
if other.is_zero:
return S.Zero
if (self.min.is_extended_nonpositive and self.max.is_extended_nonnegative):
if self.min.is_zero:
if other.is_extended_positive:
return AccumBounds(Mul(other, 1 / self.max), oo)
if other.is_extended_negative:
return AccumBounds(-oo, Mul(other, 1 / self.max))
if self.max.is_zero:
if other.is_extended_positive:
return AccumBounds(-oo, Mul(other, 1 / self.min))
if other.is_extended_negative:
return AccumBounds(Mul(other, 1 / self.min), oo)
return AccumBounds(-oo, oo)
else:
return AccumBounds(Min(other / self.min, other / self.max),
Max(other / self.min, other / self.max))
return Mul(other, 1 / self, evaluate=False)
else:
return NotImplemented
@_sympifyit('other', NotImplemented)
def __pow__(self, other):
from sympy.functions.elementary.miscellaneous import real_root
if isinstance(other, Expr):
if other is S.Infinity:
if self.min.is_extended_nonnegative:
if self.max < 1:
return S.Zero
if self.min > 1:
return S.Infinity
return AccumBounds(0, oo)
elif self.max.is_extended_negative:
if self.min > -1:
return S.Zero
if self.max < -1:
return FiniteSet(-oo, oo)
return AccumBounds(-oo, oo)
else:
if self.min > -1:
if self.max < 1:
return S.Zero
return AccumBounds(0, oo)
return AccumBounds(-oo, oo)
if other is S.NegativeInfinity:
return (1 / self)**oo
if other.is_extended_real and other.is_number:
if other.is_zero:
return S.One
if other.is_Integer:
if self.min.is_extended_positive:
return AccumBounds(
Min(self.min ** other, self.max ** other),
Max(self.min ** other, self.max ** other))
elif self.max.is_extended_negative:
return AccumBounds(
Min(self.max ** other, self.min ** other),
Max(self.max ** other, self.min ** other))
if other % 2 == 0:
if other.is_extended_negative:
if self.min.is_zero:
return AccumBounds(self.max**other, oo)
if self.max.is_zero:
return AccumBounds(self.min**other, oo)
return AccumBounds(0, oo)
return AccumBounds(
S.Zero, Max(self.min**other, self.max**other))
else:
if other.is_extended_negative:
if self.min.is_zero:
return AccumBounds(self.max**other, oo)
if self.max.is_zero:
return AccumBounds(-oo, self.min**other)
return AccumBounds(-oo, oo)
return AccumBounds(self.min**other, self.max**other)
num, den = other.as_numer_denom()
if num == S.One:
if den % 2 == 0:
if S.Zero in self:
if self.min.is_extended_negative:
return AccumBounds(0, real_root(self.max, den))
return AccumBounds(real_root(self.min, den),
real_root(self.max, den))
if den!=1:
num_pow = self**num
return num_pow**(1 / den)
return AccumBounds(-oo, oo)
return NotImplemented
def __abs__(self):
if self.max.is_extended_negative:
return self.__neg__()
elif self.min.is_extended_negative:
return AccumBounds(S.Zero, Max(abs(self.min), self.max))
else:
return self
def __contains__(self, other):
"""
Returns True if other is contained in self, where other
belongs to extended real numbers, False if not contained,
otherwise TypeError is raised.
Examples
========
>>> from sympy import AccumBounds, oo
>>> 1 in AccumBounds(-1, 3)
True
-oo and oo go together as limits (in AccumulationBounds).
>>> -oo in AccumBounds(1, oo)
True
>>> oo in AccumBounds(-oo, 0)
True
"""
other = _sympify(other)
if other is S.Infinity or other is S.NegativeInfinity:
if self.min is S.NegativeInfinity or self.max is S.Infinity:
return True
return False
rv = And(self.min <= other, self.max >= other)
if rv not in (True, False):
raise TypeError("input failed to evaluate")
return rv
def intersection(self, other):
"""
Returns the intersection of 'self' and 'other'.
Here other can be an instance of FiniteSet or AccumulationBounds.
Parameters
==========
other: AccumulationBounds
Another AccumulationBounds object with which the intersection
has to be computed.
Returns
=======
AccumulationBounds
Intersection of 'self' and 'other'.
Examples
========
>>> from sympy import AccumBounds, FiniteSet
>>> AccumBounds(1, 3).intersection(AccumBounds(2, 4))
AccumBounds(2, 3)
>>> AccumBounds(1, 3).intersection(AccumBounds(4, 6))
EmptySet
>>> AccumBounds(1, 4).intersection(FiniteSet(1, 2, 5))
FiniteSet(1, 2)
"""
if not isinstance(other, (AccumBounds, FiniteSet)):
raise TypeError(
"Input must be AccumulationBounds or FiniteSet object")
if isinstance(other, FiniteSet):
fin_set = S.EmptySet
for i in other:
if i in self:
fin_set = fin_set + FiniteSet(i)
return fin_set
if self.max < other.min or self.min > other.max:
return S.EmptySet
if self.min <= other.min:
if self.max <= other.max:
return AccumBounds(other.min, self.max)
if self.max > other.max:
return other
if other.min <= self.min:
if other.max < self.max:
return AccumBounds(self.min, other.max)
if other.max > self.max:
return self
def union(self, other):
# TODO : Devise a better method for Union of AccumBounds
# this method is not actually correct and
# can be made better
if not isinstance(other, AccumBounds):
raise TypeError(
"Input must be AccumulationBounds or FiniteSet object")
if self.min <= other.min and self.max >= other.min:
return AccumBounds(self.min, Max(self.max, other.max))
if other.min <= self.min and other.max >= self.min:
return AccumBounds(other.min, Max(self.max, other.max))
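        # For example (illustrative), AccumBounds(1, 3).union(AccumBounds(2, 4))
        # gives AccumBounds(1, 4); for disjoint inputs such as AccumBounds(1, 2)
        # and AccumBounds(3, 4) no value is returned (None), per the TODO above.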
@dispatch(AccumulationBounds, AccumulationBounds) # type: ignore # noqa:F811
def _eval_is_le(lhs, rhs): # noqa:F811
if is_le(lhs.max, rhs.min):
return True
if is_gt(lhs.min, rhs.max):
return False
@dispatch(AccumulationBounds, Basic) # type: ignore # noqa:F811
def _eval_is_le(lhs, rhs): # noqa: F811
"""
    Returns True if the range of values attained by the `lhs`
    AccumulationBounds object is greater than the range of values attained
    by `rhs`, where `rhs` may be an AccumulationBounds object or an
    extended real number, False if `rhs` satisfies the same property,
    else an unevaluated Relational.
Examples
========
>>> from sympy import AccumBounds, oo
>>> AccumBounds(1, 3) > AccumBounds(4, oo)
False
>>> AccumBounds(1, 4) > AccumBounds(3, 4)
AccumBounds(1, 4) > AccumBounds(3, 4)
>>> AccumBounds(1, oo) > -1
True
"""
if not rhs.is_extended_real:
raise TypeError(
"Invalid comparison of %s %s" %
(type(rhs), rhs))
elif rhs.is_comparable:
if is_le(lhs.max, rhs):
return True
if is_gt(lhs.min, rhs):
return False
@dispatch(AccumulationBounds, AccumulationBounds)
def _eval_is_ge(lhs, rhs): # noqa:F811
if is_ge(lhs.min, rhs.max):
return True
if is_lt(lhs.max, rhs.min):
return False
@dispatch(AccumulationBounds, Expr) # type:ignore
def _eval_is_ge(lhs, rhs): # noqa: F811
"""
    Returns True if the range of values attained by the `lhs`
    AccumulationBounds object is greater than or equal to the range of
    values attained by `rhs`, where `rhs` may be an AccumulationBounds
    object or an extended real number, False if `rhs` satisfies the same
    property, else an unevaluated Relational.
Examples
========
>>> from sympy import AccumBounds, oo
>>> AccumBounds(1, 3) >= AccumBounds(4, oo)
False
>>> AccumBounds(1, 4) >= AccumBounds(3, 4)
AccumBounds(1, 4) >= AccumBounds(3, 4)
>>> AccumBounds(1, oo) >= 1
True
"""
if not rhs.is_extended_real:
raise TypeError(
"Invalid comparison of %s %s" %
(type(rhs), rhs))
elif rhs.is_comparable:
if is_ge(lhs.min, rhs):
return True
if is_lt(lhs.max, rhs):
return False
@dispatch(Expr, AccumulationBounds) # type:ignore
def _eval_is_ge(lhs, rhs): # noqa:F811
if not lhs.is_extended_real:
raise TypeError(
"Invalid comparison of %s %s" %
(type(lhs), lhs))
elif lhs.is_comparable:
if is_le(rhs.max, lhs):
return True
if is_gt(rhs.min, lhs):
return False
@dispatch(AccumulationBounds, AccumulationBounds) # type:ignore
def _eval_is_ge(lhs, rhs): # noqa:F811
if is_ge(lhs.min, rhs.max):
return True
if is_lt(lhs.max, rhs.min):
return False
# setting an alias for AccumulationBounds
AccumBounds = AccumulationBounds
|
cb53b82ad984aa09934edfa2044d7e26a9f507a4ed59cb19979d31434af9427d | from sympy.tensor import Indexed
from sympy import Integral, Dummy, sympify, Tuple
class IndexedIntegral(Integral):
"""
Experimental class to test integration by indexed variables.
    Usage is analogous to ``Integral``; it simply adds awareness of
integration over indices.
Contraction of non-identical index symbols referring to the same
``IndexedBase`` is not yet supported.
Examples
========
>>> from sympy.sandbox.indexed_integrals import IndexedIntegral
>>> from sympy import IndexedBase, symbols
>>> A = IndexedBase('A')
>>> i, j = symbols('i j', integer=True)
>>> ii = IndexedIntegral(A[i], A[i])
>>> ii
Integral(_A[i], _A[i])
>>> ii.doit()
A[i]**2/2
If the indices are different, indexed objects are considered to be
different variables:
>>> i2 = IndexedIntegral(A[j], A[i])
>>> i2
Integral(A[j], _A[i])
>>> i2.doit()
A[i]*A[j]
"""
def __new__(cls, function, *limits, **assumptions):
repl, limits = IndexedIntegral._indexed_process_limits(limits)
function = sympify(function)
function = function.xreplace(repl)
obj = Integral.__new__(cls, function, *limits, **assumptions)
obj._indexed_repl = repl
obj._indexed_reverse_repl = {val: key for key, val in repl.items()}
return obj
def doit(self):
res = super().doit()
return res.xreplace(self._indexed_reverse_repl)
@staticmethod
def _indexed_process_limits(limits):
repl = {}
newlimits = []
for i in limits:
if isinstance(i, (tuple, list, Tuple)):
v = i[0]
vrest = i[1:]
else:
v = i
vrest = ()
if isinstance(v, Indexed):
if v not in repl:
r = Dummy(str(v))
repl[v] = r
newlimits.append((r,)+vrest)
else:
newlimits.append(i)
return repl, newlimits
|
d6b3a9921058d13b3a5171efaa34567e10ce2f0221951c83af4caf2e7414c18e | """ Generic SymPy-Independent Strategies """
from sympy.core.compatibility import get_function_name
identity = lambda x: x
def exhaust(rule):
""" Apply a rule repeatedly until it has no effect """
def exhaustive_rl(expr):
new, old = rule(expr), expr
while new != old:
new, old = rule(new), new
return new
return exhaustive_rl
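# For example (purely illustrative), with sympy's Basic and the unpack rule
# from sympy.strategies.rl, exhaust(unpack)(Basic(Basic(Basic(2)))) keeps
# unwrapping singleton args until a fixed point is reached, giving 2.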
def memoize(rule):
""" Memoized version of a rule """
cache = {}
def memoized_rl(expr):
if expr in cache:
return cache[expr]
else:
result = rule(expr)
cache[expr] = result
return result
return memoized_rl
def condition(cond, rule):
""" Only apply rule if condition is true """
def conditioned_rl(expr):
if cond(expr):
return rule(expr)
else:
return expr
return conditioned_rl
def chain(*rules):
"""
Compose a sequence of rules so that they apply to the expr sequentially
"""
def chain_rl(expr):
for rule in rules:
expr = rule(expr)
return expr
return chain_rl
def debug(rule, file=None):
""" Print out before and after expressions each time rule is used """
if file is None:
from sys import stdout
file = stdout
def debug_rl(*args, **kwargs):
expr = args[0]
result = rule(*args, **kwargs)
if result != expr:
file.write("Rule: %s\n" % get_function_name(rule))
file.write("In: %s\nOut: %s\n\n"%(expr, result))
return result
return debug_rl
def null_safe(rule):
""" Return original expr if rule returns None """
def null_safe_rl(expr):
result = rule(expr)
if result is None:
return expr
else:
return result
return null_safe_rl
def tryit(rule, exception):
""" Return original expr if rule raises exception """
def try_rl(expr):
try:
return rule(expr)
except exception:
return expr
return try_rl
def do_one(*rules):
""" Try each of the rules until one works. Then stop. """
def do_one_rl(expr):
for rl in rules:
result = rl(expr)
if result != expr:
return result
return expr
return do_one_rl
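# For example (illustrative, with hypothetical rules rule_a and rule_b),
# do_one(null_safe(rule_a), null_safe(rule_b))(expr) tries rule_a first and
# falls back to rule_b only if rule_a left expr unchanged (null_safe turns a
# None result back into the original expr).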
def switch(key, ruledict):
""" Select a rule based on the result of key called on the function """
def switch_rl(expr):
rl = ruledict.get(key(expr), identity)
return rl(expr)
return switch_rl
def minimize(*rules, objective=identity):
""" Select result of rules that minimizes objective
>>> from sympy.strategies import minimize
>>> inc = lambda x: x + 1
>>> dec = lambda x: x - 1
>>> rl = minimize(inc, dec)
>>> rl(4)
3
>>> rl = minimize(inc, dec, objective=lambda x: -x) # maximize
>>> rl(4)
5
"""
def minrule(expr):
return min([rule(expr) for rule in rules], key=objective)
return minrule
|
1e1ed6be8c816053f910f8055fe5ed2e445fedf8ac067a1b31de13e8baecc02a | from functools import partial
from sympy.strategies import chain, minimize
import sympy.strategies.branch as branch
from sympy.strategies.branch import yieldify
identity = lambda x: x
def treeapply(tree, join, leaf=identity):
""" Apply functions onto recursive containers (tree)
join - a dictionary mapping container types to functions
e.g. ``{list: minimize, tuple: chain}``
Keys are containers/iterables. Values are functions [a] -> a.
Examples
========
>>> from sympy.strategies.tree import treeapply
>>> tree = [(3, 2), (4, 1)]
>>> treeapply(tree, {list: max, tuple: min})
2
>>> add = lambda *args: sum(args)
>>> def mul(*args):
... total = 1
... for arg in args:
... total *= arg
... return total
>>> treeapply(tree, {list: mul, tuple: add})
25
"""
for typ in join:
if isinstance(tree, typ):
return join[typ](*map(partial(treeapply, join=join, leaf=leaf),
tree))
return leaf(tree)
def greedy(tree, objective=identity, **kwargs):
""" Execute a strategic tree. Select alternatives greedily
Trees
-----
Nodes in a tree can be either
function - a leaf
list - a selection among operations
tuple - a sequence of chained operations
Textual examples
----------------
Text: Run f, then run g, e.g. ``lambda x: g(f(x))``
Code: ``(f, g)``
Text: Run either f or g, whichever minimizes the objective
Code: ``[f, g]``
    Text: Run either f or g, whichever is better, then run h
Code: ``([f, g], h)``
Text: Either expand then simplify or try factor then foosimp. Finally print
Code: ``([(expand, simplify), (factor, foosimp)], print)``
Objective
---------
"Better" is determined by the objective keyword. This function makes
choices to minimize the objective. It defaults to the identity.
Examples
========
>>> from sympy.strategies.tree import greedy
>>> inc = lambda x: x + 1
>>> dec = lambda x: x - 1
>>> double = lambda x: 2*x
>>> tree = [inc, (dec, double)] # either inc or dec-then-double
>>> fn = greedy(tree)
>>> fn(4) # lowest value comes from the inc
5
>>> fn(1) # lowest value comes from dec then double
0
    This function selects between options in a list; the result that
    minimizes the objective function is chosen.
>>> fn = greedy(tree, objective=lambda x: -x) # maximize
>>> fn(4) # highest value comes from the dec then double
6
>>> fn(1) # highest value comes from the inc
2
Greediness
----------
This is a greedy algorithm. In the example:
([a, b], c) # do either a or b, then do c
the choice between running ``a`` or ``b`` is made without foresight to c
"""
optimize = partial(minimize, objective=objective)
return treeapply(tree, {list: optimize, tuple: chain}, **kwargs)
def allresults(tree, leaf=yieldify):
""" Execute a strategic tree. Return all possibilities.
Returns a lazy iterator of all possible results
Exhaustiveness
--------------
This is an exhaustive algorithm. In the example
([a, b], [c, d])
All of the results from
(a, c), (b, c), (a, d), (b, d)
are returned. This can lead to combinatorial blowup.
See sympy.strategies.greedy for details on input
"""
return treeapply(tree, {list: branch.multiplex, tuple: branch.chain},
leaf=leaf)
def brute(tree, objective=identity, **kwargs):
return lambda expr: min(tuple(allresults(tree, **kwargs)(expr)),
key=objective)
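# For example (illustrative), reusing inc, dec and double from the greedy
# docstring above, brute([inc, (dec, double)])(4) should evaluate every
# branch (5 and 6) and return the smallest result, 5.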
|
02d0ea37d52030f3e60fbd8e3a296de3f14ad6e43ffa1167feff7c68785636d3 | from . import rl
from .core import do_one, exhaust, switch
from .traverse import top_down
def subs(d, **kwargs):
""" Full simultaneous exact substitution
Examples
========
>>> from sympy.strategies.tools import subs
>>> from sympy import Basic
>>> mapping = {1: 4, 4: 1, Basic(5): Basic(6, 7)}
>>> expr = Basic(1, Basic(2, 3), Basic(4, Basic(5)))
>>> subs(mapping)(expr)
Basic(4, Basic(2, 3), Basic(1, Basic(6, 7)))
"""
if d:
return top_down(do_one(*map(rl.subs, *zip(*d.items()))), **kwargs)
else:
return lambda x: x
def canon(*rules, **kwargs):
""" Strategy for canonicalization
Apply each rule in a bottom_up fashion through the tree.
Do each one in turn.
Keep doing this until there is no change.
"""
return exhaust(top_down(exhaust(do_one(*rules)), **kwargs))
def typed(ruletypes):
""" Apply rules based on the expression type
inputs:
ruletypes -- a dict mapping {Type: rule}
>>> from sympy.strategies import rm_id, typed
>>> from sympy import Add, Mul
>>> rm_zeros = rm_id(lambda x: x==0)
>>> rm_ones = rm_id(lambda x: x==1)
>>> remove_idents = typed({Add: rm_zeros, Mul: rm_ones})
"""
return switch(type, ruletypes)
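# Continuing the docstring example above (illustrative):
# remove_idents(Add(x, 0, y, evaluate=False)) should give x + y, while
# remove_idents(Mul(x, 1, y, evaluate=False)) should give x*y.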
|
3e1c3f1edbf42774ed9ae9bb9735f766a712de69cbc05db2e934161a7325ddc0 | """ Generic Rules for SymPy
This file assumes knowledge of Basic and little else.
"""
from sympy.utilities.iterables import sift
from .util import new
# Functions that create rules
def rm_id(isid, new=new):
""" Create a rule to remove identities
isid - fn :: x -> Bool --- whether or not this element is an identity
>>> from sympy.strategies import rm_id
>>> from sympy import Basic
>>> remove_zeros = rm_id(lambda x: x==0)
>>> remove_zeros(Basic(1, 0, 2))
Basic(1, 2)
    >>> remove_zeros(Basic(0, 0)) # If only identities then we keep one
Basic(0)
See Also:
unpack
"""
def ident_remove(expr):
""" Remove identities """
ids = list(map(isid, expr.args))
if sum(ids) == 0: # No identities. Common case
return expr
elif sum(ids) != len(ids): # there is at least one non-identity
return new(expr.__class__,
*[arg for arg, x in zip(expr.args, ids) if not x])
else:
return new(expr.__class__, expr.args[0])
return ident_remove
def glom(key, count, combine):
""" Create a rule to conglomerate identical args
>>> from sympy.strategies import glom
>>> from sympy import Add
>>> from sympy.abc import x
>>> key = lambda x: x.as_coeff_Mul()[1]
>>> count = lambda x: x.as_coeff_Mul()[0]
>>> combine = lambda cnt, arg: cnt * arg
>>> rl = glom(key, count, combine)
>>> rl(Add(x, -x, 3*x, 2, 3, evaluate=False))
3*x + 5
Wait, how are key, count and combine supposed to work?
>>> key(2*x)
x
>>> count(2*x)
2
>>> combine(2, x)
2*x
"""
def conglomerate(expr):
""" Conglomerate together identical args x + x -> 2x """
groups = sift(expr.args, key)
counts = {k: sum(map(count, args)) for k, args in groups.items()}
newargs = [combine(cnt, mat) for mat, cnt in counts.items()]
if set(newargs) != set(expr.args):
return new(type(expr), *newargs)
else:
return expr
return conglomerate
def sort(key, new=new):
""" Create a rule to sort by a key function
>>> from sympy.strategies import sort
>>> from sympy import Basic
>>> sort_rl = sort(str)
>>> sort_rl(Basic(3, 1, 2))
Basic(1, 2, 3)
"""
def sort_rl(expr):
return new(expr.__class__, *sorted(expr.args, key=key))
return sort_rl
def distribute(A, B):
""" Turns an A containing Bs into a B of As
where A, B are container types
>>> from sympy.strategies import distribute
>>> from sympy import Add, Mul, symbols
>>> x, y = symbols('x,y')
>>> dist = distribute(Mul, Add)
>>> expr = Mul(2, x+y, evaluate=False)
>>> expr
2*(x + y)
>>> dist(expr)
2*x + 2*y
"""
def distribute_rl(expr):
for i, arg in enumerate(expr.args):
if isinstance(arg, B):
first, b, tail = expr.args[:i], expr.args[i], expr.args[i+1:]
return B(*[A(*(first + (arg,) + tail)) for arg in b.args])
return expr
return distribute_rl
def subs(a, b):
""" Replace expressions exactly """
def subs_rl(expr):
if expr == a:
return b
else:
return expr
return subs_rl
# Functions that are rules
def unpack(expr):
""" Rule to unpack singleton args
>>> from sympy.strategies import unpack
>>> from sympy import Basic
>>> unpack(Basic(2))
2
"""
if len(expr.args) == 1:
return expr.args[0]
else:
return expr
def flatten(expr, new=new):
""" Flatten T(a, b, T(c, d), T2(e)) to T(a, b, c, d, T2(e)) """
cls = expr.__class__
args = []
for arg in expr.args:
if arg.__class__ == cls:
args.extend(arg.args)
else:
args.append(arg)
return new(expr.__class__, *args)
def rebuild(expr):
""" Rebuild a SymPy tree
This function recursively calls constructors in the expression tree.
This forces canonicalization and removes ugliness introduced by the use of
Basic.__new__
"""
if expr.is_Atom:
return expr
else:
return expr.func(*list(map(rebuild, expr.args)))
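# Illustrative sketches (added; not part of the original file). ``flatten``
# merges nested nodes of the same class, while ``rebuild`` re-runs the real
# constructors so that unevaluated trees collapse to canonical form:
#     >>> from sympy import Basic, Add
#     >>> from sympy.abc import x
#     >>> flatten(Basic(1, 2, Basic(3, 4)))
#     Basic(1, 2, 3, 4)
#     >>> rebuild(Add(x, Add(x, 1, evaluate=False), evaluate=False))
#     2*x + 1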
|
0e13e7049e51c3fc73846881b527224f257e1f6ce19718236158eb347fa02f29 | """Strategies to Traverse a Tree."""
from sympy.strategies.util import basic_fns
from sympy.strategies.core import chain, do_one
def top_down(rule, fns=basic_fns):
"""Apply a rule down a tree running it on the top nodes first."""
return chain(rule, lambda expr: sall(top_down(rule, fns), fns)(expr))
def bottom_up(rule, fns=basic_fns):
"""Apply a rule down a tree running it on the bottom nodes first."""
return chain(lambda expr: sall(bottom_up(rule, fns), fns)(expr), rule)
def top_down_once(rule, fns=basic_fns):
"""Apply a rule down a tree - stop on success."""
return do_one(rule, lambda expr: sall(top_down(rule, fns), fns)(expr))
def bottom_up_once(rule, fns=basic_fns):
"""Apply a rule up a tree - stop on success."""
return do_one(lambda expr: sall(bottom_up(rule, fns), fns)(expr), rule)
def sall(rule, fns=basic_fns):
"""Strategic all - apply rule to args."""
op, new, children, leaf = map(fns.get, ('op', 'new', 'children', 'leaf'))
def all_rl(expr):
if leaf(expr):
return expr
else:
args = map(rule, children(expr))
return new(op(expr), *args)
return all_rl
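# Hedged usage sketch (added; not in the original module): ``top_down`` pushes
# a rule through every node of a tree, so an exact-replacement rule from
# ``sympy.strategies.rl.subs`` rewrites nested occurrences as well:
#     >>> from sympy import Basic
#     >>> from sympy.abc import x, y
#     >>> from sympy.strategies.rl import subs
#     >>> rl = top_down(subs(x, y))
#     >>> rl(Basic(x, Basic(x, 1)))
#     Basic(y, Basic(y, 1))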
|
d0e6d9fa0d43122d60d3b005fabe93f35978a2f1b05bb1ea713ae39243aa8034 | from sympy import Basic
new = Basic.__new__
def assoc(d, k, v):
d = d.copy()
d[k] = v
return d
basic_fns = {'op': type,
'new': Basic.__new__,
'leaf': lambda x: not isinstance(x, Basic) or x.is_Atom,
'children': lambda x: x.args}
expr_fns = assoc(basic_fns, 'new', lambda op, *args: op(*args))
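# Note added for illustration: the two tables differ only in ``new``. With
# ``basic_fns`` a node is rebuilt through ``Basic.__new__`` (no evaluation),
# while ``expr_fns`` calls the class itself, so SymPy's usual evaluation runs:
#     >>> from sympy import Add
#     >>> from sympy.abc import x
#     >>> basic_fns['new'](Add, x, x)   # unevaluated Add with args (x, x)
#     x + x
#     >>> expr_fns['new'](Add, x, x)    # evaluated by Add's constructor
#     2*x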
|
81c2ee6523a14b167fb3fd56f704089ee5baacba2e8d5196cb103ae97eaf911c | """
This module provides convenient functions to transform sympy expressions to
lambda functions which can be used to calculate numerical values very fast.
"""
from typing import Any, Dict, Iterable
import inspect
import keyword
import textwrap
import linecache
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.core.compatibility import (exec_, is_sequence, iterable,
NotIterable, builtins)
from sympy.utilities.misc import filldedent
from sympy.utilities.decorator import doctest_depends_on
__doctest_requires__ = {('lambdify',): ['numpy', 'tensorflow']}
# Default namespaces, letting us define translations that can't be defined
# by simple variable maps, like I => 1j
MATH_DEFAULT = {} # type: Dict[str, Any]
MPMATH_DEFAULT = {} # type: Dict[str, Any]
NUMPY_DEFAULT = {"I": 1j} # type: Dict[str, Any]
SCIPY_DEFAULT = {"I": 1j} # type: Dict[str, Any]
TENSORFLOW_DEFAULT = {} # type: Dict[str, Any]
SYMPY_DEFAULT = {} # type: Dict[str, Any]
NUMEXPR_DEFAULT = {} # type: Dict[str, Any]
# These are the namespaces the lambda functions will use.
# These are separate from the names above because they are modified
# throughout this file, whereas the defaults should remain unmodified.
MATH = MATH_DEFAULT.copy()
MPMATH = MPMATH_DEFAULT.copy()
NUMPY = NUMPY_DEFAULT.copy()
SCIPY = SCIPY_DEFAULT.copy()
TENSORFLOW = TENSORFLOW_DEFAULT.copy()
SYMPY = SYMPY_DEFAULT.copy()
NUMEXPR = NUMEXPR_DEFAULT.copy()
# Mappings between sympy and other modules function names.
MATH_TRANSLATIONS = {
"ceiling": "ceil",
"E": "e",
"ln": "log",
}
# NOTE: This dictionary is reused in Function._eval_evalf to allow subclasses
# of Function to automatically evalf.
MPMATH_TRANSLATIONS = {
"Abs": "fabs",
"elliptic_k": "ellipk",
"elliptic_f": "ellipf",
"elliptic_e": "ellipe",
"elliptic_pi": "ellippi",
"ceiling": "ceil",
"chebyshevt": "chebyt",
"chebyshevu": "chebyu",
"E": "e",
"I": "j",
"ln": "log",
#"lowergamma":"lower_gamma",
"oo": "inf",
#"uppergamma":"upper_gamma",
"LambertW": "lambertw",
"MutableDenseMatrix": "matrix",
"ImmutableDenseMatrix": "matrix",
"conjugate": "conj",
"dirichlet_eta": "altzeta",
"Ei": "ei",
"Shi": "shi",
"Chi": "chi",
"Si": "si",
"Ci": "ci",
"RisingFactorial": "rf",
"FallingFactorial": "ff",
}
NUMPY_TRANSLATIONS = {} # type: Dict[str, str]
SCIPY_TRANSLATIONS = {} # type: Dict[str, str]
TENSORFLOW_TRANSLATIONS = {} # type: Dict[str, str]
NUMEXPR_TRANSLATIONS = {} # type: Dict[str, str]
# Available modules:
MODULES = {
"math": (MATH, MATH_DEFAULT, MATH_TRANSLATIONS, ("from math import *",)),
"mpmath": (MPMATH, MPMATH_DEFAULT, MPMATH_TRANSLATIONS, ("from mpmath import *",)),
"numpy": (NUMPY, NUMPY_DEFAULT, NUMPY_TRANSLATIONS, ("import numpy; from numpy import *; from numpy.linalg import *",)),
"scipy": (SCIPY, SCIPY_DEFAULT, SCIPY_TRANSLATIONS, ("import numpy; import scipy; from scipy import *; from scipy.special import *",)),
"tensorflow": (TENSORFLOW, TENSORFLOW_DEFAULT, TENSORFLOW_TRANSLATIONS, ("import tensorflow",)),
"sympy": (SYMPY, SYMPY_DEFAULT, {}, (
"from sympy.functions import *",
"from sympy.matrices import *",
"from sympy import Integral, pi, oo, nan, zoo, E, I",)),
"numexpr" : (NUMEXPR, NUMEXPR_DEFAULT, NUMEXPR_TRANSLATIONS,
("import_module('numexpr')", )),
}
def _import(module, reload=False):
"""
Creates a global translation dictionary for module.
    The argument module has to be one of the following strings: "math",
    "mpmath", "numpy", "scipy", "tensorflow", "sympy", "numexpr".
    These dictionaries map names of SymPy functions to their equivalents in
    other modules.
"""
# Required despite static analysis claiming it is not used
from sympy.external import import_module # noqa:F401
try:
namespace, namespace_default, translations, import_commands = MODULES[
module]
except KeyError:
raise NameError(
"'%s' module can't be used for lambdification" % module)
# Clear namespace or exit
if namespace != namespace_default:
# The namespace was already generated, don't do it again if not forced.
if reload:
namespace.clear()
namespace.update(namespace_default)
else:
return
for import_command in import_commands:
if import_command.startswith('import_module'):
module = eval(import_command)
if module is not None:
namespace.update(module.__dict__)
continue
else:
try:
exec_(import_command, {}, namespace)
continue
except ImportError:
pass
raise ImportError(
"can't import '%s' with '%s' command" % (module, import_command))
# Add translated names to namespace
for sympyname, translation in translations.items():
namespace[sympyname] = namespace[translation]
# For computing the modulus of a sympy expression we use the builtin abs
# function, instead of the previously used fabs function for all
# translation modules. This is because the fabs function in the math
# module does not accept complex valued arguments. (see issue 9474). The
# only exception, where we don't use the builtin abs function is the
# mpmath translation module, because mpmath.fabs returns mpf objects in
# contrast to abs().
if 'Abs' not in namespace:
namespace['Abs'] = abs
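# Hedged illustration (added, not in the original source): after calling
# ``_import("math")`` the MATH namespace holds both the plain ``math`` names
# and the translated SymPy names, e.g.
#     >>> import math
#     >>> _import("math")
#     >>> MATH["ceiling"] is math.ceil
#     True
#     >>> MATH["Abs"] is abs
#     True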
# Used for dynamically generated filenames that are inserted into the
# linecache.
_lambdify_generated_counter = 1
@doctest_depends_on(modules=('numpy', 'tensorflow', ), python_version=(3,))
def lambdify(args: Iterable, expr, modules=None, printer=None, use_imps=True,
dummify=False):
"""Convert a SymPy expression into a function that allows for fast
numeric evaluation.
.. warning::
This function uses ``exec``, and thus shouldn't be used on
unsanitized input.
.. versionchanged:: 1.7.0
Passing a set for the *args* parameter is deprecated as sets are
unordered. Use an ordered iterable such as a list or tuple.
Explanation
===========
For example, to convert the SymPy expression ``sin(x) + cos(x)`` to an
equivalent NumPy function that numerically evaluates it:
>>> from sympy import sin, cos, symbols, lambdify
>>> import numpy as np
>>> x = symbols('x')
>>> expr = sin(x) + cos(x)
>>> expr
sin(x) + cos(x)
>>> f = lambdify(x, expr, 'numpy')
>>> a = np.array([1, 2])
>>> f(a)
[1.38177329 0.49315059]
The primary purpose of this function is to provide a bridge from SymPy
expressions to numerical libraries such as NumPy, SciPy, NumExpr, mpmath,
and tensorflow. In general, SymPy functions do not work with objects from
other libraries, such as NumPy arrays, and functions from numeric
libraries like NumPy or mpmath do not work on SymPy expressions.
``lambdify`` bridges the two by converting a SymPy expression to an
equivalent numeric function.
The basic workflow with ``lambdify`` is to first create a SymPy expression
representing whatever mathematical function you wish to evaluate. This
should be done using only SymPy functions and expressions. Then, use
``lambdify`` to convert this to an equivalent function for numerical
evaluation. For instance, above we created ``expr`` using the SymPy symbol
``x`` and SymPy functions ``sin`` and ``cos``, then converted it to an
equivalent NumPy function ``f``, and called it on a NumPy array ``a``.
Parameters
==========
args : List[Symbol]
A variable or a list of variables whose nesting represents the
nesting of the arguments that will be passed to the function.
Variables can be symbols, undefined functions, or matrix symbols.
>>> from sympy import Eq
>>> from sympy.abc import x, y, z
The list of variables should match the structure of how the
arguments will be passed to the function. Simply enclose the
parameters as they will be passed in a list.
To call a function like ``f(x)`` then ``[x]``
should be the first argument to ``lambdify``; for this
case a single ``x`` can also be used:
>>> f = lambdify(x, x + 1)
>>> f(1)
2
>>> f = lambdify([x], x + 1)
>>> f(1)
2
To call a function like ``f(x, y)`` then ``[x, y]`` will
be the first argument of the ``lambdify``:
>>> f = lambdify([x, y], x + y)
>>> f(1, 1)
2
To call a function with a single 3-element tuple like
``f((x, y, z))`` then ``[(x, y, z)]`` will be the first
argument of the ``lambdify``:
>>> f = lambdify([(x, y, z)], Eq(z**2, x**2 + y**2))
>>> f((3, 4, 5))
True
If two args will be passed and the first is a scalar but
the second is a tuple with two arguments then the items
in the list should match that structure:
>>> f = lambdify([x, (y, z)], x + y + z)
>>> f(1, (2, 3))
6
expr : Expr
An expression, list of expressions, or matrix to be evaluated.
Lists may be nested.
If the expression is a list, the output will also be a list.
>>> f = lambdify(x, [x, [x + 1, x + 2]])
>>> f(1)
[1, [2, 3]]
If it is a matrix, an array will be returned (for the NumPy module).
>>> from sympy import Matrix
>>> f = lambdify(x, Matrix([x, x + 1]))
>>> f(1)
[[1]
[2]]
Note that the argument order here (variables then expression) is used
to emulate the Python ``lambda`` keyword. ``lambdify(x, expr)`` works
(roughly) like ``lambda x: expr``
(see :ref:`lambdify-how-it-works` below).
modules : str, optional
Specifies the numeric library to use.
If not specified, *modules* defaults to:
- ``["scipy", "numpy"]`` if SciPy is installed
- ``["numpy"]`` if only NumPy is installed
- ``["math", "mpmath", "sympy"]`` if neither is installed.
That is, SymPy functions are replaced as far as possible by
either ``scipy`` or ``numpy`` functions if available, and Python's
standard library ``math``, or ``mpmath`` functions otherwise.
*modules* can be one of the following types:
- The strings ``"math"``, ``"mpmath"``, ``"numpy"``, ``"numexpr"``,
``"scipy"``, ``"sympy"``, or ``"tensorflow"``. This uses the
corresponding printer and namespace mapping for that module.
- A module (e.g., ``math``). This uses the global namespace of the
module. If the module is one of the above known modules, it will
also use the corresponding printer and namespace mapping
(i.e., ``modules=numpy`` is equivalent to ``modules="numpy"``).
- A dictionary that maps names of SymPy functions to arbitrary
functions
(e.g., ``{'sin': custom_sin}``).
- A list that contains a mix of the arguments above, with higher
priority given to entries appearing first
(e.g., to use the NumPy module but override the ``sin`` function
with a custom version, you can use
``[{'sin': custom_sin}, 'numpy']``).
dummify : bool, optional
Whether or not the variables in the provided expression that are not
valid Python identifiers are substituted with dummy symbols.
This allows for undefined functions like ``Function('f')(t)`` to be
supplied as arguments. By default, the variables are only dummified
if they are not valid Python identifiers.
Set ``dummify=True`` to replace all arguments with dummy symbols
(if ``args`` is not a string) - for example, to ensure that the
arguments do not redefine any built-in names.
Examples
========
>>> from sympy.utilities.lambdify import implemented_function
>>> from sympy import sqrt, sin, Matrix
>>> from sympy import Function
>>> from sympy.abc import w, x, y, z
>>> f = lambdify(x, x**2)
>>> f(2)
4
>>> f = lambdify((x, y, z), [z, y, x])
>>> f(1,2,3)
[3, 2, 1]
>>> f = lambdify(x, sqrt(x))
>>> f(4)
2.0
>>> f = lambdify((x, y), sin(x*y)**2)
>>> f(0, 5)
0.0
>>> row = lambdify((x, y), Matrix((x, x + y)).T, modules='sympy')
>>> row(1, 2)
Matrix([[1, 3]])
``lambdify`` can be used to translate SymPy expressions into mpmath
functions. This may be preferable to using ``evalf`` (which uses mpmath on
the backend) in some cases.
>>> f = lambdify(x, sin(x), 'mpmath')
>>> f(1)
0.8414709848078965
Tuple arguments are handled and the lambdified function should
be called with the same type of arguments as were used to create
the function:
>>> f = lambdify((x, (y, z)), x + y)
>>> f(1, (2, 4))
3
The ``flatten`` function can be used to always work with flattened
arguments:
>>> from sympy.utilities.iterables import flatten
>>> args = w, (x, (y, z))
>>> vals = 1, (2, (3, 4))
>>> f = lambdify(flatten(args), w + x + y + z)
>>> f(*flatten(vals))
10
Functions present in ``expr`` can also carry their own numerical
implementations, in a callable attached to the ``_imp_`` attribute. This
can be used with undefined functions using the ``implemented_function``
factory:
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> func = lambdify(x, f(x))
>>> func(4)
5
``lambdify`` always prefers ``_imp_`` implementations to implementations
in other namespaces, unless the ``use_imps`` input parameter is False.
Usage with Tensorflow:
>>> import tensorflow as tf
>>> from sympy import Max, sin, lambdify
>>> from sympy.abc import x
>>> f = Max(x, sin(x))
>>> func = lambdify(x, f, 'tensorflow')
After tensorflow v2, eager execution is enabled by default.
    If you want results that are compatible across tensorflow v1 and v2,
    as in this tutorial, run this line.
>>> tf.compat.v1.enable_eager_execution()
If you have eager execution enabled, you can get the result out
immediately as you can use numpy.
If you pass tensorflow objects, you may get an ``EagerTensor``
    object instead of a value.
>>> result = func(tf.constant(1.0))
>>> print(result)
tf.Tensor(1.0, shape=(), dtype=float32)
>>> print(result.__class__)
<class 'tensorflow.python.framework.ops.EagerTensor'>
You can use ``.numpy()`` to get the numpy value of the tensor.
>>> result.numpy()
1.0
>>> var = tf.Variable(2.0)
>>> result = func(var) # also works for tf.Variable and tf.Placeholder
>>> result.numpy()
2.0
And it works with any shape array.
>>> tensor = tf.constant([[1.0, 2.0], [3.0, 4.0]])
>>> result = func(tensor)
>>> result.numpy()
[[1. 2.]
[3. 4.]]
Notes
=====
- For functions involving large array calculations, numexpr can provide a
significant speedup over numpy. Please note that the available functions
for numexpr are more limited than numpy but can be expanded with
``implemented_function`` and user defined subclasses of Function. If
specified, numexpr may be the only option in modules. The official list
of numexpr functions can be found at:
https://numexpr.readthedocs.io/en/latest/user_guide.html#supported-functions
- In previous versions of SymPy, ``lambdify`` replaced ``Matrix`` with
``numpy.matrix`` by default. As of SymPy 1.0 ``numpy.array`` is the
default. To get the old default behavior you must pass in
``[{'ImmutableDenseMatrix': numpy.matrix}, 'numpy']`` to the
``modules`` kwarg.
>>> from sympy import lambdify, Matrix
>>> from sympy.abc import x, y
>>> import numpy
>>> array2mat = [{'ImmutableDenseMatrix': numpy.matrix}, 'numpy']
>>> f = lambdify((x, y), Matrix([x, y]), modules=array2mat)
>>> f(1, 2)
[[1]
[2]]
- In the above examples, the generated functions can accept scalar
values or numpy arrays as arguments. However, in some cases
the generated function relies on the input being a numpy array:
>>> from sympy import Piecewise
>>> from sympy.testing.pytest import ignore_warnings
>>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "numpy")
>>> with ignore_warnings(RuntimeWarning):
... f(numpy.array([-1, 0, 1, 2]))
[-1. 0. 1. 0.5]
>>> f(0)
Traceback (most recent call last):
...
ZeroDivisionError: division by zero
In such cases, the input should be wrapped in a numpy array:
>>> with ignore_warnings(RuntimeWarning):
... float(f(numpy.array([0])))
0.0
Or if numpy functionality is not required another module can be used:
>>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "math")
>>> f(0)
0
.. _lambdify-how-it-works:
How it works
============
When using this function, it helps a great deal to have an idea of what it
is doing. At its core, lambdify is nothing more than a namespace
translation, on top of a special printer that makes some corner cases work
properly.
To understand lambdify, first we must properly understand how Python
namespaces work. Say we had two files. One called ``sin_cos_sympy.py``,
with
.. code:: python
# sin_cos_sympy.py
from sympy import sin, cos
def sin_cos(x):
return sin(x) + cos(x)
and one called ``sin_cos_numpy.py`` with
.. code:: python
# sin_cos_numpy.py
from numpy import sin, cos
def sin_cos(x):
return sin(x) + cos(x)
The two files define an identical function ``sin_cos``. However, in the
first file, ``sin`` and ``cos`` are defined as the SymPy ``sin`` and
``cos``. In the second, they are defined as the NumPy versions.
If we were to import the first file and use the ``sin_cos`` function, we
would get something like
>>> from sin_cos_sympy import sin_cos # doctest: +SKIP
>>> sin_cos(1) # doctest: +SKIP
cos(1) + sin(1)
On the other hand, if we imported ``sin_cos`` from the second file, we
would get
>>> from sin_cos_numpy import sin_cos # doctest: +SKIP
>>> sin_cos(1) # doctest: +SKIP
1.38177329068
In the first case we got a symbolic output, because it used the symbolic
``sin`` and ``cos`` functions from SymPy. In the second, we got a numeric
result, because ``sin_cos`` used the numeric ``sin`` and ``cos`` functions
from NumPy. But notice that the versions of ``sin`` and ``cos`` that were
used was not inherent to the ``sin_cos`` function definition. Both
``sin_cos`` definitions are exactly the same. Rather, it was based on the
names defined at the module where the ``sin_cos`` function was defined.
    The key point here is that when a function in Python references a name that
is not defined in the function, that name is looked up in the "global"
namespace of the module where that function is defined.
Now, in Python, we can emulate this behavior without actually writing a
file to disk using the ``exec`` function. ``exec`` takes a string
containing a block of Python code, and a dictionary that should contain
the global variables of the module. It then executes the code "in" that
dictionary, as if it were the module globals. The following is equivalent
to the ``sin_cos`` defined in ``sin_cos_sympy.py``:
>>> import sympy
>>> module_dictionary = {'sin': sympy.sin, 'cos': sympy.cos}
>>> exec('''
... def sin_cos(x):
... return sin(x) + cos(x)
... ''', module_dictionary)
>>> sin_cos = module_dictionary['sin_cos']
>>> sin_cos(1)
cos(1) + sin(1)
and similarly with ``sin_cos_numpy``:
>>> import numpy
>>> module_dictionary = {'sin': numpy.sin, 'cos': numpy.cos}
>>> exec('''
... def sin_cos(x):
... return sin(x) + cos(x)
... ''', module_dictionary)
>>> sin_cos = module_dictionary['sin_cos']
>>> sin_cos(1)
1.38177329068
So now we can get an idea of how ``lambdify`` works. The name "lambdify"
comes from the fact that we can think of something like ``lambdify(x,
sin(x) + cos(x), 'numpy')`` as ``lambda x: sin(x) + cos(x)``, where
``sin`` and ``cos`` come from the ``numpy`` namespace. This is also why
the symbols argument is first in ``lambdify``, as opposed to most SymPy
functions where it comes after the expression: to better mimic the
``lambda`` keyword.
``lambdify`` takes the input expression (like ``sin(x) + cos(x)``) and
1. Converts it to a string
2. Creates a module globals dictionary based on the modules that are
passed in (by default, it uses the NumPy module)
3. Creates the string ``"def func({vars}): return {expr}"``, where ``{vars}`` is the
list of variables separated by commas, and ``{expr}`` is the string
created in step 1., then ``exec``s that string with the module globals
namespace and returns ``func``.
In fact, functions returned by ``lambdify`` support inspection. So you can
see exactly how they are defined by using ``inspect.getsource``, or ``??`` if you
are using IPython or the Jupyter notebook.
>>> f = lambdify(x, sin(x) + cos(x))
>>> import inspect
>>> print(inspect.getsource(f))
def _lambdifygenerated(x):
return (sin(x) + cos(x))
This shows us the source code of the function, but not the namespace it
was defined in. We can inspect that by looking at the ``__globals__``
attribute of ``f``:
>>> f.__globals__['sin']
<ufunc 'sin'>
>>> f.__globals__['cos']
<ufunc 'cos'>
>>> f.__globals__['sin'] is numpy.sin
True
This shows us that ``sin`` and ``cos`` in the namespace of ``f`` will be
``numpy.sin`` and ``numpy.cos``.
Note that there are some convenience layers in each of these steps, but at
the core, this is how ``lambdify`` works. Step 1 is done using the
``LambdaPrinter`` printers defined in the printing module (see
:mod:`sympy.printing.lambdarepr`). This allows different SymPy expressions
to define how they should be converted to a string for different modules.
You can change which printer ``lambdify`` uses by passing a custom printer
in to the ``printer`` argument.
Step 2 is augmented by certain translations. There are default
translations for each module, but you can provide your own by passing a
list to the ``modules`` argument. For instance,
>>> def mysin(x):
... print('taking the sin of', x)
... return numpy.sin(x)
...
>>> f = lambdify(x, sin(x), [{'sin': mysin}, 'numpy'])
>>> f(1)
taking the sin of 1
0.8414709848078965
The globals dictionary is generated from the list by merging the
dictionary ``{'sin': mysin}`` and the module dictionary for NumPy. The
merging is done so that earlier items take precedence, which is why
``mysin`` is used above instead of ``numpy.sin``.
If you want to modify the way ``lambdify`` works for a given function, it
is usually easiest to do so by modifying the globals dictionary as such.
In more complicated cases, it may be necessary to create and pass in a
custom printer.
Finally, step 3 is augmented with certain convenience operations, such as
the addition of a docstring.
Understanding how ``lambdify`` works can make it easier to avoid certain
gotchas when using it. For instance, a common mistake is to create a
lambdified function for one module (say, NumPy), and pass it objects from
another (say, a SymPy expression).
For instance, say we create
>>> from sympy.abc import x
>>> f = lambdify(x, x + 1, 'numpy')
Now if we pass in a NumPy array, we get that array plus 1
>>> import numpy
>>> a = numpy.array([1, 2])
>>> f(a)
[2 3]
But what happens if you make the mistake of passing in a SymPy expression
instead of a NumPy array:
>>> f(x + 1)
x + 2
This worked, but it was only by accident. Now take a different lambdified
function:
>>> from sympy import sin
>>> g = lambdify(x, x + sin(x), 'numpy')
This works as expected on NumPy arrays:
>>> g(a)
[1.84147098 2.90929743]
But if we try to pass in a SymPy expression, it fails
>>> try:
... g(x + 1)
... # NumPy release after 1.17 raises TypeError instead of
... # AttributeError
... except (AttributeError, TypeError):
... raise AttributeError() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AttributeError:
Now, let's look at what happened. The reason this fails is that ``g``
calls ``numpy.sin`` on the input expression, and ``numpy.sin`` does not
know how to operate on a SymPy object. **As a general rule, NumPy
functions do not know how to operate on SymPy expressions, and SymPy
functions do not know how to operate on NumPy arrays. This is why lambdify
exists: to provide a bridge between SymPy and NumPy.**
However, why is it that ``f`` did work? That's because ``f`` doesn't call
any functions, it only adds 1. So the resulting function that is created,
``def _lambdifygenerated(x): return x + 1`` does not depend on the globals
namespace it is defined in. Thus it works, but only by accident. A future
version of ``lambdify`` may remove this behavior.
Be aware that certain implementation details described here may change in
future versions of SymPy. The API of passing in custom modules and
printers will not change, but the details of how a lambda function is
created may change. However, the basic idea will remain the same, and
understanding it will be helpful to understanding the behavior of
lambdify.
**In general: you should create lambdified functions for one module (say,
NumPy), and only pass it input types that are compatible with that module
    (say, NumPy arrays).** Remember that by default, if the ``modules``
    argument is not provided, ``lambdify`` creates functions using the NumPy
and SciPy namespaces.
"""
from sympy.core.symbol import Symbol
# If the user hasn't specified any modules, use what is available.
if modules is None:
try:
_import("scipy")
except ImportError:
try:
_import("numpy")
except ImportError:
# Use either numpy (if available) or python.math where possible.
# XXX: This leads to different behaviour on different systems and
# might be the reason for irreproducible errors.
modules = ["math", "mpmath", "sympy"]
else:
modules = ["numpy"]
else:
modules = ["numpy", "scipy"]
# Get the needed namespaces.
namespaces = []
# First find any function implementations
if use_imps:
namespaces.append(_imp_namespace(expr))
# Check for dict before iterating
if isinstance(modules, (dict, str)) or not hasattr(modules, '__iter__'):
namespaces.append(modules)
else:
# consistency check
if _module_present('numexpr', modules) and len(modules) > 1:
raise TypeError("numexpr must be the only item in 'modules'")
namespaces += list(modules)
# fill namespace with first having highest priority
namespace = {} # type: Dict[str, Any]
for m in namespaces[::-1]:
buf = _get_namespace(m)
namespace.update(buf)
if hasattr(expr, "atoms"):
        # Try to extract symbols from the expression.
        # Move on if expr.atoms is not implemented.
syms = expr.atoms(Symbol)
for term in syms:
namespace.update({str(term): term})
if printer is None:
if _module_present('mpmath', namespaces):
from sympy.printing.pycode import MpmathPrinter as Printer # type: ignore
elif _module_present('scipy', namespaces):
from sympy.printing.pycode import SciPyPrinter as Printer # type: ignore
elif _module_present('numpy', namespaces):
from sympy.printing.pycode import NumPyPrinter as Printer # type: ignore
elif _module_present('numexpr', namespaces):
from sympy.printing.lambdarepr import NumExprPrinter as Printer # type: ignore
elif _module_present('tensorflow', namespaces):
from sympy.printing.tensorflow import TensorflowPrinter as Printer # type: ignore
elif _module_present('sympy', namespaces):
from sympy.printing.pycode import SymPyPrinter as Printer # type: ignore
else:
from sympy.printing.pycode import PythonCodePrinter as Printer # type: ignore
user_functions = {}
for m in namespaces[::-1]:
if isinstance(m, dict):
for k in m:
user_functions[k] = k
printer = Printer({'fully_qualified_modules': False, 'inline': True,
'allow_unknown_functions': True,
'user_functions': user_functions})
if isinstance(args, set):
SymPyDeprecationWarning(
feature="The list of arguments is a `set`. This leads to unpredictable results",
useinstead=": Convert set into list or tuple",
issue=20013,
deprecated_since_version="1.6.3"
).warn()
# Get the names of the args, for creating a docstring
if not iterable(args):
args = (args,)
names = []
# Grab the callers frame, for getting the names by inspection (if needed)
callers_local_vars = inspect.currentframe().f_back.f_locals.items() # type: ignore
for n, var in enumerate(args):
if hasattr(var, 'name'):
names.append(var.name)
else:
# It's an iterable. Try to get name by inspection of calling frame.
name_list = [var_name for var_name, var_val in callers_local_vars
if var_val is var]
if len(name_list) == 1:
names.append(name_list[0])
else:
# Cannot infer name with certainty. arg_# will have to do.
names.append('arg_' + str(n))
# Create the function definition code and execute it
funcname = '_lambdifygenerated'
if _module_present('tensorflow', namespaces):
funcprinter = _TensorflowEvaluatorPrinter(printer, dummify) # type: _EvaluatorPrinter
else:
funcprinter = _EvaluatorPrinter(printer, dummify)
funcstr = funcprinter.doprint(funcname, args, expr)
# Collect the module imports from the code printers.
imp_mod_lines = []
for mod, keys in (getattr(printer, 'module_imports', None) or {}).items():
for k in keys:
if k not in namespace:
ln = "from %s import %s" % (mod, k)
try:
exec_(ln, {}, namespace)
except ImportError:
# Tensorflow 2.0 has issues with importing a specific
# function from its submodule.
# https://github.com/tensorflow/tensorflow/issues/33022
ln = "%s = %s.%s" % (k, mod, k)
exec_(ln, {}, namespace)
imp_mod_lines.append(ln)
# Provide lambda expression with builtins, and compatible implementation of range
namespace.update({'builtins':builtins, 'range':range})
funclocals = {} # type: Dict[str, Any]
global _lambdify_generated_counter
filename = '<lambdifygenerated-%s>' % _lambdify_generated_counter
_lambdify_generated_counter += 1
c = compile(funcstr, filename, 'exec')
exec_(c, namespace, funclocals)
# mtime has to be None or else linecache.checkcache will remove it
linecache.cache[filename] = (len(funcstr), None, funcstr.splitlines(True), filename) # type: ignore
func = funclocals[funcname]
# Apply the docstring
sig = "func({})".format(", ".join(str(i) for i in names))
sig = textwrap.fill(sig, subsequent_indent=' '*8)
expr_str = str(expr)
if len(expr_str) > 78:
expr_str = textwrap.wrap(expr_str, 75)[0] + '...'
func.__doc__ = (
"Created with lambdify. Signature:\n\n"
"{sig}\n\n"
"Expression:\n\n"
"{expr}\n\n"
"Source code:\n\n"
"{src}\n\n"
"Imported modules:\n\n"
"{imp_mods}"
).format(sig=sig, expr=expr_str, src=funcstr, imp_mods='\n'.join(imp_mod_lines))
return func
def _module_present(modname, modlist):
if modname in modlist:
return True
for m in modlist:
if hasattr(m, '__name__') and m.__name__ == modname:
return True
return False
def _get_namespace(m):
"""
This is used by _lambdify to parse its arguments.
"""
if isinstance(m, str):
_import(m)
return MODULES[m][0]
elif isinstance(m, dict):
return m
elif hasattr(m, "__dict__"):
return m.__dict__
else:
raise TypeError("Argument must be either a string, dict or module but it is: %s" % m)
def lambdastr(args, expr, printer=None, dummify=None):
"""
Returns a string that can be evaluated to a lambda function.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.utilities.lambdify import lambdastr
>>> lambdastr(x, x**2)
'lambda x: (x**2)'
>>> lambdastr((x,y,z), [z,y,x])
'lambda x,y,z: ([z, y, x])'
Although tuples may not appear as arguments to lambda in Python 3,
lambdastr will create a lambda function that will unpack the original
arguments so that nested arguments can be handled:
>>> lambdastr((x, (y, z)), x + y)
'lambda _0,_1: (lambda x,y,z: (x + y))(_0,_1[0],_1[1])'
"""
# Transforming everything to strings.
from sympy.matrices import DeferredVector
from sympy import Dummy, sympify, Symbol, Function, flatten, Derivative, Basic
if printer is not None:
if inspect.isfunction(printer):
lambdarepr = printer
else:
if inspect.isclass(printer):
lambdarepr = lambda expr: printer().doprint(expr)
else:
lambdarepr = lambda expr: printer.doprint(expr)
else:
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import lambdarepr
def sub_args(args, dummies_dict):
if isinstance(args, str):
return args
elif isinstance(args, DeferredVector):
return str(args)
elif iterable(args):
dummies = flatten([sub_args(a, dummies_dict) for a in args])
return ",".join(str(a) for a in dummies)
else:
# replace these with Dummy symbols
if isinstance(args, (Function, Symbol, Derivative)):
dummies = Dummy()
dummies_dict.update({args : dummies})
return str(dummies)
else:
return str(args)
def sub_expr(expr, dummies_dict):
expr = sympify(expr)
# dict/tuple are sympified to Basic
if isinstance(expr, Basic):
expr = expr.xreplace(dummies_dict)
# list is not sympified to Basic
elif isinstance(expr, list):
expr = [sub_expr(a, dummies_dict) for a in expr]
return expr
# Transform args
def isiter(l):
return iterable(l, exclude=(str, DeferredVector, NotIterable))
def flat_indexes(iterable):
n = 0
for el in iterable:
if isiter(el):
for ndeep in flat_indexes(el):
yield (n,) + ndeep
else:
yield (n,)
n += 1
if dummify is None:
dummify = any(isinstance(a, Basic) and
a.atoms(Function, Derivative) for a in (
args if isiter(args) else [args]))
if isiter(args) and any(isiter(i) for i in args):
dum_args = [str(Dummy(str(i))) for i in range(len(args))]
indexed_args = ','.join([
dum_args[ind[0]] + ''.join(["[%s]" % k for k in ind[1:]])
for ind in flat_indexes(args)])
lstr = lambdastr(flatten(args), expr, printer=printer, dummify=dummify)
return 'lambda %s: (%s)(%s)' % (','.join(dum_args), lstr, indexed_args)
dummies_dict = {}
if dummify:
args = sub_args(args, dummies_dict)
else:
if isinstance(args, str):
pass
elif iterable(args, exclude=DeferredVector):
args = ",".join(str(a) for a in args)
# Transform expr
if dummify:
if isinstance(expr, str):
pass
else:
expr = sub_expr(expr, dummies_dict)
expr = lambdarepr(expr)
return "lambda %s: (%s)" % (args, expr)
class _EvaluatorPrinter:
def __init__(self, printer=None, dummify=False):
self._dummify = dummify
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import LambdaPrinter
if printer is None:
printer = LambdaPrinter()
if inspect.isfunction(printer):
self._exprrepr = printer
else:
if inspect.isclass(printer):
printer = printer()
self._exprrepr = printer.doprint
#if hasattr(printer, '_print_Symbol'):
# symbolrepr = printer._print_Symbol
#if hasattr(printer, '_print_Dummy'):
# dummyrepr = printer._print_Dummy
# Used to print the generated function arguments in a standard way
self._argrepr = LambdaPrinter().doprint
def doprint(self, funcname, args, expr):
"""Returns the function definition code as a string."""
from sympy import Dummy
funcbody = []
if not iterable(args):
args = [args]
argstrs, expr = self._preprocess(args, expr)
# Generate argument unpacking and final argument list
funcargs = []
unpackings = []
for argstr in argstrs:
if iterable(argstr):
funcargs.append(self._argrepr(Dummy()))
unpackings.extend(self._print_unpacking(argstr, funcargs[-1]))
else:
funcargs.append(argstr)
funcsig = 'def {}({}):'.format(funcname, ', '.join(funcargs))
# Wrap input arguments before unpacking
funcbody.extend(self._print_funcargwrapping(funcargs))
funcbody.extend(unpackings)
funcbody.append('return ({})'.format(self._exprrepr(expr)))
funclines = [funcsig]
funclines.extend(' ' + line for line in funcbody)
return '\n'.join(funclines) + '\n'
@classmethod
def _is_safe_ident(cls, ident):
return isinstance(ident, str) and ident.isidentifier() \
and not keyword.iskeyword(ident)
def _preprocess(self, args, expr):
"""Preprocess args, expr to replace arguments that do not map
to valid Python identifiers.
Returns string form of args, and updated expr.
"""
from sympy import Dummy, Function, flatten, Derivative, ordered, Basic
from sympy.matrices import DeferredVector
from sympy.core.symbol import uniquely_named_symbol
from sympy.core.expr import Expr
# Args of type Dummy can cause name collisions with args
# of type Symbol. Force dummify of everything in this
# situation.
dummify = self._dummify or any(
isinstance(arg, Dummy) for arg in flatten(args))
argstrs = [None]*len(args)
for arg, i in reversed(list(ordered(zip(args, range(len(args)))))):
if iterable(arg):
s, expr = self._preprocess(arg, expr)
elif isinstance(arg, DeferredVector):
s = str(arg)
elif isinstance(arg, Basic) and arg.is_symbol:
s = self._argrepr(arg)
if dummify or not self._is_safe_ident(s):
dummy = Dummy()
if isinstance(expr, Expr):
dummy = uniquely_named_symbol(
dummy.name, expr, modify=lambda s: '_' + s)
s = self._argrepr(dummy)
expr = self._subexpr(expr, {arg: dummy})
elif dummify or isinstance(arg, (Function, Derivative)):
dummy = Dummy()
s = self._argrepr(dummy)
expr = self._subexpr(expr, {arg: dummy})
else:
s = str(arg)
argstrs[i] = s
return argstrs, expr
def _subexpr(self, expr, dummies_dict):
from sympy.matrices import DeferredVector
from sympy import sympify
expr = sympify(expr)
xreplace = getattr(expr, 'xreplace', None)
if xreplace is not None:
expr = xreplace(dummies_dict)
else:
if isinstance(expr, DeferredVector):
pass
elif isinstance(expr, dict):
k = [self._subexpr(sympify(a), dummies_dict) for a in expr.keys()]
v = [self._subexpr(sympify(a), dummies_dict) for a in expr.values()]
expr = dict(zip(k, v))
elif isinstance(expr, tuple):
expr = tuple(self._subexpr(sympify(a), dummies_dict) for a in expr)
elif isinstance(expr, list):
expr = [self._subexpr(sympify(a), dummies_dict) for a in expr]
return expr
def _print_funcargwrapping(self, args):
"""Generate argument wrapping code.
args is the argument list of the generated function (strings).
Return value is a list of lines of code that will be inserted at
the beginning of the function definition.
"""
return []
def _print_unpacking(self, unpackto, arg):
"""Generate argument unpacking code.
arg is the function argument to be unpacked (a string), and
unpackto is a list or nested lists of the variable names (strings) to
unpack to.
"""
def unpack_lhs(lvalues):
return '[{}]'.format(', '.join(
unpack_lhs(val) if iterable(val) else val for val in lvalues))
return ['{} = {}'.format(unpack_lhs(unpackto), arg)]
class _TensorflowEvaluatorPrinter(_EvaluatorPrinter):
def _print_unpacking(self, lvalues, rvalue):
"""Generate argument unpacking code.
        This method is used when the input value is not iterable,
but can be indexed (see issue #14655).
"""
from sympy import flatten
def flat_indexes(elems):
n = 0
for el in elems:
if iterable(el):
for ndeep in flat_indexes(el):
yield (n,) + ndeep
else:
yield (n,)
n += 1
indexed = ', '.join('{}[{}]'.format(rvalue, ']['.join(map(str, ind)))
for ind in flat_indexes(lvalues))
return ['[{}] = [{}]'.format(', '.join(flatten(lvalues)), indexed)]
def _imp_namespace(expr, namespace=None):
""" Return namespace dict with function implementations
We need to search for functions in anything that can be thrown at
us - that is - anything that could be passed as ``expr``. Examples
include sympy expressions, as well as tuples, lists and dicts that may
contain sympy expressions.
Parameters
----------
expr : object
Something passed to lambdify, that will generate valid code from
``str(expr)``.
namespace : None or mapping
Namespace to fill. None results in new empty dict
Returns
-------
namespace : dict
dict with keys of implemented function names within ``expr`` and
corresponding values being the numerical implementation of
function
Examples
========
>>> from sympy.abc import x
>>> from sympy.utilities.lambdify import implemented_function, _imp_namespace
>>> from sympy import Function
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> g = implemented_function(Function('g'), lambda x: x*10)
>>> namespace = _imp_namespace(f(g(x)))
>>> sorted(namespace.keys())
['f', 'g']
"""
# Delayed import to avoid circular imports
from sympy.core.function import FunctionClass
if namespace is None:
namespace = {}
# tuples, lists, dicts are valid expressions
if is_sequence(expr):
for arg in expr:
_imp_namespace(arg, namespace)
return namespace
elif isinstance(expr, dict):
for key, val in expr.items():
# functions can be in dictionary keys
_imp_namespace(key, namespace)
_imp_namespace(val, namespace)
return namespace
# sympy expressions may be Functions themselves
func = getattr(expr, 'func', None)
if isinstance(func, FunctionClass):
imp = getattr(func, '_imp_', None)
if imp is not None:
name = expr.func.__name__
if name in namespace and namespace[name] != imp:
raise ValueError('We found more than one '
'implementation with name '
'"%s"' % name)
namespace[name] = imp
# and / or they may take Functions as arguments
if hasattr(expr, 'args'):
for arg in expr.args:
_imp_namespace(arg, namespace)
return namespace
def implemented_function(symfunc, implementation):
""" Add numerical ``implementation`` to function ``symfunc``.
``symfunc`` can be an ``UndefinedFunction`` instance, or a name string.
In the latter case we create an ``UndefinedFunction`` instance with that
name.
Be aware that this is a quick workaround, not a general method to create
special symbolic functions. If you want to create a symbolic function to be
used by all the machinery of SymPy you should subclass the ``Function``
class.
Parameters
----------
symfunc : ``str`` or ``UndefinedFunction`` instance
If ``str``, then create new ``UndefinedFunction`` with this as
name. If ``symfunc`` is an Undefined function, create a new function
with the same name and the implemented function attached.
implementation : callable
numerical implementation to be called by ``evalf()`` or ``lambdify``
Returns
-------
afunc : sympy.FunctionClass instance
function with attached implementation
Examples
========
>>> from sympy.abc import x
>>> from sympy.utilities.lambdify import lambdify, implemented_function
>>> f = implemented_function('f', lambda x: x+1)
>>> lam_f = lambdify(x, f(x))
>>> lam_f(4)
5
"""
# Delayed import to avoid circular imports
from sympy.core.function import UndefinedFunction
# if name, create function to hold implementation
kwargs = {}
if isinstance(symfunc, UndefinedFunction):
kwargs = symfunc._kwargs
symfunc = symfunc.__name__
if isinstance(symfunc, str):
# Keyword arguments to UndefinedFunction are added as attributes to
# the created class.
symfunc = UndefinedFunction(
symfunc, _imp_=staticmethod(implementation), **kwargs)
elif not isinstance(symfunc, UndefinedFunction):
raise ValueError(filldedent('''
symfunc should be either a string or
an UndefinedFunction instance.'''))
return symfunc
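# Small end-to-end sketch (added; uses only the standard-library ``math``
# backend so it needs no third-party packages):
#     >>> from sympy import sin, lambdify
#     >>> from sympy.abc import x
#     >>> f = lambdify(x, sin(x) + 1, modules="math")
#     >>> f(0.0)
#     1.0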
|
a53ba351df1b9660ac721ffc99f61a04975b9ee024eb120a5f519c84e63cbaa9 | from collections import defaultdict, OrderedDict
from itertools import (
combinations, combinations_with_replacement, permutations,
product, product as cartes
)
import random
from operator import gt
from sympy.core import Basic
# this is the logical location of these functions
from sympy.core.compatibility import (
as_int, default_sort_key, is_sequence, iterable, ordered
)
from sympy.utilities.enumerative import (
multiset_partitions_taocp, list_visitor, MultisetPartitionTraverser)
def is_palindromic(s, i=0, j=None):
"""return True if the sequence is the same from left to right as it
is from right to left in the whole sequence (default) or in the
Python slice ``s[i: j]``; else False.
Examples
========
>>> from sympy.utilities.iterables import is_palindromic
>>> is_palindromic([1, 0, 1])
True
>>> is_palindromic('abcbb')
False
>>> is_palindromic('abcbb', 1)
False
    Normal Python slice semantics are handled internally, so there is no need
    to create a slice of the sequence for testing:
>>> is_palindromic('abcbb', 1, -1)
True
>>> is_palindromic('abcbb', -4, -1)
True
See Also
========
sympy.ntheory.digits.is_palindromic: tests integers
"""
i, j, _ = slice(i, j).indices(len(s))
m = (j - i)//2
# if length is odd, middle element will be ignored
return all(s[i + k] == s[j - 1 - k] for k in range(m))
def flatten(iterable, levels=None, cls=None):
"""
Recursively denest iterable containers.
>>> from sympy.utilities.iterables import flatten
>>> flatten([1, 2, 3])
[1, 2, 3]
>>> flatten([1, 2, [3]])
[1, 2, 3]
>>> flatten([1, [2, 3], [4, 5]])
[1, 2, 3, 4, 5]
>>> flatten([1.0, 2, (1, None)])
[1.0, 2, 1, None]
If you want to denest only a specified number of levels of
nested containers, then set ``levels`` flag to the desired
number of levels::
>>> ls = [[(-2, -1), (1, 2)], [(0, 0)]]
>>> flatten(ls, levels=1)
[(-2, -1), (1, 2), (0, 0)]
If cls argument is specified, it will only flatten instances of that
class, for example:
>>> from sympy.core import Basic
>>> class MyOp(Basic):
... pass
...
>>> flatten([MyOp(1, MyOp(2, 3))], cls=MyOp)
[1, 2, 3]
adapted from https://kogs-www.informatik.uni-hamburg.de/~meine/python_tricks
"""
from sympy.tensor.array import NDimArray
if levels is not None:
if not levels:
return iterable
elif levels > 0:
levels -= 1
else:
raise ValueError(
"expected non-negative number of levels, got %s" % levels)
if cls is None:
reducible = lambda x: is_sequence(x, set)
else:
reducible = lambda x: isinstance(x, cls)
result = []
for el in iterable:
if reducible(el):
if hasattr(el, 'args') and not isinstance(el, NDimArray):
el = el.args
result.extend(flatten(el, levels=levels, cls=cls))
else:
result.append(el)
return result
def unflatten(iter, n=2):
"""Group ``iter`` into tuples of length ``n``. Raise an error if
the length of ``iter`` is not a multiple of ``n``.
"""
if n < 1 or len(iter) % n:
raise ValueError('iter length is not a multiple of %i' % n)
return list(zip(*(iter[i::n] for i in range(n))))
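# Usage sketch (added): regrouping a flat sequence into fixed-size tuples.
#     >>> unflatten([1, 2, 3, 4, 5, 6])
#     [(1, 2), (3, 4), (5, 6)]
#     >>> unflatten(list(range(6)), 3)
#     [(0, 1, 2), (3, 4, 5)]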
def reshape(seq, how):
"""Reshape the sequence according to the template in ``how``.
Examples
========
>>> from sympy.utilities import reshape
>>> seq = list(range(1, 9))
>>> reshape(seq, [4]) # lists of 4
[[1, 2, 3, 4], [5, 6, 7, 8]]
>>> reshape(seq, (4,)) # tuples of 4
[(1, 2, 3, 4), (5, 6, 7, 8)]
>>> reshape(seq, (2, 2)) # tuples of 4
[(1, 2, 3, 4), (5, 6, 7, 8)]
>>> reshape(seq, (2, [2])) # (i, i, [i, i])
[(1, 2, [3, 4]), (5, 6, [7, 8])]
>>> reshape(seq, ((2,), [2])) # etc....
[((1, 2), [3, 4]), ((5, 6), [7, 8])]
>>> reshape(seq, (1, [2], 1))
[(1, [2, 3], 4), (5, [6, 7], 8)]
>>> reshape(tuple(seq), ([[1], 1, (2,)],))
(([[1], 2, (3, 4)],), ([[5], 6, (7, 8)],))
>>> reshape(tuple(seq), ([1], 1, (2,)))
(([1], 2, (3, 4)), ([5], 6, (7, 8)))
>>> reshape(list(range(12)), [2, [3], {2}, (1, (3,), 1)])
[[0, 1, [2, 3, 4], {5, 6}, (7, (8, 9, 10), 11)]]
"""
m = sum(flatten(how))
n, rem = divmod(len(seq), m)
if m < 0 or rem:
raise ValueError('template must sum to positive number '
'that divides the length of the sequence')
i = 0
container = type(how)
rv = [None]*n
for k in range(len(rv)):
rv[k] = []
for hi in how:
if type(hi) is int:
rv[k].extend(seq[i: i + hi])
i += hi
else:
n = sum(flatten(hi))
hi_type = type(hi)
rv[k].append(hi_type(reshape(seq[i: i + n], hi)[0]))
i += n
rv[k] = container(rv[k])
return type(seq)(rv)
def group(seq, multiple=True):
"""
Splits a sequence into a list of lists of equal, adjacent elements.
Examples
========
>>> from sympy.utilities.iterables import group
>>> group([1, 1, 1, 2, 2, 3])
[[1, 1, 1], [2, 2], [3]]
>>> group([1, 1, 1, 2, 2, 3], multiple=False)
[(1, 3), (2, 2), (3, 1)]
>>> group([1, 1, 3, 2, 2, 1], multiple=False)
[(1, 2), (3, 1), (2, 2), (1, 1)]
See Also
========
multiset
"""
if not seq:
return []
current, groups = [seq[0]], []
for elem in seq[1:]:
if elem == current[-1]:
current.append(elem)
else:
groups.append(current)
current = [elem]
groups.append(current)
if multiple:
return groups
for i, current in enumerate(groups):
groups[i] = (current[0], len(current))
return groups
def _iproduct2(iterable1, iterable2):
'''Cartesian product of two possibly infinite iterables'''
it1 = iter(iterable1)
it2 = iter(iterable2)
elems1 = []
elems2 = []
sentinel = object()
def append(it, elems):
e = next(it, sentinel)
if e is not sentinel:
elems.append(e)
n = 0
append(it1, elems1)
append(it2, elems2)
while n <= len(elems1) + len(elems2):
for m in range(n-len(elems1)+1, len(elems2)):
yield (elems1[n-m], elems2[m])
n += 1
append(it1, elems1)
append(it2, elems2)
def iproduct(*iterables):
'''
Cartesian product of iterables.
Generator of the cartesian product of iterables. This is analogous to
itertools.product except that it works with infinite iterables and will
yield any item from the infinite product eventually.
Examples
========
>>> from sympy.utilities.iterables import iproduct
>>> sorted(iproduct([1,2], [3,4]))
[(1, 3), (1, 4), (2, 3), (2, 4)]
With an infinite iterator:
>>> from sympy import S
>>> (3,) in iproduct(S.Integers)
True
>>> (3, 4) in iproduct(S.Integers, S.Integers)
True
.. seealso::
`itertools.product <https://docs.python.org/3/library/itertools.html#itertools.product>`_
'''
if len(iterables) == 0:
yield ()
return
elif len(iterables) == 1:
for e in iterables[0]:
yield (e,)
elif len(iterables) == 2:
yield from _iproduct2(*iterables)
else:
first, others = iterables[0], iterables[1:]
for ef, eo in _iproduct2(first, iproduct(*others)):
yield (ef,) + eo
def multiset(seq):
"""Return the hashable sequence in multiset form with values being the
multiplicity of the item in the sequence.
Examples
========
>>> from sympy.utilities.iterables import multiset
>>> multiset('mississippi')
{'i': 4, 'm': 1, 'p': 2, 's': 4}
See Also
========
group
"""
rv = defaultdict(int)
for s in seq:
rv[s] += 1
return dict(rv)
def postorder_traversal(node, keys=None):
"""
Do a postorder traversal of a tree.
This generator recursively yields nodes that it has visited in a postorder
fashion. That is, it descends through the tree depth-first to yield all of
a node's children's postorder traversal before yielding the node itself.
Parameters
==========
node : sympy expression
The expression to traverse.
keys : (default None) sort key(s)
The key(s) used to sort args of Basic objects. When None, args of Basic
        objects are processed in arbitrary order. If ``keys`` is defined, it will
        be passed along to ordered() as the only key(s) to use to sort the
        arguments; if ``keys`` is simply True then the default keys of
``ordered`` will be used (node count and default_sort_key).
Yields
======
subtree : sympy expression
All of the subtrees in the tree.
Examples
========
>>> from sympy.utilities.iterables import postorder_traversal
>>> from sympy.abc import w, x, y, z
    The nodes are returned in the order that they are encountered unless ``keys``
    is given; simply passing ``keys=True`` will guarantee that the traversal is
unique.
>>> list(postorder_traversal(w + (x + y)*z)) # doctest: +SKIP
[z, y, x, x + y, z*(x + y), w, w + z*(x + y)]
>>> list(postorder_traversal(w + (x + y)*z, keys=True))
[w, z, x, y, x + y, z*(x + y), w + z*(x + y)]
"""
if isinstance(node, Basic):
args = node.args
if keys:
if keys != True:
args = ordered(args, keys, default=False)
else:
args = ordered(args)
for arg in args:
yield from postorder_traversal(arg, keys)
elif iterable(node):
for item in node:
yield from postorder_traversal(item, keys)
yield node
def interactive_traversal(expr):
"""Traverse a tree asking a user which branch to choose. """
from sympy.printing import pprint
RED, BRED = '\033[0;31m', '\033[1;31m'
GREEN, BGREEN = '\033[0;32m', '\033[1;32m'
YELLOW, BYELLOW = '\033[0;33m', '\033[1;33m' # noqa
BLUE, BBLUE = '\033[0;34m', '\033[1;34m' # noqa
MAGENTA, BMAGENTA = '\033[0;35m', '\033[1;35m'# noqa
CYAN, BCYAN = '\033[0;36m', '\033[1;36m' # noqa
END = '\033[0m'
def cprint(*args):
print("".join(map(str, args)) + END)
def _interactive_traversal(expr, stage):
if stage > 0:
print()
cprint("Current expression (stage ", BYELLOW, stage, END, "):")
print(BCYAN)
pprint(expr)
print(END)
if isinstance(expr, Basic):
if expr.is_Add:
args = expr.as_ordered_terms()
elif expr.is_Mul:
args = expr.as_ordered_factors()
else:
args = expr.args
elif hasattr(expr, "__iter__"):
args = list(expr)
else:
return expr
n_args = len(args)
if not n_args:
return expr
for i, arg in enumerate(args):
cprint(GREEN, "[", BGREEN, i, GREEN, "] ", BLUE, type(arg), END)
pprint(arg)
print()
if n_args == 1:
choices = '0'
else:
choices = '0-%d' % (n_args - 1)
try:
choice = input("Your choice [%s,f,l,r,d,?]: " % choices)
except EOFError:
result = expr
print()
else:
if choice == '?':
cprint(RED, "%s - select subexpression with the given index" %
choices)
cprint(RED, "f - select the first subexpression")
cprint(RED, "l - select the last subexpression")
cprint(RED, "r - select a random subexpression")
cprint(RED, "d - done\n")
result = _interactive_traversal(expr, stage)
elif choice in ['d', '']:
result = expr
elif choice == 'f':
result = _interactive_traversal(args[0], stage + 1)
elif choice == 'l':
result = _interactive_traversal(args[-1], stage + 1)
elif choice == 'r':
result = _interactive_traversal(random.choice(args), stage + 1)
else:
try:
choice = int(choice)
except ValueError:
cprint(BRED,
"Choice must be a number in %s range\n" % choices)
result = _interactive_traversal(expr, stage)
else:
if choice < 0 or choice >= n_args:
cprint(BRED, "Choice must be in %s range\n" % choices)
result = _interactive_traversal(expr, stage)
else:
result = _interactive_traversal(args[choice], stage + 1)
return result
return _interactive_traversal(expr, 0)
def ibin(n, bits=None, str=False):
"""Return a list of length ``bits`` corresponding to the binary value
of ``n`` with small bits to the right (last). If bits is omitted, the
length will be the number required to represent ``n``. If the bits are
desired in reversed order, use the ``[::-1]`` slice of the returned list.
If a sequence of all bits-length lists starting from ``[0, 0,..., 0]``
through ``[1, 1, ..., 1]`` are desired, pass a non-integer for bits, e.g.
``'all'``.
If the bit *string* is desired pass ``str=True``.
Examples
========
>>> from sympy.utilities.iterables import ibin
>>> ibin(2)
[1, 0]
>>> ibin(2, 4)
[0, 0, 1, 0]
    If all lists corresponding to 0 through 2**n - 1 are desired, pass a
    non-integer for bits:
>>> bits = 2
>>> for i in ibin(2, 'all'):
... print(i)
(0, 0)
(0, 1)
(1, 0)
(1, 1)
If a bit string is desired of a given length, use str=True:
>>> n = 123
>>> bits = 10
>>> ibin(n, bits, str=True)
'0001111011'
>>> ibin(n, bits, str=True)[::-1] # small bits left
'1101111000'
>>> list(ibin(3, 'all', str=True))
['000', '001', '010', '011', '100', '101', '110', '111']
"""
if n < 0:
raise ValueError("negative numbers are not allowed")
n = as_int(n)
if bits is None:
bits = 0
else:
try:
bits = as_int(bits)
except ValueError:
bits = -1
else:
if n.bit_length() > bits:
raise ValueError(
"`bits` must be >= {}".format(n.bit_length()))
if not str:
if bits >= 0:
return [1 if i == "1" else 0 for i in bin(n)[2:].rjust(bits, "0")]
else:
return variations(list(range(2)), n, repetition=True)
else:
if bits >= 0:
return bin(n)[2:].rjust(bits, "0")
else:
return (bin(i)[2:].rjust(n, "0") for i in range(2**n))
def variations(seq, n, repetition=False):
r"""Returns a generator of the n-sized variations of ``seq`` (size N).
``repetition`` controls whether items in ``seq`` can appear more than once;
Examples
========
``variations(seq, n)`` will return `\frac{N!}{(N - n)!}` permutations without
repetition of ``seq``'s elements:
>>> from sympy.utilities.iterables import variations
>>> list(variations([1, 2], 2))
[(1, 2), (2, 1)]
``variations(seq, n, True)`` will return the `N^n` permutations obtained
by allowing repetition of elements:
>>> list(variations([1, 2], 2, repetition=True))
[(1, 1), (1, 2), (2, 1), (2, 2)]
If you ask for more items than are in the set you get the empty set unless
you allow repetitions:
>>> list(variations([0, 1], 3, repetition=False))
[]
>>> list(variations([0, 1], 3, repetition=True))[:4]
[(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1)]
.. seealso::
`itertools.permutations <https://docs.python.org/3/library/itertools.html#itertools.permutations>`_,
`itertools.product <https://docs.python.org/3/library/itertools.html#itertools.product>`_
"""
if not repetition:
seq = tuple(seq)
if len(seq) < n:
return
yield from permutations(seq, n)
else:
if n == 0:
yield ()
else:
yield from product(seq, repeat=n)
def subsets(seq, k=None, repetition=False):
r"""Generates all `k`-subsets (combinations) from an `n`-element set, ``seq``.
A `k`-subset of an `n`-element set is any subset of length exactly `k`. The
number of `k`-subsets of an `n`-element set is given by ``binomial(n, k)``,
whereas there are `2^n` subsets all together. If `k` is ``None`` then all
`2^n` subsets will be returned from shortest to longest.
Examples
========
>>> from sympy.utilities.iterables import subsets
``subsets(seq, k)`` will return the `\frac{n!}{k!(n - k)!}` `k`-subsets (combinations)
without repetition, i.e. once an item has been removed, it can no
longer be "taken":
>>> list(subsets([1, 2], 2))
[(1, 2)]
>>> list(subsets([1, 2]))
[(), (1,), (2,), (1, 2)]
>>> list(subsets([1, 2, 3], 2))
[(1, 2), (1, 3), (2, 3)]
``subsets(seq, k, repetition=True)`` will return the `\frac{(n - 1 + k)!}{k!(n - 1)!}`
combinations *with* repetition:
>>> list(subsets([1, 2], 2, repetition=True))
[(1, 1), (1, 2), (2, 2)]
If you ask for more items than are in the set you get the empty set unless
you allow repetitions:
>>> list(subsets([0, 1], 3, repetition=False))
[]
>>> list(subsets([0, 1], 3, repetition=True))
[(0, 0, 0), (0, 0, 1), (0, 1, 1), (1, 1, 1)]
"""
if k is None:
for k in range(len(seq) + 1):
yield from subsets(seq, k, repetition)
else:
if not repetition:
yield from combinations(seq, k)
else:
yield from combinations_with_replacement(seq, k)
def filter_symbols(iterator, exclude):
"""
Only yield elements from `iterator` that do not occur in `exclude`.
Parameters
==========
iterator : iterable
iterator to take elements from
exclude : iterable
elements to exclude
Returns
=======
iterator : iterator
filtered iterator
"""
exclude = set(exclude)
for s in iterator:
if s not in exclude:
yield s
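# An illustrative sketch of ``filter_symbols`` (symbols listed in ``exclude``
# are simply dropped from the stream):
#   >>> from sympy import symbols
#   >>> x, y, z = symbols('x y z')
#   >>> list(filter_symbols([x, y, z], [y]))
#   [x, z]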
def numbered_symbols(prefix='x', cls=None, start=0, exclude=[], *args, **assumptions):
"""
Generate an infinite stream of Symbols consisting of a prefix and
increasing subscripts provided that they do not occur in ``exclude``.
Parameters
==========
prefix : str, optional
The prefix to use. By default, this function will generate symbols of
the form "x0", "x1", etc.
cls : class, optional
The class to use. By default, it uses ``Symbol``, but you can also use ``Wild`` or ``Dummy``.
start : int, optional
The start number. By default, it is 0.
Returns
=======
sym : Symbol
The subscripted symbols.
"""
exclude = set(exclude or [])
if cls is None:
# We can't just make the default cls=Symbol because it isn't
# imported yet.
from sympy import Symbol
cls = Symbol
while True:
name = '%s%s' % (prefix, start)
s = cls(name, *args, **assumptions)
if s not in exclude:
yield s
start += 1
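# The generator above is infinite, so it is normally consumed with ``next``
# or ``itertools.islice``; an illustrative sketch:
#   >>> s = numbered_symbols('c')
#   >>> [next(s) for _ in range(3)]
#   [c0, c1, c2]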
def capture(func):
"""Return the printed output of func().
``func`` should be a function without arguments that produces output with
print statements.
>>> from sympy.utilities.iterables import capture
>>> from sympy import pprint
>>> from sympy.abc import x
>>> def foo():
... print('hello world!')
...
>>> 'hello' in capture(foo) # foo, not foo()
True
>>> capture(lambda: pprint(2/x))
'2\\n-\\nx\\n'
"""
from sympy.core.compatibility import StringIO
import sys
stdout = sys.stdout
sys.stdout = file = StringIO()
try:
func()
finally:
sys.stdout = stdout
return file.getvalue()
def sift(seq, keyfunc, binary=False):
"""
Sift the sequence, ``seq`` according to ``keyfunc``.
Returns
=======
When ``binary`` is ``False`` (default), the output is a dictionary
where elements of ``seq`` are stored in a list keyed to the value
of keyfunc for that element. If ``binary`` is True then a tuple
with lists ``T`` and ``F`` is returned where ``T`` is a list
containing elements of seq for which ``keyfunc`` was ``True`` and
``F`` containing those elements for which ``keyfunc`` was ``False``;
a ValueError is raised if the ``keyfunc`` is not binary.
Examples
========
>>> from sympy.utilities import sift
>>> from sympy.abc import x, y
>>> from sympy import sqrt, exp, pi, Tuple
>>> sift(range(5), lambda x: x % 2)
{0: [0, 2, 4], 1: [1, 3]}
sift() returns a defaultdict() object, so any key that has no matches will
give [].
>>> sift([x], lambda x: x.is_commutative)
{True: [x]}
>>> _[False]
[]
Sometimes you will not know how many keys you will get:
>>> sift([sqrt(x), exp(x), (y**x)**2],
... lambda x: x.as_base_exp()[0])
{E: [exp(x)], x: [sqrt(x)], y: [y**(2*x)]}
Sometimes you expect the results to be binary; the
results can be unpacked by setting ``binary`` to True:
>>> sift(range(4), lambda x: x % 2, binary=True)
([1, 3], [0, 2])
>>> sift(Tuple(1, pi), lambda x: x.is_rational, binary=True)
([1], [pi])
A ValueError is raised if the predicate was not actually binary
(which is a good test for the logic where sifting is used and
binary results were expected):
>>> unknown = exp(1) - pi # the rationality of this is unknown
>>> args = Tuple(1, pi, unknown)
>>> sift(args, lambda x: x.is_rational, binary=True)
Traceback (most recent call last):
...
ValueError: keyfunc gave non-binary output
The non-binary sifting shows that there were 3 keys generated:
>>> set(sift(args, lambda x: x.is_rational).keys())
{None, False, True}
If you need to sort the sifted items it might be better to use
``ordered`` which can economically apply multiple sort keys
to a sequence while sorting.
See Also
========
ordered
"""
if not binary:
m = defaultdict(list)
for i in seq:
m[keyfunc(i)].append(i)
return m
sift = F, T = [], []
for i in seq:
try:
sift[keyfunc(i)].append(i)
except (IndexError, TypeError):
raise ValueError('keyfunc gave non-binary output')
return T, F
def take(iter, n):
"""Return ``n`` items from ``iter`` iterator. """
return [ value for _, value in zip(range(n), iter) ]
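# Illustrative sketch: ``take`` works with any iterable, including infinite
# generators such as the one returned by ``numbered_symbols``:
#   >>> take(numbered_symbols('b'), 2)
#   [b0, b1]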
def dict_merge(*dicts):
"""Merge dictionaries into a single dictionary. """
merged = {}
for dict in dicts:
merged.update(dict)
return merged
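# Illustrative sketch (dictionaries given later win on key collisions):
#   >>> dict_merge({'a': 1}, {'a': 2, 'b': 3})
#   {'a': 2, 'b': 3}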
def common_prefix(*seqs):
"""Return the subsequence that is a common start of sequences in ``seqs``.
>>> from sympy.utilities.iterables import common_prefix
>>> common_prefix(list(range(3)))
[0, 1, 2]
>>> common_prefix(list(range(3)), list(range(4)))
[0, 1, 2]
>>> common_prefix([1, 2, 3], [1, 2, 5])
[1, 2]
>>> common_prefix([1, 2, 3], [1, 3, 5])
[1]
"""
if any(not s for s in seqs):
return []
elif len(seqs) == 1:
return seqs[0]
i = 0
for i in range(min(len(s) for s in seqs)):
if not all(seqs[j][i] == seqs[0][i] for j in range(len(seqs))):
break
else:
i += 1
return seqs[0][:i]
def common_suffix(*seqs):
"""Return the subsequence that is a common ending of sequences in ``seqs``.
>>> from sympy.utilities.iterables import common_suffix
>>> common_suffix(list(range(3)))
[0, 1, 2]
>>> common_suffix(list(range(3)), list(range(4)))
[]
>>> common_suffix([1, 2, 3], [9, 2, 3])
[2, 3]
>>> common_suffix([1, 2, 3], [9, 7, 3])
[3]
"""
if any(not s for s in seqs):
return []
elif len(seqs) == 1:
return seqs[0]
i = 0
for i in range(-1, -min(len(s) for s in seqs) - 1, -1):
if not all(seqs[j][i] == seqs[0][i] for j in range(len(seqs))):
break
else:
i -= 1
if i == -1:
return []
else:
return seqs[0][i + 1:]
def prefixes(seq):
"""
Generate all prefixes of a sequence.
Examples
========
>>> from sympy.utilities.iterables import prefixes
>>> list(prefixes([1,2,3,4]))
[[1], [1, 2], [1, 2, 3], [1, 2, 3, 4]]
"""
n = len(seq)
for i in range(n):
yield seq[:i + 1]
def postfixes(seq):
"""
Generate all postfixes of a sequence.
Examples
========
>>> from sympy.utilities.iterables import postfixes
>>> list(postfixes([1,2,3,4]))
[[4], [3, 4], [2, 3, 4], [1, 2, 3, 4]]
"""
n = len(seq)
for i in range(n):
yield seq[n - i - 1:]
def topological_sort(graph, key=None):
r"""
Topological sort of graph's vertices.
Parameters
==========
graph : tuple[list, list[tuple[T, T]]
A tuple consisting of a list of vertices and a list of edges of
a graph to be sorted topologically.
key : callable[T] (optional)
Ordering key for vertices on the same level. By default the natural
(e.g. lexicographic) ordering is used (in this case the base type
must implement ordering relations).
Examples
========
Consider a graph::
+---+ +---+ +---+
| 7 |\ | 5 | | 3 |
+---+ \ +---+ +---+
| _\___/ ____ _/ |
| / \___/ \ / |
V V V V |
+----+ +---+ |
| 11 | | 8 | |
+----+ +---+ |
| | \____ ___/ _ |
| \ \ / / \ |
V \ V V / V V
+---+ \ +---+ | +----+
| 2 | | | 9 | | | 10 |
+---+ | +---+ | +----+
\________/
where vertices are integers. This graph can be encoded using
elementary Python's data structures as follows::
>>> V = [2, 3, 5, 7, 8, 9, 10, 11]
>>> E = [(7, 11), (7, 8), (5, 11), (3, 8), (3, 10),
... (11, 2), (11, 9), (11, 10), (8, 9)]
To compute a topological sort for graph ``(V, E)`` issue::
>>> from sympy.utilities.iterables import topological_sort
>>> topological_sort((V, E))
[3, 5, 7, 8, 11, 2, 9, 10]
If specific tie breaking approach is needed, use ``key`` parameter::
>>> topological_sort((V, E), key=lambda v: -v)
[7, 5, 11, 3, 10, 8, 9, 2]
Only acyclic graphs can be sorted. If the input graph has a cycle,
then ``ValueError`` will be raised::
>>> topological_sort((V, E + [(10, 7)]))
Traceback (most recent call last):
...
ValueError: cycle detected
References
==========
.. [1] https://en.wikipedia.org/wiki/Topological_sorting
"""
V, E = graph
L = []
S = set(V)
E = list(E)
for v, u in E:
S.discard(u)
if key is None:
key = lambda value: value
S = sorted(S, key=key, reverse=True)
while S:
node = S.pop()
L.append(node)
for u, v in list(E):
if u == node:
E.remove((u, v))
for _u, _v in E:
if v == _v:
break
else:
kv = key(v)
for i, s in enumerate(S):
ks = key(s)
if kv > ks:
S.insert(i, v)
break
else:
S.append(v)
if E:
raise ValueError("cycle detected")
else:
return L
def strongly_connected_components(G):
r"""
Strongly connected components of a directed graph in reverse topological
order.
Parameters
==========
graph : tuple[list, list[tuple[T, T]]
A tuple consisting of a list of vertices and a list of edges of
a graph whose strongly connected components are to be found.
Examples
========
Consider a directed graph (in dot notation)::
digraph {
A -> B
A -> C
B -> C
C -> B
B -> D
}
where vertices are the letters A, B, C and D. This graph can be encoded
using Python's elementary data structures as follows::
>>> V = ['A', 'B', 'C', 'D']
>>> E = [('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'B'), ('B', 'D')]
The strongly connected components of this graph can be computed as
>>> from sympy.utilities.iterables import strongly_connected_components
>>> strongly_connected_components((V, E))
[['D'], ['B', 'C'], ['A']]
This also gives the components in reverse topological order.
Since the subgraph containing B and C has a cycle they must be together in
a strongly connected component. A and D are connected to the rest of the
graph but not in a cyclic manner so they appear as their own strongly
connected components.
Notes
=====
The vertices of the graph must be hashable for the data structures used.
If the vertices are unhashable replace them with integer indices.
This function uses Tarjan's algorithm to compute the strongly connected
components in `O(|V|+|E|)` (linear) time.
References
==========
.. [1] https://en.wikipedia.org/wiki/Strongly_connected_component
.. [2] https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
See Also
========
sympy.utilities.iterables.connected_components
"""
# Map from a vertex to its neighbours
V, E = G
Gmap = {vi: [] for vi in V}
for v1, v2 in E:
Gmap[v1].append(v2)
# Non-recursive Tarjan's algorithm:
lowlink = {}
indices = {}
stack = OrderedDict()
callstack = []
components = []
nomore = object()
def start(v):
index = len(stack)
indices[v] = lowlink[v] = index
stack[v] = None
callstack.append((v, iter(Gmap[v])))
def finish(v1):
# Finished a component?
if lowlink[v1] == indices[v1]:
component = [stack.popitem()[0]]
while component[-1] is not v1:
component.append(stack.popitem()[0])
components.append(component[::-1])
v2, _ = callstack.pop()
if callstack:
v1, _ = callstack[-1]
lowlink[v1] = min(lowlink[v1], lowlink[v2])
for v in V:
if v in indices:
continue
start(v)
while callstack:
v1, it1 = callstack[-1]
v2 = next(it1, nomore)
# Finished children of v1?
if v2 is nomore:
finish(v1)
# Recurse on v2
elif v2 not in indices:
start(v2)
elif v2 in stack:
lowlink[v1] = min(lowlink[v1], indices[v2])
# Reverse topological sort order:
return components
def connected_components(G):
r"""
Connected components of an undirected graph or weakly connected components
of a directed graph.
Parameters
==========
graph : tuple[list, list[tuple[T, T]]
A tuple consisting of a list of vertices and a list of edges of
a graph whose connected components are to be found.
Examples
========
Given an undirected graph::
graph {
A -- B
C -- D
}
We can find the connected components using this function if we include
each edge in both directions::
>>> from sympy.utilities.iterables import connected_components
>>> V = ['A', 'B', 'C', 'D']
>>> E = [('A', 'B'), ('B', 'A'), ('C', 'D'), ('D', 'C')]
>>> connected_components((V, E))
[['A', 'B'], ['C', 'D']]
The weakly connected components of a directed graph can be found the same
way.
Notes
=====
The vertices of the graph must be hashable for the data structures used.
If the vertices are unhashable replace them with integer indices.
This function uses Tarjan's algorithm to compute the connected components
in `O(|V|+|E|)` (linear) time.
References
==========
.. [1] https://en.wikipedia.org/wiki/Connected_component_(graph_theory)
.. [2] https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
See Also
========
sympy.utilities.iterables.strongly_connected_components
"""
# Duplicate edges both ways so that the graph is effectively undirected
# and return the strongly connected components:
V, E = G
E_undirected = []
for v1, v2 in E:
E_undirected.extend([(v1, v2), (v2, v1)])
return strongly_connected_components((V, E_undirected))
def rotate_left(x, y):
"""
Left rotates a list x by the number of steps specified
in y.
Examples
========
>>> from sympy.utilities.iterables import rotate_left
>>> a = [0, 1, 2]
>>> rotate_left(a, 1)
[1, 2, 0]
"""
if len(x) == 0:
return []
y = y % len(x)
return x[y:] + x[:y]
def rotate_right(x, y):
"""
Right rotates a list x by the number of steps specified
in y.
Examples
========
>>> from sympy.utilities.iterables import rotate_right
>>> a = [0, 1, 2]
>>> rotate_right(a, 1)
[2, 0, 1]
"""
if len(x) == 0:
return []
y = len(x) - y % len(x)
return x[y:] + x[:y]
def least_rotation(x):
'''
Returns the number of steps of left rotation required to
obtain lexicographically minimal string/list/tuple, etc.
Examples
========
>>> from sympy.utilities.iterables import least_rotation, rotate_left
>>> a = [3, 1, 5, 1, 2]
>>> least_rotation(a)
3
>>> rotate_left(a, _)
[1, 2, 3, 1, 5]
References
==========
.. [1] https://en.wikipedia.org/wiki/Lexicographically_minimal_string_rotation
'''
    S = x + x  # Concatenate string to itself to avoid modular arithmetic
f = [-1] * len(S) # Failure function
k = 0 # Least rotation of string found so far
for j in range(1,len(S)):
sj = S[j]
i = f[j-k-1]
while i != -1 and sj != S[k+i+1]:
if sj < S[k+i+1]:
k = j-i-1
i = f[i]
if sj != S[k+i+1]:
if sj < S[k]:
k = j
f[j-k] = -1
else:
f[j-k] = i+1
return k
def multiset_combinations(m, n, g=None):
"""
Return the unique combinations of size ``n`` from multiset ``m``.
Examples
========
>>> from sympy.utilities.iterables import multiset_combinations
>>> from itertools import combinations
>>> [''.join(i) for i in multiset_combinations('baby', 3)]
['abb', 'aby', 'bby']
>>> def count(f, s): return len(list(f(s, 3)))
The number of combinations depends on the number of letters; the
number of unique combinations depends on how the letters are
repeated.
>>> s1 = 'abracadabra'
>>> s2 = 'banana tree'
>>> count(combinations, s1), count(multiset_combinations, s1)
(165, 23)
>>> count(combinations, s2), count(multiset_combinations, s2)
(165, 54)
"""
if g is None:
if type(m) is dict:
if n > sum(m.values()):
return
g = [[k, m[k]] for k in ordered(m)]
else:
m = list(m)
if n > len(m):
return
try:
m = multiset(m)
g = [(k, m[k]) for k in ordered(m)]
except TypeError:
m = list(ordered(m))
g = [list(i) for i in group(m, multiple=False)]
del m
if sum(v for k, v in g) < n or not n:
yield []
else:
for i, (k, v) in enumerate(g):
if v >= n:
yield [k]*n
v = n - 1
for v in range(min(n, v), 0, -1):
for j in multiset_combinations(None, n - v, g[i + 1:]):
rv = [k]*v + j
if len(rv) == n:
yield rv
def multiset_permutations(m, size=None, g=None):
"""
Return the unique permutations of multiset ``m``.
Examples
========
>>> from sympy.utilities.iterables import multiset_permutations
>>> from sympy import factorial
>>> [''.join(i) for i in multiset_permutations('aab')]
['aab', 'aba', 'baa']
>>> factorial(len('banana'))
720
>>> len(list(multiset_permutations('banana')))
60
"""
if g is None:
if type(m) is dict:
g = [[k, m[k]] for k in ordered(m)]
else:
m = list(ordered(m))
g = [list(i) for i in group(m, multiple=False)]
del m
do = [gi for gi in g if gi[1] > 0]
SUM = sum([gi[1] for gi in do])
    if not do or size is not None and (size > SUM or size < 1):
        # yield the empty permutation when the multiset is empty (and no
        # positive size was requested) or when the requested size is < 1;
        # otherwise nothing can be generated
        if not (size is None or size < 1):
            return
        yield []
        return
elif size == 1:
for k, v in do:
yield [k]
elif len(do) == 1:
k, v = do[0]
v = v if size is None else (size if size <= v else 0)
yield [k for i in range(v)]
elif all(v == 1 for k, v in do):
for p in permutations([k for k, v in do], size):
yield list(p)
else:
size = size if size is not None else SUM
for i, (k, v) in enumerate(do):
do[i][1] -= 1
for j in multiset_permutations(None, size - 1, do):
if j:
yield [k] + j
do[i][1] += 1
def _partition(seq, vector, m=None):
"""
Return the partition of seq as specified by the partition vector.
Examples
========
>>> from sympy.utilities.iterables import _partition
>>> _partition('abcde', [1, 0, 1, 2, 0])
[['b', 'e'], ['a', 'c'], ['d']]
Specifying the number of bins in the partition is optional:
>>> _partition('abcde', [1, 0, 1, 2, 0], 3)
[['b', 'e'], ['a', 'c'], ['d']]
The output of _set_partitions can be passed as follows:
>>> output = (3, [1, 0, 1, 2, 0])
>>> _partition('abcde', *output)
[['b', 'e'], ['a', 'c'], ['d']]
See Also
========
combinatorics.partitions.Partition.from_rgs
"""
if m is None:
m = max(vector) + 1
elif type(vector) is int: # entered as m, vector
vector, m = m, vector
p = [[] for i in range(m)]
for i, v in enumerate(vector):
p[v].append(seq[i])
return p
def _set_partitions(n):
"""Cycle through all partions of n elements, yielding the
current number of partitions, ``m``, and a mutable list, ``q``
such that element[i] is in part q[i] of the partition.
NOTE: ``q`` is modified in place and generally should not be changed
between function calls.
Examples
========
>>> from sympy.utilities.iterables import _set_partitions, _partition
>>> for m, q in _set_partitions(3):
... print('%s %s %s' % (m, q, _partition('abc', q, m)))
1 [0, 0, 0] [['a', 'b', 'c']]
2 [0, 0, 1] [['a', 'b'], ['c']]
2 [0, 1, 0] [['a', 'c'], ['b']]
2 [0, 1, 1] [['a'], ['b', 'c']]
3 [0, 1, 2] [['a'], ['b'], ['c']]
Notes
=====
This algorithm is similar to, and solves the same problem as,
Algorithm 7.2.1.5H, from volume 4A of Knuth's The Art of Computer
Programming. Knuth uses the term "restricted growth string" where
this code refers to a "partition vector". In each case, the meaning is
the same: the value in the ith element of the vector specifies to
which part the ith set element is to be assigned.
At the lowest level, this code implements an n-digit big-endian
counter (stored in the array q) which is incremented (with carries) to
get the next partition in the sequence. A special twist is that a
digit is constrained to be at most one greater than the maximum of all
the digits to the left of it. The array p maintains this maximum, so
that the code can efficiently decide when a digit can be incremented
in place or whether it needs to be reset to 0 and trigger a carry to
the next digit. The enumeration starts with all the digits 0 (which
corresponds to all the set elements being assigned to the same 0th
part), and ends with 0123...n, which corresponds to each set element
being assigned to a different, singleton, part.
This routine was rewritten to use 0-based lists while trying to
preserve the beauty and efficiency of the original algorithm.
References
==========
.. [1] Nijenhuis, Albert and Wilf, Herbert. (1978) Combinatorial Algorithms,
2nd Ed, p 91, algorithm "nexequ". Available online from
https://www.math.upenn.edu/~wilf/website/CombAlgDownld.html (viewed
November 17, 2012).
"""
p = [0]*n
q = [0]*n
nc = 1
yield nc, q
while nc != n:
m = n
while 1:
m -= 1
i = q[m]
if p[i] != 1:
break
q[m] = 0
i += 1
q[m] = i
m += 1
nc += m - n
p[0] += n - m
if i == nc:
p[nc] = 0
nc += 1
p[i - 1] -= 1
p[i] += 1
yield nc, q
def multiset_partitions(multiset, m=None):
"""
Return unique partitions of the given multiset (in list form).
If ``m`` is None, all multisets will be returned, otherwise only
partitions with ``m`` parts will be returned.
If ``multiset`` is an integer, a range [0, 1, ..., multiset - 1]
will be supplied.
Examples
========
>>> from sympy.utilities.iterables import multiset_partitions
>>> list(multiset_partitions([1, 2, 3, 4], 2))
[[[1, 2, 3], [4]], [[1, 2, 4], [3]], [[1, 2], [3, 4]],
[[1, 3, 4], [2]], [[1, 3], [2, 4]], [[1, 4], [2, 3]],
[[1], [2, 3, 4]]]
>>> list(multiset_partitions([1, 2, 3, 4], 1))
[[[1, 2, 3, 4]]]
Only unique partitions are returned and these will be returned in a
canonical order regardless of the order of the input:
>>> a = [1, 2, 2, 1]
>>> ans = list(multiset_partitions(a, 2))
>>> a.sort()
>>> list(multiset_partitions(a, 2)) == ans
True
>>> a = range(3, 1, -1)
>>> (list(multiset_partitions(a)) ==
... list(multiset_partitions(sorted(a))))
True
If m is omitted then all partitions will be returned:
>>> list(multiset_partitions([1, 1, 2]))
[[[1, 1, 2]], [[1, 1], [2]], [[1, 2], [1]], [[1], [1], [2]]]
>>> list(multiset_partitions([1]*3))
[[[1, 1, 1]], [[1], [1, 1]], [[1], [1], [1]]]
Counting
========
The number of partitions of a set is given by the bell number:
>>> from sympy import bell
>>> len(list(multiset_partitions(5))) == bell(5) == 52
True
The number of partitions of length k from a set of size n is given by the
Stirling Number of the 2nd kind:
>>> from sympy.functions.combinatorial.numbers import stirling
>>> stirling(5, 2) == len(list(multiset_partitions(5, 2))) == 15
True
These comments on counting apply to *sets*, not multisets.
Notes
=====
When all the elements are the same in the multiset, the order
of the returned partitions is determined by the ``partitions``
routine. If one is counting partitions then it is better to use
the ``nT`` function.
See Also
========
partitions
sympy.combinatorics.partitions.Partition
sympy.combinatorics.partitions.IntegerPartition
sympy.functions.combinatorial.numbers.nT
"""
# This function looks at the supplied input and dispatches to
# several special-case routines as they apply.
if type(multiset) is int:
n = multiset
if m and m > n:
return
multiset = list(range(n))
if m == 1:
yield [multiset[:]]
return
# If m is not None, it can sometimes be faster to use
# MultisetPartitionTraverser.enum_range() even for inputs
# which are sets. Since the _set_partitions code is quite
# fast, this is only advantageous when the overall set
# partitions outnumber those with the desired number of parts
# by a large factor. (At least 60.) Such a switch is not
# currently implemented.
for nc, q in _set_partitions(n):
if m is None or nc == m:
rv = [[] for i in range(nc)]
for i in range(n):
rv[q[i]].append(multiset[i])
yield rv
return
if len(multiset) == 1 and isinstance(multiset, str):
multiset = [multiset]
if not has_variety(multiset):
# Only one component, repeated n times. The resulting
# partitions correspond to partitions of integer n.
n = len(multiset)
if m and m > n:
return
if m == 1:
yield [multiset[:]]
return
x = multiset[:1]
for size, p in partitions(n, m, size=True):
if m is None or size == m:
rv = []
for k in sorted(p):
rv.extend([x*k]*p[k])
yield rv
else:
multiset = list(ordered(multiset))
n = len(multiset)
if m and m > n:
return
if m == 1:
yield [multiset[:]]
return
# Split the information of the multiset into two lists -
# one of the elements themselves, and one (of the same length)
# giving the number of repeats for the corresponding element.
elements, multiplicities = zip(*group(multiset, False))
if len(elements) < len(multiset):
# General case - multiset with more than one distinct element
# and at least one element repeated more than once.
if m:
mpt = MultisetPartitionTraverser()
for state in mpt.enum_range(multiplicities, m-1, m):
yield list_visitor(state, elements)
else:
for state in multiset_partitions_taocp(multiplicities):
yield list_visitor(state, elements)
else:
# Set partitions case - no repeated elements. Pretty much
# same as int argument case above, with same possible, but
# currently unimplemented optimization for some cases when
# m is not None
for nc, q in _set_partitions(n):
if m is None or nc == m:
rv = [[] for i in range(nc)]
for i in range(n):
rv[q[i]].append(i)
yield [[multiset[j] for j in i] for i in rv]
def partitions(n, m=None, k=None, size=False):
"""Generate all partitions of positive integer, n.
Parameters
==========
m : integer (default gives partitions of all sizes)
limits number of parts in partition (mnemonic: m, maximum parts)
k : integer (default allows numbers from 1 through n)
limits the numbers that are kept in the partition (mnemonic: k, keys)
size : bool (default False; when False, only the partition is returned)
when ``True`` then (M, P) is returned where M is the sum of the
multiplicities and P is the generated partition.
Each partition is represented as a dictionary, mapping an integer
to the number of copies of that integer in the partition. For example,
the first partition of 4 returned is {4: 1}, "4: one of them".
Examples
========
>>> from sympy.utilities.iterables import partitions
The numbers appearing in the partition (the key of the returned dict)
are limited with k:
>>> for p in partitions(6, k=2): # doctest: +SKIP
... print(p)
{2: 3}
{1: 2, 2: 2}
{1: 4, 2: 1}
{1: 6}
The maximum number of parts in the partition (the sum of the values in
the returned dict) are limited with m (default value, None, gives
partitions from 1 through n):
>>> for p in partitions(6, m=2): # doctest: +SKIP
... print(p)
...
{6: 1}
{1: 1, 5: 1}
{2: 1, 4: 1}
{3: 2}
References
==========
.. [1] modified from Tim Peter's version to allow for k and m values:
http://code.activestate.com/recipes/218332-generator-for-integer-partitions/
See Also
========
sympy.combinatorics.partitions.Partition
sympy.combinatorics.partitions.IntegerPartition
"""
if (n <= 0 or
m is not None and m < 1 or
k is not None and k < 1 or
m and k and m*k < n):
# the empty set is the only way to handle these inputs
# and returning {} to represent it is consistent with
# the counting convention, e.g. nT(0) == 1.
if size:
yield 0, {}
else:
yield {}
return
if m is None:
m = n
else:
m = min(m, n)
if n == 0:
if size:
yield 1, {0: 1}
else:
yield {0: 1}
return
k = min(k or n, n)
n, m, k = as_int(n), as_int(m), as_int(k)
q, r = divmod(n, k)
ms = {k: q}
keys = [k] # ms.keys(), from largest to smallest
if r:
ms[r] = 1
keys.append(r)
room = m - q - bool(r)
if size:
yield sum(ms.values()), ms.copy()
else:
yield ms.copy()
while keys != [1]:
# Reuse any 1's.
if keys[-1] == 1:
del keys[-1]
reuse = ms.pop(1)
room += reuse
else:
reuse = 0
while 1:
# Let i be the smallest key larger than 1. Reuse one
# instance of i.
i = keys[-1]
newcount = ms[i] = ms[i] - 1
reuse += i
if newcount == 0:
del keys[-1], ms[i]
room += 1
# Break the remainder into pieces of size i-1.
i -= 1
q, r = divmod(reuse, i)
need = q + bool(r)
if need > room:
if not keys:
return
continue
ms[i] = q
keys.append(i)
if r:
ms[r] = 1
keys.append(r)
break
room -= need
if size:
yield sum(ms.values()), ms.copy()
else:
yield ms.copy()
def ordered_partitions(n, m=None, sort=True):
"""Generates ordered partitions of integer ``n``.
Parameters
==========
m : integer (default None)
The default value gives partitions of all sizes else only
those with size m. In addition, if ``m`` is not None then
partitions are generated *in place* (see examples).
sort : bool (default True)
Controls whether partitions are
returned in sorted order when ``m`` is not None; when False,
the partitions are returned as fast as possible with elements
sorted, but when m|n the partitions will not be in
ascending lexicographical order.
Examples
========
>>> from sympy.utilities.iterables import ordered_partitions
All partitions of 5 in ascending lexicographical:
>>> for p in ordered_partitions(5):
... print(p)
[1, 1, 1, 1, 1]
[1, 1, 1, 2]
[1, 1, 3]
[1, 2, 2]
[1, 4]
[2, 3]
[5]
Only partitions of 5 with two parts:
>>> for p in ordered_partitions(5, 2):
... print(p)
[1, 4]
[2, 3]
When ``m`` is given, the same list object will be used more than
once for speed reasons so you will not see the correct partitions
unless you make a copy of each as it is generated:
>>> [p for p in ordered_partitions(7, 3)]
[[1, 1, 1], [1, 1, 1], [1, 1, 1], [2, 2, 2]]
>>> [list(p) for p in ordered_partitions(7, 3)]
[[1, 1, 5], [1, 2, 4], [1, 3, 3], [2, 2, 3]]
When ``n`` is a multiple of ``m``, the elements are still sorted
but the partitions themselves will be *unordered* if sort is False;
the default is to return them in ascending lexicographical order.
>>> for p in ordered_partitions(6, 2):
... print(p)
[1, 5]
[2, 4]
[3, 3]
But if speed is more important than ordering, sort can be set to
False:
>>> for p in ordered_partitions(6, 2, sort=False):
... print(p)
[1, 5]
[3, 3]
[2, 4]
References
==========
.. [1] Generating Integer Partitions, [online],
Available: https://jeromekelleher.net/generating-integer-partitions.html
.. [2] Jerome Kelleher and Barry O'Sullivan, "Generating All
Partitions: A Comparison Of Two Encodings", [online],
Available: https://arxiv.org/pdf/0909.2331v2.pdf
"""
if n < 1 or m is not None and m < 1:
# the empty set is the only way to handle these inputs
# and returning {} to represent it is consistent with
# the counting convention, e.g. nT(0) == 1.
yield []
return
if m is None:
# The list `a`'s leading elements contain the partition in which
# y is the biggest element and x is either the same as y or the
# 2nd largest element; v and w are adjacent element indices
# to which x and y are being assigned, respectively.
a = [1]*n
y = -1
v = n
while v > 0:
v -= 1
x = a[v] + 1
while y >= 2 * x:
a[v] = x
y -= x
v += 1
w = v + 1
while x <= y:
a[v] = x
a[w] = y
yield a[:w + 1]
x += 1
y -= 1
a[v] = x + y
y = a[v] - 1
yield a[:w]
elif m == 1:
yield [n]
elif n == m:
yield [1]*n
else:
# recursively generate partitions of size m
for b in range(1, n//m + 1):
a = [b]*m
x = n - b*m
if not x:
if sort:
yield a
elif not sort and x <= m:
for ax in ordered_partitions(x, sort=False):
mi = len(ax)
a[-mi:] = [i + b for i in ax]
yield a
a[-mi:] = [b]*mi
else:
for mi in range(1, m):
for ax in ordered_partitions(x, mi, sort=True):
a[-mi:] = [i + b for i in ax]
yield a
a[-mi:] = [b]*mi
def binary_partitions(n):
"""
Generates the binary partition of n.
A binary partition consists only of numbers that are
powers of two. Each step reduces a `2^{k+1}` to `2^k` and
`2^k`. Thus 16 is converted to 8 and 8.
Examples
========
>>> from sympy.utilities.iterables import binary_partitions
>>> for i in binary_partitions(5):
... print(i)
...
[4, 1]
[2, 2, 1]
[2, 1, 1, 1]
[1, 1, 1, 1, 1]
References
==========
.. [1] TAOCP 4, section 7.2.1.5, problem 64
"""
from math import ceil, log
pow = int(2**(ceil(log(n, 2))))
sum = 0
partition = []
while pow:
if sum + pow <= n:
partition.append(pow)
sum += pow
pow >>= 1
last_num = len(partition) - 1 - (n & 1)
while last_num >= 0:
yield partition
if partition[last_num] == 2:
partition[last_num] = 1
partition.append(1)
last_num -= 1
continue
partition.append(1)
partition[last_num] >>= 1
x = partition[last_num + 1] = partition[last_num]
last_num += 1
while x > 1:
if x <= len(partition) - last_num - 1:
del partition[-x + 1:]
last_num += 1
partition[last_num] = x
else:
x >>= 1
yield [1]*n
def has_dups(seq):
"""Return True if there are any duplicate elements in ``seq``.
Examples
========
>>> from sympy.utilities.iterables import has_dups
>>> from sympy import Dict, Set
>>> has_dups((1, 2, 1))
True
>>> has_dups(range(3))
False
>>> all(has_dups(c) is False for c in (set(), Set(), dict(), Dict()))
True
"""
from sympy.core.containers import Dict
from sympy.sets.sets import Set
if isinstance(seq, (dict, set, Dict, Set)):
return False
uniq = set()
return any(True for s in seq if s in uniq or uniq.add(s))
def has_variety(seq):
"""Return True if there are any different elements in ``seq``.
Examples
========
>>> from sympy.utilities.iterables import has_variety
>>> has_variety((1, 2, 1))
True
>>> has_variety((1, 1, 1))
False
"""
for i, s in enumerate(seq):
if i == 0:
sentinel = s
else:
if s != sentinel:
return True
return False
def uniq(seq, result=None):
"""
Yield unique elements from ``seq`` as an iterator. The second
parameter ``result`` is used internally; it is not necessary
to pass anything for this.
Note: changing the sequence during iteration will raise a
RuntimeError if the size of the sequence is known; if you pass
an iterator and advance the iterator you will change the
output of this routine but there will be no warning.
Examples
========
>>> from sympy.utilities.iterables import uniq
>>> dat = [1, 4, 1, 5, 4, 2, 1, 2]
>>> type(uniq(dat)) in (list, tuple)
False
>>> list(uniq(dat))
[1, 4, 5, 2]
>>> list(uniq(x for x in dat))
[1, 4, 5, 2]
>>> list(uniq([[1], [2, 1], [1]]))
[[1], [2, 1]]
"""
try:
n = len(seq)
except TypeError:
n = None
def check():
# check that size of seq did not change during iteration;
# if n == None the object won't support size changing, e.g.
# an iterator can't be changed
if n is not None and len(seq) != n:
raise RuntimeError('sequence changed size during iteration')
try:
seen = set()
result = result or []
for i, s in enumerate(seq):
if not (s in seen or seen.add(s)):
yield s
check()
except TypeError:
if s not in result:
yield s
check()
result.append(s)
if hasattr(seq, '__getitem__'):
yield from uniq(seq[i + 1:], result)
else:
yield from uniq(seq, result)
def generate_bell(n):
"""Return permutations of [0, 1, ..., n - 1] such that each permutation
differs from the last by the exchange of a single pair of neighbors.
The ``n!`` permutations are returned as an iterator. In order to obtain
the next permutation from a random starting permutation, use the
``next_trotterjohnson`` method of the Permutation class (which generates
the same sequence in a different manner).
Examples
========
>>> from itertools import permutations
>>> from sympy.utilities.iterables import generate_bell
>>> from sympy import zeros, Matrix
This is the sort of permutation used in the ringing of physical bells,
and does not produce permutations in lexicographical order. Rather, the
permutations differ from each other by exactly one inversion, and the
position at which the swapping occurs varies periodically in a simple
fashion. Consider the first few permutations of 4 elements generated
by ``permutations`` and ``generate_bell``:
>>> list(permutations(range(4)))[:5]
[(0, 1, 2, 3), (0, 1, 3, 2), (0, 2, 1, 3), (0, 2, 3, 1), (0, 3, 1, 2)]
>>> list(generate_bell(4))[:5]
[(0, 1, 2, 3), (0, 1, 3, 2), (0, 3, 1, 2), (3, 0, 1, 2), (3, 0, 2, 1)]
Notice how the 2nd and 3rd lexicographical permutations have 3 elements
out of place whereas each "bell" permutation always has only two
elements out of place relative to the previous permutation (and so the
signature (+/-1) of a permutation is opposite of the signature of the
previous permutation).
How the position of inversion varies across the elements can be seen
by tracing out where the largest number appears in the permutations:
>>> m = zeros(4, 24)
>>> for i, p in enumerate(generate_bell(4)):
... m[:, i] = Matrix([j - 3 for j in list(p)]) # make largest zero
>>> m.print_nonzero('X')
[XXX XXXXXX XXXXXX XXX]
[XX XX XXXX XX XXXX XX XX]
[X XXXX XX XXXX XX XXXX X]
[ XXXXXX XXXXXX XXXXXX ]
See Also
========
sympy.combinatorics.permutations.Permutation.next_trotterjohnson
References
==========
.. [1] https://en.wikipedia.org/wiki/Method_ringing
.. [2] https://stackoverflow.com/questions/4856615/recursive-permutation/4857018
.. [3] http://programminggeeks.com/bell-algorithm-for-permutation/
.. [4] https://en.wikipedia.org/wiki/Steinhaus%E2%80%93Johnson%E2%80%93Trotter_algorithm
.. [5] Generating involutions, derangements, and relatives by ECO
Vincent Vajnovszki, DMTCS vol 1 issue 12, 2010
"""
n = as_int(n)
if n < 1:
raise ValueError('n must be a positive integer')
if n == 1:
yield (0,)
elif n == 2:
yield (0, 1)
yield (1, 0)
elif n == 3:
yield from [(0, 1, 2), (0, 2, 1), (2, 0, 1), (2, 1, 0), (1, 2, 0), (1, 0, 2)]
else:
m = n - 1
op = [0] + [-1]*m
l = list(range(n))
while True:
yield tuple(l)
# find biggest element with op
big = None, -1 # idx, value
for i in range(n):
if op[i] and l[i] > big[1]:
big = i, l[i]
i, _ = big
if i is None:
break # there are no ops left
# swap it with neighbor in the indicated direction
j = i + op[i]
l[i], l[j] = l[j], l[i]
op[i], op[j] = op[j], op[i]
# if it landed at the end or if the neighbor in the same
# direction is bigger then turn off op
if j == 0 or j == m or l[j + op[j]] > l[j]:
op[j] = 0
# any element bigger to the left gets +1 op
for i in range(j):
if l[i] > l[j]:
op[i] = 1
# any element bigger to the right gets -1 op
for i in range(j + 1, n):
if l[i] > l[j]:
op[i] = -1
def generate_involutions(n):
"""
Generates involutions.
An involution is a permutation that when multiplied
by itself equals the identity permutation. In this
implementation the involutions are generated using
Fixed Points.
Alternatively, an involution can be considered as
a permutation that does not contain any cycles with
a length that is greater than two.
Examples
========
>>> from sympy.utilities.iterables import generate_involutions
>>> list(generate_involutions(3))
[(0, 1, 2), (0, 2, 1), (1, 0, 2), (2, 1, 0)]
>>> len(list(generate_involutions(4)))
10
References
==========
.. [1] http://mathworld.wolfram.com/PermutationInvolution.html
"""
idx = list(range(n))
for p in permutations(idx):
for i in idx:
if p[p[i]] != i:
break
else:
yield p
def generate_derangements(perm):
"""
Routine to generate unique derangements.
TODO: This will be rewritten to use the
ECO operator approach once the permutations
branch is in master.
Examples
========
>>> from sympy.utilities.iterables import generate_derangements
>>> list(generate_derangements([0, 1, 2]))
[[1, 2, 0], [2, 0, 1]]
>>> list(generate_derangements([0, 1, 2, 3]))
[[1, 0, 3, 2], [1, 2, 3, 0], [1, 3, 0, 2], [2, 0, 3, 1], \
[2, 3, 0, 1], [2, 3, 1, 0], [3, 0, 1, 2], [3, 2, 0, 1], \
[3, 2, 1, 0]]
>>> list(generate_derangements([0, 1, 1]))
[]
See Also
========
sympy.functions.combinatorial.factorials.subfactorial
"""
for p in multiset_permutations(perm):
if not any(i == j for i, j in zip(perm, p)):
yield p
def necklaces(n, k, free=False):
"""
A routine to generate necklaces that may (free=True) or may not
(free=False) be turned over to be viewed. The "necklaces" returned
are comprised of ``n`` integers (beads) with ``k`` different
values (colors). Only unique necklaces are returned.
Examples
========
>>> from sympy.utilities.iterables import necklaces, bracelets
>>> def show(s, i):
... return ''.join(s[j] for j in i)
The "unrestricted necklace" is sometimes also referred to as a
"bracelet" (an object that can be turned over, a sequence that can
be reversed) and the term "necklace" is used to imply a sequence
that cannot be reversed. So ACB == ABC for a bracelet (rotate and
reverse) while the two are different for a necklace since rotation
alone cannot make the two sequences the same.
(mnemonic: Bracelets can be viewed Backwards, but Not Necklaces.)
>>> B = [show('ABC', i) for i in bracelets(3, 3)]
>>> N = [show('ABC', i) for i in necklaces(3, 3)]
>>> set(N) - set(B)
{'ACB'}
>>> list(necklaces(4, 2))
[(0, 0, 0, 0), (0, 0, 0, 1), (0, 0, 1, 1),
(0, 1, 0, 1), (0, 1, 1, 1), (1, 1, 1, 1)]
>>> [show('.o', i) for i in bracelets(4, 2)]
['....', '...o', '..oo', '.o.o', '.ooo', 'oooo']
References
==========
.. [1] http://mathworld.wolfram.com/Necklace.html
"""
return uniq(minlex(i, directed=not free) for i in
variations(list(range(k)), n, repetition=True))
def bracelets(n, k):
"""Wrapper to necklaces to return a free (unrestricted) necklace."""
return necklaces(n, k, free=True)
def generate_oriented_forest(n):
"""
This algorithm generates oriented forests.
An oriented graph is a directed graph having no symmetric pair of directed
edges. A forest is an acyclic graph, i.e., it has no cycles. A forest can
also be described as a disjoint union of trees, which are graphs in which
any two vertices are connected by exactly one simple path.
Examples
========
>>> from sympy.utilities.iterables import generate_oriented_forest
>>> list(generate_oriented_forest(4))
[[0, 1, 2, 3], [0, 1, 2, 2], [0, 1, 2, 1], [0, 1, 2, 0], \
[0, 1, 1, 1], [0, 1, 1, 0], [0, 1, 0, 1], [0, 1, 0, 0], [0, 0, 0, 0]]
References
==========
.. [1] T. Beyer and S.M. Hedetniemi: constant time generation of
rooted trees, SIAM J. Computing Vol. 9, No. 4, November 1980
.. [2] https://stackoverflow.com/questions/1633833/oriented-forest-taocp-algorithm-in-python
"""
P = list(range(-1, n))
while True:
yield P[1:]
if P[n] > 0:
P[n] = P[P[n]]
else:
for p in range(n - 1, 0, -1):
if P[p] != 0:
target = P[p] - 1
for q in range(p - 1, 0, -1):
if P[q] == target:
break
offset = p - q
for i in range(p, n + 1):
P[i] = P[i - offset]
break
else:
break
def minlex(seq, directed=True, is_set=False, small=None):
"""
Return a tuple representing the rotation of the sequence in which
the lexically smallest elements appear first, e.g. `cba -> acb`.
If ``directed`` is False then the smaller of the sequence and the
reversed sequence is returned, e.g. `cba -> abc`.
For more efficient processing, ``is_set`` can be set to True if there
are no duplicates in the sequence.
If the smallest element is known at the time of calling, it can be
passed as ``small`` and the calculation of the smallest element will
be omitted.
Examples
========
>>> from sympy.combinatorics.polyhedron import minlex
>>> minlex((1, 2, 0))
(0, 1, 2)
>>> minlex((1, 0, 2))
(0, 2, 1)
>>> minlex((1, 0, 2), directed=False)
(0, 1, 2)
>>> minlex('11010011000', directed=True)
'00011010011'
>>> minlex('11010011000', directed=False)
'00011001011'
"""
is_str = isinstance(seq, str)
seq = list(seq)
if small is None:
small = min(seq, key=default_sort_key)
if is_set:
i = seq.index(small)
if not directed:
n = len(seq)
p = (i + 1) % n
m = (i - 1) % n
if default_sort_key(seq[p]) > default_sort_key(seq[m]):
seq = list(reversed(seq))
i = n - i - 1
if i:
seq = rotate_left(seq, i)
best = seq
else:
count = seq.count(small)
if count == 1 and directed:
best = rotate_left(seq, seq.index(small))
else:
# if not directed, and not a set, we can't just
# pass this off to minlex with is_set True since
# peeking at the neighbor may not be sufficient to
# make the decision so we continue...
best = seq
for i in range(count):
seq = rotate_left(seq, seq.index(small, count != 1))
if seq < best:
best = seq
# it's cheaper to rotate now rather than search
# again for these in reversed order so we test
# the reverse now
if not directed:
seq = rotate_left(seq, 1)
seq = list(reversed(seq))
if seq < best:
best = seq
seq = list(reversed(seq))
seq = rotate_right(seq, 1)
# common return
if is_str:
return ''.join(best)
return tuple(best)
def runs(seq, op=gt):
"""Group the sequence into lists in which successive elements
all compare the same with the comparison operator, ``op``:
op(seq[i + 1], seq[i]) is True for all elements in a run.
Examples
========
>>> from sympy.utilities.iterables import runs
>>> from operator import ge
>>> runs([0, 1, 2, 2, 1, 4, 3, 2, 2])
[[0, 1, 2], [2], [1, 4], [3], [2], [2]]
>>> runs([0, 1, 2, 2, 1, 4, 3, 2, 2], op=ge)
[[0, 1, 2, 2], [1, 4], [3], [2, 2]]
"""
cycles = []
seq = iter(seq)
try:
run = [next(seq)]
except StopIteration:
return []
while True:
try:
ei = next(seq)
except StopIteration:
break
if op(ei, run[-1]):
run.append(ei)
continue
else:
cycles.append(run)
run = [ei]
if run:
cycles.append(run)
return cycles
def kbins(l, k, ordered=None):
"""
Return sequence ``l`` partitioned into ``k`` bins.
Examples
========
>>> from __future__ import print_function
The default is to give the items in the same order, but grouped
into k partitions without any reordering:
>>> from sympy.utilities.iterables import kbins
>>> for p in kbins(list(range(5)), 2):
... print(p)
...
[[0], [1, 2, 3, 4]]
[[0, 1], [2, 3, 4]]
[[0, 1, 2], [3, 4]]
[[0, 1, 2, 3], [4]]
The ``ordered`` flag is either None (to give the simple partition
of the elements) or is a 2 digit integer indicating whether the order of
the bins and the order of the items in the bins matters. Given::
A = [[0], [1, 2]]
B = [[1, 2], [0]]
C = [[2, 1], [0]]
D = [[0], [2, 1]]
the following values for ``ordered`` have the shown meanings::
00 means A == B == C == D
01 means A == B
10 means A == D
11 means A == A
>>> for ordered_flag in [None, 0, 1, 10, 11]:
... print('ordered = %s' % ordered_flag)
... for p in kbins(list(range(3)), 2, ordered=ordered_flag):
... print(' %s' % p)
...
ordered = None
[[0], [1, 2]]
[[0, 1], [2]]
ordered = 0
[[0, 1], [2]]
[[0, 2], [1]]
[[0], [1, 2]]
ordered = 1
[[0], [1, 2]]
[[0], [2, 1]]
[[1], [0, 2]]
[[1], [2, 0]]
[[2], [0, 1]]
[[2], [1, 0]]
ordered = 10
[[0, 1], [2]]
[[2], [0, 1]]
[[0, 2], [1]]
[[1], [0, 2]]
[[0], [1, 2]]
[[1, 2], [0]]
ordered = 11
[[0], [1, 2]]
[[0, 1], [2]]
[[0], [2, 1]]
[[0, 2], [1]]
[[1], [0, 2]]
[[1, 0], [2]]
[[1], [2, 0]]
[[1, 2], [0]]
[[2], [0, 1]]
[[2, 0], [1]]
[[2], [1, 0]]
[[2, 1], [0]]
See Also
========
partitions, multiset_partitions
"""
def partition(lista, bins):
# EnricoGiampieri's partition generator from
# https://stackoverflow.com/questions/13131491/
# partition-n-items-into-k-bins-in-python-lazily
if len(lista) == 1 or bins == 1:
yield [lista]
elif len(lista) > 1 and bins > 1:
for i in range(1, len(lista)):
for part in partition(lista[i:], bins - 1):
if len([lista[:i]] + part) == bins:
yield [lista[:i]] + part
if ordered is None:
yield from partition(l, k)
elif ordered == 11:
for pl in multiset_permutations(l):
pl = list(pl)
yield from partition(pl, k)
elif ordered == 00:
yield from multiset_partitions(l, k)
elif ordered == 10:
for p in multiset_partitions(l, k):
for perm in permutations(p):
yield list(perm)
elif ordered == 1:
for kgot, p in partitions(len(l), k, size=True):
if kgot != k:
continue
for li in multiset_permutations(l):
rv = []
i = j = 0
li = list(li)
for size, multiplicity in sorted(p.items()):
for m in range(multiplicity):
j = i + size
rv.append(li[i: j])
i = j
yield rv
else:
raise ValueError(
'ordered must be one of 00, 01, 10 or 11, not %s' % ordered)
def permute_signs(t):
"""Return iterator in which the signs of non-zero elements
of t are permuted.
Examples
========
>>> from sympy.utilities.iterables import permute_signs
>>> list(permute_signs((0, 1, 2)))
[(0, 1, 2), (0, -1, 2), (0, 1, -2), (0, -1, -2)]
"""
for signs in cartes(*[(1, -1)]*(len(t) - t.count(0))):
signs = list(signs)
yield type(t)([i*signs.pop() if i else i for i in t])
def signed_permutations(t):
"""Return iterator in which the signs of non-zero elements
of t and the order of the elements are permuted.
Examples
========
>>> from sympy.utilities.iterables import signed_permutations
>>> list(signed_permutations((0, 1, 2)))
[(0, 1, 2), (0, -1, 2), (0, 1, -2), (0, -1, -2), (0, 2, 1),
(0, -2, 1), (0, 2, -1), (0, -2, -1), (1, 0, 2), (-1, 0, 2),
(1, 0, -2), (-1, 0, -2), (1, 2, 0), (-1, 2, 0), (1, -2, 0),
(-1, -2, 0), (2, 0, 1), (-2, 0, 1), (2, 0, -1), (-2, 0, -1),
(2, 1, 0), (-2, 1, 0), (2, -1, 0), (-2, -1, 0)]
"""
return (type(t)(i) for j in permutations(t)
for i in permute_signs(j))
def rotations(s, dir=1):
"""Return a generator giving the items in s as list where
each subsequent list has the items rotated to the left (default)
or right (dir=-1) relative to the previous list.
Examples
========
>>> from sympy.utilities.iterables import rotations
>>> list(rotations([1,2,3]))
[[1, 2, 3], [2, 3, 1], [3, 1, 2]]
>>> list(rotations([1,2,3], -1))
[[1, 2, 3], [3, 1, 2], [2, 3, 1]]
"""
seq = list(s)
for i in range(len(seq)):
yield seq
seq = rotate_left(seq, dir)
def roundrobin(*iterables):
"""roundrobin recipe taken from itertools documentation:
https://docs.python.org/2/library/itertools.html#recipes
roundrobin('ABC', 'D', 'EF') --> A D E B F C
Recipe credited to George Sakkis
"""
import itertools
nexts = itertools.cycle(iter(it).__next__ for it in iterables)
pending = len(iterables)
while pending:
try:
for next in nexts:
yield next()
except StopIteration:
pending -= 1
nexts = itertools.cycle(itertools.islice(nexts, pending))
|
5359449d9fa55e2073811b9613b9650fed082e07901a1adcae6351f1c89be9c9 | """
This is a shim file to provide backwards compatibility (cxxcode.py was renamed
to cxx.py in SymPy 1.7).
"""
from sympy.utilities.exceptions import SymPyDeprecationWarning
SymPyDeprecationWarning(
feature="importing from sympy.printing.cxxcode",
useinstead="Import from sympy.printing.cxx",
issue=20256,
deprecated_since_version="1.7").warn()
from .cxx import (cxxcode, reserved, CXX98CodePrinter, # noqa:F401
CXX11CodePrinter, CXX17CodePrinter, cxx_code_printers)
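# As the deprecation warning above indicates, new code should import from the
# renamed module directly, for example::
#
#     from sympy.printing.cxx import cxxcode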
|
4a4d988f93d7e42793ea8df31f1f99542d6425303b610333c17a632709547ae7 | """A module providing information about the necessity of brackets"""
from sympy.core.function import _coeff_isneg
# Default precedence values for some basic types
PRECEDENCE = {
"Lambda": 1,
"Xor": 10,
"Or": 20,
"And": 30,
"Relational": 35,
"Add": 40,
"Mul": 50,
"Pow": 60,
"Func": 70,
"Not": 100,
"Atom": 1000,
"BitwiseOr": 36,
"BitwiseXor": 37,
"BitwiseAnd": 38
}
# A dictionary assigning precedence values to certain classes. These values are
# treated like they were inherited, so not every single class has to be named
# here.
# Do not use this with printers other than StrPrinter
PRECEDENCE_VALUES = {
"Equivalent": PRECEDENCE["Xor"],
"Xor": PRECEDENCE["Xor"],
"Implies": PRECEDENCE["Xor"],
"Or": PRECEDENCE["Or"],
"And": PRECEDENCE["And"],
"Add": PRECEDENCE["Add"],
"Pow": PRECEDENCE["Pow"],
"Relational": PRECEDENCE["Relational"],
"Sub": PRECEDENCE["Add"],
"Not": PRECEDENCE["Not"],
"Function" : PRECEDENCE["Func"],
"NegativeInfinity": PRECEDENCE["Add"],
"MatAdd": PRECEDENCE["Add"],
"MatPow": PRECEDENCE["Pow"],
"MatrixSolve": PRECEDENCE["Mul"],
"TensAdd": PRECEDENCE["Add"],
# As soon as `TensMul` is a subclass of `Mul`, remove this:
"TensMul": PRECEDENCE["Mul"],
"HadamardProduct": PRECEDENCE["Mul"],
"HadamardPower": PRECEDENCE["Pow"],
"KroneckerProduct": PRECEDENCE["Mul"],
"Equality": PRECEDENCE["Mul"],
"Unequality": PRECEDENCE["Mul"],
}
# Sometimes it's not enough to assign a fixed precedence value to a
# class. Then a function can be inserted in this dictionary that takes
# an instance of this class as argument and returns the appropriate
# precedence value.
# Precedence functions
def precedence_Mul(item):
if _coeff_isneg(item):
return PRECEDENCE["Add"]
return PRECEDENCE["Mul"]
def precedence_Rational(item):
if item.p < 0:
return PRECEDENCE["Add"]
return PRECEDENCE["Mul"]
def precedence_Integer(item):
if item.p < 0:
return PRECEDENCE["Add"]
return PRECEDENCE["Atom"]
def precedence_Float(item):
if item < 0:
return PRECEDENCE["Add"]
return PRECEDENCE["Atom"]
def precedence_PolyElement(item):
if item.is_generator:
return PRECEDENCE["Atom"]
elif item.is_ground:
return precedence(item.coeff(1))
elif item.is_term:
return PRECEDENCE["Mul"]
else:
return PRECEDENCE["Add"]
def precedence_FracElement(item):
if item.denom == 1:
return precedence_PolyElement(item.numer)
else:
return PRECEDENCE["Mul"]
def precedence_UnevaluatedExpr(item):
return precedence(item.args[0])
PRECEDENCE_FUNCTIONS = {
"Integer": precedence_Integer,
"Mul": precedence_Mul,
"Rational": precedence_Rational,
"Float": precedence_Float,
"PolyElement": precedence_PolyElement,
"FracElement": precedence_FracElement,
"UnevaluatedExpr": precedence_UnevaluatedExpr,
}
def precedence(item):
"""Returns the precedence of a given object.
This is the precedence for StrPrinter.
"""
if hasattr(item, "precedence"):
return item.precedence
try:
mro = item.__class__.__mro__
except AttributeError:
return PRECEDENCE["Atom"]
for i in mro:
n = i.__name__
if n in PRECEDENCE_FUNCTIONS:
return PRECEDENCE_FUNCTIONS[n](item)
elif n in PRECEDENCE_VALUES:
return PRECEDENCE_VALUES[n]
return PRECEDENCE["Atom"]
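# An illustrative sketch of the lookup above (the values follow from the
# PRECEDENCE table):
#   >>> from sympy.abc import x, y
#   >>> precedence(x + y), precedence(x*y), precedence(x)
#   (40, 50, 1000)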
PRECEDENCE_TRADITIONAL = PRECEDENCE.copy()
PRECEDENCE_TRADITIONAL['Integral'] = PRECEDENCE["Mul"]
PRECEDENCE_TRADITIONAL['Sum'] = PRECEDENCE["Mul"]
PRECEDENCE_TRADITIONAL['Product'] = PRECEDENCE["Mul"]
PRECEDENCE_TRADITIONAL['Limit'] = PRECEDENCE["Mul"]
PRECEDENCE_TRADITIONAL['Derivative'] = PRECEDENCE["Mul"]
PRECEDENCE_TRADITIONAL['TensorProduct'] = PRECEDENCE["Mul"]
PRECEDENCE_TRADITIONAL['Transpose'] = PRECEDENCE["Pow"]
PRECEDENCE_TRADITIONAL['Adjoint'] = PRECEDENCE["Pow"]
PRECEDENCE_TRADITIONAL['Dot'] = PRECEDENCE["Mul"] - 1
PRECEDENCE_TRADITIONAL['Cross'] = PRECEDENCE["Mul"] - 1
PRECEDENCE_TRADITIONAL['Gradient'] = PRECEDENCE["Mul"] - 1
PRECEDENCE_TRADITIONAL['Divergence'] = PRECEDENCE["Mul"] - 1
PRECEDENCE_TRADITIONAL['Curl'] = PRECEDENCE["Mul"] - 1
PRECEDENCE_TRADITIONAL['Laplacian'] = PRECEDENCE["Mul"] - 1
PRECEDENCE_TRADITIONAL['Union'] = PRECEDENCE['Xor']
PRECEDENCE_TRADITIONAL['Intersection'] = PRECEDENCE['Xor']
PRECEDENCE_TRADITIONAL['Complement'] = PRECEDENCE['Xor']
PRECEDENCE_TRADITIONAL['SymmetricDifference'] = PRECEDENCE['Xor']
PRECEDENCE_TRADITIONAL['ProductSet'] = PRECEDENCE['Xor']
def precedence_traditional(item):
"""Returns the precedence of a given object according to the
traditional rules of mathematics.
This is the precedence for the LaTeX and pretty printer.
"""
# Integral, Sum, Product, Limit have the precedence of Mul in LaTeX,
# the precedence of Atom for other printers:
from sympy.core.expr import UnevaluatedExpr
if isinstance(item, UnevaluatedExpr):
return precedence_traditional(item.args[0])
n = item.__class__.__name__
if n in PRECEDENCE_TRADITIONAL:
return PRECEDENCE_TRADITIONAL[n]
return precedence(item)
|
a6d9f063126be68c772c6e130836d1220c6c0a291a56f1335816aa2292135177 | from sympy.printing.mathml import mathml
from sympy.utilities.mathml import c2p
import tempfile
import subprocess
def print_gtk(x, start_viewer=True):
"""Print to Gtkmathview, a gtk widget capable of rendering MathML.
Needs libgtkmathview-bin"""
with tempfile.NamedTemporaryFile('w') as file:
file.write(c2p(mathml(x), simple=True))
file.flush()
if start_viewer:
subprocess.check_call(('mathmlviewer', file.name))
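# An illustrative sketch (assumes lxml/libxslt are available for the MathML
# conversion; with ``start_viewer=False`` the MathML is only written to a
# temporary file and no viewer is launched):
# from sympy.abc import x
# print_gtk(x**2 + x, start_viewer=False)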
|
5bc85f19902188d42d8dc16766e92de828c8b07056502212bde859aadb9effcc | """
Python code printers
This module contains Python code printers for plain Python as well as NumPy- and SciPy-enabled code.
"""
from collections import defaultdict
from itertools import chain
from sympy.core import S
from .precedence import precedence
from .codeprinter import CodePrinter
_kw_py2and3 = {
'and', 'as', 'assert', 'break', 'class', 'continue', 'def', 'del', 'elif',
'else', 'except', 'finally', 'for', 'from', 'global', 'if', 'import', 'in',
'is', 'lambda', 'not', 'or', 'pass', 'raise', 'return', 'try', 'while',
'with', 'yield', 'None' # 'None' is actually not in Python 2's keyword.kwlist
}
_kw_only_py2 = {'exec', 'print'}
_kw_only_py3 = {'False', 'nonlocal', 'True'}
_known_functions = {
'Abs': 'abs',
}
_known_functions_math = {
'acos': 'acos',
'acosh': 'acosh',
'asin': 'asin',
'asinh': 'asinh',
'atan': 'atan',
'atan2': 'atan2',
'atanh': 'atanh',
'ceiling': 'ceil',
'cos': 'cos',
'cosh': 'cosh',
'erf': 'erf',
'erfc': 'erfc',
'exp': 'exp',
'expm1': 'expm1',
'factorial': 'factorial',
'floor': 'floor',
'gamma': 'gamma',
'hypot': 'hypot',
'loggamma': 'lgamma',
'log': 'log',
'ln': 'log',
'log10': 'log10',
'log1p': 'log1p',
'log2': 'log2',
'sin': 'sin',
'sinh': 'sinh',
'Sqrt': 'sqrt',
'tan': 'tan',
'tanh': 'tanh'
} # Not used from ``math``: [copysign isclose isfinite isinf isnan ldexp frexp pow modf
# radians trunc fmod fsum gcd degrees fabs]
_known_constants_math = {
'Exp1': 'e',
'Pi': 'pi',
'E': 'e'
# Only in python >= 3.5:
# 'Infinity': 'inf',
# 'NaN': 'nan'
}
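# The two helpers below are generic printer methods: they look up the printable
# name of an expression's class in the printer's ``known_functions`` or
# ``known_constants`` mapping (presumably so they can be bound onto the printer
# classes for every supported function and constant).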
def _print_known_func(self, expr):
known = self.known_functions[expr.__class__.__name__]
return '{name}({args})'.format(name=self._module_format(known),
args=', '.join(map(lambda arg: self._print(arg), expr.args)))
def _print_known_const(self, expr):
known = self.known_constants[expr.__class__.__name__]
return self._module_format(known)
class AbstractPythonCodePrinter(CodePrinter):
printmethod = "_pythoncode"
language = "Python"
reserved_words = _kw_py2and3.union(_kw_only_py3)
modules = None # initialized to a set in __init__
tab = ' '
_kf = dict(chain(
_known_functions.items(),
[(k, 'math.' + v) for k, v in _known_functions_math.items()]
))
_kc = {k: 'math.'+v for k, v in _known_constants_math.items()}
_operators = {'and': 'and', 'or': 'or', 'not': 'not'}
_default_settings = dict(
CodePrinter._default_settings,
user_functions={},
precision=17,
inline=True,
fully_qualified_modules=True,
contract=False,
standard='python3',
)
def __init__(self, settings=None):
super().__init__(settings)
# Python standard handler
std = self._settings['standard']
if std is None:
import sys
std = 'python{}'.format(sys.version_info.major)
if std not in ('python2', 'python3'):
raise ValueError('Unrecognized python standard : {}'.format(std))
self.standard = std
self.module_imports = defaultdict(set)
# Known functions and constants handler
self.known_functions = dict(self._kf, **(settings or {}).get(
'user_functions', {}))
self.known_constants = dict(self._kc, **(settings or {}).get(
'user_constants', {}))
def _declare_number_const(self, name, value):
return "%s = %s" % (name, value)
def _module_format(self, fqn, register=True):
parts = fqn.split('.')
if register and len(parts) > 1:
self.module_imports['.'.join(parts[:-1])].add(parts[-1])
if self._settings['fully_qualified_modules']:
return fqn
else:
return fqn.split('(')[0].split('[')[0].split('.')[-1]
def _format_code(self, lines):
return lines
def _get_statement(self, codestring):
return "{}".format(codestring)
def _get_comment(self, text):
return " # {}".format(text)
def _expand_fold_binary_op(self, op, args):
"""
This method expands a fold on binary operations.
``functools.reduce`` is an example of a folded operation.
For example, the expression
`A + B + C + D`
is folded into
`((A + B) + C) + D`
"""
if len(args) == 1:
return self._print(args[0])
else:
return "%s(%s, %s)" % (
self._module_format(op),
self._expand_fold_binary_op(op, args[:-1]),
self._print(args[-1]),
)
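# Illustrative sketch for _expand_fold_binary_op above (not part of the original
# source): assuming default settings and op='numpy.add', args=[A, B, C] renders as
# 'numpy.add(numpy.add(A, B), C)'.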
def _expand_reduce_binary_op(self, op, args):
"""
This method expands a reduction on binary operations.
Notice: this is NOT the same as ``functools.reduce``.
For example, the expression
`A + B + C + D`
is reduced into:
`(A + B) + (C + D)`
"""
if len(args) == 1:
return self._print(args[0])
else:
N = len(args)
Nhalf = N // 2
return "%s(%s, %s)" % (
self._module_format(op),
self._expand_reduce_binary_op(op, args[:Nhalf]),
self._expand_reduce_binary_op(op, args[Nhalf:]),
)
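# Illustrative sketch for _expand_reduce_binary_op above (not part of the original
# source): with op='numpy.add' and args=[A, B, C, D] the expansion renders as
# 'numpy.add(numpy.add(A, B), numpy.add(C, D))'.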
def _get_einsum_string(self, subranks, contraction_indices):
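# Descriptive note (added comment, not in the original source): builds the subscript
# string for an einsum call from the ranks of the tensor-product factors and the
# groups of contracted index positions; contracted positions share a letter, and the
# method returns the comma-separated per-factor subscripts together with the lists
# of free and dummy (contracted) letters.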
letters = self._get_letter_generator_for_einsum()
contraction_string = ""
counter = 0
d = {j: min(i) for i in contraction_indices for j in i}
indices = []
for rank_arg in subranks:
lindices = []
for i in range(rank_arg):
if counter in d:
lindices.append(d[counter])
else:
lindices.append(counter)
counter += 1
indices.append(lindices)
mapping = {}
letters_free = []
letters_dum = []
for i in indices:
for j in i:
if j not in mapping:
l = next(letters)
mapping[j] = l
else:
l = mapping[j]
contraction_string += l
if j in d:
if l not in letters_dum:
letters_dum.append(l)
else:
letters_free.append(l)
contraction_string += ","
contraction_string = contraction_string[:-1]
return contraction_string, letters_free, letters_dum
def _print_NaN(self, expr):
return "float('nan')"
def _print_Infinity(self, expr):
return "float('inf')"
def _print_NegativeInfinity(self, expr):
return "float('-inf')"
def _print_ComplexInfinity(self, expr):
return self._print_NaN(expr)
def _print_Mod(self, expr):
PREC = precedence(expr)
return ('{} % {}'.format(*map(lambda x: self.parenthesize(x, PREC), expr.args)))
def _print_Piecewise(self, expr):
result = []
i = 0
for arg in expr.args:
e = arg.expr
c = arg.cond
if i == 0:
result.append('(')
result.append('(')
result.append(self._print(e))
result.append(')')
result.append(' if ')
result.append(self._print(c))
result.append(' else ')
i += 1
result = result[:-1]
if result[-1] == 'True':
result = result[:-2]
result.append(')')
else:
result.append(' else None)')
return ''.join(result)
def _print_Relational(self, expr):
"Relational printer for Equality and Unequality"
op = {
'==' :'equal',
'!=' :'not_equal',
'<' :'less',
'<=' :'less_equal',
'>' :'greater',
'>=' :'greater_equal',
}
if expr.rel_op in op:
lhs = self._print(expr.lhs)
rhs = self._print(expr.rhs)
return '({lhs} {op} {rhs})'.format(op=expr.rel_op, lhs=lhs, rhs=rhs)
return super()._print_Relational(expr)
def _print_ITE(self, expr):
from sympy.functions.elementary.piecewise import Piecewise
return self._print(expr.rewrite(Piecewise))
def _print_Sum(self, expr):
loops = (
'for {i} in range({a}, {b}+1)'.format(
i=self._print(i),
a=self._print(a),
b=self._print(b))
for i, a, b in expr.limits)
return '(builtins.sum({function} {loops}))'.format(
function=self._print(expr.function),
loops=' '.join(loops))
def _print_ImaginaryUnit(self, expr):
return '1j'
def _print_KroneckerDelta(self, expr):
a, b = expr.args
return '(1 if {a} == {b} else 0)'.format(
a = self._print(a),
b = self._print(b)
)
def _print_MatrixBase(self, expr):
name = expr.__class__.__name__
func = self.known_functions.get(name, name)
return "%s(%s)" % (func, self._print(expr.tolist()))
_print_SparseMatrix = \
_print_MutableSparseMatrix = \
_print_ImmutableSparseMatrix = \
_print_Matrix = \
_print_DenseMatrix = \
_print_MutableDenseMatrix = \
_print_ImmutableMatrix = \
_print_ImmutableDenseMatrix = \
lambda self, expr: self._print_MatrixBase(expr)
def _indent_codestring(self, codestring):
return '\n'.join([self.tab + line for line in codestring.split('\n')])
def _print_FunctionDefinition(self, fd):
body = '\n'.join(map(lambda arg: self._print(arg), fd.body))
return "def {name}({parameters}):\n{body}".format(
name=self._print(fd.name),
parameters=', '.join([self._print(var.symbol) for var in fd.parameters]),
body=self._indent_codestring(body)
)
def _print_While(self, whl):
body = '\n'.join(map(lambda arg: self._print(arg), whl.body))
return "while {cond}:\n{body}".format(
cond=self._print(whl.condition),
body=self._indent_codestring(body)
)
def _print_Declaration(self, decl):
return '%s = %s' % (
self._print(decl.variable.symbol),
self._print(decl.variable.value)
)
def _print_Return(self, ret):
arg, = ret.args
return 'return %s' % self._print(arg)
def _print_Print(self, prnt):
print_args = ', '.join(map(lambda arg: self._print(arg), prnt.print_args))
if prnt.format_string != None: # Must be '!= None', cannot be 'is not None'
print_args = '{} % ({})'.format(
self._print(prnt.format_string), print_args)
if prnt.file != None: # Must be '!= None', cannot be 'is not None'
print_args += ', file=%s' % self._print(prnt.file)
if self.standard == 'python2':
return 'print %s' % print_args
return 'print(%s)' % print_args
def _print_Stream(self, strm):
if str(strm.name) == 'stdout':
return self._module_format('sys.stdout')
elif str(strm.name) == 'stderr':
return self._module_format('sys.stderr')
else:
return self._print(strm.name)
def _print_NoneToken(self, arg):
return 'None'
def _hprint_Pow(self, expr, rational=False, sqrt='math.sqrt'):
"""Printing helper function for ``Pow``
Notes
=====
This only preprocesses the ``sqrt`` as math formatter
Examples
========
>>> from sympy.functions import sqrt
>>> from sympy.printing.pycode import PythonCodePrinter
>>> from sympy.abc import x
Python code printer automatically looks up ``math.sqrt``.
>>> printer = PythonCodePrinter({'standard':'python3'})
>>> printer._hprint_Pow(sqrt(x), rational=True)
'x**(1/2)'
>>> printer._hprint_Pow(sqrt(x), rational=False)
'math.sqrt(x)'
>>> printer._hprint_Pow(1/sqrt(x), rational=True)
'x**(-1/2)'
>>> printer._hprint_Pow(1/sqrt(x), rational=False)
'1/math.sqrt(x)'
Using sqrt from numpy or mpmath
>>> printer._hprint_Pow(sqrt(x), sqrt='numpy.sqrt')
'numpy.sqrt(x)'
>>> printer._hprint_Pow(sqrt(x), sqrt='mpmath.sqrt')
'mpmath.sqrt(x)'
See Also
========
sympy.printing.str.StrPrinter._print_Pow
"""
PREC = precedence(expr)
if expr.exp == S.Half and not rational:
func = self._module_format(sqrt)
arg = self._print(expr.base)
return '{func}({arg})'.format(func=func, arg=arg)
if expr.is_commutative:
if -expr.exp is S.Half and not rational:
func = self._module_format(sqrt)
num = self._print(S.One)
arg = self._print(expr.base)
return "{num}/{func}({arg})".format(
num=num, func=func, arg=arg)
base_str = self.parenthesize(expr.base, PREC, strict=False)
exp_str = self.parenthesize(expr.exp, PREC, strict=False)
return "{}**{}".format(base_str, exp_str)
class PythonCodePrinter(AbstractPythonCodePrinter):
def _print_sign(self, e):
return '(0.0 if {e} == 0 else {f}(1, {e}))'.format(
f=self._module_format('math.copysign'), e=self._print(e.args[0]))
def _print_Not(self, expr):
PREC = precedence(expr)
return self._operators['not'] + self.parenthesize(expr.args[0], PREC)
def _print_Indexed(self, expr):
base = expr.args[0]
index = expr.args[1:]
return "{}[{}]".format(str(base), ", ".join([self._print(ind) for ind in index]))
def _print_Pow(self, expr, rational=False):
return self._hprint_Pow(expr, rational=rational)
def _print_Rational(self, expr):
if self.standard == 'python2':
return '{}./{}.'.format(expr.p, expr.q)
return '{}/{}'.format(expr.p, expr.q)
def _print_Half(self, expr):
return self._print_Rational(expr)
def _print_frac(self, expr):
from sympy import Mod
return self._print_Mod(Mod(expr.args[0], 1))
_print_lowergamma = CodePrinter._print_not_supported
_print_uppergamma = CodePrinter._print_not_supported
_print_fresnelc = CodePrinter._print_not_supported
_print_fresnels = CodePrinter._print_not_supported
for k in PythonCodePrinter._kf:
setattr(PythonCodePrinter, '_print_%s' % k, _print_known_func)
for k in _known_constants_math:
setattr(PythonCodePrinter, '_print_%s' % k, _print_known_const)
def pycode(expr, **settings):
""" Converts an expr to a string of Python code
Parameters
==========
expr : Expr
A SymPy expression.
fully_qualified_modules : bool
Whether or not to write out full module names of functions
(``math.sin`` vs. ``sin``). default: ``True``.
standard : str or None, optional
If 'python2', Python 2 semantics will be used.
If 'python3', Python 3 semantics will be used.
If None, the standard will be automatically detected.
Default is 'python3'. This parameter may be removed in the
future.
Examples
========
>>> from sympy import tan, Symbol
>>> from sympy.printing.pycode import pycode
>>> pycode(tan(Symbol('x')) + 1)
'math.tan(x) + 1'
"""
return PythonCodePrinter(settings).doprint(expr)
_not_in_mpmath = 'log1p log2'.split()
_in_mpmath = [(k, v) for k, v in _known_functions_math.items() if k not in _not_in_mpmath]
_known_functions_mpmath = dict(_in_mpmath, **{
'beta': 'beta',
'frac': 'frac',
'fresnelc': 'fresnelc',
'fresnels': 'fresnels',
'sign': 'sign',
'loggamma': 'loggamma',
})
_known_constants_mpmath = {
'Exp1': 'e',
'Pi': 'pi',
'GoldenRatio': 'phi',
'EulerGamma': 'euler',
'Catalan': 'catalan',
'NaN': 'nan',
'Infinity': 'inf',
'NegativeInfinity': 'ninf'
}
def _unpack_integral_limits(integral_expr):
""" helper function for _print_Integral that
- accepts an Integral expression
- returns a tuple of
- a list of the variables of integration
- a list of tuples of the upper and lower limits of integration
"""
integration_vars = []
limits = []
for integration_range in integral_expr.limits:
if len(integration_range) == 3:
integration_var, lower_limit, upper_limit = integration_range
else:
raise NotImplementedError("Only definite integrals are supported")
integration_vars.append(integration_var)
limits.append((lower_limit, upper_limit))
return integration_vars, limits
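# Illustrative example for the helper above (assumed, not from the original source):
# Integral(f(x, y), (x, 0, 1), (y, -1, 1)) yields ([x, y], [(0, 1), (-1, 1)]).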
class MpmathPrinter(PythonCodePrinter):
"""
Lambda printer for mpmath which maintains precision for floats
"""
printmethod = "_mpmathcode"
language = "Python with mpmath"
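# Example of intended output (illustrative, assuming default settings):
# MpmathPrinter().doprint(Rational(1, 2)) gives 'mpmath.mpf(1)/mpmath.mpf(2)'.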
_kf = dict(chain(
_known_functions.items(),
[(k, 'mpmath.' + v) for k, v in _known_functions_mpmath.items()]
))
_kc = {k: 'mpmath.'+v for k, v in _known_constants_mpmath.items()}
def _print_Float(self, e):
# XXX: This does not handle setting mpmath.mp.dps. It is assumed that
# the caller of the lambdified function will have set it to sufficient
# precision to match the Floats in the expression.
# Convert the mpf tuple entries with int() to strip gmpy's 'mpz' type if gmpy is installed.
args = str(tuple(map(int, e._mpf_)))
return '{func}({args})'.format(func=self._module_format('mpmath.mpf'), args=args)
def _print_Rational(self, e):
return "{func}({p})/{func}({q})".format(
func=self._module_format('mpmath.mpf'),
q=self._print(e.q),
p=self._print(e.p)
)
def _print_Half(self, e):
return self._print_Rational(e)
def _print_uppergamma(self, e):
return "{}({}, {}, {})".format(
self._module_format('mpmath.gammainc'),
self._print(e.args[0]),
self._print(e.args[1]),
self._module_format('mpmath.inf'))
def _print_lowergamma(self, e):
return "{}({}, 0, {})".format(
self._module_format('mpmath.gammainc'),
self._print(e.args[0]),
self._print(e.args[1]))
def _print_log2(self, e):
return '{0}({1})/{0}(2)'.format(
self._module_format('mpmath.log'), self._print(e.args[0]))
def _print_log1p(self, e):
return '{}({}+1)'.format(
self._module_format('mpmath.log'), self._print(e.args[0]))
def _print_Pow(self, expr, rational=False):
return self._hprint_Pow(expr, rational=rational, sqrt='mpmath.sqrt')
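# Illustrative example (assumed): Integral(exp(-x**2), (x, 0, oo)) prints as
# 'mpmath.quad(lambda x: mpmath.exp(-x**2), (0, mpmath.inf))'.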
def _print_Integral(self, e):
integration_vars, limits = _unpack_integral_limits(e)
return "{}(lambda {}: {}, {})".format(
self._module_format("mpmath.quad"),
", ".join(map(self._print, integration_vars)),
self._print(e.args[0]),
", ".join("(%s, %s)" % tuple(map(self._print, l)) for l in limits))
for k in MpmathPrinter._kf:
setattr(MpmathPrinter, '_print_%s' % k, _print_known_func)
for k in _known_constants_mpmath:
setattr(MpmathPrinter, '_print_%s' % k, _print_known_const)
_not_in_numpy = 'erf erfc factorial gamma loggamma'.split()
_in_numpy = [(k, v) for k, v in _known_functions_math.items() if k not in _not_in_numpy]
_known_functions_numpy = dict(_in_numpy, **{
'acos': 'arccos',
'acosh': 'arccosh',
'asin': 'arcsin',
'asinh': 'arcsinh',
'atan': 'arctan',
'atan2': 'arctan2',
'atanh': 'arctanh',
'exp2': 'exp2',
'sign': 'sign',
'logaddexp': 'logaddexp',
'logaddexp2': 'logaddexp2',
})
_known_constants_numpy = {
'Exp1': 'e',
'Pi': 'pi',
'EulerGamma': 'euler_gamma',
'NaN': 'nan',
'Infinity': 'PINF',
'NegativeInfinity': 'NINF'
}
class NumPyPrinter(PythonCodePrinter):
"""
Numpy printer which handles vectorized piecewise functions,
logical operators, etc.
"""
printmethod = "_numpycode"
language = "Python with NumPy"
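# Example of intended output (illustrative, assuming default settings):
# NumPyPrinter().doprint(sin(x) + 1) gives 'numpy.sin(x) + 1'.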
_kf = dict(chain(
PythonCodePrinter._kf.items(),
[(k, 'numpy.' + v) for k, v in _known_functions_numpy.items()]
))
_kc = {k: 'numpy.'+v for k, v in _known_constants_numpy.items()}
def _print_seq(self, seq):
"General sequence printer: converts to tuple"
# Print tuples here instead of lists because numba supports
# tuples in nopython mode.
delimiter=', '
return '({},)'.format(delimiter.join(self._print(item) for item in seq))
def _print_MatMul(self, expr):
"Matrix multiplication printer"
if expr.as_coeff_matrices()[0] is not S.One:
expr_list = expr.as_coeff_matrices()[1]+[(expr.as_coeff_matrices()[0])]
return '({})'.format(').dot('.join(self._print(i) for i in expr_list))
return '({})'.format(').dot('.join(self._print(i) for i in expr.args))
def _print_MatPow(self, expr):
"Matrix power printer"
return '{}({}, {})'.format(self._module_format('numpy.linalg.matrix_power'),
self._print(expr.args[0]), self._print(expr.args[1]))
def _print_Inverse(self, expr):
"Matrix inverse printer"
return '{}({})'.format(self._module_format('numpy.linalg.inv'),
self._print(expr.args[0]))
def _print_DotProduct(self, expr):
# DotProduct allows any shape order, but numpy.dot does matrix
# multiplication, so we have to make sure it gets 1 x n by n x 1.
arg1, arg2 = expr.args
if arg1.shape[0] != 1:
arg1 = arg1.T
if arg2.shape[1] != 1:
arg2 = arg2.T
return "%s(%s, %s)" % (self._module_format('numpy.dot'),
self._print(arg1),
self._print(arg2))
def _print_MatrixSolve(self, expr):
return "%s(%s, %s)" % (self._module_format('numpy.linalg.solve'),
self._print(expr.matrix),
self._print(expr.vector))
def _print_ZeroMatrix(self, expr):
return '{}({})'.format(self._module_format('numpy.zeros'),
self._print(expr.shape))
def _print_OneMatrix(self, expr):
return '{}({})'.format(self._module_format('numpy.ones'),
self._print(expr.shape))
def _print_FunctionMatrix(self, expr):
from sympy.core.function import Lambda
from sympy.abc import i, j
lamda = expr.lamda
if not isinstance(lamda, Lambda):
lamda = Lambda((i, j), lamda(i, j))
return '{}(lambda {}: {}, {})'.format(self._module_format('numpy.fromfunction'),
', '.join(self._print(arg) for arg in lamda.args[0]),
self._print(lamda.args[1]), self._print(expr.shape))
def _print_HadamardProduct(self, expr):
func = self._module_format('numpy.multiply')
return ''.join('{}({}, '.format(func, self._print(arg)) \
for arg in expr.args[:-1]) + "{}{}".format(self._print(expr.args[-1]),
')' * (len(expr.args) - 1))
def _print_KroneckerProduct(self, expr):
func = self._module_format('numpy.kron')
return ''.join('{}({}, '.format(func, self._print(arg)) \
for arg in expr.args[:-1]) + "{}{}".format(self._print(expr.args[-1]),
')' * (len(expr.args) - 1))
def _print_Adjoint(self, expr):
return '{}({}({}))'.format(
self._module_format('numpy.conjugate'),
self._module_format('numpy.transpose'),
self._print(expr.args[0]))
def _print_DiagonalOf(self, expr):
vect = '{}({})'.format(
self._module_format('numpy.diag'),
self._print(expr.arg))
return '{}({}, (-1, 1))'.format(
self._module_format('numpy.reshape'), vect)
def _print_DiagMatrix(self, expr):
return '{}({})'.format(self._module_format('numpy.diagflat'),
self._print(expr.args[0]))
def _print_DiagonalMatrix(self, expr):
return '{}({}, {}({}, {}))'.format(self._module_format('numpy.multiply'),
self._print(expr.arg), self._module_format('numpy.eye'),
self._print(expr.shape[0]), self._print(expr.shape[1]))
def _print_Piecewise(self, expr):
"Piecewise function printer"
exprs = '[{}]'.format(','.join(self._print(arg.expr) for arg in expr.args))
conds = '[{}]'.format(','.join(self._print(arg.cond) for arg in expr.args))
# If [default_value, True] is a (expr, cond) sequence in a Piecewise object
# it will behave the same as passing the 'default' kwarg to select()
# *as long as* it is the last element in expr.args.
# If this is not the case, it may be triggered prematurely.
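# Illustrative output (assumed): Piecewise((x, x > 0), (0, True)) renders as
# 'numpy.select([numpy.greater(x, 0),True], [x,0], default=numpy.nan)'.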
return '{}({}, {}, default={})'.format(
self._module_format('numpy.select'), conds, exprs,
self._print(S.NaN))
def _print_Relational(self, expr):
"Relational printer for Equality and Unequality"
op = {
'==' :'equal',
'!=' :'not_equal',
'<' :'less',
'<=' :'less_equal',
'>' :'greater',
'>=' :'greater_equal',
}
if expr.rel_op in op:
lhs = self._print(expr.lhs)
rhs = self._print(expr.rhs)
return '{op}({lhs}, {rhs})'.format(op=self._module_format('numpy.'+op[expr.rel_op]),
lhs=lhs, rhs=rhs)
return super()._print_Relational(expr)
def _print_And(self, expr):
"Logical And printer"
# We have to override LambdaPrinter because it uses Python 'and' keyword.
# If LambdaPrinter didn't define it, we could use StrPrinter's
# version of the function and add 'logical_and' to NUMPY_TRANSLATIONS.
return '{}.reduce(({}))'.format(self._module_format('numpy.logical_and'), ','.join(self._print(i) for i in expr.args))
def _print_Or(self, expr):
"Logical Or printer"
# We have to override LambdaPrinter because it uses Python 'or' keyword.
# If LambdaPrinter didn't define it, we could use StrPrinter's
# version of the function and add 'logical_or' to NUMPY_TRANSLATIONS.
return '{}.reduce(({}))'.format(self._module_format('numpy.logical_or'), ','.join(self._print(i) for i in expr.args))
def _print_Not(self, expr):
"Logical Not printer"
# We have to override LambdaPrinter because it uses Python 'not' keyword.
# If LambdaPrinter didn't define it, we would still have to define our
# own because StrPrinter doesn't define it.
return '{}({})'.format(self._module_format('numpy.logical_not'), ','.join(self._print(i) for i in expr.args))
def _print_Pow(self, expr, rational=False):
# XXX Workaround for negative integer power error
from sympy.core.power import Pow
if expr.exp.is_integer and expr.exp.is_negative:
expr = Pow(expr.base, expr.exp.evalf(), evaluate=False)
return self._hprint_Pow(expr, rational=rational, sqrt='numpy.sqrt')
def _print_Min(self, expr):
return '{}(({}), axis=0)'.format(self._module_format('numpy.amin'), ','.join(self._print(i) for i in expr.args))
def _print_Max(self, expr):
return '{}(({}), axis=0)'.format(self._module_format('numpy.amax'), ','.join(self._print(i) for i in expr.args))
def _print_arg(self, expr):
return "%s(%s)" % (self._module_format('numpy.angle'), self._print(expr.args[0]))
def _print_im(self, expr):
return "%s(%s)" % (self._module_format('numpy.imag'), self._print(expr.args[0]))
def _print_Mod(self, expr):
return "%s(%s)" % (self._module_format('numpy.mod'), ', '.join(
map(lambda arg: self._print(arg), expr.args)))
def _print_re(self, expr):
return "%s(%s)" % (self._module_format('numpy.real'), self._print(expr.args[0]))
def _print_sinc(self, expr):
return "%s(%s)" % (self._module_format('numpy.sinc'), self._print(expr.args[0]/S.Pi))
def _print_MatrixBase(self, expr):
func = self.known_functions.get(expr.__class__.__name__, None)
if func is None:
func = self._module_format('numpy.array')
return "%s(%s)" % (func, self._print(expr.tolist()))
def _print_Identity(self, expr):
shape = expr.shape
if all([dim.is_Integer for dim in shape]):
return "%s(%s)" % (self._module_format('numpy.eye'), self._print(expr.shape[0]))
else:
raise NotImplementedError("Symbolic matrix dimensions are not yet supported for identity matrices")
def _print_BlockMatrix(self, expr):
return '{}({})'.format(self._module_format('numpy.block'),
self._print(expr.args[0].tolist()))
def _print_CodegenArrayTensorProduct(self, expr):
array_list = [j for i, arg in enumerate(expr.args) for j in
(self._print(arg), "[%i, %i]" % (2*i, 2*i+1))]
return "%s(%s)" % (self._module_format('numpy.einsum'), ", ".join(array_list))
def _print_CodegenArrayContraction(self, expr):
from sympy.codegen.array_utils import CodegenArrayTensorProduct
base = expr.expr
contraction_indices = expr.contraction_indices
if not contraction_indices:
return self._print(base)
if isinstance(base, CodegenArrayTensorProduct):
counter = 0
d = {j: min(i) for i in contraction_indices for j in i}
indices = []
for rank_arg in base.subranks:
lindices = []
for i in range(rank_arg):
if counter in d:
lindices.append(d[counter])
else:
lindices.append(counter)
counter += 1
indices.append(lindices)
elems = ["%s, %s" % (self._print(arg), ind) for arg, ind in zip(base.args, indices)]
return "%s(%s)" % (
self._module_format('numpy.einsum'),
", ".join(elems)
)
raise NotImplementedError()
def _print_CodegenArrayDiagonal(self, expr):
diagonal_indices = list(expr.diagonal_indices)
if len(diagonal_indices) > 1:
# TODO: this should be handled in sympy.codegen.array_utils,
# possibly by creating the possibility of unfolding the
# CodegenArrayDiagonal object into nested ones. Same reasoning for
# the array contraction.
raise NotImplementedError
if len(diagonal_indices[0]) != 2:
raise NotImplementedError
return "%s(%s, 0, axis1=%s, axis2=%s)" % (
self._module_format("numpy.diagonal"),
self._print(expr.expr),
diagonal_indices[0][0],
diagonal_indices[0][1],
)
def _print_CodegenArrayPermuteDims(self, expr):
return "%s(%s, %s)" % (
self._module_format("numpy.transpose"),
self._print(expr.expr),
self._print(expr.permutation.array_form),
)
def _print_CodegenArrayElementwiseAdd(self, expr):
return self._expand_fold_binary_op('numpy.add', expr.args)
_print_lowergamma = CodePrinter._print_not_supported
_print_uppergamma = CodePrinter._print_not_supported
_print_fresnelc = CodePrinter._print_not_supported
_print_fresnels = CodePrinter._print_not_supported
for k in NumPyPrinter._kf:
setattr(NumPyPrinter, '_print_%s' % k, _print_known_func)
for k in NumPyPrinter._kc:
setattr(NumPyPrinter, '_print_%s' % k, _print_known_const)
_known_functions_scipy_special = {
'erf': 'erf',
'erfc': 'erfc',
'besselj': 'jv',
'bessely': 'yv',
'besseli': 'iv',
'besselk': 'kv',
'cosm1': 'cosm1',
'factorial': 'factorial',
'gamma': 'gamma',
'loggamma': 'gammaln',
'digamma': 'psi',
'RisingFactorial': 'poch',
'jacobi': 'eval_jacobi',
'gegenbauer': 'eval_gegenbauer',
'chebyshevt': 'eval_chebyt',
'chebyshevu': 'eval_chebyu',
'legendre': 'eval_legendre',
'hermite': 'eval_hermite',
'laguerre': 'eval_laguerre',
'assoc_laguerre': 'eval_genlaguerre',
'beta': 'beta',
'LambertW' : 'lambertw',
}
_known_constants_scipy_constants = {
'GoldenRatio': 'golden_ratio',
'Pi': 'pi',
}
class SciPyPrinter(NumPyPrinter):
language = "Python with SciPy"
_kf = dict(chain(
NumPyPrinter._kf.items(),
[(k, 'scipy.special.' + v) for k, v in _known_functions_scipy_special.items()]
))
_kc =dict(chain(
NumPyPrinter._kc.items(),
[(k, 'scipy.constants.' + v) for k, v in _known_constants_scipy_constants.items()]
))
def _print_SparseMatrix(self, expr):
i, j, data = [], [], []
for (r, c), v in expr._smat.items():
i.append(r)
j.append(c)
data.append(v)
return "{name}(({data}, ({i}, {j})), shape={shape})".format(
name=self._module_format('scipy.sparse.coo_matrix'),
data=data, i=i, j=j, shape=expr.shape
)
_print_ImmutableSparseMatrix = _print_SparseMatrix
# SciPy's lpmv has a different order of arguments from assoc_legendre
def _print_assoc_legendre(self, expr):
return "{0}({2}, {1}, {3})".format(
self._module_format('scipy.special.lpmv'),
self._print(expr.args[0]),
self._print(expr.args[1]),
self._print(expr.args[2]))
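# scipy.special.gammainc/gammaincc are regularized (divided by gamma(a)), so the
# two printers below multiply by scipy.special.gamma to recover SymPy's
# unregularized lowergamma/uppergamma.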
def _print_lowergamma(self, expr):
return "{0}({2})*{1}({2}, {3})".format(
self._module_format('scipy.special.gamma'),
self._module_format('scipy.special.gammainc'),
self._print(expr.args[0]),
self._print(expr.args[1]))
def _print_uppergamma(self, expr):
return "{0}({2})*{1}({2}, {3})".format(
self._module_format('scipy.special.gamma'),
self._module_format('scipy.special.gammaincc'),
self._print(expr.args[0]),
self._print(expr.args[1]))
def _print_fresnels(self, expr):
return "{}({})[0]".format(
self._module_format("scipy.special.fresnel"),
self._print(expr.args[0]))
def _print_fresnelc(self, expr):
return "{}({})[1]".format(
self._module_format("scipy.special.fresnel"),
self._print(expr.args[0]))
def _print_airyai(self, expr):
return "{}({})[0]".format(
self._module_format("scipy.special.airy"),
self._print(expr.args[0]))
def _print_airyaiprime(self, expr):
return "{}({})[1]".format(
self._module_format("scipy.special.airy"),
self._print(expr.args[0]))
def _print_airybi(self, expr):
return "{}({})[2]".format(
self._module_format("scipy.special.airy"),
self._print(expr.args[0]))
def _print_airybiprime(self, expr):
return "{}({})[3]".format(
self._module_format("scipy.special.airy"),
self._print(expr.args[0]))
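# Illustrative example (assumed): Integral(exp(-x**2), (x, 0, 1)) prints as
# 'scipy.integrate.quad(lambda x: numpy.exp(-x**2), 0, 1)[0]'.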
def _print_Integral(self, e):
integration_vars, limits = _unpack_integral_limits(e)
if len(limits) == 1:
# nicer (but not necessary) to prefer quad over nquad for 1D case
module_str = self._module_format("scipy.integrate.quad")
limit_str = "%s, %s" % tuple(map(self._print, limits[0]))
else:
module_str = self._module_format("scipy.integrate.nquad")
limit_str = "({})".format(", ".join(
"(%s, %s)" % tuple(map(self._print, l)) for l in limits))
return "{}(lambda {}: {}, {})[0]".format(
module_str,
", ".join(map(self._print, integration_vars)),
self._print(e.args[0]),
limit_str)
for k in SciPyPrinter._kf:
setattr(SciPyPrinter, '_print_%s' % k, _print_known_func)
for k in SciPyPrinter._kc:
setattr(SciPyPrinter, '_print_%s' % k, _print_known_const)
class SymPyPrinter(AbstractPythonCodePrinter):
language = "Python with SymPy"
def _print_Function(self, expr):
mod = expr.func.__module__ or ''
return '%s(%s)' % (self._module_format(mod + ('.' if mod else '') + expr.func.__name__),
', '.join(map(lambda arg: self._print(arg), expr.args)))
def _print_Pow(self, expr, rational=False):
return self._hprint_Pow(expr, rational=rational, sqrt='sympy.sqrt')
|
d5abad2058348a36008c41c78a7dd78c4addbbcb434c56bb31355fc6ceee9b5b | """
A Printer for generating readable representation of most sympy classes.
"""
from typing import Any, Dict
from sympy.core import S, Rational, Pow, Basic, Mul, Number
from sympy.core.mul import _keep_coeff
from .printer import Printer, print_function
from sympy.printing.precedence import precedence, PRECEDENCE
from mpmath.libmp import prec_to_dps, to_str as mlib_to_str
from sympy.utilities import default_sort_key
class StrPrinter(Printer):
printmethod = "_sympystr"
_default_settings = {
"order": None,
"full_prec": "auto",
"sympy_integers": False,
"abbrev": False,
"perm_cyclic": True,
"min": None,
"max": None,
} # type: Dict[str, Any]
_relationals = dict() # type: Dict[str, str]
def parenthesize(self, item, level, strict=False):
if (precedence(item) < level) or ((not strict) and precedence(item) <= level):
return "(%s)" % self._print(item)
else:
return self._print(item)
def stringify(self, args, sep, level=0):
return sep.join([self.parenthesize(item, level) for item in args])
def emptyPrinter(self, expr):
if isinstance(expr, str):
return expr
elif isinstance(expr, Basic):
return repr(expr)
else:
return str(expr)
def _print_Add(self, expr, order=None):
terms = self._as_ordered_terms(expr, order=order)
PREC = precedence(expr)
l = []
for term in terms:
t = self._print(term)
if t.startswith('-'):
sign = "-"
t = t[1:]
else:
sign = "+"
if precedence(term) < PREC:
l.extend([sign, "(%s)" % t])
else:
l.extend([sign, t])
sign = l.pop(0)
if sign == '+':
sign = ""
return sign + ' '.join(l)
def _print_BooleanTrue(self, expr):
return "True"
def _print_BooleanFalse(self, expr):
return "False"
def _print_Not(self, expr):
return '~%s' %(self.parenthesize(expr.args[0],PRECEDENCE["Not"]))
def _print_And(self, expr):
return self.stringify(expr.args, " & ", PRECEDENCE["BitwiseAnd"])
def _print_Or(self, expr):
return self.stringify(expr.args, " | ", PRECEDENCE["BitwiseOr"])
def _print_Xor(self, expr):
return self.stringify(expr.args, " ^ ", PRECEDENCE["BitwiseXor"])
def _print_AppliedPredicate(self, expr):
return '%s(%s)' % (self._print(expr.func), self._print(expr.arg))
def _print_Basic(self, expr):
l = [self._print(o) for o in expr.args]
return expr.__class__.__name__ + "(%s)" % ", ".join(l)
def _print_BlockMatrix(self, B):
if B.blocks.shape == (1, 1):
return self._print(B.blocks[0, 0])
return self._print(B.blocks)
def _print_Catalan(self, expr):
return 'Catalan'
def _print_ComplexInfinity(self, expr):
return 'zoo'
def _print_ConditionSet(self, s):
args = tuple([self._print(i) for i in (s.sym, s.condition)])
if s.base_set is S.UniversalSet:
return 'ConditionSet(%s, %s)' % args
args += (self._print(s.base_set),)
return 'ConditionSet(%s, %s, %s)' % args
def _print_Derivative(self, expr):
dexpr = expr.expr
dvars = [i[0] if i[1] == 1 else i for i in expr.variable_count]
return 'Derivative(%s)' % ", ".join(map(lambda arg: self._print(arg), [dexpr] + dvars))
def _print_dict(self, d):
keys = sorted(d.keys(), key=default_sort_key)
items = []
for key in keys:
item = "%s: %s" % (self._print(key), self._print(d[key]))
items.append(item)
return "{%s}" % ", ".join(items)
def _print_Dict(self, expr):
return self._print_dict(expr)
def _print_RandomDomain(self, d):
if hasattr(d, 'as_boolean'):
return 'Domain: ' + self._print(d.as_boolean())
elif hasattr(d, 'set'):
return ('Domain: ' + self._print(d.symbols) + ' in ' +
self._print(d.set))
else:
return 'Domain on ' + self._print(d.symbols)
def _print_Dummy(self, expr):
return '_' + expr.name
def _print_EulerGamma(self, expr):
return 'EulerGamma'
def _print_Exp1(self, expr):
return 'E'
def _print_ExprCondPair(self, expr):
return '(%s, %s)' % (self._print(expr.expr), self._print(expr.cond))
def _print_Function(self, expr):
return expr.func.__name__ + "(%s)" % self.stringify(expr.args, ", ")
def _print_GoldenRatio(self, expr):
return 'GoldenRatio'
def _print_TribonacciConstant(self, expr):
return 'TribonacciConstant'
def _print_ImaginaryUnit(self, expr):
return 'I'
def _print_Infinity(self, expr):
return 'oo'
def _print_Integral(self, expr):
def _xab_tostr(xab):
if len(xab) == 1:
return self._print(xab[0])
else:
return self._print((xab[0],) + tuple(xab[1:]))
L = ', '.join([_xab_tostr(l) for l in expr.limits])
return 'Integral(%s, %s)' % (self._print(expr.function), L)
def _print_Interval(self, i):
fin = 'Interval{m}({a}, {b})'
a, b, l, r = i.args
if a.is_infinite and b.is_infinite:
m = ''
elif a.is_infinite and not r:
m = ''
elif b.is_infinite and not l:
m = ''
elif not l and not r:
m = ''
elif l and r:
m = '.open'
elif l:
m = '.Lopen'
else:
m = '.Ropen'
return fin.format(**{'a': a, 'b': b, 'm': m})
def _print_AccumulationBounds(self, i):
return "AccumBounds(%s, %s)" % (self._print(i.min),
self._print(i.max))
def _print_Inverse(self, I):
return "%s**(-1)" % self.parenthesize(I.arg, PRECEDENCE["Pow"])
def _print_Lambda(self, obj):
expr = obj.expr
sig = obj.signature
if len(sig) == 1 and sig[0].is_symbol:
sig = sig[0]
return "Lambda(%s, %s)" % (self._print(sig), self._print(expr))
def _print_LatticeOp(self, expr):
args = sorted(expr.args, key=default_sort_key)
return expr.func.__name__ + "(%s)" % ", ".join(self._print(arg) for arg in args)
def _print_Limit(self, expr):
e, z, z0, dir = expr.args
if str(dir) == "+":
return "Limit(%s, %s, %s)" % tuple(map(self._print, (e, z, z0)))
else:
return "Limit(%s, %s, %s, dir='%s')" % tuple(map(self._print,
(e, z, z0, dir)))
def _print_list(self, expr):
return "[%s]" % self.stringify(expr, ", ")
def _print_MatrixBase(self, expr):
return expr._format_str(self)
def _print_MatrixElement(self, expr):
return self.parenthesize(expr.parent, PRECEDENCE["Atom"], strict=True) \
+ '[%s, %s]' % (self._print(expr.i), self._print(expr.j))
def _print_MatrixSlice(self, expr):
def strslice(x, dim):
x = list(x)
if x[2] == 1:
del x[2]
if x[0] == 0:
x[0] = ''
if x[1] == dim:
x[1] = ''
return ':'.join(map(lambda arg: self._print(arg), x))
return (self.parenthesize(expr.parent, PRECEDENCE["Atom"], strict=True) + '[' +
strslice(expr.rowslice, expr.parent.rows) + ', ' +
strslice(expr.colslice, expr.parent.cols) + ']')
def _print_DeferredVector(self, expr):
return expr.name
def _print_Mul(self, expr):
prec = precedence(expr)
# Check for an unevaluated Mul. In this case we need to make sure the
# identities are visible, multiple Rational factors are not combined,
# etc., so we display it in a straightforward form that fully preserves
# all args and their order.
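# e.g. Mul(1, 2, x, evaluate=False) is printed as '1*2*x' rather than '2*x'
# (illustrative example, not from the original source).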
args = expr.args
if args[0] is S.One or any(isinstance(arg, Number) for arg in args[1:]):
factors = [self.parenthesize(a, prec, strict=False) for a in args]
return '*'.join(factors)
c, e = expr.as_coeff_Mul()
if c < 0:
expr = _keep_coeff(-c, e)
sign = "-"
else:
sign = ""
a = [] # items in the numerator
b = [] # items that are in the denominator (if any)
pow_paren = [] # Will collect all pow with more than one base element and exp = -1
if self.order not in ('old', 'none'):
args = expr.as_ordered_factors()
else:
# use make_args in case expr was something like -x -> x
args = Mul.make_args(expr)
# Gather args for numerator/denominator
for item in args:
if item.is_commutative and item.is_Pow and item.exp.is_Rational and item.exp.is_negative:
if item.exp != -1:
b.append(Pow(item.base, -item.exp, evaluate=False))
else:
if len(item.args[0].args) != 1 and isinstance(item.base, Mul): # To avoid situations like #14160
pow_paren.append(item)
b.append(Pow(item.base, -item.exp))
elif item.is_Rational and item is not S.Infinity:
if item.p != 1:
a.append(Rational(item.p))
if item.q != 1:
b.append(Rational(item.q))
else:
a.append(item)
a = a or [S.One]
a_str = [self.parenthesize(x, prec, strict=False) for x in a]
b_str = [self.parenthesize(x, prec, strict=False) for x in b]
# To parenthesize Pow with exp = -1 and having more than one Symbol
for item in pow_paren:
if item.base in b:
b_str[b.index(item.base)] = "(%s)" % b_str[b.index(item.base)]
if not b:
return sign + '*'.join(a_str)
elif len(b) == 1:
return sign + '*'.join(a_str) + "/" + b_str[0]
else:
return sign + '*'.join(a_str) + "/(%s)" % '*'.join(b_str)
def _print_MatMul(self, expr):
c, m = expr.as_coeff_mmul()
sign = ""
if c.is_number:
re, im = c.as_real_imag()
if im.is_zero and re.is_negative:
expr = _keep_coeff(-c, m)
sign = "-"
elif re.is_zero and im.is_negative:
expr = _keep_coeff(-c, m)
sign = "-"
return sign + '*'.join(
[self.parenthesize(arg, precedence(expr)) for arg in expr.args]
)
def _print_ElementwiseApplyFunction(self, expr):
return "{}.({})".format(
expr.function,
self._print(expr.expr),
)
def _print_NaN(self, expr):
return 'nan'
def _print_NegativeInfinity(self, expr):
return '-oo'
def _print_Order(self, expr):
if not expr.variables or all(p is S.Zero for p in expr.point):
if len(expr.variables) <= 1:
return 'O(%s)' % self._print(expr.expr)
else:
return 'O(%s)' % self.stringify((expr.expr,) + expr.variables, ', ', 0)
else:
return 'O(%s)' % self.stringify(expr.args, ', ', 0)
def _print_Ordinal(self, expr):
return expr.__str__()
def _print_Cycle(self, expr):
return expr.__str__()
def _print_Permutation(self, expr):
from sympy.combinatorics.permutations import Permutation, Cycle
from sympy.utilities.exceptions import SymPyDeprecationWarning
perm_cyclic = Permutation.print_cyclic
if perm_cyclic is not None:
SymPyDeprecationWarning(
feature="Permutation.print_cyclic = {}".format(perm_cyclic),
useinstead="init_printing(perm_cyclic={})"
.format(perm_cyclic),
issue=15201,
deprecated_since_version="1.6").warn()
else:
perm_cyclic = self._settings.get("perm_cyclic", True)
if perm_cyclic:
if not expr.size:
return '()'
# before taking Cycle notation, see if the last element is
# a singleton and move it to the head of the string
s = Cycle(expr)(expr.size - 1).__repr__()[len('Cycle'):]
last = s.rfind('(')
if not last == 0 and ',' not in s[last:]:
s = s[last:] + s[:last]
s = s.replace(',', '')
return s
else:
s = expr.support()
if not s:
if expr.size < 5:
return 'Permutation(%s)' % self._print(expr.array_form)
return 'Permutation([], size=%s)' % self._print(expr.size)
trim = self._print(expr.array_form[:s[-1] + 1]) + ', size=%s' % self._print(expr.size)
use = full = self._print(expr.array_form)
if len(trim) < len(full):
use = trim
return 'Permutation(%s)' % use
def _print_Subs(self, obj):
expr, old, new = obj.args
if len(obj.point) == 1:
old = old[0]
new = new[0]
return "Subs(%s, %s, %s)" % (
self._print(expr), self._print(old), self._print(new))
def _print_TensorIndex(self, expr):
return expr._print()
def _print_TensorHead(self, expr):
return expr._print()
def _print_Tensor(self, expr):
return expr._print()
def _print_TensMul(self, expr):
# prints expressions like "A(a)", "3*A(a)", "(1+x)*A(a)"
sign, args = expr._get_args_for_traditional_printer()
return sign + "*".join(
[self.parenthesize(arg, precedence(expr)) for arg in args]
)
def _print_TensAdd(self, expr):
return expr._print()
def _print_PermutationGroup(self, expr):
p = [' %s' % self._print(a) for a in expr.args]
return 'PermutationGroup([\n%s])' % ',\n'.join(p)
def _print_Pi(self, expr):
return 'pi'
def _print_PolyRing(self, ring):
return "Polynomial ring in %s over %s with %s order" % \
(", ".join(map(lambda rs: self._print(rs), ring.symbols)),
self._print(ring.domain), self._print(ring.order))
def _print_FracField(self, field):
return "Rational function field in %s over %s with %s order" % \
(", ".join(map(lambda fs: self._print(fs), field.symbols)),
self._print(field.domain), self._print(field.order))
def _print_FreeGroupElement(self, elm):
return elm.__str__()
def _print_GaussianElement(self, poly):
return "(%s + %s*I)" % (poly.x, poly.y)
def _print_PolyElement(self, poly):
return poly.str(self, PRECEDENCE, "%s**%s", "*")
def _print_FracElement(self, frac):
if frac.denom == 1:
return self._print(frac.numer)
else:
numer = self.parenthesize(frac.numer, PRECEDENCE["Mul"], strict=True)
denom = self.parenthesize(frac.denom, PRECEDENCE["Atom"], strict=True)
return numer + "/" + denom
def _print_Poly(self, expr):
ATOM_PREC = PRECEDENCE["Atom"] - 1
terms, gens = [], [ self.parenthesize(s, ATOM_PREC) for s in expr.gens ]
for monom, coeff in expr.terms():
s_monom = []
for i, exp in enumerate(monom):
if exp > 0:
if exp == 1:
s_monom.append(gens[i])
else:
s_monom.append(gens[i] + "**%d" % exp)
s_monom = "*".join(s_monom)
if coeff.is_Add:
if s_monom:
s_coeff = "(" + self._print(coeff) + ")"
else:
s_coeff = self._print(coeff)
else:
if s_monom:
if coeff is S.One:
terms.extend(['+', s_monom])
continue
if coeff is S.NegativeOne:
terms.extend(['-', s_monom])
continue
s_coeff = self._print(coeff)
if not s_monom:
s_term = s_coeff
else:
s_term = s_coeff + "*" + s_monom
if s_term.startswith('-'):
terms.extend(['-', s_term[1:]])
else:
terms.extend(['+', s_term])
if terms[0] in ['-', '+']:
modifier = terms.pop(0)
if modifier == '-':
terms[0] = '-' + terms[0]
format = expr.__class__.__name__ + "(%s, %s"
from sympy.polys.polyerrors import PolynomialError
try:
format += ", modulus=%s" % expr.get_modulus()
except PolynomialError:
format += ", domain='%s'" % expr.get_domain()
format += ")"
for index, item in enumerate(gens):
if len(item) > 2 and (item[:1] == "(" and item[len(item) - 1:] == ")"):
gens[index] = item[1:len(item) - 1]
return format % (' '.join(terms), ', '.join(gens))
def _print_UniversalSet(self, p):
return 'UniversalSet'
def _print_AlgebraicNumber(self, expr):
if expr.is_aliased:
return self._print(expr.as_poly().as_expr())
else:
return self._print(expr.as_expr())
def _print_Pow(self, expr, rational=False):
"""Printing helper function for ``Pow``
Parameters
==========
rational : bool, optional
If ``True``, it will not attempt printing ``sqrt(x)`` or
``x**S.Half`` as ``sqrt``, and will use ``x**(1/2)``
instead.
See examples for additional details
Examples
========
>>> from sympy.functions import sqrt
>>> from sympy.printing.str import StrPrinter
>>> from sympy.abc import x
How ``rational`` keyword works with ``sqrt``:
>>> printer = StrPrinter()
>>> printer._print_Pow(sqrt(x), rational=True)
'x**(1/2)'
>>> printer._print_Pow(sqrt(x), rational=False)
'sqrt(x)'
>>> printer._print_Pow(1/sqrt(x), rational=True)
'x**(-1/2)'
>>> printer._print_Pow(1/sqrt(x), rational=False)
'1/sqrt(x)'
Notes
=====
``sqrt(x)`` is canonicalized as ``Pow(x, S.Half)`` in SymPy,
so there is no need to define a separate printer for ``sqrt``.
Instead, it should be handled here as well.
"""
PREC = precedence(expr)
if expr.exp is S.Half and not rational:
return "sqrt(%s)" % self._print(expr.base)
if expr.is_commutative:
if -expr.exp is S.Half and not rational:
# Note: Don't test "expr.exp == -S.Half" here, because that will
# match -0.5, which we don't want.
return "%s/sqrt(%s)" % tuple(map(lambda arg: self._print(arg), (S.One, expr.base)))
if expr.exp is -S.One:
# Similarly to the S.Half case, don't test with "==" here.
return '%s/%s' % (self._print(S.One),
self.parenthesize(expr.base, PREC, strict=False))
e = self.parenthesize(expr.exp, PREC, strict=False)
if self.printmethod == '_sympyrepr' and expr.exp.is_Rational and expr.exp.q != 1:
# the parenthesized exp should be '(Rational(a, b))' so strip parens,
# but just check to be sure.
if e.startswith('(Rational'):
return '%s**%s' % (self.parenthesize(expr.base, PREC, strict=False), e[1:-1])
return '%s**%s' % (self.parenthesize(expr.base, PREC, strict=False), e)
def _print_UnevaluatedExpr(self, expr):
return self._print(expr.args[0])
def _print_MatPow(self, expr):
PREC = precedence(expr)
return '%s**%s' % (self.parenthesize(expr.base, PREC, strict=False),
self.parenthesize(expr.exp, PREC, strict=False))
def _print_Integer(self, expr):
if self._settings.get("sympy_integers", False):
return "S(%s)" % (expr)
return str(expr.p)
def _print_Integers(self, expr):
return 'Integers'
def _print_Naturals(self, expr):
return 'Naturals'
def _print_Naturals0(self, expr):
return 'Naturals0'
def _print_Rationals(self, expr):
return 'Rationals'
def _print_Reals(self, expr):
return 'Reals'
def _print_Complexes(self, expr):
return 'Complexes'
def _print_EmptySet(self, expr):
return 'EmptySet'
def _print_EmptySequence(self, expr):
return 'EmptySequence'
def _print_int(self, expr):
return str(expr)
def _print_mpz(self, expr):
return str(expr)
def _print_Rational(self, expr):
if expr.q == 1:
return str(expr.p)
else:
if self._settings.get("sympy_integers", False):
return "S(%s)/%s" % (expr.p, expr.q)
return "%s/%s" % (expr.p, expr.q)
def _print_PythonRational(self, expr):
if expr.q == 1:
return str(expr.p)
else:
return "%d/%d" % (expr.p, expr.q)
def _print_Fraction(self, expr):
if expr.denominator == 1:
return str(expr.numerator)
else:
return "%s/%s" % (expr.numerator, expr.denominator)
def _print_mpq(self, expr):
if expr.denominator == 1:
return str(expr.numerator)
else:
return "%s/%s" % (expr.numerator, expr.denominator)
def _print_Float(self, expr):
prec = expr._prec
if prec < 5:
dps = 0
else:
dps = prec_to_dps(expr._prec)
if self._settings["full_prec"] is True:
strip = False
elif self._settings["full_prec"] is False:
strip = True
elif self._settings["full_prec"] == "auto":
strip = self._print_level > 1
low = self._settings["min"] if "min" in self._settings else None
high = self._settings["max"] if "max" in self._settings else None
rv = mlib_to_str(expr._mpf_, dps, strip_zeros=strip, min_fixed=low, max_fixed=high)
if rv.startswith('-.0'):
rv = '-0.' + rv[3:]
elif rv.startswith('.0'):
rv = '0.' + rv[2:]
if rv.startswith('+'):
# e.g., +inf -> inf
rv = rv[1:]
return rv
def _print_Relational(self, expr):
charmap = {
"==": "Eq",
"!=": "Ne",
":=": "Assignment",
'+=': "AddAugmentedAssignment",
"-=": "SubAugmentedAssignment",
"*=": "MulAugmentedAssignment",
"/=": "DivAugmentedAssignment",
"%=": "ModAugmentedAssignment",
}
if expr.rel_op in charmap:
return '%s(%s, %s)' % (charmap[expr.rel_op], self._print(expr.lhs),
self._print(expr.rhs))
return '%s %s %s' % (self.parenthesize(expr.lhs, precedence(expr)),
self._relationals.get(expr.rel_op) or expr.rel_op,
self.parenthesize(expr.rhs, precedence(expr)))
def _print_ComplexRootOf(self, expr):
return "CRootOf(%s, %d)" % (self._print_Add(expr.expr, order='lex'),
expr.index)
def _print_RootSum(self, expr):
args = [self._print_Add(expr.expr, order='lex')]
if expr.fun is not S.IdentityFunction:
args.append(self._print(expr.fun))
return "RootSum(%s)" % ", ".join(args)
def _print_GroebnerBasis(self, basis):
cls = basis.__class__.__name__
exprs = [self._print_Add(arg, order=basis.order) for arg in basis.exprs]
exprs = "[%s]" % ", ".join(exprs)
gens = [ self._print(gen) for gen in basis.gens ]
domain = "domain='%s'" % self._print(basis.domain)
order = "order='%s'" % self._print(basis.order)
args = [exprs] + gens + [domain, order]
return "%s(%s)" % (cls, ", ".join(args))
def _print_set(self, s):
items = sorted(s, key=default_sort_key)
args = ', '.join(self._print(item) for item in items)
if not args:
return "set()"
return '{%s}' % args
def _print_frozenset(self, s):
if not s:
return "frozenset()"
return "frozenset(%s)" % self._print_set(s)
def _print_Sum(self, expr):
def _xab_tostr(xab):
if len(xab) == 1:
return self._print(xab[0])
else:
return self._print((xab[0],) + tuple(xab[1:]))
L = ', '.join([_xab_tostr(l) for l in expr.limits])
return 'Sum(%s, %s)' % (self._print(expr.function), L)
def _print_Symbol(self, expr):
return expr.name
_print_MatrixSymbol = _print_Symbol
_print_RandomSymbol = _print_Symbol
def _print_Identity(self, expr):
return "I"
def _print_ZeroMatrix(self, expr):
return "0"
def _print_OneMatrix(self, expr):
return "1"
def _print_Predicate(self, expr):
return "Q.%s" % expr.name
def _print_str(self, expr):
return str(expr)
def _print_tuple(self, expr):
if len(expr) == 1:
return "(%s,)" % self._print(expr[0])
else:
return "(%s)" % self.stringify(expr, ", ")
def _print_Tuple(self, expr):
return self._print_tuple(expr)
def _print_Transpose(self, T):
return "%s.T" % self.parenthesize(T.arg, PRECEDENCE["Pow"])
def _print_Uniform(self, expr):
return "Uniform(%s, %s)" % (self._print(expr.a), self._print(expr.b))
def _print_Quantity(self, expr):
if self._settings.get("abbrev", False):
return "%s" % expr.abbrev
return "%s" % expr.name
def _print_Quaternion(self, expr):
s = [self.parenthesize(i, PRECEDENCE["Mul"], strict=True) for i in expr.args]
a = [s[0]] + [i+"*"+j for i, j in zip(s[1:], "ijk")]
return " + ".join(a)
def _print_Dimension(self, expr):
return str(expr)
def _print_Wild(self, expr):
return expr.name + '_'
def _print_WildFunction(self, expr):
return expr.name + '_'
def _print_Zero(self, expr):
if self._settings.get("sympy_integers", False):
return "S(0)"
return "0"
def _print_DMP(self, p):
from sympy.core.sympify import SympifyError
try:
if p.ring is not None:
# TODO incorporate order
return self._print(p.ring.to_sympy(p))
except SympifyError:
pass
cls = p.__class__.__name__
rep = self._print(p.rep)
dom = self._print(p.dom)
ring = self._print(p.ring)
return "%s(%s, %s, %s)" % (cls, rep, dom, ring)
def _print_DMF(self, expr):
return self._print_DMP(expr)
def _print_Object(self, obj):
return 'Object("%s")' % obj.name
def _print_IdentityMorphism(self, morphism):
return 'IdentityMorphism(%s)' % morphism.domain
def _print_NamedMorphism(self, morphism):
return 'NamedMorphism(%s, %s, "%s")' % \
(morphism.domain, morphism.codomain, morphism.name)
def _print_Category(self, category):
return 'Category("%s")' % category.name
def _print_Manifold(self, manifold):
return manifold.name.name
def _print_Patch(self, patch):
return patch.name.name
def _print_CoordSystem(self, coords):
return coords.name.name
def _print_BaseScalarField(self, field):
return field._coord_sys.symbols[field._index].name
def _print_BaseVectorField(self, field):
return 'e_%s' % field._coord_sys.symbols[field._index].name
def _print_Differential(self, diff):
field = diff._form_field
if hasattr(field, '_coord_sys'):
return 'd%s' % field._coord_sys.symbols[field._index].name
else:
return 'd(%s)' % self._print(field)
def _print_Tr(self, expr):
#TODO : Handle indices
return "%s(%s)" % ("Tr", self._print(expr.args[0]))
def _print_Str(self, s):
return self._print(s.name)
@print_function(StrPrinter)
def sstr(expr, **settings):
"""Returns the expression as a string.
For large expressions where speed is a concern, use the setting
order='none'. If the abbrev=True setting is used, units are printed in
abbreviated form.
Examples
========
>>> from sympy import symbols, Eq, sstr
>>> a, b = symbols('a b')
>>> sstr(Eq(a + b, 0))
'Eq(a + b, 0)'
"""
p = StrPrinter(settings)
s = p.doprint(expr)
return s
class StrReprPrinter(StrPrinter):
"""(internal) -- see sstrrepr"""
def _print_str(self, s):
return repr(s)
def _print_Str(self, s):
# Str is not printed the same way as str here
return "%s(%s)" % (s.__class__.__name__, self._print(s.name))
@print_function(StrReprPrinter)
def sstrrepr(expr, **settings):
"""return expr in mixed str/repr form
i.e. strings are returned in repr form with quotes, and everything else
is returned in str form.
This function could be useful for hooking into sys.displayhook
"""
p = StrReprPrinter(settings)
s = p.doprint(expr)
return s
|
1b1105d5621f94f3528653ab65cf0065c87623a1b10e56e92261ea5fad7f3f9a | def pprint_nodes(subtrees):
"""
Prettyprints systems of nodes.
Examples
========
>>> from sympy.printing.tree import pprint_nodes
>>> print(pprint_nodes(["a", "b1\\nb2", "c"]))
+-a
+-b1
| b2
+-c
"""
def indent(s, type=1):
x = s.split("\n")
r = "+-%s\n" % x[0]
for a in x[1:]:
if a == "":
continue
if type == 1:
r += "| %s\n" % a
else:
r += " %s\n" % a
return r
if not subtrees:
return ""
f = ""
for a in subtrees[:-1]:
f += indent(a)
f += indent(subtrees[-1], 2)
return f
def print_node(node, assumptions=True):
"""
Returns information about the "node".
This includes class name, string representation and assumptions.
Parameters
==========
assumptions : bool, optional
See the ``assumptions`` keyword in ``tree``
"""
s = "%s: %s\n" % (node.__class__.__name__, str(node))
if assumptions:
d = node._assumptions
else:
d = None
if d:
for a in sorted(d):
v = d[a]
if v is None:
continue
s += "%s: %s\n" % (a, v)
return s
def tree(node, assumptions=True):
"""
Returns a tree representation of "node" as a string.
It uses print_node() together with pprint_nodes() on node.args recursively.
Parameters
==========
assumptions : bool, optional
The flag to decide whether to print out all the assumption data
(such as ``is_integer``, ``is_real``) associated with the
expression or not.
Enabling the flag makes the result verbose, and the printed
result may not be deterministic because of the randomness used
in backtracking the assumptions.
See Also
========
print_tree
"""
subtrees = []
for arg in node.args:
subtrees.append(tree(arg, assumptions=assumptions))
s = print_node(node, assumptions=assumptions) + pprint_nodes(subtrees)
return s
def print_tree(node, assumptions=True):
"""
Prints a tree representation of "node".
Parameters
==========
assumptions : bool, optional
The flag to decide whether to print out all the assumption data
(such as ``is_integer``, ``is_real``) associated with the
expression or not.
Enabling the flag makes the result verbose, and the printed
result may not be deterministic because of the randomness used
in backtracking the assumptions.
Examples
========
>>> from sympy.printing import print_tree
>>> from sympy import Symbol
>>> x = Symbol('x', odd=True)
>>> y = Symbol('y', even=True)
Printing with full assumptions information:
>>> print_tree(y**x)
Pow: y**x
+-Symbol: y
| algebraic: True
| commutative: True
| complex: True
| even: True
| extended_real: True
| finite: True
| hermitian: True
| imaginary: False
| infinite: False
| integer: True
| irrational: False
| noninteger: False
| odd: False
| rational: True
| real: True
| transcendental: False
+-Symbol: x
algebraic: True
commutative: True
complex: True
even: False
extended_nonzero: True
extended_real: True
finite: True
hermitian: True
imaginary: False
infinite: False
integer: True
irrational: False
noninteger: False
nonzero: True
odd: True
rational: True
real: True
transcendental: False
zero: False
Hiding the assumptions:
>>> print_tree(y**x, assumptions=False)
Pow: y**x
+-Symbol: y
+-Symbol: x
See Also
========
tree
"""
print(tree(node, assumptions=assumptions))
|
7667356eadb26d9f12aa8ea8875ab411fa95b54016d741f82bf7a12e2827b5a0 | """
Rust code printer
The `RustCodePrinter` converts SymPy expressions into Rust expressions.
A complete code generator, which uses `rust_code` extensively, can be found
in `sympy.utilities.codegen`. The `codegen` module can be used to generate
complete source code files.
"""
# Possible Improvement
#
# * make sure we follow Rust Style Guidelines_
# * make use of pattern matching
# * better support for references
# * generate generic code and use trait to make sure they have specific methods
# * use crates_ to get more math support
# - num_
# + BigInt_, BigUint_
# + Complex_
# + Rational64_, Rational32_, BigRational_
#
# .. _crates: https://crates.io/
# .. _Guidelines: https://github.com/rust-lang/rust/tree/master/src/doc/style
# .. _num: http://rust-num.github.io/num/num/
# .. _BigInt: http://rust-num.github.io/num/num/bigint/struct.BigInt.html
# .. _BigUint: http://rust-num.github.io/num/num/bigint/struct.BigUint.html
# .. _Complex: http://rust-num.github.io/num/num/complex/struct.Complex.html
# .. _Rational32: http://rust-num.github.io/num/num/rational/type.Rational32.html
# .. _Rational64: http://rust-num.github.io/num/num/rational/type.Rational64.html
# .. _BigRational: http://rust-num.github.io/num/num/rational/type.BigRational.html
from typing import Any, Dict
from sympy.core import S, Rational, Float, Lambda
from sympy.printing.codeprinter import CodePrinter
# Rust's methods for integer and float can be found here:
#
# * `Rust - Primitive Type f64 <https://doc.rust-lang.org/std/primitive.f64.html>`_
# * `Rust - Primitive Type i64 <https://doc.rust-lang.org/std/primitive.i64.html>`_
#
# Function Style :
#
# 1. args[0].func(args[1:]), method with arguments
# 2. args[0].func(), method without arguments
# 3. args[1].func(), method without arguments (e.g. (e, x) => x.exp())
# 4. func(args), function with arguments
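# Illustrative examples of the styles above (assumed): sin(x) -> 'x.sin()',
# atan2(y, x) -> 'y.atan2(x)', x**3 -> 'x.powi(3)', x**y -> 'x.powf(y)'.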
# dictionary mapping sympy function to (argument_conditions, Rust_function).
# Used in RustCodePrinter._print_Function(self)
# f64 method in Rust
known_functions = {
# "": "is_nan",
# "": "is_infinite",
# "": "is_finite",
# "": "is_normal",
# "": "classify",
"floor": "floor",
"ceiling": "ceil",
# "": "round",
# "": "trunc",
# "": "fract",
"Abs": "abs",
"sign": "signum",
# "": "is_sign_positive",
# "": "is_sign_negative",
# "": "mul_add",
"Pow": [(lambda base, exp: exp == -S.One, "recip", 2), # 1.0/x
(lambda base, exp: exp == S.Half, "sqrt", 2), # x ** 0.5
(lambda base, exp: exp == -S.Half, "sqrt().recip", 2), # 1/(x ** 0.5)
(lambda base, exp: exp == Rational(1, 3), "cbrt", 2), # x ** (1/3)
(lambda base, exp: base == S.One*2, "exp2", 3), # 2 ** x
(lambda base, exp: exp.is_integer, "powi", 1), # x ** y, for i32
(lambda base, exp: not exp.is_integer, "powf", 1)], # x ** y, for f64
"exp": [(lambda exp: True, "exp", 2)], # e ** x
"log": "ln",
# "": "log", # number.log(base)
# "": "log2",
# "": "log10",
# "": "to_degrees",
# "": "to_radians",
"Max": "max",
"Min": "min",
# "": "hypot", # (x**2 + y**2) ** 0.5
"sin": "sin",
"cos": "cos",
"tan": "tan",
"asin": "asin",
"acos": "acos",
"atan": "atan",
"atan2": "atan2",
# "": "sin_cos",
# "": "exp_m1", # e ** x - 1
# "": "ln_1p", # ln(1 + x)
"sinh": "sinh",
"cosh": "cosh",
"tanh": "tanh",
"asinh": "asinh",
"acosh": "acosh",
"atanh": "atanh",
}
# i64 method in Rust
# known_functions_i64 = {
# "": "min_value",
# "": "max_value",
# "": "from_str_radix",
# "": "count_ones",
# "": "count_zeros",
# "": "leading_zeros",
# "": "trailing_zeros",
# "": "rotate_left",
# "": "rotate_right",
# "": "swap_bytes",
# "": "from_be",
# "": "from_le",
# "": "to_be", # to big endian
# "": "to_le", # to little endian
# "": "checked_add",
# "": "checked_sub",
# "": "checked_mul",
# "": "checked_div",
# "": "checked_rem",
# "": "checked_neg",
# "": "checked_shl",
# "": "checked_shr",
# "": "checked_abs",
# "": "saturating_add",
# "": "saturating_sub",
# "": "saturating_mul",
# "": "wrapping_add",
# "": "wrapping_sub",
# "": "wrapping_mul",
# "": "wrapping_div",
# "": "wrapping_rem",
# "": "wrapping_neg",
# "": "wrapping_shl",
# "": "wrapping_shr",
# "": "wrapping_abs",
# "": "overflowing_add",
# "": "overflowing_sub",
# "": "overflowing_mul",
# "": "overflowing_div",
# "": "overflowing_rem",
# "": "overflowing_neg",
# "": "overflowing_shl",
# "": "overflowing_shr",
# "": "overflowing_abs",
# "Pow": "pow",
# "Abs": "abs",
# "sign": "signum",
# "": "is_positive",
# "": "is_negnative",
# }
# These are the core reserved words in the Rust language. Taken from:
# http://doc.rust-lang.org/grammar.html#keywords
reserved_words = ['abstract',
'alignof',
'as',
'become',
'box',
'break',
'const',
'continue',
'crate',
'do',
'else',
'enum',
'extern',
'false',
'final',
'fn',
'for',
'if',
'impl',
'in',
'let',
'loop',
'macro',
'match',
'mod',
'move',
'mut',
'offsetof',
'override',
'priv',
'proc',
'pub',
'pure',
'ref',
'return',
'Self',
'self',
'sizeof',
'static',
'struct',
'super',
'trait',
'true',
'type',
'typeof',
'unsafe',
'unsized',
'use',
'virtual',
'where',
'while',
'yield']
class RustCodePrinter(CodePrinter):
"""A printer to convert python expressions to strings of Rust code"""
printmethod = "_rust_code"
language = "Rust"
_default_settings = {
'order': None,
'full_prec': 'auto',
'precision': 17,
'user_functions': {},
'human': True,
'contract': True,
'dereference': set(),
'error_on_reserved': False,
'reserved_word_suffix': '_',
'inline': False,
} # type: Dict[str, Any]
def __init__(self, settings={}):
CodePrinter.__init__(self, settings)
self.known_functions = dict(known_functions)
userfuncs = settings.get('user_functions', {})
self.known_functions.update(userfuncs)
self._dereference = set(settings.get('dereference', []))
self.reserved_words = set(reserved_words)
def _rate_index_position(self, p):
return p*5
def _get_statement(self, codestring):
return "%s;" % codestring
def _get_comment(self, text):
return "// %s" % text
def _declare_number_const(self, name, value):
return "const %s: f64 = %s;" % (name, value)
def _format_code(self, lines):
return self.indent_code(lines)
def _traverse_matrix_indices(self, mat):
rows, cols = mat.shape
return ((i, j) for i in range(rows) for j in range(cols))
def _get_loop_opening_ending(self, indices):
open_lines = []
close_lines = []
loopstart = "for %(var)s in %(start)s..%(end)s {"
for i in indices:
# Rust arrays start at 0 and end at dimension-1
open_lines.append(loopstart % {
'var': self._print(i),
'start': self._print(i.lower),
'end': self._print(i.upper + 1)})
close_lines.append("}")
return open_lines, close_lines
def _print_caller_var(self, expr):
if len(expr.args) > 1:
# for something like `sin(x + y + z)`,
# make sure we can get '(x + y + z).sin()'
# instead of 'x + y + z.sin()'
return '(' + self._print(expr) + ')'
elif expr.is_number:
return self._print(expr, _type=True)
else:
return self._print(expr)
def _print_Function(self, expr):
"""
basic function for printing `Function`
Function Style :
1. args[0].func(args[1:]), method with arguments
2. args[0].func(), method without arguments
3. args[1].func(), method without arguments (e.g. (e, x) => x.exp())
4. func(args), function with arguments
"""
if expr.func.__name__ in self.known_functions:
cond_func = self.known_functions[expr.func.__name__]
func = None
style = 1
if isinstance(cond_func, str):
func = cond_func
else:
for cond, func, style in cond_func:
if cond(*expr.args):
break
if func is not None:
if style == 1:
ret = "%(var)s.%(method)s(%(args)s)" % {
'var': self._print_caller_var(expr.args[0]),
'method': func,
'args': self.stringify(expr.args[1:], ", ") if len(expr.args) > 1 else ''
}
elif style == 2:
ret = "%(var)s.%(method)s()" % {
'var': self._print_caller_var(expr.args[0]),
'method': func,
}
elif style == 3:
ret = "%(var)s.%(method)s()" % {
'var': self._print_caller_var(expr.args[1]),
'method': func,
}
else:
ret = "%(func)s(%(args)s)" % {
'func': func,
'args': self.stringify(expr.args, ", "),
}
return ret
elif hasattr(expr, '_imp_') and isinstance(expr._imp_, Lambda):
# inlined function
return self._print(expr._imp_(*expr.args))
else:
return self._print_not_supported(expr)
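    # Illustrative dispatch of the four styles above (a sketch based on the
    # module-level known_functions table; outputs assume f64 symbols):
    #   style 1: atan2(y, x) -> "y.atan2(x)"   (caller plus remaining args)
    #   style 2: sin(x)      -> "x.sin()"      (caller, no extra args)
    #   style 3: 2**x        -> "x.exp2()"     (second arg acts as the caller)
    #   style 4: a user mapping such as ("fabs", 4) renders as "fabs(x)" (plain call)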
def _print_Pow(self, expr):
if expr.base.is_integer and not expr.exp.is_integer:
expr = type(expr)(Float(expr.base), expr.exp)
return self._print(expr)
return self._print_Function(expr)
def _print_Float(self, expr, _type=False):
ret = super()._print_Float(expr)
if _type:
return ret + '_f64'
else:
return ret
def _print_Integer(self, expr, _type=False):
ret = super()._print_Integer(expr)
if _type:
return ret + '_i32'
else:
return ret
def _print_Rational(self, expr):
p, q = int(expr.p), int(expr.q)
return '%d_f64/%d.0' % (p, q)
def _print_Relational(self, expr):
lhs_code = self._print(expr.lhs)
rhs_code = self._print(expr.rhs)
op = expr.rel_op
return "{} {} {}".format(lhs_code, op, rhs_code)
def _print_Indexed(self, expr):
# calculate index for 1d array
dims = expr.shape
elem = S.Zero
offset = S.One
for i in reversed(range(expr.rank)):
elem += expr.indices[i]*offset
offset *= dims[i]
return "%s[%s]" % (self._print(expr.base.label), self._print(elem))
def _print_Idx(self, expr):
return expr.label.name
def _print_Dummy(self, expr):
return expr.name
def _print_Exp1(self, expr, _type=False):
return "E"
def _print_Pi(self, expr, _type=False):
return 'PI'
def _print_Infinity(self, expr, _type=False):
return 'INFINITY'
def _print_NegativeInfinity(self, expr, _type=False):
return 'NEG_INFINITY'
def _print_BooleanTrue(self, expr, _type=False):
return "true"
def _print_BooleanFalse(self, expr, _type=False):
return "false"
def _print_bool(self, expr, _type=False):
return str(expr).lower()
def _print_NaN(self, expr, _type=False):
return "NAN"
def _print_Piecewise(self, expr):
if expr.args[-1].cond != True:
# We need the last conditional to be a True, otherwise the resulting
# function may not return a result.
raise ValueError("All Piecewise expressions must contain an "
"(expr, True) statement to be used as a default "
"condition. Without one, the generated "
"expression may not evaluate to anything under "
"some condition.")
lines = []
for i, (e, c) in enumerate(expr.args):
if i == 0:
lines.append("if (%s) {" % self._print(c))
elif i == len(expr.args) - 1 and c == True:
lines[-1] += " else {"
else:
lines[-1] += " else if (%s) {" % self._print(c)
code0 = self._print(e)
lines.append(code0)
lines.append("}")
if self._settings['inline']:
return " ".join(lines)
else:
return "\n".join(lines)
def _print_ITE(self, expr):
from sympy.functions import Piecewise
_piecewise = Piecewise((expr.args[1], expr.args[0]), (expr.args[2], True))
return self._print(_piecewise)
def _print_MatrixBase(self, A):
if A.cols == 1:
return "[%s]" % ", ".join(self._print(a) for a in A)
else:
raise ValueError("Full Matrix Support in Rust need Crates (https://crates.io/keywords/matrix).")
def _print_SparseMatrix(self, mat):
# do not allow sparse matrices to be made dense
return self._print_not_supported(mat)
def _print_MatrixElement(self, expr):
return "%s[%s]" % (expr.parent,
expr.j + expr.i*expr.parent.shape[1])
def _print_Symbol(self, expr):
name = super()._print_Symbol(expr)
if expr in self._dereference:
return '(*%s)' % name
else:
return name
def _print_Assignment(self, expr):
from sympy.tensor.indexed import IndexedBase
lhs = expr.lhs
rhs = expr.rhs
if self._settings["contract"] and (lhs.has(IndexedBase) or
rhs.has(IndexedBase)):
# Here we check if there is looping to be done, and if so
# print the required loops.
return self._doprint_loops(rhs, lhs)
else:
lhs_code = self._print(lhs)
rhs_code = self._print(rhs)
return self._get_statement("%s = %s" % (lhs_code, rhs_code))
def indent_code(self, code):
"""Accepts a string of code or a list of code lines"""
if isinstance(code, str):
code_lines = self.indent_code(code.splitlines(True))
return ''.join(code_lines)
tab = " "
inc_token = ('{', '(', '{\n', '(\n')
dec_token = ('}', ')')
code = [ line.lstrip(' \t') for line in code ]
increase = [ int(any(map(line.endswith, inc_token))) for line in code ]
decrease = [ int(any(map(line.startswith, dec_token)))
for line in code ]
pretty = []
level = 0
for n, line in enumerate(code):
if line == '' or line == '\n':
pretty.append(line)
continue
level -= decrease[n]
pretty.append("%s%s" % (tab*level, line))
level += increase[n]
return pretty
def rust_code(expr, assign_to=None, **settings):
"""Converts an expr to a string of Rust code
Parameters
==========
expr : Expr
A sympy expression to be converted.
assign_to : optional
When given, the argument is used as the name of the variable to which
the expression is assigned. Can be a string, ``Symbol``,
``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of
line-wrapping, or for expressions that generate multi-line statements.
precision : integer, optional
The precision for numbers such as pi [default=15].
user_functions : dict, optional
A dictionary where the keys are string representations of either
``FunctionClass`` or ``UndefinedFunction`` instances and the values
are their desired C string representations. Alternatively, the
dictionary value can be a list of tuples i.e. [(argument_test,
cfunction_string)]. See below for examples.
dereference : iterable, optional
An iterable of symbols that should be dereferenced in the printed code
expression. These would be values passed by address to the function.
For example, if ``dereference=[a]``, the resulting code would print
``(*a)`` instead of ``a``.
human : bool, optional
If True, the result is a single string that may contain some constant
declarations for the number symbols. If False, the same information is
returned in a tuple of (symbols_to_declare, not_supported_functions,
code_text). [default=True].
contract: bool, optional
If True, ``Indexed`` instances are assumed to obey tensor contraction
rules and the corresponding nested loops over indices are generated.
Setting contract=False will not generate loops, instead the user is
responsible to provide values for the indices in the code.
[default=True].
Examples
========
>>> from sympy import rust_code, symbols, Rational, sin, ceiling, Abs, Function
>>> x, tau = symbols("x, tau")
>>> rust_code((2*tau)**Rational(7, 2))
'8*1.4142135623731*tau.powf(7_f64/2.0)'
>>> rust_code(sin(x), assign_to="s")
's = x.sin();'
Simple custom printing can be defined for certain types by passing a
dictionary of {"type" : "function"} to the ``user_functions`` kwarg.
Alternatively, the dictionary value can be a list of tuples i.e.
[(argument_test, cfunction_string)].
>>> custom_functions = {
... "ceiling": "CEIL",
... "Abs": [(lambda x: not x.is_integer, "fabs", 4),
... (lambda x: x.is_integer, "ABS", 4)],
... "func": "f"
... }
>>> func = Function('func')
>>> rust_code(func(Abs(x) + ceiling(x)), user_functions=custom_functions)
'(fabs(x) + x.CEIL()).f()'
``Piecewise`` expressions are converted into conditionals. If an
``assign_to`` variable is provided an if statement is created, otherwise
the ternary operator is used. Note that if the ``Piecewise`` lacks a
default term, represented by ``(expr, True)`` then an error will be thrown.
This is to prevent generating an expression that may not evaluate to
anything.
>>> from sympy import Piecewise
>>> expr = Piecewise((x + 1, x > 0), (x, True))
>>> print(rust_code(expr, tau))
tau = if (x > 0) {
x + 1
} else {
x
};
Support for loops is provided through ``Indexed`` types. With
``contract=True`` these expressions will be turned into loops, whereas
``contract=False`` will just print the assignment expression that should be
looped over:
>>> from sympy import Eq, IndexedBase, Idx
>>> len_y = 5
>>> y = IndexedBase('y', shape=(len_y,))
>>> t = IndexedBase('t', shape=(len_y,))
>>> Dy = IndexedBase('Dy', shape=(len_y-1,))
>>> i = Idx('i', len_y-1)
>>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
>>> rust_code(e.rhs, assign_to=e.lhs, contract=False)
'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);'
Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions
must be provided to ``assign_to``. Note that any expression that can be
generated normally can also exist inside a Matrix:
>>> from sympy import Matrix, MatrixSymbol
>>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)])
>>> A = MatrixSymbol('A', 3, 1)
>>> print(rust_code(mat, A))
A = [x.powi(2), if (x > 0) {
x + 1
} else {
x
}, x.sin()];
"""
return RustCodePrinter(settings).doprint(expr, assign_to)
def print_rust_code(expr, **settings):
"""Prints Rust representation of the given expression."""
print(rust_code(expr, **settings))
|
13ad35f79d34139f8dd984ed7c3b6fc764a631493817e041242011d9b230b4ed | """
C++ code printer
"""
from itertools import chain
from sympy.codegen.ast import Type, none
from .c import C89CodePrinter, C99CodePrinter
# These are defined in the other file so we can avoid importing sympy.codegen
# from the top-level 'import sympy'. Export them here as well.
from sympy.printing.codeprinter import cxxcode # noqa:F401
# from http://en.cppreference.com/w/cpp/keyword
reserved = {
'C++98': [
'and', 'and_eq', 'asm', 'auto', 'bitand', 'bitor', 'bool', 'break',
        'case', 'catch', 'char', 'class', 'compl', 'const', 'const_cast',
'continue', 'default', 'delete', 'do', 'double', 'dynamic_cast',
'else', 'enum', 'explicit', 'export', 'extern', 'false', 'float',
'for', 'friend', 'goto', 'if', 'inline', 'int', 'long', 'mutable',
'namespace', 'new', 'not', 'not_eq', 'operator', 'or', 'or_eq',
'private', 'protected', 'public', 'register', 'reinterpret_cast',
'return', 'short', 'signed', 'sizeof', 'static', 'static_cast',
'struct', 'switch', 'template', 'this', 'throw', 'true', 'try',
'typedef', 'typeid', 'typename', 'union', 'unsigned', 'using',
'virtual', 'void', 'volatile', 'wchar_t', 'while', 'xor', 'xor_eq'
]
}
reserved['C++11'] = reserved['C++98'][:] + [
'alignas', 'alignof', 'char16_t', 'char32_t', 'constexpr', 'decltype',
'noexcept', 'nullptr', 'static_assert', 'thread_local'
]
reserved['C++17'] = reserved['C++11'][:]
reserved['C++17'].remove('register')
# TM TS: atomic_cancel, atomic_commit, atomic_noexcept, synchronized
# concepts TS: concept, requires
# module TS: import, module
_math_functions = {
'C++98': {
'Mod': 'fmod',
'ceiling': 'ceil',
},
'C++11': {
'gamma': 'tgamma',
},
'C++17': {
'beta': 'beta',
'Ei': 'expint',
'zeta': 'riemann_zeta',
}
}
# from http://en.cppreference.com/w/cpp/header/cmath
for k in ('Abs', 'exp', 'log', 'log10', 'sqrt', 'sin', 'cos', 'tan', # 'Pow'
'asin', 'acos', 'atan', 'atan2', 'sinh', 'cosh', 'tanh', 'floor'):
_math_functions['C++98'][k] = k.lower()
for k in ('asinh', 'acosh', 'atanh', 'erf', 'erfc'):
_math_functions['C++11'][k] = k.lower()
def _attach_print_method(cls, sympy_name, func_name):
meth_name = '_print_%s' % sympy_name
if hasattr(cls, meth_name):
raise ValueError("Edit method (or subclass) instead of overwriting.")
def _print_method(self, expr):
return '{}{}({})'.format(self._ns, func_name, ', '.join(map(self._print, expr.args)))
    _print_method.__doc__ = "Prints code for %s" % sympy_name
setattr(cls, meth_name, _print_method)
def _attach_print_methods(cls, cont):
for sympy_name, cxx_name in cont[cls.standard].items():
_attach_print_method(cls, sympy_name, cxx_name)
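# Sketch of how the helpers above would be used (the calls further down are
# currently commented out; name mapping is handled through the printers'
# known-function machinery instead):
#   _attach_print_method(CXX98CodePrinter, 'ceiling', 'ceil')
# would define CXX98CodePrinter._print_ceiling, emitting e.g. "std::ceil(x)".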
class _CXXCodePrinterBase:
printmethod = "_cxxcode"
language = 'C++'
_ns = 'std::' # namespace
def __init__(self, settings=None):
super().__init__(settings or {})
def _print_Max(self, expr):
from sympy import Max
if len(expr.args) == 1:
return self._print(expr.args[0])
return "%smax(%s, %s)" % (self._ns, expr.args[0], self._print(Max(*expr.args[1:])))
def _print_Min(self, expr):
from sympy import Min
if len(expr.args) == 1:
return self._print(expr.args[0])
return "%smin(%s, %s)" % (self._ns, expr.args[0], self._print(Min(*expr.args[1:])))
def _print_using(self, expr):
if expr.alias == none:
return 'using %s' % expr.type
else:
raise ValueError("C++98 does not support type aliases")
class CXX98CodePrinter(_CXXCodePrinterBase, C89CodePrinter):
standard = 'C++98'
reserved_words = set(reserved['C++98'])
# _attach_print_methods(CXX98CodePrinter, _math_functions)
class CXX11CodePrinter(_CXXCodePrinterBase, C99CodePrinter):
standard = 'C++11'
reserved_words = set(reserved['C++11'])
type_mappings = dict(chain(
CXX98CodePrinter.type_mappings.items(),
{
Type('int8'): ('int8_t', {'cstdint'}),
Type('int16'): ('int16_t', {'cstdint'}),
Type('int32'): ('int32_t', {'cstdint'}),
Type('int64'): ('int64_t', {'cstdint'}),
Type('uint8'): ('uint8_t', {'cstdint'}),
Type('uint16'): ('uint16_t', {'cstdint'}),
Type('uint32'): ('uint32_t', {'cstdint'}),
Type('uint64'): ('uint64_t', {'cstdint'}),
Type('complex64'): ('std::complex<float>', {'complex'}),
Type('complex128'): ('std::complex<double>', {'complex'}),
Type('bool'): ('bool', None),
}.items()
))
def _print_using(self, expr):
if expr.alias == none:
return super()._print_using(expr)
else:
return 'using %(alias)s = %(type)s' % expr.kwargs(apply=self._print)
# _attach_print_methods(CXX11CodePrinter, _math_functions)
class CXX17CodePrinter(_CXXCodePrinterBase, C99CodePrinter):
standard = 'C++17'
reserved_words = set(reserved['C++17'])
_kf = dict(C99CodePrinter._kf, **_math_functions['C++17'])
def _print_beta(self, expr):
return self._print_math_func(expr)
def _print_Ei(self, expr):
return self._print_math_func(expr)
def _print_zeta(self, expr):
return self._print_math_func(expr)
# _attach_print_methods(CXX17CodePrinter, _math_functions)
cxx_code_printers = {
'c++98': CXX98CodePrinter,
'c++11': CXX11CodePrinter,
'c++17': CXX17CodePrinter
}
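# Illustrative usage (a sketch; the public entry point is the ``cxxcode``
# helper imported above, which selects one of these printers via its
# ``standard`` keyword):
#   from sympy import Max
#   from sympy.abc import x, y
#   CXX11CodePrinter().doprint(Max(x, y))   # -> 'std::max(x, y)'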
|
87f685c022e66623460379d2592826df4c8b0fd146e255692247df5bc1dc0a6b | '''
Use llvmlite to create executable functions from Sympy expressions
This module requires llvmlite (https://github.com/numba/llvmlite).
'''
import ctypes
from sympy.external import import_module
from sympy.printing.printer import Printer
from sympy import S, IndexedBase
from sympy.utilities.decorator import doctest_depends_on
llvmlite = import_module('llvmlite')
if llvmlite:
ll = import_module('llvmlite.ir').ir
llvm = import_module('llvmlite.binding').binding
llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()
__doctest_requires__ = {('llvm_callable'): ['llvmlite']}
class LLVMJitPrinter(Printer):
'''Convert expressions to LLVM IR'''
def __init__(self, module, builder, fn, *args, **kwargs):
self.func_arg_map = kwargs.pop("func_arg_map", {})
if not llvmlite:
raise ImportError("llvmlite is required for LLVMJITPrinter")
super().__init__(*args, **kwargs)
self.fp_type = ll.DoubleType()
self.module = module
self.builder = builder
self.fn = fn
self.ext_fn = {} # keep track of wrappers to external functions
self.tmp_var = {}
def _add_tmp_var(self, name, value):
self.tmp_var[name] = value
def _print_Number(self, n):
return ll.Constant(self.fp_type, float(n))
def _print_Integer(self, expr):
return ll.Constant(self.fp_type, float(expr.p))
def _print_Symbol(self, s):
val = self.tmp_var.get(s)
if not val:
# look up parameter with name s
val = self.func_arg_map.get(s)
if not val:
raise LookupError("Symbol not found: %s" % s)
return val
def _print_Pow(self, expr):
base0 = self._print(expr.base)
if expr.exp == S.NegativeOne:
return self.builder.fdiv(ll.Constant(self.fp_type, 1.0), base0)
if expr.exp == S.Half:
fn = self.ext_fn.get("sqrt")
if not fn:
fn_type = ll.FunctionType(self.fp_type, [self.fp_type])
fn = ll.Function(self.module, fn_type, "sqrt")
self.ext_fn["sqrt"] = fn
return self.builder.call(fn, [base0], "sqrt")
if expr.exp == 2:
return self.builder.fmul(base0, base0)
exp0 = self._print(expr.exp)
fn = self.ext_fn.get("pow")
if not fn:
fn_type = ll.FunctionType(self.fp_type, [self.fp_type, self.fp_type])
fn = ll.Function(self.module, fn_type, "pow")
self.ext_fn["pow"] = fn
return self.builder.call(fn, [base0, exp0], "pow")
def _print_Mul(self, expr):
nodes = [self._print(a) for a in expr.args]
e = nodes[0]
for node in nodes[1:]:
e = self.builder.fmul(e, node)
return e
def _print_Add(self, expr):
nodes = [self._print(a) for a in expr.args]
e = nodes[0]
for node in nodes[1:]:
e = self.builder.fadd(e, node)
return e
# TODO - assumes all called functions take one double precision argument.
# Should have a list of math library functions to validate this.
def _print_Function(self, expr):
name = expr.func.__name__
e0 = self._print(expr.args[0])
fn = self.ext_fn.get(name)
if not fn:
fn_type = ll.FunctionType(self.fp_type, [self.fp_type])
fn = ll.Function(self.module, fn_type, name)
self.ext_fn[name] = fn
return self.builder.call(fn, [e0], name)
def emptyPrinter(self, expr):
raise TypeError("Unsupported type for LLVM JIT conversion: %s"
% type(expr))
# Used when parameters are passed by array. Often used in callbacks to
# handle a variable number of parameters.
class LLVMJitCallbackPrinter(LLVMJitPrinter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _print_Indexed(self, expr):
array, idx = self.func_arg_map[expr.base]
offset = int(expr.indices[0].evalf())
array_ptr = self.builder.gep(array, [ll.Constant(ll.IntType(32), offset)])
fp_array_ptr = self.builder.bitcast(array_ptr, ll.PointerType(self.fp_type))
value = self.builder.load(fp_array_ptr)
return value
def _print_Symbol(self, s):
val = self.tmp_var.get(s)
if val:
return val
array, idx = self.func_arg_map.get(s, [None, 0])
if not array:
raise LookupError("Symbol not found: %s" % s)
array_ptr = self.builder.gep(array, [ll.Constant(ll.IntType(32), idx)])
fp_array_ptr = self.builder.bitcast(array_ptr,
ll.PointerType(self.fp_type))
value = self.builder.load(fp_array_ptr)
return value
# ensure lifetime of the execution engine persists (else call to compiled
# function will seg fault)
exe_engines = []
# ensure names for generated functions are unique
link_names = set()
current_link_suffix = 0
class LLVMJitCode:
def __init__(self, signature):
self.signature = signature
self.fp_type = ll.DoubleType()
self.module = ll.Module('mod1')
self.fn = None
self.llvm_arg_types = []
self.llvm_ret_type = self.fp_type
self.param_dict = {} # map symbol name to LLVM function argument
self.link_name = ''
def _from_ctype(self, ctype):
if ctype == ctypes.c_int:
return ll.IntType(32)
if ctype == ctypes.c_double:
return self.fp_type
if ctype == ctypes.POINTER(ctypes.c_double):
return ll.PointerType(self.fp_type)
if ctype == ctypes.c_void_p:
return ll.PointerType(ll.IntType(32))
if ctype == ctypes.py_object:
return ll.PointerType(ll.IntType(32))
print("Unhandled ctype = %s" % str(ctype))
def _create_args(self, func_args):
"""Create types for function arguments"""
self.llvm_ret_type = self._from_ctype(self.signature.ret_type)
self.llvm_arg_types = \
[self._from_ctype(a) for a in self.signature.arg_ctypes]
def _create_function_base(self):
"""Create function with name and type signature"""
global link_names, current_link_suffix
default_link_name = 'jit_func'
current_link_suffix += 1
self.link_name = default_link_name + str(current_link_suffix)
link_names.add(self.link_name)
fn_type = ll.FunctionType(self.llvm_ret_type, self.llvm_arg_types)
self.fn = ll.Function(self.module, fn_type, name=self.link_name)
def _create_param_dict(self, func_args):
"""Mapping of symbolic values to function arguments"""
for i, a in enumerate(func_args):
self.fn.args[i].name = str(a)
self.param_dict[a] = self.fn.args[i]
def _create_function(self, expr):
"""Create function body and return LLVM IR"""
bb_entry = self.fn.append_basic_block('entry')
builder = ll.IRBuilder(bb_entry)
lj = LLVMJitPrinter(self.module, builder, self.fn,
func_arg_map=self.param_dict)
ret = self._convert_expr(lj, expr)
lj.builder.ret(self._wrap_return(lj, ret))
strmod = str(self.module)
return strmod
def _wrap_return(self, lj, vals):
# Return a single double if there is one return value,
# else return a tuple of doubles.
# Don't wrap return value in this case
if self.signature.ret_type == ctypes.c_double:
return vals[0]
# Use this instead of a real PyObject*
void_ptr = ll.PointerType(ll.IntType(32))
# Create a wrapped double: PyObject* PyFloat_FromDouble(double v)
wrap_type = ll.FunctionType(void_ptr, [self.fp_type])
wrap_fn = ll.Function(lj.module, wrap_type, "PyFloat_FromDouble")
wrapped_vals = [lj.builder.call(wrap_fn, [v]) for v in vals]
if len(vals) == 1:
final_val = wrapped_vals[0]
else:
# Create a tuple: PyObject* PyTuple_Pack(Py_ssize_t n, ...)
# This should be Py_ssize_t
tuple_arg_types = [ll.IntType(32)]
tuple_arg_types.extend([void_ptr]*len(vals))
tuple_type = ll.FunctionType(void_ptr, tuple_arg_types)
tuple_fn = ll.Function(lj.module, tuple_type, "PyTuple_Pack")
tuple_args = [ll.Constant(ll.IntType(32), len(wrapped_vals))]
tuple_args.extend(wrapped_vals)
final_val = lj.builder.call(tuple_fn, tuple_args)
return final_val
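    # In other words: a plain double return is passed through untouched, while
    # the Python-object signature boxes each value with PyFloat_FromDouble and,
    # for several values, packs them into a tuple via PyTuple_Pack.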
def _convert_expr(self, lj, expr):
try:
# Match CSE return data structure.
if len(expr) == 2:
tmp_exprs = expr[0]
final_exprs = expr[1]
if len(final_exprs) != 1 and self.signature.ret_type == ctypes.c_double:
raise NotImplementedError("Return of multiple expressions not supported for this callback")
for name, e in tmp_exprs:
val = lj._print(e)
lj._add_tmp_var(name, val)
except TypeError:
final_exprs = [expr]
vals = [lj._print(e) for e in final_exprs]
return vals
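    # Example of the CSE-shaped input handled above (a sketch):
    # cse([x*y + z, x*y - z]) returns roughly ([(x0, x*y)], [x0 + z, x0 - z]);
    # the (x0, x*y) pair is printed first and registered via _add_tmp_var so
    # that the final expressions can refer to it.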
def _compile_function(self, strmod):
global exe_engines
llmod = llvm.parse_assembly(strmod)
pmb = llvm.create_pass_manager_builder()
pmb.opt_level = 2
pass_manager = llvm.create_module_pass_manager()
pmb.populate(pass_manager)
pass_manager.run(llmod)
target_machine = \
llvm.Target.from_default_triple().create_target_machine()
exe_eng = llvm.create_mcjit_compiler(llmod, target_machine)
exe_eng.finalize_object()
exe_engines.append(exe_eng)
if False:
print("Assembly")
print(target_machine.emit_assembly(llmod))
fptr = exe_eng.get_function_address(self.link_name)
return fptr
class LLVMJitCodeCallback(LLVMJitCode):
def __init__(self, signature):
super().__init__(signature)
def _create_param_dict(self, func_args):
for i, a in enumerate(func_args):
if isinstance(a, IndexedBase):
self.param_dict[a] = (self.fn.args[i], i)
self.fn.args[i].name = str(a)
else:
self.param_dict[a] = (self.fn.args[self.signature.input_arg],
i)
def _create_function(self, expr):
"""Create function body and return LLVM IR"""
bb_entry = self.fn.append_basic_block('entry')
builder = ll.IRBuilder(bb_entry)
lj = LLVMJitCallbackPrinter(self.module, builder, self.fn,
func_arg_map=self.param_dict)
ret = self._convert_expr(lj, expr)
if self.signature.ret_arg:
output_fp_ptr = builder.bitcast(self.fn.args[self.signature.ret_arg],
ll.PointerType(self.fp_type))
for i, val in enumerate(ret):
index = ll.Constant(ll.IntType(32), i)
output_array_ptr = builder.gep(output_fp_ptr, [index])
builder.store(val, output_array_ptr)
builder.ret(ll.Constant(ll.IntType(32), 0)) # return success
else:
lj.builder.ret(self._wrap_return(lj, ret))
strmod = str(self.module)
return strmod
class CodeSignature:
def __init__(self, ret_type):
self.ret_type = ret_type
self.arg_ctypes = []
# Input argument array element index
self.input_arg = 0
# For the case output value is referenced through a parameter rather
# than the return value
self.ret_arg = None
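# Rough shape of the signatures that llvm_callable (below) builds through this
# class (a sketch, reading the ctypes lists further down):
#   'scipy.integrate' : double f(int n, double* x)          with input_arg = 1
#   'cubature'        : int f(int ndim, double* x, void* userdata,
#                             int fdim, double* fval)        with ret_arg = 4,
#                       i.e. results are written through fval rather than
#                       being returned.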
def _llvm_jit_code(args, expr, signature, callback_type):
"""Create a native code function from a Sympy expression"""
if callback_type is None:
jit = LLVMJitCode(signature)
else:
jit = LLVMJitCodeCallback(signature)
jit._create_args(args)
jit._create_function_base()
jit._create_param_dict(args)
strmod = jit._create_function(expr)
if False:
print("LLVM IR")
print(strmod)
fptr = jit._compile_function(strmod)
return fptr
@doctest_depends_on(modules=('llvmlite', 'scipy'))
def llvm_callable(args, expr, callback_type=None):
'''Compile function from a Sympy expression
Expressions are evaluated using double precision arithmetic.
Some single argument math functions (exp, sin, cos, etc.) are supported
in expressions.
Parameters
==========
args : List of Symbol
Arguments to the generated function. Usually the free symbols in
the expression. Currently each one is assumed to convert to
a double precision scalar.
expr : Expr, or (Replacements, Expr) as returned from 'cse'
Expression to compile.
callback_type : string
Create function with signature appropriate to use as a callback.
Currently supported:
'scipy.integrate'
'scipy.integrate.test'
'cubature'
Returns
=======
Compiled function that can evaluate the expression.
Examples
========
>>> import sympy.printing.llvmjitcode as jit
>>> from sympy.abc import a
>>> e = a*a + a + 1
>>> e1 = jit.llvm_callable([a], e)
>>> e.subs(a, 1.1) # Evaluate via substitution
3.31000000000000
>>> e1(1.1) # Evaluate using JIT-compiled code
3.3100000000000005
Callbacks for integration functions can be JIT compiled.
>>> import sympy.printing.llvmjitcode as jit
>>> from sympy.abc import a
>>> from sympy import integrate
>>> from scipy.integrate import quad
>>> e = a*a
>>> e1 = jit.llvm_callable([a], e, callback_type='scipy.integrate')
>>> integrate(e, (a, 0.0, 2.0))
2.66666666666667
>>> quad(e1, 0.0, 2.0)[0]
2.66666666666667
The 'cubature' callback is for the Python wrapper around the
cubature package ( https://github.com/saullocastro/cubature )
and ( http://ab-initio.mit.edu/wiki/index.php/Cubature )
There are two signatures for the SciPy integration callbacks.
The first ('scipy.integrate') is the function to be passed to the
integration routine, and will pass the signature checks.
The second ('scipy.integrate.test') is only useful for directly calling
the function using ctypes variables. It will not pass the signature checks
for scipy.integrate.
The return value from the cse module can also be compiled. This
can improve the performance of the compiled function. If multiple
expressions are given to cse, the compiled function returns a tuple.
The 'cubature' callback handles multiple expressions (set `fdim`
to match in the integration call.)
>>> import sympy.printing.llvmjitcode as jit
>>> from sympy import cse
>>> from sympy.abc import x,y
>>> e1 = x*x + y*y
>>> e2 = 4*(x*x + y*y) + 8.0
>>> after_cse = cse([e1,e2])
>>> after_cse
([(x0, x**2), (x1, y**2)], [x0 + x1, 4*x0 + 4*x1 + 8.0])
>>> j1 = jit.llvm_callable([x,y], after_cse)
>>> j1(1.0, 2.0)
(5.0, 28.0)
'''
if not llvmlite:
raise ImportError("llvmlite is required for llvmjitcode")
signature = CodeSignature(ctypes.py_object)
arg_ctypes = []
if callback_type is None:
for _ in args:
arg_ctype = ctypes.c_double
arg_ctypes.append(arg_ctype)
elif callback_type == 'scipy.integrate' or callback_type == 'scipy.integrate.test':
signature.ret_type = ctypes.c_double
arg_ctypes = [ctypes.c_int, ctypes.POINTER(ctypes.c_double)]
arg_ctypes_formal = [ctypes.c_int, ctypes.c_double]
signature.input_arg = 1
elif callback_type == 'cubature':
arg_ctypes = [ctypes.c_int,
ctypes.POINTER(ctypes.c_double),
ctypes.c_void_p,
ctypes.c_int,
ctypes.POINTER(ctypes.c_double)
]
signature.ret_type = ctypes.c_int
signature.input_arg = 1
signature.ret_arg = 4
else:
raise ValueError("Unknown callback type: %s" % callback_type)
signature.arg_ctypes = arg_ctypes
fptr = _llvm_jit_code(args, expr, signature, callback_type)
if callback_type and callback_type == 'scipy.integrate':
arg_ctypes = arg_ctypes_formal
cfunc = ctypes.CFUNCTYPE(signature.ret_type, *arg_ctypes)(fptr)
return cfunc
|
92660d821a3ff9220ca9bd043b0e90ca4630395cbf8e3760bab2bf272d3e71fd | """
A few practical conventions common to all printers.
"""
import re
from sympy.core.compatibility import Iterable
from sympy import Derivative
_name_with_digits_p = re.compile(r'^([a-zA-Z]+)([0-9]+)$')
def split_super_sub(text):
"""Split a symbol name into a name, superscripts and subscripts
The first part of the symbol name is considered to be its actual
'name', followed by super- and subscripts. Each superscript is
preceded with a "^" character or by "__". Each subscript is preceded
by a "_" character. The three return values are the actual name, a
list with superscripts and a list with subscripts.
Examples
========
>>> from sympy.printing.conventions import split_super_sub
>>> split_super_sub('a_x^1')
('a', ['1'], ['x'])
>>> split_super_sub('var_sub1__sup_sub2')
('var', ['sup'], ['sub1', 'sub2'])
"""
if not text:
return text, [], []
pos = 0
name = None
supers = []
subs = []
while pos < len(text):
start = pos + 1
if text[pos:pos + 2] == "__":
start += 1
pos_hat = text.find("^", start)
if pos_hat < 0:
pos_hat = len(text)
pos_usc = text.find("_", start)
if pos_usc < 0:
pos_usc = len(text)
pos_next = min(pos_hat, pos_usc)
part = text[pos:pos_next]
pos = pos_next
if name is None:
name = part
elif part.startswith("^"):
supers.append(part[1:])
elif part.startswith("__"):
supers.append(part[2:])
elif part.startswith("_"):
subs.append(part[1:])
else:
raise RuntimeError("This should never happen.")
# make a little exception when a name ends with digits, i.e. treat them
# as a subscript too.
m = _name_with_digits_p.match(name)
if m:
name, sub = m.groups()
subs.insert(0, sub)
return name, supers, subs
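# A few more illustrative splits (not doctests): a trailing run of digits is
# treated as an implicit subscript, so split_super_sub('alpha12') gives
# ('alpha', [], ['12']), and split_super_sub('x1^a_b') gives
# ('x', ['a'], ['1', 'b']) -- the implicit subscript is inserted first.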
def requires_partial(expr):
"""Return whether a partial derivative symbol is required for printing
This requires checking how many free variables there are,
filtering out the ones that are integers. Some expressions don't have
free variables. In that case, check its variable list explicitly to
get the context of the expression.
"""
if isinstance(expr, Derivative):
return requires_partial(expr.expr)
if not isinstance(expr.free_symbols, Iterable):
return len(set(expr.variables)) > 1
return sum(not s.is_integer for s in expr.free_symbols) > 1
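# For example (as used by the LaTeX and pretty printers): Derivative(f(x, y), x)
# has two non-integer free symbols, so a partial-derivative symbol is required,
# while Derivative(f(x), x) is rendered with an ordinary "d".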
|
8e7eb188f89152bd63e88a65e06d76d41e26b0b1e4e9df569746680fc178dbb9 | """
A Printer which converts an expression into its LaTeX equivalent.
"""
from typing import Any, Dict
import itertools
from sympy.core import Add, Float, Mod, Mul, Number, S, Symbol
from sympy.core.alphabets import greeks
from sympy.core.containers import Tuple
from sympy.core.function import _coeff_isneg, AppliedUndef, Derivative
from sympy.core.operations import AssocOp
from sympy.core.sympify import SympifyError
from sympy.logic.boolalg import true
# sympy.printing imports
from sympy.printing.precedence import precedence_traditional
from sympy.printing.printer import Printer, print_function
from sympy.printing.conventions import split_super_sub, requires_partial
from sympy.printing.precedence import precedence, PRECEDENCE
import mpmath.libmp as mlib
from mpmath.libmp import prec_to_dps
from sympy.core.compatibility import default_sort_key
from sympy.utilities.iterables import has_variety
import re
# Hand-picked functions which can be used directly in both LaTeX and MathJax
# Complete list at
# https://docs.mathjax.org/en/latest/tex.html#supported-latex-commands
# This variable only contains those functions which sympy uses.
accepted_latex_functions = ['arcsin', 'arccos', 'arctan', 'sin', 'cos', 'tan',
'sinh', 'cosh', 'tanh', 'sqrt', 'ln', 'log', 'sec',
'csc', 'cot', 'coth', 're', 'im', 'frac', 'root',
'arg',
]
tex_greek_dictionary = {
'Alpha': 'A',
'Beta': 'B',
'Gamma': r'\Gamma',
'Delta': r'\Delta',
'Epsilon': 'E',
'Zeta': 'Z',
'Eta': 'H',
'Theta': r'\Theta',
'Iota': 'I',
'Kappa': 'K',
'Lambda': r'\Lambda',
'Mu': 'M',
'Nu': 'N',
'Xi': r'\Xi',
'omicron': 'o',
'Omicron': 'O',
'Pi': r'\Pi',
'Rho': 'P',
'Sigma': r'\Sigma',
'Tau': 'T',
'Upsilon': r'\Upsilon',
'Phi': r'\Phi',
'Chi': 'X',
'Psi': r'\Psi',
'Omega': r'\Omega',
'lamda': r'\lambda',
'Lamda': r'\Lambda',
'khi': r'\chi',
'Khi': r'X',
'varepsilon': r'\varepsilon',
'varkappa': r'\varkappa',
'varphi': r'\varphi',
'varpi': r'\varpi',
'varrho': r'\varrho',
'varsigma': r'\varsigma',
'vartheta': r'\vartheta',
}
other_symbols = {'aleph', 'beth', 'daleth', 'gimel', 'ell', 'eth', 'hbar',
'hslash', 'mho', 'wp'}
# Variable name modifiers
modifier_dict = {
# Accents
'mathring': lambda s: r'\mathring{'+s+r'}',
'ddddot': lambda s: r'\ddddot{'+s+r'}',
'dddot': lambda s: r'\dddot{'+s+r'}',
'ddot': lambda s: r'\ddot{'+s+r'}',
'dot': lambda s: r'\dot{'+s+r'}',
'check': lambda s: r'\check{'+s+r'}',
'breve': lambda s: r'\breve{'+s+r'}',
'acute': lambda s: r'\acute{'+s+r'}',
'grave': lambda s: r'\grave{'+s+r'}',
'tilde': lambda s: r'\tilde{'+s+r'}',
'hat': lambda s: r'\hat{'+s+r'}',
'bar': lambda s: r'\bar{'+s+r'}',
'vec': lambda s: r'\vec{'+s+r'}',
'prime': lambda s: "{"+s+"}'",
'prm': lambda s: "{"+s+"}'",
# Faces
'bold': lambda s: r'\boldsymbol{'+s+r'}',
'bm': lambda s: r'\boldsymbol{'+s+r'}',
'cal': lambda s: r'\mathcal{'+s+r'}',
'scr': lambda s: r'\mathscr{'+s+r'}',
'frak': lambda s: r'\mathfrak{'+s+r'}',
# Brackets
'norm': lambda s: r'\left\|{'+s+r'}\right\|',
'avg': lambda s: r'\left\langle{'+s+r'}\right\rangle',
'abs': lambda s: r'\left|{'+s+r'}\right|',
'mag': lambda s: r'\left|{'+s+r'}\right|',
}
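# These modifiers are matched as suffixes of symbol names by the name
# translation helpers further down in this module, so that (for example) a
# symbol named 'omegahat' is typically rendered as \hat{\omega} and 'xdot'
# as \dot{x}.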
greek_letters_set = frozenset(greeks)
_between_two_numbers_p = (
re.compile(r'[0-9][} ]*$'), # search
re.compile(r'[{ ]*[-+0-9]'), # match
)
def latex_escape(s):
"""
Escape a string such that latex interprets it as plaintext.
We can't use verbatim easily with mathjax, so escaping is easier.
Rules from https://tex.stackexchange.com/a/34586/41112.
"""
s = s.replace('\\', r'\textbackslash')
for c in '&%$#_{}':
s = s.replace(c, '\\' + c)
s = s.replace('~', r'\textasciitilde')
s = s.replace('^', r'\textasciicircum')
return s
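# For example (a sketch, not a doctest): latex_escape('50% of $x_1') returns
# r'50\% of \$x\_1', and a literal backslash becomes \textbackslash.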
class LatexPrinter(Printer):
printmethod = "_latex"
_default_settings = {
"full_prec": False,
"fold_frac_powers": False,
"fold_func_brackets": False,
"fold_short_frac": None,
"inv_trig_style": "abbreviated",
"itex": False,
"ln_notation": False,
"long_frac_ratio": None,
"mat_delim": "[",
"mat_str": None,
"mode": "plain",
"mul_symbol": None,
"order": None,
"symbol_names": {},
"root_notation": True,
"mat_symbol_style": "plain",
"imaginary_unit": "i",
"gothic_re_im": False,
"decimal_separator": "period",
"perm_cyclic": True,
"parenthesize_super": True,
"min": None,
"max": None,
} # type: Dict[str, Any]
def __init__(self, settings=None):
Printer.__init__(self, settings)
if 'mode' in self._settings:
valid_modes = ['inline', 'plain', 'equation',
'equation*']
if self._settings['mode'] not in valid_modes:
raise ValueError("'mode' must be one of 'inline', 'plain', "
"'equation' or 'equation*'")
if self._settings['fold_short_frac'] is None and \
self._settings['mode'] == 'inline':
self._settings['fold_short_frac'] = True
mul_symbol_table = {
None: r" ",
"ldot": r" \,.\, ",
"dot": r" \cdot ",
"times": r" \times "
}
try:
self._settings['mul_symbol_latex'] = \
mul_symbol_table[self._settings['mul_symbol']]
except KeyError:
self._settings['mul_symbol_latex'] = \
self._settings['mul_symbol']
try:
self._settings['mul_symbol_latex_numbers'] = \
mul_symbol_table[self._settings['mul_symbol'] or 'dot']
except KeyError:
if (self._settings['mul_symbol'].strip() in
['', ' ', '\\', '\\,', '\\:', '\\;', '\\quad']):
self._settings['mul_symbol_latex_numbers'] = \
mul_symbol_table['dot']
else:
self._settings['mul_symbol_latex_numbers'] = \
self._settings['mul_symbol']
self._delim_dict = {'(': ')', '[': ']'}
imaginary_unit_table = {
None: r"i",
"i": r"i",
"ri": r"\mathrm{i}",
"ti": r"\text{i}",
"j": r"j",
"rj": r"\mathrm{j}",
"tj": r"\text{j}",
}
try:
self._settings['imaginary_unit_latex'] = \
imaginary_unit_table[self._settings['imaginary_unit']]
except KeyError:
self._settings['imaginary_unit_latex'] = \
self._settings['imaginary_unit']
def _add_parens(self, s):
return r"\left({}\right)".format(s)
# TODO: merge this with the above, which requires a lot of test changes
def _add_parens_lspace(self, s):
return r"\left( {}\right)".format(s)
def parenthesize(self, item, level, is_neg=False, strict=False):
prec_val = precedence_traditional(item)
if is_neg and strict:
return self._add_parens(self._print(item))
if (prec_val < level) or ((not strict) and prec_val <= level):
return self._add_parens(self._print(item))
else:
return self._print(item)
def parenthesize_super(self, s):
"""
Protect superscripts in s
If the parenthesize_super option is set, protect with parentheses, else
wrap in braces.
"""
if "^" in s:
if self._settings['parenthesize_super']:
return self._add_parens(s)
else:
return "{{{}}}".format(s)
return s
def doprint(self, expr):
tex = Printer.doprint(self, expr)
if self._settings['mode'] == 'plain':
return tex
elif self._settings['mode'] == 'inline':
return r"$%s$" % tex
elif self._settings['itex']:
return r"$$%s$$" % tex
else:
env_str = self._settings['mode']
return r"\begin{%s}%s\end{%s}" % (env_str, tex, env_str)
def _needs_brackets(self, expr):
"""
Returns True if the expression needs to be wrapped in brackets when
printed, False otherwise. For example: a + b => True; a => False;
10 => False; -10 => True.
"""
return not ((expr.is_Integer and expr.is_nonnegative)
or (expr.is_Atom and (expr is not S.NegativeOne
and expr.is_Rational is False)))
def _needs_function_brackets(self, expr):
"""
Returns True if the expression needs to be wrapped in brackets when
passed as an argument to a function, False otherwise. This is a more
liberal version of _needs_brackets, in that many expressions which need
to be wrapped in brackets when added/subtracted/raised to a power do
not need them when passed to a function. Such an example is a*b.
"""
if not self._needs_brackets(expr):
return False
else:
# Muls of the form a*b*c... can be folded
if expr.is_Mul and not self._mul_is_clean(expr):
return True
# Pows which don't need brackets can be folded
elif expr.is_Pow and not self._pow_is_clean(expr):
return True
# Add and Function always need brackets
elif expr.is_Add or expr.is_Function:
return True
else:
return False
def _needs_mul_brackets(self, expr, first=False, last=False):
"""
Returns True if the expression needs to be wrapped in brackets when
printed as part of a Mul, False otherwise. This is True for Add,
but also for some container objects that would not need brackets
when appearing last in a Mul, e.g. an Integral. ``last=True``
specifies that this expr is the last to appear in a Mul.
``first=True`` specifies that this expr is the first to appear in
a Mul.
"""
from sympy import Integral, Product, Sum
if expr.is_Mul:
if not first and _coeff_isneg(expr):
return True
elif precedence_traditional(expr) < PRECEDENCE["Mul"]:
return True
elif expr.is_Relational:
return True
if expr.is_Piecewise:
return True
if any([expr.has(x) for x in (Mod,)]):
return True
if (not last and
any([expr.has(x) for x in (Integral, Product, Sum)])):
return True
return False
def _needs_add_brackets(self, expr):
"""
Returns True if the expression needs to be wrapped in brackets when
printed as part of an Add, False otherwise. This is False for most
things.
"""
if expr.is_Relational:
return True
if any([expr.has(x) for x in (Mod,)]):
return True
if expr.is_Add:
return True
return False
def _mul_is_clean(self, expr):
for arg in expr.args:
if arg.is_Function:
return False
return True
def _pow_is_clean(self, expr):
return not self._needs_brackets(expr.base)
def _do_exponent(self, expr, exp):
if exp is not None:
return r"\left(%s\right)^{%s}" % (expr, exp)
else:
return expr
def _print_Basic(self, expr):
ls = [self._print(o) for o in expr.args]
return self._deal_with_super_sub(expr.__class__.__name__) + \
r"\left(%s\right)" % ", ".join(ls)
def _print_bool(self, e):
return r"\text{%s}" % e
_print_BooleanTrue = _print_bool
_print_BooleanFalse = _print_bool
def _print_NoneType(self, e):
return r"\text{%s}" % e
def _print_Add(self, expr, order=None):
terms = self._as_ordered_terms(expr, order=order)
tex = ""
for i, term in enumerate(terms):
if i == 0:
pass
elif _coeff_isneg(term):
tex += " - "
term = -term
else:
tex += " + "
term_tex = self._print(term)
if self._needs_add_brackets(term):
term_tex = r"\left(%s\right)" % term_tex
tex += term_tex
return tex
def _print_Cycle(self, expr):
from sympy.combinatorics.permutations import Permutation
if expr.size == 0:
return r"\left( \right)"
expr = Permutation(expr)
expr_perm = expr.cyclic_form
siz = expr.size
if expr.array_form[-1] == siz - 1:
expr_perm = expr_perm + [[siz - 1]]
term_tex = ''
for i in expr_perm:
term_tex += str(i).replace(',', r"\;")
term_tex = term_tex.replace('[', r"\left( ")
term_tex = term_tex.replace(']', r"\right)")
return term_tex
def _print_Permutation(self, expr):
from sympy.combinatorics.permutations import Permutation
from sympy.utilities.exceptions import SymPyDeprecationWarning
perm_cyclic = Permutation.print_cyclic
if perm_cyclic is not None:
SymPyDeprecationWarning(
feature="Permutation.print_cyclic = {}".format(perm_cyclic),
useinstead="init_printing(perm_cyclic={})"
.format(perm_cyclic),
issue=15201,
deprecated_since_version="1.6").warn()
else:
perm_cyclic = self._settings.get("perm_cyclic", True)
if perm_cyclic:
return self._print_Cycle(expr)
if expr.size == 0:
return r"\left( \right)"
lower = [self._print(arg) for arg in expr.array_form]
upper = [self._print(arg) for arg in range(len(lower))]
row1 = " & ".join(upper)
row2 = " & ".join(lower)
mat = r" \\ ".join((row1, row2))
return r"\begin{pmatrix} %s \end{pmatrix}" % mat
def _print_AppliedPermutation(self, expr):
perm, var = expr.args
return r"\sigma_{%s}(%s)" % (self._print(perm), self._print(var))
def _print_Float(self, expr):
# Based off of that in StrPrinter
dps = prec_to_dps(expr._prec)
strip = False if self._settings['full_prec'] else True
low = self._settings["min"] if "min" in self._settings else None
high = self._settings["max"] if "max" in self._settings else None
str_real = mlib.to_str(expr._mpf_, dps, strip_zeros=strip, min_fixed=low, max_fixed=high)
# Must always have a mul symbol (as 2.5 10^{20} just looks odd)
# thus we use the number separator
separator = self._settings['mul_symbol_latex_numbers']
if 'e' in str_real:
(mant, exp) = str_real.split('e')
if exp[0] == '+':
exp = exp[1:]
if self._settings['decimal_separator'] == 'comma':
mant = mant.replace('.','{,}')
return r"%s%s10^{%s}" % (mant, separator, exp)
elif str_real == "+inf":
return r"\infty"
elif str_real == "-inf":
return r"- \infty"
else:
if self._settings['decimal_separator'] == 'comma':
str_real = str_real.replace('.','{,}')
return str_real
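    # For example, with default settings a Float such as 1.0e20 is rendered as
    # '1.0 \cdot 10^{20}' (the \cdot comes from mul_symbol_latex_numbers), and
    # decimal_separator='comma' would turn the mantissa into '1{,}0'.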
def _print_Cross(self, expr):
vec1 = expr._expr1
vec2 = expr._expr2
return r"%s \times %s" % (self.parenthesize(vec1, PRECEDENCE['Mul']),
self.parenthesize(vec2, PRECEDENCE['Mul']))
def _print_Curl(self, expr):
vec = expr._expr
return r"\nabla\times %s" % self.parenthesize(vec, PRECEDENCE['Mul'])
def _print_Divergence(self, expr):
vec = expr._expr
return r"\nabla\cdot %s" % self.parenthesize(vec, PRECEDENCE['Mul'])
def _print_Dot(self, expr):
vec1 = expr._expr1
vec2 = expr._expr2
return r"%s \cdot %s" % (self.parenthesize(vec1, PRECEDENCE['Mul']),
self.parenthesize(vec2, PRECEDENCE['Mul']))
def _print_Gradient(self, expr):
func = expr._expr
return r"\nabla %s" % self.parenthesize(func, PRECEDENCE['Mul'])
def _print_Laplacian(self, expr):
func = expr._expr
return r"\triangle %s" % self.parenthesize(func, PRECEDENCE['Mul'])
def _print_Mul(self, expr):
from sympy.core.power import Pow
from sympy.physics.units import Quantity
from sympy.simplify import fraction
separator = self._settings['mul_symbol_latex']
numbersep = self._settings['mul_symbol_latex_numbers']
def convert(expr):
if not expr.is_Mul:
return str(self._print(expr))
else:
if self.order not in ('old', 'none'):
args = expr.as_ordered_factors()
else:
args = list(expr.args)
# If quantities are present append them at the back
args = sorted(args, key=lambda x: isinstance(x, Quantity) or
(isinstance(x, Pow) and
isinstance(x.base, Quantity)))
return convert_args(args)
def convert_args(args):
_tex = last_term_tex = ""
for i, term in enumerate(args):
term_tex = self._print(term)
if self._needs_mul_brackets(term, first=(i == 0),
last=(i == len(args) - 1)):
term_tex = r"\left(%s\right)" % term_tex
if _between_two_numbers_p[0].search(last_term_tex) and \
_between_two_numbers_p[1].match(term_tex):
# between two numbers
_tex += numbersep
elif _tex:
_tex += separator
_tex += term_tex
last_term_tex = term_tex
return _tex
# Check for unevaluated Mul. In this case we need to make sure the
# identities are visible, multiple Rational factors are not combined
# etc so we display in a straight-forward form that fully preserves all
# args and their order.
# XXX: _print_Pow calls this routine with instances of Pow...
if isinstance(expr, Mul):
args = expr.args
if args[0] is S.One or any(isinstance(arg, Number) for arg in args[1:]):
return convert_args(args)
include_parens = False
if _coeff_isneg(expr):
expr = -expr
tex = "- "
if expr.is_Add:
tex += "("
include_parens = True
else:
tex = ""
numer, denom = fraction(expr, exact=True)
if denom is S.One and Pow(1, -1, evaluate=False) not in expr.args:
# use the original expression here, since fraction() may have
# altered it when producing numer and denom
tex += convert(expr)
else:
snumer = convert(numer)
sdenom = convert(denom)
ldenom = len(sdenom.split())
ratio = self._settings['long_frac_ratio']
if self._settings['fold_short_frac'] and ldenom <= 2 and \
"^" not in sdenom:
# handle short fractions
if self._needs_mul_brackets(numer, last=False):
tex += r"\left(%s\right) / %s" % (snumer, sdenom)
else:
tex += r"%s / %s" % (snumer, sdenom)
elif ratio is not None and \
len(snumer.split()) > ratio*ldenom:
# handle long fractions
if self._needs_mul_brackets(numer, last=True):
tex += r"\frac{1}{%s}%s\left(%s\right)" \
% (sdenom, separator, snumer)
elif numer.is_Mul:
# split a long numerator
a = S.One
b = S.One
for x in numer.args:
if self._needs_mul_brackets(x, last=False) or \
len(convert(a*x).split()) > ratio*ldenom or \
(b.is_commutative is x.is_commutative is False):
b *= x
else:
a *= x
if self._needs_mul_brackets(b, last=True):
tex += r"\frac{%s}{%s}%s\left(%s\right)" \
% (convert(a), sdenom, separator, convert(b))
else:
tex += r"\frac{%s}{%s}%s%s" \
% (convert(a), sdenom, separator, convert(b))
else:
tex += r"\frac{1}{%s}%s%s" % (sdenom, separator, snumer)
else:
tex += r"\frac{%s}{%s}" % (snumer, sdenom)
if include_parens:
tex += ")"
return tex
def _print_Pow(self, expr):
# Treat x**Rational(1,n) as special case
if expr.exp.is_Rational and abs(expr.exp.p) == 1 and expr.exp.q != 1 \
and self._settings['root_notation']:
base = self._print(expr.base)
expq = expr.exp.q
if expq == 2:
tex = r"\sqrt{%s}" % base
elif self._settings['itex']:
tex = r"\root{%d}{%s}" % (expq, base)
else:
tex = r"\sqrt[%d]{%s}" % (expq, base)
if expr.exp.is_negative:
return r"\frac{1}{%s}" % tex
else:
return tex
elif self._settings['fold_frac_powers'] \
and expr.exp.is_Rational \
and expr.exp.q != 1:
base = self.parenthesize(expr.base, PRECEDENCE['Pow'])
p, q = expr.exp.p, expr.exp.q
# issue #12886: add parentheses for superscripts raised to powers
if expr.base.is_Symbol:
base = self.parenthesize_super(base)
if expr.base.is_Function:
return self._print(expr.base, exp="%s/%s" % (p, q))
return r"%s^{%s/%s}" % (base, p, q)
elif expr.exp.is_Rational and expr.exp.is_negative and \
expr.base.is_commutative:
# special case for 1^(-x), issue 9216
if expr.base == 1:
return r"%s^{%s}" % (expr.base, expr.exp)
# things like 1/x
return self._print_Mul(expr)
else:
if expr.base.is_Function:
return self._print(expr.base, exp=self._print(expr.exp))
else:
tex = r"%s^{%s}"
return self._helper_print_standard_power(expr, tex)
def _helper_print_standard_power(self, expr, template):
exp = self._print(expr.exp)
# issue #12886: add parentheses around superscripts raised
# to powers
base = self.parenthesize(expr.base, PRECEDENCE['Pow'])
if expr.base.is_Symbol:
base = self.parenthesize_super(base)
elif (isinstance(expr.base, Derivative)
and base.startswith(r'\left(')
and re.match(r'\\left\(\\d?d?dot', base)
and base.endswith(r'\right)')):
# don't use parentheses around dotted derivative
base = base[6: -7] # remove outermost added parens
return template % (base, exp)
def _print_UnevaluatedExpr(self, expr):
return self._print(expr.args[0])
def _print_Sum(self, expr):
if len(expr.limits) == 1:
tex = r"\sum_{%s=%s}^{%s} " % \
tuple([self._print(i) for i in expr.limits[0]])
else:
def _format_ineq(l):
return r"%s \leq %s \leq %s" % \
tuple([self._print(s) for s in (l[1], l[0], l[2])])
tex = r"\sum_{\substack{%s}} " % \
str.join('\\\\', [_format_ineq(l) for l in expr.limits])
if isinstance(expr.function, Add):
tex += r"\left(%s\right)" % self._print(expr.function)
else:
tex += self._print(expr.function)
return tex
def _print_Product(self, expr):
if len(expr.limits) == 1:
tex = r"\prod_{%s=%s}^{%s} " % \
tuple([self._print(i) for i in expr.limits[0]])
else:
def _format_ineq(l):
return r"%s \leq %s \leq %s" % \
tuple([self._print(s) for s in (l[1], l[0], l[2])])
tex = r"\prod_{\substack{%s}} " % \
str.join('\\\\', [_format_ineq(l) for l in expr.limits])
if isinstance(expr.function, Add):
tex += r"\left(%s\right)" % self._print(expr.function)
else:
tex += self._print(expr.function)
return tex
def _print_BasisDependent(self, expr):
from sympy.vector import Vector
o1 = []
if expr == expr.zero:
return expr.zero._latex_form
if isinstance(expr, Vector):
items = expr.separate().items()
else:
items = [(0, expr)]
for system, vect in items:
inneritems = list(vect.components.items())
inneritems.sort(key=lambda x: x[0].__str__())
for k, v in inneritems:
if v == 1:
o1.append(' + ' + k._latex_form)
elif v == -1:
o1.append(' - ' + k._latex_form)
else:
arg_str = '(' + self._print(v) + ')'
o1.append(' + ' + arg_str + k._latex_form)
outstr = (''.join(o1))
if outstr[1] != '-':
outstr = outstr[3:]
else:
outstr = outstr[1:]
return outstr
def _print_Indexed(self, expr):
tex_base = self._print(expr.base)
tex = '{'+tex_base+'}'+'_{%s}' % ','.join(
map(self._print, expr.indices))
return tex
def _print_IndexedBase(self, expr):
return self._print(expr.label)
def _print_Derivative(self, expr):
if requires_partial(expr.expr):
diff_symbol = r'\partial'
else:
diff_symbol = r'd'
tex = ""
dim = 0
for x, num in reversed(expr.variable_count):
dim += num
if num == 1:
tex += r"%s %s" % (diff_symbol, self._print(x))
else:
tex += r"%s %s^{%s}" % (diff_symbol,
self.parenthesize_super(self._print(x)),
self._print(num))
if dim == 1:
tex = r"\frac{%s}{%s}" % (diff_symbol, tex)
else:
tex = r"\frac{%s^{%s}}{%s}" % (diff_symbol, self._print(dim), tex)
if any(_coeff_isneg(i) for i in expr.args):
return r"%s %s" % (tex, self.parenthesize(expr.expr,
PRECEDENCE["Mul"],
is_neg=True,
strict=True))
return r"%s %s" % (tex, self.parenthesize(expr.expr,
PRECEDENCE["Mul"],
is_neg=False,
strict=True))
def _print_Subs(self, subs):
expr, old, new = subs.args
latex_expr = self._print(expr)
latex_old = (self._print(e) for e in old)
latex_new = (self._print(e) for e in new)
latex_subs = r'\\ '.join(
e[0] + '=' + e[1] for e in zip(latex_old, latex_new))
return r'\left. %s \right|_{\substack{ %s }}' % (latex_expr,
latex_subs)
def _print_Integral(self, expr):
tex, symbols = "", []
# Only up to \iiiint exists
if len(expr.limits) <= 4 and all(len(lim) == 1 for lim in expr.limits):
# Use len(expr.limits)-1 so that syntax highlighters don't think
# \" is an escaped quote
tex = r"\i" + "i"*(len(expr.limits) - 1) + "nt"
symbols = [r"\, d%s" % self._print(symbol[0])
for symbol in expr.limits]
else:
for lim in reversed(expr.limits):
symbol = lim[0]
tex += r"\int"
if len(lim) > 1:
if self._settings['mode'] != 'inline' \
and not self._settings['itex']:
tex += r"\limits"
if len(lim) == 3:
tex += "_{%s}^{%s}" % (self._print(lim[1]),
self._print(lim[2]))
if len(lim) == 2:
tex += "^{%s}" % (self._print(lim[1]))
symbols.insert(0, r"\, d%s" % self._print(symbol))
return r"%s %s%s" % (tex, self.parenthesize(expr.function,
PRECEDENCE["Mul"],
is_neg=any(_coeff_isneg(i) for i in expr.args),
strict=True),
"".join(symbols))
def _print_Limit(self, expr):
e, z, z0, dir = expr.args
tex = r"\lim_{%s \to " % self._print(z)
if str(dir) == '+-' or z0 in (S.Infinity, S.NegativeInfinity):
tex += r"%s}" % self._print(z0)
else:
tex += r"%s^%s}" % (self._print(z0), self._print(dir))
if isinstance(e, AssocOp):
return r"%s\left(%s\right)" % (tex, self._print(e))
else:
return r"%s %s" % (tex, self._print(e))
def _hprint_Function(self, func):
r'''
Logic to decide how to render a function to latex
- if it is a recognized latex name, use the appropriate latex command
- if it is a single letter, just use that letter
- if it is a longer name, then put \operatorname{} around it and be
          mindful of underscores in the name
'''
func = self._deal_with_super_sub(func)
if func in accepted_latex_functions:
name = r"\%s" % func
elif len(func) == 1 or func.startswith('\\'):
name = func
else:
name = r"\operatorname{%s}" % func
return name
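    # For example: 'sin' -> '\sin' (a recognized LaTeX name), 'f' -> 'f'
    # (single letter), 'sech' -> '\operatorname{sech}' (unknown multi-letter
    # name).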
def _print_Function(self, expr, exp=None):
r'''
Render functions to LaTeX, handling functions that LaTeX knows about
e.g., sin, cos, ... by using the proper LaTeX command (\sin, \cos, ...).
For single-letter function names, render them as regular LaTeX math
symbols. For multi-letter function names that LaTeX does not know
about, (e.g., Li, sech) use \operatorname{} so that the function name
is rendered in Roman font and LaTeX handles spacing properly.
expr is the expression involving the function
exp is an exponent
'''
func = expr.func.__name__
if hasattr(self, '_print_' + func) and \
not isinstance(expr, AppliedUndef):
return getattr(self, '_print_' + func)(expr, exp)
else:
args = [str(self._print(arg)) for arg in expr.args]
# How inverse trig functions should be displayed, formats are:
# abbreviated: asin, full: arcsin, power: sin^-1
inv_trig_style = self._settings['inv_trig_style']
# If we are dealing with a power-style inverse trig function
inv_trig_power_case = False
# If it is applicable to fold the argument brackets
can_fold_brackets = self._settings['fold_func_brackets'] and \
len(args) == 1 and \
not self._needs_function_brackets(expr.args[0])
inv_trig_table = [
"asin", "acos", "atan",
"acsc", "asec", "acot",
"asinh", "acosh", "atanh",
"acsch", "asech", "acoth",
]
# If the function is an inverse trig function, handle the style
if func in inv_trig_table:
if inv_trig_style == "abbreviated":
pass
elif inv_trig_style == "full":
func = "arc" + func[1:]
elif inv_trig_style == "power":
func = func[1:]
inv_trig_power_case = True
# Can never fold brackets if we're raised to a power
if exp is not None:
can_fold_brackets = False
if inv_trig_power_case:
if func in accepted_latex_functions:
name = r"\%s^{-1}" % func
else:
name = r"\operatorname{%s}^{-1}" % func
elif exp is not None:
func_tex = self._hprint_Function(func)
func_tex = self.parenthesize_super(func_tex)
name = r'%s^{%s}' % (func_tex, exp)
else:
name = self._hprint_Function(func)
if can_fold_brackets:
if func in accepted_latex_functions:
# Wrap argument safely to avoid parse-time conflicts
# with the function name itself
name += r" {%s}"
else:
name += r"%s"
else:
name += r"{\left(%s \right)}"
if inv_trig_power_case and exp is not None:
name += r"^{%s}" % exp
return name % ",".join(args)
def _print_UndefinedFunction(self, expr):
return self._hprint_Function(str(expr))
def _print_ElementwiseApplyFunction(self, expr):
return r"{%s}_{\circ}\left({%s}\right)" % (
self._print(expr.function),
self._print(expr.expr),
)
@property
def _special_function_classes(self):
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.functions.special.gamma_functions import gamma, lowergamma
from sympy.functions.special.beta_functions import beta
from sympy.functions.special.delta_functions import DiracDelta
from sympy.functions.special.error_functions import Chi
return {KroneckerDelta: r'\delta',
gamma: r'\Gamma',
lowergamma: r'\gamma',
beta: r'\operatorname{B}',
DiracDelta: r'\delta',
Chi: r'\operatorname{Chi}'}
def _print_FunctionClass(self, expr):
for cls in self._special_function_classes:
if issubclass(expr, cls) and expr.__name__ == cls.__name__:
return self._special_function_classes[cls]
return self._hprint_Function(str(expr))
def _print_Lambda(self, expr):
symbols, expr = expr.args
if len(symbols) == 1:
symbols = self._print(symbols[0])
else:
symbols = self._print(tuple(symbols))
tex = r"\left( %s \mapsto %s \right)" % (symbols, self._print(expr))
return tex
def _print_IdentityFunction(self, expr):
return r"\left( x \mapsto x \right)"
def _hprint_variadic_function(self, expr, exp=None):
args = sorted(expr.args, key=default_sort_key)
texargs = [r"%s" % self._print(symbol) for symbol in args]
tex = r"\%s\left(%s\right)" % (str(expr.func).lower(),
", ".join(texargs))
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
_print_Min = _print_Max = _hprint_variadic_function
def _print_floor(self, expr, exp=None):
tex = r"\left\lfloor{%s}\right\rfloor" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_ceiling(self, expr, exp=None):
tex = r"\left\lceil{%s}\right\rceil" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_log(self, expr, exp=None):
if not self._settings["ln_notation"]:
tex = r"\log{\left(%s \right)}" % self._print(expr.args[0])
else:
tex = r"\ln{\left(%s \right)}" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_Abs(self, expr, exp=None):
tex = r"\left|{%s}\right|" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
_print_Determinant = _print_Abs
def _print_re(self, expr, exp=None):
if self._settings['gothic_re_im']:
tex = r"\Re{%s}" % self.parenthesize(expr.args[0], PRECEDENCE['Atom'])
else:
tex = r"\operatorname{{re}}{{{}}}".format(self.parenthesize(expr.args[0], PRECEDENCE['Atom']))
return self._do_exponent(tex, exp)
def _print_im(self, expr, exp=None):
if self._settings['gothic_re_im']:
tex = r"\Im{%s}" % self.parenthesize(expr.args[0], PRECEDENCE['Atom'])
else:
tex = r"\operatorname{{im}}{{{}}}".format(self.parenthesize(expr.args[0], PRECEDENCE['Atom']))
return self._do_exponent(tex, exp)
def _print_Not(self, e):
from sympy import Equivalent, Implies
if isinstance(e.args[0], Equivalent):
return self._print_Equivalent(e.args[0], r"\not\Leftrightarrow")
if isinstance(e.args[0], Implies):
return self._print_Implies(e.args[0], r"\not\Rightarrow")
if (e.args[0].is_Boolean):
return r"\neg \left(%s\right)" % self._print(e.args[0])
else:
return r"\neg %s" % self._print(e.args[0])
def _print_LogOp(self, args, char):
arg = args[0]
if arg.is_Boolean and not arg.is_Not:
tex = r"\left(%s\right)" % self._print(arg)
else:
tex = r"%s" % self._print(arg)
for arg in args[1:]:
if arg.is_Boolean and not arg.is_Not:
tex += r" %s \left(%s\right)" % (char, self._print(arg))
else:
tex += r" %s %s" % (char, self._print(arg))
return tex
def _print_And(self, e):
args = sorted(e.args, key=default_sort_key)
return self._print_LogOp(args, r"\wedge")
def _print_Or(self, e):
args = sorted(e.args, key=default_sort_key)
return self._print_LogOp(args, r"\vee")
def _print_Xor(self, e):
args = sorted(e.args, key=default_sort_key)
return self._print_LogOp(args, r"\veebar")
def _print_Implies(self, e, altchar=None):
return self._print_LogOp(e.args, altchar or r"\Rightarrow")
def _print_Equivalent(self, e, altchar=None):
args = sorted(e.args, key=default_sort_key)
return self._print_LogOp(args, altchar or r"\Leftrightarrow")
def _print_conjugate(self, expr, exp=None):
tex = r"\overline{%s}" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_polar_lift(self, expr, exp=None):
func = r"\operatorname{polar\_lift}"
arg = r"{\left(%s \right)}" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}%s" % (func, exp, arg)
else:
return r"%s%s" % (func, arg)
def _print_ExpBase(self, expr, exp=None):
# TODO should exp_polar be printed differently?
# what about exp_polar(0), exp_polar(1)?
tex = r"e^{%s}" % self._print(expr.args[0])
return self._do_exponent(tex, exp)
def _print_elliptic_k(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"K^{%s}%s" % (exp, tex)
else:
return r"K%s" % tex
def _print_elliptic_f(self, expr, exp=None):
tex = r"\left(%s\middle| %s\right)" % \
(self._print(expr.args[0]), self._print(expr.args[1]))
if exp is not None:
return r"F^{%s}%s" % (exp, tex)
else:
return r"F%s" % tex
def _print_elliptic_e(self, expr, exp=None):
if len(expr.args) == 2:
tex = r"\left(%s\middle| %s\right)" % \
(self._print(expr.args[0]), self._print(expr.args[1]))
else:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"E^{%s}%s" % (exp, tex)
else:
return r"E%s" % tex
def _print_elliptic_pi(self, expr, exp=None):
if len(expr.args) == 3:
tex = r"\left(%s; %s\middle| %s\right)" % \
(self._print(expr.args[0]), self._print(expr.args[1]),
self._print(expr.args[2]))
else:
tex = r"\left(%s\middle| %s\right)" % \
(self._print(expr.args[0]), self._print(expr.args[1]))
if exp is not None:
return r"\Pi^{%s}%s" % (exp, tex)
else:
return r"\Pi%s" % tex
def _print_beta(self, expr, exp=None):
tex = r"\left(%s, %s\right)" % (self._print(expr.args[0]),
self._print(expr.args[1]))
if exp is not None:
return r"\operatorname{B}^{%s}%s" % (exp, tex)
else:
return r"\operatorname{B}%s" % tex
def _print_uppergamma(self, expr, exp=None):
tex = r"\left(%s, %s\right)" % (self._print(expr.args[0]),
self._print(expr.args[1]))
if exp is not None:
return r"\Gamma^{%s}%s" % (exp, tex)
else:
return r"\Gamma%s" % tex
def _print_lowergamma(self, expr, exp=None):
tex = r"\left(%s, %s\right)" % (self._print(expr.args[0]),
self._print(expr.args[1]))
if exp is not None:
return r"\gamma^{%s}%s" % (exp, tex)
else:
return r"\gamma%s" % tex
def _hprint_one_arg_func(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}%s" % (self._print(expr.func), exp, tex)
else:
return r"%s%s" % (self._print(expr.func), tex)
_print_gamma = _hprint_one_arg_func
def _print_Chi(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\operatorname{Chi}^{%s}%s" % (exp, tex)
else:
return r"\operatorname{Chi}%s" % tex
def _print_expint(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[1])
nu = self._print(expr.args[0])
if exp is not None:
return r"\operatorname{E}_{%s}^{%s}%s" % (nu, exp, tex)
else:
return r"\operatorname{E}_{%s}%s" % (nu, tex)
def _print_fresnels(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"S^{%s}%s" % (exp, tex)
else:
return r"S%s" % tex
def _print_fresnelc(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"C^{%s}%s" % (exp, tex)
else:
return r"C%s" % tex
def _print_subfactorial(self, expr, exp=None):
tex = r"!%s" % self.parenthesize(expr.args[0], PRECEDENCE["Func"])
if exp is not None:
return r"\left(%s\right)^{%s}" % (tex, exp)
else:
return tex
def _print_factorial(self, expr, exp=None):
tex = r"%s!" % self.parenthesize(expr.args[0], PRECEDENCE["Func"])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_factorial2(self, expr, exp=None):
tex = r"%s!!" % self.parenthesize(expr.args[0], PRECEDENCE["Func"])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_binomial(self, expr, exp=None):
tex = r"{\binom{%s}{%s}}" % (self._print(expr.args[0]),
self._print(expr.args[1]))
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_RisingFactorial(self, expr, exp=None):
n, k = expr.args
base = r"%s" % self.parenthesize(n, PRECEDENCE['Func'])
tex = r"{%s}^{\left(%s\right)}" % (base, self._print(k))
return self._do_exponent(tex, exp)
def _print_FallingFactorial(self, expr, exp=None):
n, k = expr.args
sub = r"%s" % self.parenthesize(k, PRECEDENCE['Func'])
tex = r"{\left(%s\right)}_{%s}" % (self._print(n), sub)
return self._do_exponent(tex, exp)
def _hprint_BesselBase(self, expr, exp, sym):
tex = r"%s" % (sym)
need_exp = False
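        # When the symbol already carries a superscript (e.g. 'H^{(1)}' for
        # the Hankel functions), stacking '^{exp}' on it would be invalid
        # LaTeX, so the exponent is instead applied to the fully rendered
        # function below via self._do_exponent.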
if exp is not None:
if tex.find('^') == -1:
tex = r"%s^{%s}" % (tex, exp)
else:
need_exp = True
tex = r"%s_{%s}\left(%s\right)" % (tex, self._print(expr.order),
self._print(expr.argument))
if need_exp:
tex = self._do_exponent(tex, exp)
return tex
def _hprint_vec(self, vec):
if not vec:
return ""
s = ""
for i in vec[:-1]:
s += "%s, " % self._print(i)
s += self._print(vec[-1])
return s
def _print_besselj(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'J')
def _print_besseli(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'I')
def _print_besselk(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'K')
def _print_bessely(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'Y')
def _print_yn(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'y')
def _print_jn(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'j')
def _print_hankel1(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'H^{(1)}')
def _print_hankel2(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'H^{(2)}')
def _print_hn1(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'h^{(1)}')
def _print_hn2(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'h^{(2)}')
def _hprint_airy(self, expr, exp=None, notation=""):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}%s" % (notation, exp, tex)
else:
return r"%s%s" % (notation, tex)
def _hprint_airy_prime(self, expr, exp=None, notation=""):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"{%s^\prime}^{%s}%s" % (notation, exp, tex)
else:
return r"%s^\prime%s" % (notation, tex)
def _print_airyai(self, expr, exp=None):
return self._hprint_airy(expr, exp, 'Ai')
def _print_airybi(self, expr, exp=None):
return self._hprint_airy(expr, exp, 'Bi')
def _print_airyaiprime(self, expr, exp=None):
return self._hprint_airy_prime(expr, exp, 'Ai')
def _print_airybiprime(self, expr, exp=None):
return self._hprint_airy_prime(expr, exp, 'Bi')
def _print_hyper(self, expr, exp=None):
tex = r"{{}_{%s}F_{%s}\left(\begin{matrix} %s \\ %s \end{matrix}" \
r"\middle| {%s} \right)}" % \
(self._print(len(expr.ap)), self._print(len(expr.bq)),
self._hprint_vec(expr.ap), self._hprint_vec(expr.bq),
self._print(expr.argument))
if exp is not None:
tex = r"{%s}^{%s}" % (tex, exp)
return tex
def _print_meijerg(self, expr, exp=None):
tex = r"{G_{%s, %s}^{%s, %s}\left(\begin{matrix} %s & %s \\" \
r"%s & %s \end{matrix} \middle| {%s} \right)}" % \
(self._print(len(expr.ap)), self._print(len(expr.bq)),
self._print(len(expr.bm)), self._print(len(expr.an)),
self._hprint_vec(expr.an), self._hprint_vec(expr.aother),
self._hprint_vec(expr.bm), self._hprint_vec(expr.bother),
self._print(expr.argument))
if exp is not None:
tex = r"{%s}^{%s}" % (tex, exp)
return tex
def _print_dirichlet_eta(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\eta^{%s}%s" % (exp, tex)
return r"\eta%s" % tex
def _print_zeta(self, expr, exp=None):
if len(expr.args) == 2:
tex = r"\left(%s, %s\right)" % tuple(map(self._print, expr.args))
else:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\zeta^{%s}%s" % (exp, tex)
return r"\zeta%s" % tex
def _print_stieltjes(self, expr, exp=None):
if len(expr.args) == 2:
tex = r"_{%s}\left(%s\right)" % tuple(map(self._print, expr.args))
else:
tex = r"_{%s}" % self._print(expr.args[0])
if exp is not None:
return r"\gamma%s^{%s}" % (tex, exp)
return r"\gamma%s" % tex
def _print_lerchphi(self, expr, exp=None):
tex = r"\left(%s, %s, %s\right)" % tuple(map(self._print, expr.args))
if exp is None:
return r"\Phi%s" % tex
return r"\Phi^{%s}%s" % (exp, tex)
def _print_polylog(self, expr, exp=None):
s, z = map(self._print, expr.args)
tex = r"\left(%s\right)" % z
if exp is None:
return r"\operatorname{Li}_{%s}%s" % (s, tex)
return r"\operatorname{Li}_{%s}^{%s}%s" % (s, exp, tex)
def _print_jacobi(self, expr, exp=None):
n, a, b, x = map(self._print, expr.args)
tex = r"P_{%s}^{\left(%s,%s\right)}\left(%s\right)" % (n, a, b, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_gegenbauer(self, expr, exp=None):
n, a, x = map(self._print, expr.args)
tex = r"C_{%s}^{\left(%s\right)}\left(%s\right)" % (n, a, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_chebyshevt(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"T_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_chebyshevu(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"U_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_legendre(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"P_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_assoc_legendre(self, expr, exp=None):
n, a, x = map(self._print, expr.args)
tex = r"P_{%s}^{\left(%s\right)}\left(%s\right)" % (n, a, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_hermite(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"H_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_laguerre(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"L_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_assoc_laguerre(self, expr, exp=None):
n, a, x = map(self._print, expr.args)
tex = r"L_{%s}^{\left(%s\right)}\left(%s\right)" % (n, a, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_Ynm(self, expr, exp=None):
n, m, theta, phi = map(self._print, expr.args)
tex = r"Y_{%s}^{%s}\left(%s,%s\right)" % (n, m, theta, phi)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_Znm(self, expr, exp=None):
n, m, theta, phi = map(self._print, expr.args)
tex = r"Z_{%s}^{%s}\left(%s,%s\right)" % (n, m, theta, phi)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def __print_mathieu_functions(self, character, args, prime=False, exp=None):
a, q, z = map(self._print, args)
sup = r"^{\prime}" if prime else ""
exp = "" if not exp else "^{%s}" % exp
return r"%s%s\left(%s, %s, %s\right)%s" % (character, sup, a, q, z, exp)
def _print_mathieuc(self, expr, exp=None):
return self.__print_mathieu_functions("C", expr.args, exp=exp)
def _print_mathieus(self, expr, exp=None):
return self.__print_mathieu_functions("S", expr.args, exp=exp)
def _print_mathieucprime(self, expr, exp=None):
return self.__print_mathieu_functions("C", expr.args, prime=True, exp=exp)
def _print_mathieusprime(self, expr, exp=None):
return self.__print_mathieu_functions("S", expr.args, prime=True, exp=exp)
def _print_Rational(self, expr):
if expr.q != 1:
sign = ""
p = expr.p
if expr.p < 0:
sign = "- "
p = -p
if self._settings['fold_short_frac']:
return r"%s%d / %d" % (sign, p, expr.q)
return r"%s\frac{%d}{%d}" % (sign, p, expr.q)
else:
return self._print(expr.p)
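    # E.g. Rational(-7, 2) prints as r"- \frac{7}{2}" by default, and as
    # "- 7 / 2" when fold_short_frac=True (illustrative).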
def _print_Order(self, expr):
s = self._print(expr.expr)
if expr.point and any(p != S.Zero for p in expr.point) or \
len(expr.variables) > 1:
s += '; '
if len(expr.variables) > 1:
s += self._print(expr.variables)
elif expr.variables:
s += self._print(expr.variables[0])
s += r'\rightarrow '
if len(expr.point) > 1:
s += self._print(expr.point)
else:
s += self._print(expr.point[0])
return r"O\left(%s\right)" % s
def _print_Symbol(self, expr, style='plain'):
if expr in self._settings['symbol_names']:
return self._settings['symbol_names'][expr]
return self._deal_with_super_sub(expr.name, style=style)
_print_RandomSymbol = _print_Symbol
def _deal_with_super_sub(self, string, style='plain'):
if '{' in string:
name, supers, subs = string, [], []
else:
name, supers, subs = split_super_sub(string)
name = translate(name)
supers = [translate(sup) for sup in supers]
subs = [translate(sub) for sub in subs]
# apply the style only to the name
if style == 'bold':
name = "\\mathbf{{{}}}".format(name)
# glue all items together:
if supers:
name += "^{%s}" % " ".join(supers)
if subs:
name += "_{%s}" % " ".join(subs)
return name
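    # Illustrative: 'alpha_1' becomes r"\alpha_{1}" and 'x_ab' becomes
    # "x_{ab}"; with style='bold' the base name is wrapped in \mathbf{...}
    # before the sub- and superscripts are attached.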
def _print_Relational(self, expr):
if self._settings['itex']:
gt = r"\gt"
lt = r"\lt"
else:
gt = ">"
lt = "<"
charmap = {
"==": "=",
">": gt,
"<": lt,
">=": r"\geq",
"<=": r"\leq",
"!=": r"\neq",
}
return "%s %s %s" % (self._print(expr.lhs),
charmap[expr.rel_op], self._print(expr.rhs))
def _print_Piecewise(self, expr):
ecpairs = [r"%s & \text{for}\: %s" % (self._print(e), self._print(c))
for e, c in expr.args[:-1]]
if expr.args[-1].cond == true:
ecpairs.append(r"%s & \text{otherwise}" %
self._print(expr.args[-1].expr))
else:
ecpairs.append(r"%s & \text{for}\: %s" %
(self._print(expr.args[-1].expr),
self._print(expr.args[-1].cond)))
tex = r"\begin{cases} %s \end{cases}"
return tex % r" \\".join(ecpairs)
def _print_MatrixBase(self, expr):
lines = []
        for line in range(expr.rows):  # one LaTeX row per matrix row
lines.append(" & ".join([self._print(i) for i in expr[line, :]]))
mat_str = self._settings['mat_str']
if mat_str is None:
if self._settings['mode'] == 'inline':
mat_str = 'smallmatrix'
else:
if (expr.cols <= 10) is True:
mat_str = 'matrix'
else:
mat_str = 'array'
out_str = r'\begin{%MATSTR%}%s\end{%MATSTR%}'
out_str = out_str.replace('%MATSTR%', mat_str)
if mat_str == 'array':
out_str = out_str.replace('%s', '{' + 'c'*expr.cols + '}%s')
if self._settings['mat_delim']:
left_delim = self._settings['mat_delim']
right_delim = self._delim_dict[left_delim]
out_str = r'\left' + left_delim + out_str + \
r'\right' + right_delim
return out_str % r"\\".join(lines)
def _print_MatrixElement(self, expr):
return self.parenthesize(expr.parent, PRECEDENCE["Atom"], strict=True)\
+ '_{%s, %s}' % (self._print(expr.i), self._print(expr.j))
def _print_MatrixSlice(self, expr):
def latexslice(x, dim):
x = list(x)
if x[2] == 1:
del x[2]
if x[0] == 0:
x[0] = None
if x[1] == dim:
x[1] = None
return ':'.join(self._print(xi) if xi is not None else '' for xi in x)
return (self.parenthesize(expr.parent, PRECEDENCE["Atom"], strict=True) + r'\left[' +
latexslice(expr.rowslice, expr.parent.rows) + ', ' +
latexslice(expr.colslice, expr.parent.cols) + r'\right]')
def _print_BlockMatrix(self, expr):
return self._print(expr.blocks)
def _print_Transpose(self, expr):
mat = expr.arg
from sympy.matrices import MatrixSymbol
if not isinstance(mat, MatrixSymbol):
return r"\left(%s\right)^{T}" % self._print(mat)
else:
return "%s^{T}" % self.parenthesize(mat, precedence_traditional(expr), True)
def _print_Trace(self, expr):
mat = expr.arg
return r"\operatorname{tr}\left(%s \right)" % self._print(mat)
def _print_Adjoint(self, expr):
mat = expr.arg
from sympy.matrices import MatrixSymbol
if not isinstance(mat, MatrixSymbol):
return r"\left(%s\right)^{\dagger}" % self._print(mat)
else:
return r"%s^{\dagger}" % self._print(mat)
def _print_MatMul(self, expr):
from sympy import MatMul, Mul
parens = lambda x: self.parenthesize(x, precedence_traditional(expr),
False)
args = expr.args
if isinstance(args[0], Mul):
args = args[0].as_ordered_factors() + list(args[1:])
else:
args = list(args)
if isinstance(expr, MatMul) and _coeff_isneg(expr):
if args[0] == -1:
args = args[1:]
else:
args[0] = -args[0]
return '- ' + ' '.join(map(parens, args))
else:
return ' '.join(map(parens, args))
def _print_Mod(self, expr, exp=None):
if exp is not None:
return r'\left(%s\bmod{%s}\right)^{%s}' % \
(self.parenthesize(expr.args[0], PRECEDENCE['Mul'],
strict=True), self._print(expr.args[1]),
exp)
return r'%s\bmod{%s}' % (self.parenthesize(expr.args[0],
PRECEDENCE['Mul'], strict=True),
self._print(expr.args[1]))
def _print_HadamardProduct(self, expr):
args = expr.args
prec = PRECEDENCE['Pow']
parens = self.parenthesize
return r' \circ '.join(
map(lambda arg: parens(arg, prec, strict=True), args))
def _print_HadamardPower(self, expr):
if precedence_traditional(expr.exp) < PRECEDENCE["Mul"]:
template = r"%s^{\circ \left({%s}\right)}"
else:
template = r"%s^{\circ {%s}}"
return self._helper_print_standard_power(expr, template)
def _print_KroneckerProduct(self, expr):
args = expr.args
prec = PRECEDENCE['Pow']
parens = self.parenthesize
return r' \otimes '.join(
map(lambda arg: parens(arg, prec, strict=True), args))
def _print_MatPow(self, expr):
base, exp = expr.base, expr.exp
from sympy.matrices import MatrixSymbol
if not isinstance(base, MatrixSymbol):
return "\\left(%s\\right)^{%s}" % (self._print(base),
self._print(exp))
else:
return "%s^{%s}" % (self._print(base), self._print(exp))
def _print_MatrixSymbol(self, expr):
return self._print_Symbol(expr, style=self._settings[
'mat_symbol_style'])
def _print_ZeroMatrix(self, Z):
return r"\mathbb{0}" if self._settings[
'mat_symbol_style'] == 'plain' else r"\mathbf{0}"
def _print_OneMatrix(self, O):
return r"\mathbb{1}" if self._settings[
'mat_symbol_style'] == 'plain' else r"\mathbf{1}"
def _print_Identity(self, I):
return r"\mathbb{I}" if self._settings[
'mat_symbol_style'] == 'plain' else r"\mathbf{I}"
def _print_PermutationMatrix(self, P):
perm_str = self._print(P.args[0])
return "P_{%s}" % perm_str
def _print_NDimArray(self, expr):
if expr.rank() == 0:
return self._print(expr[()])
mat_str = self._settings['mat_str']
if mat_str is None:
if self._settings['mode'] == 'inline':
mat_str = 'smallmatrix'
else:
if (expr.rank() == 0) or (expr.shape[-1] <= 10):
mat_str = 'matrix'
else:
mat_str = 'array'
block_str = r'\begin{%MATSTR%}%s\end{%MATSTR%}'
block_str = block_str.replace('%MATSTR%', mat_str)
if self._settings['mat_delim']:
left_delim = self._settings['mat_delim']
right_delim = self._delim_dict[left_delim]
block_str = r'\left' + left_delim + block_str + \
r'\right' + right_delim
if expr.rank() == 0:
return block_str % ""
level_str = [[]] + [[] for i in range(expr.rank())]
shape_ranges = [list(range(i)) for i in expr.shape]
for outer_i in itertools.product(*shape_ranges):
level_str[-1].append(self._print(expr[outer_i]))
even = True
for back_outer_i in range(expr.rank()-1, -1, -1):
if len(level_str[back_outer_i+1]) < expr.shape[back_outer_i]:
break
if even:
level_str[back_outer_i].append(
r" & ".join(level_str[back_outer_i+1]))
else:
level_str[back_outer_i].append(
block_str % (r"\\".join(level_str[back_outer_i+1])))
if len(level_str[back_outer_i+1]) == 1:
level_str[back_outer_i][-1] = r"\left[" + \
level_str[back_outer_i][-1] + r"\right]"
even = not even
level_str[back_outer_i+1] = []
out_str = level_str[0][0]
if expr.rank() % 2 == 1:
out_str = block_str % out_str
return out_str
def _printer_tensor_indices(self, name, indices, index_map={}):
out_str = self._print(name)
last_valence = None
prev_map = None
for index in indices:
new_valence = index.is_up
if ((index in index_map) or prev_map) and \
last_valence == new_valence:
out_str += ","
if last_valence != new_valence:
if last_valence is not None:
out_str += "}"
if index.is_up:
out_str += "{}^{"
else:
out_str += "{}_{"
out_str += self._print(index.args[0])
if index in index_map:
out_str += "="
out_str += self._print(index_map[index])
prev_map = True
else:
prev_map = False
last_valence = new_valence
if last_valence is not None:
out_str += "}"
return out_str
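    # Illustrative: a tensor head A with an upper index i and a lower index j
    # renders roughly as "A{}^{i}{}_{j}"; the empty groups keep the upper and
    # lower indices horizontally staggered.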
def _print_Tensor(self, expr):
name = expr.args[0].args[0]
indices = expr.get_indices()
return self._printer_tensor_indices(name, indices)
def _print_TensorElement(self, expr):
name = expr.expr.args[0].args[0]
indices = expr.expr.get_indices()
index_map = expr.index_map
return self._printer_tensor_indices(name, indices, index_map)
def _print_TensMul(self, expr):
# prints expressions like "A(a)", "3*A(a)", "(1+x)*A(a)"
sign, args = expr._get_args_for_traditional_printer()
return sign + "".join(
[self.parenthesize(arg, precedence(expr)) for arg in args]
)
def _print_TensAdd(self, expr):
a = []
args = expr.args
for x in args:
a.append(self.parenthesize(x, precedence(expr)))
a.sort()
s = ' + '.join(a)
s = s.replace('+ -', '- ')
return s
def _print_TensorIndex(self, expr):
return "{}%s{%s}" % (
"^" if expr.is_up else "_",
self._print(expr.args[0])
)
def _print_PartialDerivative(self, expr):
if len(expr.variables) == 1:
return r"\frac{\partial}{\partial {%s}}{%s}" % (
self._print(expr.variables[0]),
self.parenthesize(expr.expr, PRECEDENCE["Mul"], False)
)
else:
return r"\frac{\partial^{%s}}{%s}{%s}" % (
len(expr.variables),
" ".join([r"\partial {%s}" % self._print(i) for i in expr.variables]),
self.parenthesize(expr.expr, PRECEDENCE["Mul"], False)
)
def _print_UniversalSet(self, expr):
return r"\mathbb{U}"
def _print_frac(self, expr, exp=None):
if exp is None:
return r"\operatorname{frac}{\left(%s\right)}" % self._print(expr.args[0])
else:
return r"\operatorname{frac}{\left(%s\right)}^{%s}" % (
self._print(expr.args[0]), exp)
def _print_tuple(self, expr):
if self._settings['decimal_separator'] == 'comma':
sep = ";"
elif self._settings['decimal_separator'] == 'period':
sep = ","
else:
raise ValueError('Unknown Decimal Separator')
if len(expr) == 1:
# 1-tuple needs a trailing separator
return self._add_parens_lspace(self._print(expr[0]) + sep)
else:
return self._add_parens_lspace(
(sep + r" \ ").join([self._print(i) for i in expr]))
def _print_TensorProduct(self, expr):
elements = [self._print(a) for a in expr.args]
return r' \otimes '.join(elements)
def _print_WedgeProduct(self, expr):
elements = [self._print(a) for a in expr.args]
return r' \wedge '.join(elements)
def _print_Tuple(self, expr):
return self._print_tuple(expr)
def _print_list(self, expr):
if self._settings['decimal_separator'] == 'comma':
return r"\left[ %s\right]" % \
r"; \ ".join([self._print(i) for i in expr])
elif self._settings['decimal_separator'] == 'period':
return r"\left[ %s\right]" % \
r", \ ".join([self._print(i) for i in expr])
else:
raise ValueError('Unknown Decimal Separator')
def _print_dict(self, d):
keys = sorted(d.keys(), key=default_sort_key)
items = []
for key in keys:
val = d[key]
items.append("%s : %s" % (self._print(key), self._print(val)))
return r"\left\{ %s\right\}" % r", \ ".join(items)
def _print_Dict(self, expr):
return self._print_dict(expr)
def _print_DiracDelta(self, expr, exp=None):
if len(expr.args) == 1 or expr.args[1] == 0:
tex = r"\delta\left(%s\right)" % self._print(expr.args[0])
else:
tex = r"\delta^{\left( %s \right)}\left( %s \right)" % (
self._print(expr.args[1]), self._print(expr.args[0]))
if exp:
tex = r"\left(%s\right)^{%s}" % (tex, exp)
return tex
def _print_SingularityFunction(self, expr):
shift = self._print(expr.args[0] - expr.args[1])
power = self._print(expr.args[2])
tex = r"{\left\langle %s \right\rangle}^{%s}" % (shift, power)
return tex
def _print_Heaviside(self, expr, exp=None):
tex = r"\theta\left(%s\right)" % self._print(expr.args[0])
if exp:
tex = r"\left(%s\right)^{%s}" % (tex, exp)
return tex
def _print_KroneckerDelta(self, expr, exp=None):
i = self._print(expr.args[0])
j = self._print(expr.args[1])
if expr.args[0].is_Atom and expr.args[1].is_Atom:
tex = r'\delta_{%s %s}' % (i, j)
else:
tex = r'\delta_{%s, %s}' % (i, j)
if exp is not None:
tex = r'\left(%s\right)^{%s}' % (tex, exp)
return tex
def _print_LeviCivita(self, expr, exp=None):
indices = map(self._print, expr.args)
if all(x.is_Atom for x in expr.args):
tex = r'\varepsilon_{%s}' % " ".join(indices)
else:
tex = r'\varepsilon_{%s}' % ", ".join(indices)
if exp:
tex = r'\left(%s\right)^{%s}' % (tex, exp)
return tex
def _print_RandomDomain(self, d):
if hasattr(d, 'as_boolean'):
return '\\text{Domain: }' + self._print(d.as_boolean())
elif hasattr(d, 'set'):
return ('\\text{Domain: }' + self._print(d.symbols) + '\\text{ in }' +
self._print(d.set))
elif hasattr(d, 'symbols'):
return '\\text{Domain on }' + self._print(d.symbols)
else:
return self._print(None)
def _print_FiniteSet(self, s):
items = sorted(s.args, key=default_sort_key)
return self._print_set(items)
def _print_set(self, s):
items = sorted(s, key=default_sort_key)
if self._settings['decimal_separator'] == 'comma':
items = "; ".join(map(self._print, items))
elif self._settings['decimal_separator'] == 'period':
items = ", ".join(map(self._print, items))
else:
raise ValueError('Unknown Decimal Separator')
return r"\left\{%s\right\}" % items
_print_frozenset = _print_set
def _print_Range(self, s):
dots = object()
if s.has(Symbol):
return self._print_Basic(s)
if s.start.is_infinite and s.stop.is_infinite:
if s.step.is_positive:
printset = dots, -1, 0, 1, dots
else:
printset = dots, 1, 0, -1, dots
elif s.start.is_infinite:
printset = dots, s[-1] - s.step, s[-1]
elif s.stop.is_infinite:
it = iter(s)
printset = next(it), next(it), dots
elif len(s) > 4:
it = iter(s)
printset = next(it), next(it), dots, s[-1]
else:
printset = tuple(s)
return (r"\left\{" +
r", ".join(self._print(el) if el is not dots else r'\ldots' for el in printset) +
r"\right\}")
def __print_number_polynomial(self, expr, letter, exp=None):
if len(expr.args) == 2:
if exp is not None:
return r"%s_{%s}^{%s}\left(%s\right)" % (letter,
self._print(expr.args[0]), exp,
self._print(expr.args[1]))
return r"%s_{%s}\left(%s\right)" % (letter,
self._print(expr.args[0]), self._print(expr.args[1]))
tex = r"%s_{%s}" % (letter, self._print(expr.args[0]))
if exp is not None:
tex = r"%s^{%s}" % (tex, exp)
return tex
def _print_bernoulli(self, expr, exp=None):
return self.__print_number_polynomial(expr, "B", exp)
def _print_bell(self, expr, exp=None):
if len(expr.args) == 3:
tex1 = r"B_{%s, %s}" % (self._print(expr.args[0]),
self._print(expr.args[1]))
tex2 = r"\left(%s\right)" % r", ".join(self._print(el) for
el in expr.args[2])
if exp is not None:
tex = r"%s^{%s}%s" % (tex1, exp, tex2)
else:
tex = tex1 + tex2
return tex
return self.__print_number_polynomial(expr, "B", exp)
def _print_fibonacci(self, expr, exp=None):
return self.__print_number_polynomial(expr, "F", exp)
def _print_lucas(self, expr, exp=None):
tex = r"L_{%s}" % self._print(expr.args[0])
if exp is not None:
tex = r"%s^{%s}" % (tex, exp)
return tex
def _print_tribonacci(self, expr, exp=None):
return self.__print_number_polynomial(expr, "T", exp)
def _print_SeqFormula(self, s):
dots = object()
if len(s.start.free_symbols) > 0 or len(s.stop.free_symbols) > 0:
return r"\left\{%s\right\}_{%s=%s}^{%s}" % (
self._print(s.formula),
self._print(s.variables[0]),
self._print(s.start),
self._print(s.stop)
)
if s.start is S.NegativeInfinity:
stop = s.stop
printset = (dots, s.coeff(stop - 3), s.coeff(stop - 2),
s.coeff(stop - 1), s.coeff(stop))
elif s.stop is S.Infinity or s.length > 4:
printset = s[:4]
printset.append(dots)
else:
printset = tuple(s)
return (r"\left[" +
r", ".join(self._print(el) if el is not dots else r'\ldots' for el in printset) +
r"\right]")
_print_SeqPer = _print_SeqFormula
_print_SeqAdd = _print_SeqFormula
_print_SeqMul = _print_SeqFormula
def _print_Interval(self, i):
if i.start == i.end:
return r"\left\{%s\right\}" % self._print(i.start)
else:
if i.left_open:
left = '('
else:
left = '['
if i.right_open:
right = ')'
else:
right = ']'
return r"\left%s%s, %s\right%s" % \
(left, self._print(i.start), self._print(i.end), right)
def _print_AccumulationBounds(self, i):
return r"\left\langle %s, %s\right\rangle" % \
(self._print(i.min), self._print(i.max))
def _print_Union(self, u):
prec = precedence_traditional(u)
args_str = [self.parenthesize(i, prec) for i in u.args]
return r" \cup ".join(args_str)
def _print_Complement(self, u):
prec = precedence_traditional(u)
args_str = [self.parenthesize(i, prec) for i in u.args]
return r" \setminus ".join(args_str)
def _print_Intersection(self, u):
prec = precedence_traditional(u)
args_str = [self.parenthesize(i, prec) for i in u.args]
return r" \cap ".join(args_str)
def _print_SymmetricDifference(self, u):
prec = precedence_traditional(u)
args_str = [self.parenthesize(i, prec) for i in u.args]
return r" \triangle ".join(args_str)
def _print_ProductSet(self, p):
prec = precedence_traditional(p)
if len(p.sets) >= 1 and not has_variety(p.sets):
return self.parenthesize(p.sets[0], prec) + "^{%d}" % len(p.sets)
return r" \times ".join(
self.parenthesize(set, prec) for set in p.sets)
def _print_EmptySet(self, e):
return r"\emptyset"
def _print_Naturals(self, n):
return r"\mathbb{N}"
def _print_Naturals0(self, n):
return r"\mathbb{N}_0"
def _print_Integers(self, i):
return r"\mathbb{Z}"
def _print_Rationals(self, i):
return r"\mathbb{Q}"
def _print_Reals(self, i):
return r"\mathbb{R}"
def _print_Complexes(self, i):
return r"\mathbb{C}"
def _print_ImageSet(self, s):
expr = s.lamda.expr
sig = s.lamda.signature
xys = ((self._print(x), self._print(y)) for x, y in zip(sig, s.base_sets))
xinys = r" , ".join(r"%s \in %s" % xy for xy in xys)
return r"\left\{%s\; |\; %s\right\}" % (self._print(expr), xinys)
def _print_ConditionSet(self, s):
vars_print = ', '.join([self._print(var) for var in Tuple(s.sym)])
if s.base_set is S.UniversalSet:
return r"\left\{%s \mid %s \right\}" % \
(vars_print, self._print(s.condition))
return r"\left\{%s \mid %s \in %s \wedge %s \right\}" % (
vars_print,
vars_print,
self._print(s.base_set),
self._print(s.condition))
def _print_ComplexRegion(self, s):
vars_print = ', '.join([self._print(var) for var in s.variables])
return r"\left\{%s\; |\; %s \in %s \right\}" % (
self._print(s.expr),
vars_print,
self._print(s.sets))
def _print_Contains(self, e):
return r"%s \in %s" % tuple(self._print(a) for a in e.args)
def _print_FourierSeries(self, s):
return self._print_Add(s.truncate()) + r' + \ldots'
def _print_FormalPowerSeries(self, s):
return self._print_Add(s.infinite)
def _print_FiniteField(self, expr):
return r"\mathbb{F}_{%s}" % expr.mod
def _print_IntegerRing(self, expr):
return r"\mathbb{Z}"
def _print_RationalField(self, expr):
return r"\mathbb{Q}"
def _print_RealField(self, expr):
return r"\mathbb{R}"
def _print_ComplexField(self, expr):
return r"\mathbb{C}"
def _print_PolynomialRing(self, expr):
domain = self._print(expr.domain)
symbols = ", ".join(map(self._print, expr.symbols))
return r"%s\left[%s\right]" % (domain, symbols)
def _print_FractionField(self, expr):
domain = self._print(expr.domain)
symbols = ", ".join(map(self._print, expr.symbols))
return r"%s\left(%s\right)" % (domain, symbols)
def _print_PolynomialRingBase(self, expr):
domain = self._print(expr.domain)
symbols = ", ".join(map(self._print, expr.symbols))
inv = ""
if not expr.is_Poly:
inv = r"S_<^{-1}"
return r"%s%s\left[%s\right]" % (inv, domain, symbols)
def _print_Poly(self, poly):
cls = poly.__class__.__name__
terms = []
for monom, coeff in poly.terms():
s_monom = ''
for i, exp in enumerate(monom):
if exp > 0:
if exp == 1:
s_monom += self._print(poly.gens[i])
else:
s_monom += self._print(pow(poly.gens[i], exp))
if coeff.is_Add:
if s_monom:
s_coeff = r"\left(%s\right)" % self._print(coeff)
else:
s_coeff = self._print(coeff)
else:
if s_monom:
if coeff is S.One:
terms.extend(['+', s_monom])
continue
if coeff is S.NegativeOne:
terms.extend(['-', s_monom])
continue
s_coeff = self._print(coeff)
if not s_monom:
s_term = s_coeff
else:
s_term = s_coeff + " " + s_monom
if s_term.startswith('-'):
terms.extend(['-', s_term[1:]])
else:
terms.extend(['+', s_term])
if terms[0] in ['-', '+']:
modifier = terms.pop(0)
if modifier == '-':
terms[0] = '-' + terms[0]
expr = ' '.join(terms)
gens = list(map(self._print, poly.gens))
domain = "domain=%s" % self._print(poly.get_domain())
args = ", ".join([expr] + gens + [domain])
if cls in accepted_latex_functions:
tex = r"\%s {\left(%s \right)}" % (cls, args)
else:
tex = r"\operatorname{%s}{\left( %s \right)}" % (cls, args)
return tex
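    # E.g. Poly(x**2 + 2*x, x) prints roughly as
    # r"\operatorname{Poly}{\left( x^{2} + 2 x, x, domain=\mathbb{Z} \right)}".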
def _print_ComplexRootOf(self, root):
cls = root.__class__.__name__
if cls == "ComplexRootOf":
cls = "CRootOf"
expr = self._print(root.expr)
index = root.index
if cls in accepted_latex_functions:
return r"\%s {\left(%s, %d\right)}" % (cls, expr, index)
else:
return r"\operatorname{%s} {\left(%s, %d\right)}" % (cls, expr,
index)
def _print_RootSum(self, expr):
cls = expr.__class__.__name__
args = [self._print(expr.expr)]
if expr.fun is not S.IdentityFunction:
args.append(self._print(expr.fun))
if cls in accepted_latex_functions:
return r"\%s {\left(%s\right)}" % (cls, ", ".join(args))
else:
return r"\operatorname{%s} {\left(%s\right)}" % (cls,
", ".join(args))
def _print_PolyElement(self, poly):
mul_symbol = self._settings['mul_symbol_latex']
return poly.str(self, PRECEDENCE, "{%s}^{%d}", mul_symbol)
def _print_FracElement(self, frac):
if frac.denom == 1:
return self._print(frac.numer)
else:
numer = self._print(frac.numer)
denom = self._print(frac.denom)
return r"\frac{%s}{%s}" % (numer, denom)
def _print_euler(self, expr, exp=None):
m, x = (expr.args[0], None) if len(expr.args) == 1 else expr.args
tex = r"E_{%s}" % self._print(m)
if exp is not None:
tex = r"%s^{%s}" % (tex, exp)
if x is not None:
tex = r"%s\left(%s\right)" % (tex, self._print(x))
return tex
def _print_catalan(self, expr, exp=None):
tex = r"C_{%s}" % self._print(expr.args[0])
if exp is not None:
tex = r"%s^{%s}" % (tex, exp)
return tex
def _print_UnifiedTransform(self, expr, s, inverse=False):
return r"\mathcal{{{}}}{}_{{{}}}\left[{}\right]\left({}\right)".format(s, '^{-1}' if inverse else '', self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
def _print_MellinTransform(self, expr):
return self._print_UnifiedTransform(expr, 'M')
def _print_InverseMellinTransform(self, expr):
return self._print_UnifiedTransform(expr, 'M', True)
def _print_LaplaceTransform(self, expr):
return self._print_UnifiedTransform(expr, 'L')
def _print_InverseLaplaceTransform(self, expr):
return self._print_UnifiedTransform(expr, 'L', True)
def _print_FourierTransform(self, expr):
return self._print_UnifiedTransform(expr, 'F')
def _print_InverseFourierTransform(self, expr):
return self._print_UnifiedTransform(expr, 'F', True)
def _print_SineTransform(self, expr):
return self._print_UnifiedTransform(expr, 'SIN')
def _print_InverseSineTransform(self, expr):
return self._print_UnifiedTransform(expr, 'SIN', True)
def _print_CosineTransform(self, expr):
return self._print_UnifiedTransform(expr, 'COS')
def _print_InverseCosineTransform(self, expr):
return self._print_UnifiedTransform(expr, 'COS', True)
def _print_DMP(self, p):
try:
if p.ring is not None:
# TODO incorporate order
return self._print(p.ring.to_sympy(p))
except SympifyError:
pass
return self._print(repr(p))
def _print_DMF(self, p):
return self._print_DMP(p)
def _print_Object(self, object):
return self._print(Symbol(object.name))
def _print_LambertW(self, expr):
if len(expr.args) == 1:
return r"W\left(%s\right)" % self._print(expr.args[0])
return r"W_{%s}\left(%s\right)" % \
(self._print(expr.args[1]), self._print(expr.args[0]))
def _print_Morphism(self, morphism):
domain = self._print(morphism.domain)
codomain = self._print(morphism.codomain)
return "%s\\rightarrow %s" % (domain, codomain)
def _print_TransferFunction(self, expr):
from sympy.core import Mul, Pow
num, den = expr.num, expr.den
res = Mul(num, Pow(den, -1, evaluate=False), evaluate=False)
return self._print_Mul(res)
def _print_Series(self, expr):
args = list(expr.args)
parens = lambda x: self.parenthesize(x, precedence_traditional(expr),
False)
return ' '.join(map(parens, args))
def _print_Parallel(self, expr):
args = list(expr.args)
parens = lambda x: self.parenthesize(x, precedence_traditional(expr),
False)
        # A Parallel configuration is a sum of transfer functions, so the
        # arguments are joined with '+'.
        return ' + '.join(map(parens, args))
def _print_Feedback(self, expr):
from sympy.physics.control import TransferFunction, Parallel, Series
num, tf = expr.num, TransferFunction(1, 1, expr.num.var)
num_arg_list = list(num.args) if isinstance(num, Series) else [num]
den_arg_list = list(expr.den.args) if isinstance(expr.den, Series) else [expr.den]
if isinstance(num, Series) and isinstance(expr.den, Series):
den = Parallel(tf, Series(*num_arg_list, *den_arg_list))
elif isinstance(num, Series) and isinstance(expr.den, TransferFunction):
if expr.den == tf:
den = Parallel(tf, Series(*num_arg_list))
else:
den = Parallel(tf, Series(*num_arg_list, expr.den))
elif isinstance(num, TransferFunction) and isinstance(expr.den, Series):
if num == tf:
den = Parallel(tf, Series(*den_arg_list))
else:
den = Parallel(tf, Series(num, *den_arg_list))
else:
if num == tf:
den = Parallel(tf, *den_arg_list)
elif expr.den == tf:
den = Parallel(tf, *num_arg_list)
else:
den = Parallel(tf, Series(*num_arg_list, *den_arg_list))
numer = self._print(num)
denom = self._print(den)
return r"\frac{%s}{%s}" % (numer, denom)
def _print_NamedMorphism(self, morphism):
pretty_name = self._print(Symbol(morphism.name))
pretty_morphism = self._print_Morphism(morphism)
return "%s:%s" % (pretty_name, pretty_morphism)
def _print_IdentityMorphism(self, morphism):
from sympy.categories import NamedMorphism
return self._print_NamedMorphism(NamedMorphism(
morphism.domain, morphism.codomain, "id"))
def _print_CompositeMorphism(self, morphism):
# All components of the morphism have names and it is thus
# possible to build the name of the composite.
component_names_list = [self._print(Symbol(component.name)) for
component in morphism.components]
component_names_list.reverse()
component_names = "\\circ ".join(component_names_list) + ":"
pretty_morphism = self._print_Morphism(morphism)
return component_names + pretty_morphism
def _print_Category(self, morphism):
return r"\mathbf{{{}}}".format(self._print(Symbol(morphism.name)))
def _print_Diagram(self, diagram):
if not diagram.premises:
# This is an empty diagram.
return self._print(S.EmptySet)
latex_result = self._print(diagram.premises)
if diagram.conclusions:
latex_result += "\\Longrightarrow %s" % \
self._print(diagram.conclusions)
return latex_result
def _print_DiagramGrid(self, grid):
latex_result = "\\begin{array}{%s}\n" % ("c" * grid.width)
for i in range(grid.height):
for j in range(grid.width):
if grid[i, j]:
latex_result += latex(grid[i, j])
latex_result += " "
if j != grid.width - 1:
latex_result += "& "
if i != grid.height - 1:
latex_result += "\\\\"
latex_result += "\n"
latex_result += "\\end{array}\n"
return latex_result
def _print_FreeModule(self, M):
return '{{{}}}^{{{}}}'.format(self._print(M.ring), self._print(M.rank))
def _print_FreeModuleElement(self, m):
# Print as row vector for convenience, for now.
return r"\left[ {} \right]".format(",".join(
'{' + self._print(x) + '}' for x in m))
def _print_SubModule(self, m):
return r"\left\langle {} \right\rangle".format(",".join(
'{' + self._print(x) + '}' for x in m.gens))
def _print_ModuleImplementedIdeal(self, m):
return r"\left\langle {} \right\rangle".format(",".join(
'{' + self._print(x) + '}' for [x] in m._module.gens))
def _print_Quaternion(self, expr):
# TODO: This expression is potentially confusing,
# shall we print it as `Quaternion( ... )`?
s = [self.parenthesize(i, PRECEDENCE["Mul"], strict=True)
for i in expr.args]
a = [s[0]] + [i+" "+j for i, j in zip(s[1:], "ijk")]
return " + ".join(a)
def _print_QuotientRing(self, R):
# TODO nicer fractions for few generators...
return r"\frac{{{}}}{{{}}}".format(self._print(R.ring),
self._print(R.base_ideal))
def _print_QuotientRingElement(self, x):
return r"{{{}}} + {{{}}}".format(self._print(x.data),
self._print(x.ring.base_ideal))
def _print_QuotientModuleElement(self, m):
return r"{{{}}} + {{{}}}".format(self._print(m.data),
self._print(m.module.killed_module))
def _print_QuotientModule(self, M):
# TODO nicer fractions for few generators...
return r"\frac{{{}}}{{{}}}".format(self._print(M.base),
self._print(M.killed_module))
def _print_MatrixHomomorphism(self, h):
return r"{{{}}} : {{{}}} \to {{{}}}".format(self._print(h._sympy_matrix()),
self._print(h.domain), self._print(h.codomain))
def _print_Manifold(self, manifold):
string = manifold.name.name
if '{' in string:
name, supers, subs = string, [], []
else:
name, supers, subs = split_super_sub(string)
name = translate(name)
supers = [translate(sup) for sup in supers]
subs = [translate(sub) for sub in subs]
name = r'\text{%s}' % name
if supers:
name += "^{%s}" % " ".join(supers)
if subs:
name += "_{%s}" % " ".join(subs)
return name
def _print_Patch(self, patch):
return r'\text{%s}_{%s}' % (self._print(patch.name), self._print(patch.manifold))
def _print_CoordSystem(self, coordsys):
return r'\text{%s}^{\text{%s}}_{%s}' % (
self._print(coordsys.name), self._print(coordsys.patch.name), self._print(coordsys.manifold)
)
def _print_CovarDerivativeOp(self, cvd):
return r'\mathbb{\nabla}_{%s}' % self._print(cvd._wrt)
def _print_BaseScalarField(self, field):
string = field._coord_sys.symbols[field._index].name
return r'\mathbf{{{}}}'.format(self._print(Symbol(string)))
def _print_BaseVectorField(self, field):
string = field._coord_sys.symbols[field._index].name
return r'\partial_{{{}}}'.format(self._print(Symbol(string)))
def _print_Differential(self, diff):
field = diff._form_field
if hasattr(field, '_coord_sys'):
string = field._coord_sys.symbols[field._index].name
return r'\operatorname{{d}}{}'.format(self._print(Symbol(string)))
else:
string = self._print(field)
return r'\operatorname{{d}}\left({}\right)'.format(string)
def _print_Tr(self, p):
# TODO: Handle indices
contents = self._print(p.args[0])
return r'\operatorname{{tr}}\left({}\right)'.format(contents)
def _print_totient(self, expr, exp=None):
if exp is not None:
return r'\left(\phi\left(%s\right)\right)^{%s}' % \
(self._print(expr.args[0]), exp)
return r'\phi\left(%s\right)' % self._print(expr.args[0])
def _print_reduced_totient(self, expr, exp=None):
if exp is not None:
return r'\left(\lambda\left(%s\right)\right)^{%s}' % \
(self._print(expr.args[0]), exp)
return r'\lambda\left(%s\right)' % self._print(expr.args[0])
def _print_divisor_sigma(self, expr, exp=None):
if len(expr.args) == 2:
tex = r"_%s\left(%s\right)" % tuple(map(self._print,
(expr.args[1], expr.args[0])))
else:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\sigma^{%s}%s" % (exp, tex)
return r"\sigma%s" % tex
def _print_udivisor_sigma(self, expr, exp=None):
if len(expr.args) == 2:
tex = r"_%s\left(%s\right)" % tuple(map(self._print,
(expr.args[1], expr.args[0])))
else:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\sigma^*^{%s}%s" % (exp, tex)
return r"\sigma^*%s" % tex
def _print_primenu(self, expr, exp=None):
if exp is not None:
return r'\left(\nu\left(%s\right)\right)^{%s}' % \
(self._print(expr.args[0]), exp)
return r'\nu\left(%s\right)' % self._print(expr.args[0])
def _print_primeomega(self, expr, exp=None):
if exp is not None:
return r'\left(\Omega\left(%s\right)\right)^{%s}' % \
(self._print(expr.args[0]), exp)
return r'\Omega\left(%s\right)' % self._print(expr.args[0])
def _print_Str(self, s):
return str(s.name)
def _print_float(self, expr):
return self._print(Float(expr))
def _print_int(self, expr):
return str(expr)
def _print_mpz(self, expr):
return str(expr)
def _print_mpq(self, expr):
return str(expr)
def emptyPrinter(self, expr):
# default to just printing as monospace, like would normally be shown
s = super().emptyPrinter(expr)
return r"\mathtt{\text{%s}}" % latex_escape(s)
def translate(s):
r'''
Check for a modifier ending the string. If present, convert the
modifier to latex and translate the rest recursively.
Given a description of a Greek letter or other special character,
return the appropriate latex.
Let everything else pass as given.
>>> from sympy.printing.latex import translate
>>> translate('alphahatdotprime')
"{\\dot{\\hat{\\alpha}}}'"
'''
# Process the rest
tex = tex_greek_dictionary.get(s)
if tex:
return tex
elif s.lower() in greek_letters_set:
return "\\" + s.lower()
elif s in other_symbols:
return "\\" + s
else:
# Process modifiers, if any, and recurse
for key in sorted(modifier_dict.keys(), key=lambda k:len(k), reverse=True):
if s.lower().endswith(key) and len(s) > len(key):
return modifier_dict[key](translate(s[:-len(key)]))
return s
@print_function(LatexPrinter)
def latex(expr, **settings):
r"""Convert the given expression to LaTeX string representation.
Parameters
==========
full_prec: boolean, optional
If set to True, a floating point number is printed with full precision.
fold_frac_powers : boolean, optional
Emit ``^{p/q}`` instead of ``^{\frac{p}{q}}`` for fractional powers.
fold_func_brackets : boolean, optional
Fold function brackets where applicable.
fold_short_frac : boolean, optional
Emit ``p / q`` instead of ``\frac{p}{q}`` when the denominator is
simple enough (at most two terms and no powers). The default value is
``True`` for inline mode, ``False`` otherwise.
inv_trig_style : string, optional
How inverse trig functions should be displayed. Can be one of
``abbreviated``, ``full``, or ``power``. Defaults to ``abbreviated``.
itex : boolean, optional
Specifies if itex-specific syntax is used, including emitting
``$$...$$``.
ln_notation : boolean, optional
If set to ``True``, ``\ln`` is used instead of default ``\log``.
long_frac_ratio : float or None, optional
The allowed ratio of the width of the numerator to the width of the
denominator before the printer breaks off long fractions. If ``None``
(the default value), long fractions are not broken up.
mat_delim : string, optional
The delimiter to wrap around matrices. Can be one of ``[``, ``(``, or
the empty string. Defaults to ``[``.
mat_str : string, optional
Which matrix environment string to emit. ``smallmatrix``, ``matrix``,
``array``, etc. Defaults to ``smallmatrix`` for inline mode, ``matrix``
for matrices of no more than 10 columns, and ``array`` otherwise.
mode: string, optional
Specifies how the generated code will be delimited. ``mode`` can be one
of ``plain``, ``inline``, ``equation`` or ``equation*``. If ``mode``
is set to ``plain``, then the resulting code will not be delimited at
all (this is the default). If ``mode`` is set to ``inline`` then inline
LaTeX ``$...$`` will be used. If ``mode`` is set to ``equation`` or
``equation*``, the resulting code will be enclosed in the ``equation``
or ``equation*`` environment (remember to import ``amsmath`` for
``equation*``), unless the ``itex`` option is set. In the latter case,
the ``$$...$$`` syntax is used.
mul_symbol : string or None, optional
The symbol to use for multiplication. Can be one of ``None``, ``ldot``,
``dot``, or ``times``.
order: string, optional
Any of the supported monomial orderings (currently ``lex``, ``grlex``,
or ``grevlex``), ``old``, and ``none``. This parameter does nothing for
Mul objects. Setting order to ``old`` uses the compatibility ordering
for Add defined in Printer. For very large expressions, set the
``order`` keyword to ``none`` if speed is a concern.
symbol_names : dictionary of strings mapped to symbols, optional
Dictionary of symbols and the custom strings they should be emitted as.
root_notation : boolean, optional
        If set to ``False``, exponents of the form 1/n are printed in fractional
        form. The default is ``True``, which prints the exponent in root form.
mat_symbol_style : string, optional
Can be either ``plain`` (default) or ``bold``. If set to ``bold``,
a MatrixSymbol A will be printed as ``\mathbf{A}``, otherwise as ``A``.
imaginary_unit : string, optional
String to use for the imaginary unit. Defined options are "i" (default)
and "j". Adding "r" or "t" in front gives ``\mathrm`` or ``\text``, so
"ri" leads to ``\mathrm{i}`` which gives `\mathrm{i}`.
gothic_re_im : boolean, optional
        If set to ``True``, `\Re` and `\Im` are used for ``re`` and ``im``, respectively.
        The default is ``False``, which leads to `\operatorname{re}` and `\operatorname{im}`.
decimal_separator : string, optional
        Specifies what separator to use between the whole and fractional parts of a
        floating point number: the default, ``period``, prints `2.5`, while ``comma``
        prints `2{,}5`. When ``comma`` is chosen, lists, sets, and tuples are printed
        with a semicolon separating the elements: for example, [1; 2; 3] instead of
        [1, 2, 3].
parenthesize_super : boolean, optional
If set to ``False``, superscripted expressions will not be parenthesized when
powered. Default is ``True``, which parenthesizes the expression when powered.
min: Integer or None, optional
Sets the lower bound for the exponent to print floating point numbers in
fixed-point format.
max: Integer or None, optional
Sets the upper bound for the exponent to print floating point numbers in
fixed-point format.
Notes
=====
    Not using a print statement for printing results in double backslashes for
    LaTeX commands, since that is how Python escapes backslashes in strings.
>>> from sympy import latex, Rational
>>> from sympy.abc import tau
>>> latex((2*tau)**Rational(7,2))
'8 \\sqrt{2} \\tau^{\\frac{7}{2}}'
>>> print(latex((2*tau)**Rational(7,2)))
8 \sqrt{2} \tau^{\frac{7}{2}}
Examples
========
>>> from sympy import latex, pi, sin, asin, Integral, Matrix, Rational, log
>>> from sympy.abc import x, y, mu, r, tau
Basic usage:
>>> print(latex((2*tau)**Rational(7,2)))
8 \sqrt{2} \tau^{\frac{7}{2}}
``mode`` and ``itex`` options:
>>> print(latex((2*mu)**Rational(7,2), mode='plain'))
8 \sqrt{2} \mu^{\frac{7}{2}}
>>> print(latex((2*tau)**Rational(7,2), mode='inline'))
$8 \sqrt{2} \tau^{7 / 2}$
>>> print(latex((2*mu)**Rational(7,2), mode='equation*'))
\begin{equation*}8 \sqrt{2} \mu^{\frac{7}{2}}\end{equation*}
>>> print(latex((2*mu)**Rational(7,2), mode='equation'))
\begin{equation}8 \sqrt{2} \mu^{\frac{7}{2}}\end{equation}
>>> print(latex((2*mu)**Rational(7,2), mode='equation', itex=True))
$$8 \sqrt{2} \mu^{\frac{7}{2}}$$
Fraction options:
>>> print(latex((2*tau)**Rational(7,2), fold_frac_powers=True))
8 \sqrt{2} \tau^{7/2}
>>> print(latex((2*tau)**sin(Rational(7,2))))
\left(2 \tau\right)^{\sin{\left(\frac{7}{2} \right)}}
>>> print(latex((2*tau)**sin(Rational(7,2)), fold_func_brackets=True))
\left(2 \tau\right)^{\sin {\frac{7}{2}}}
>>> print(latex(3*x**2/y))
\frac{3 x^{2}}{y}
>>> print(latex(3*x**2/y, fold_short_frac=True))
3 x^{2} / y
>>> print(latex(Integral(r, r)/2/pi, long_frac_ratio=2))
\frac{\int r\, dr}{2 \pi}
>>> print(latex(Integral(r, r)/2/pi, long_frac_ratio=0))
\frac{1}{2 \pi} \int r\, dr
Multiplication options:
>>> print(latex((2*tau)**sin(Rational(7,2)), mul_symbol="times"))
\left(2 \times \tau\right)^{\sin{\left(\frac{7}{2} \right)}}
Trig options:
>>> print(latex(asin(Rational(7,2))))
\operatorname{asin}{\left(\frac{7}{2} \right)}
>>> print(latex(asin(Rational(7,2)), inv_trig_style="full"))
\arcsin{\left(\frac{7}{2} \right)}
>>> print(latex(asin(Rational(7,2)), inv_trig_style="power"))
\sin^{-1}{\left(\frac{7}{2} \right)}
Matrix options:
>>> print(latex(Matrix(2, 1, [x, y])))
\left[\begin{matrix}x\\y\end{matrix}\right]
>>> print(latex(Matrix(2, 1, [x, y]), mat_str = "array"))
\left[\begin{array}{c}x\\y\end{array}\right]
>>> print(latex(Matrix(2, 1, [x, y]), mat_delim="("))
\left(\begin{matrix}x\\y\end{matrix}\right)
Custom printing of symbols:
>>> print(latex(x**2, symbol_names={x: 'x_i'}))
x_i^{2}
Logarithms:
>>> print(latex(log(10)))
\log{\left(10 \right)}
>>> print(latex(log(10), ln_notation=True))
\ln{\left(10 \right)}
``latex()`` also supports the builtin container types :class:`list`,
:class:`tuple`, and :class:`dict`:
>>> print(latex([2/x, y], mode='inline'))
$\left[ 2 / x, \ y\right]$
Unsupported types are rendered as monospaced plaintext:
>>> print(latex(int))
\mathtt{\text{<class 'int'>}}
>>> print(latex("plain % text"))
\mathtt{\text{plain \% text}}
See :ref:`printer_method_example` for an example of how to override
this behavior for your own types by implementing ``_latex``.
.. versionchanged:: 1.7.0
Unsupported types no longer have their ``str`` representation treated as valid latex.
"""
return LatexPrinter(settings).doprint(expr)
def print_latex(expr, **settings):
"""Prints LaTeX representation of the given expression. Takes the same
settings as ``latex()``."""
print(latex(expr, **settings))
def multiline_latex(lhs, rhs, terms_per_line=1, environment="align*", use_dots=False, **settings):
r"""
This function generates a LaTeX equation with a multiline right-hand side
in an ``align*``, ``eqnarray`` or ``IEEEeqnarray`` environment.
Parameters
==========
lhs : Expr
Left-hand side of equation
rhs : Expr
Right-hand side of equation
terms_per_line : integer, optional
Number of terms per line to print. Default is 1.
environment : "string", optional
Which LaTeX wnvironment to use for the output. Options are "align*"
(default), "eqnarray", and "IEEEeqnarray".
use_dots : boolean, optional
If ``True``, ``\\dots`` is added to the end of each line. Default is ``False``.
Examples
========
>>> from sympy import multiline_latex, symbols, sin, cos, exp, log, I
>>> x, y, alpha = symbols('x y alpha')
>>> expr = sin(alpha*y) + exp(I*alpha) - cos(log(y))
>>> print(multiline_latex(x, expr))
\begin{align*}
x = & e^{i \alpha} \\
& + \sin{\left(\alpha y \right)} \\
& - \cos{\left(\log{\left(y \right)} \right)}
\end{align*}
Using at most two terms per line:
>>> print(multiline_latex(x, expr, 2))
\begin{align*}
x = & e^{i \alpha} + \sin{\left(\alpha y \right)} \\
& - \cos{\left(\log{\left(y \right)} \right)}
\end{align*}
Using ``eqnarray`` and dots:
>>> print(multiline_latex(x, expr, terms_per_line=2, environment="eqnarray", use_dots=True))
\begin{eqnarray}
x & = & e^{i \alpha} + \sin{\left(\alpha y \right)} \dots\nonumber\\
& & - \cos{\left(\log{\left(y \right)} \right)}
\end{eqnarray}
Using ``IEEEeqnarray``:
>>> print(multiline_latex(x, expr, environment="IEEEeqnarray"))
\begin{IEEEeqnarray}{rCl}
x & = & e^{i \alpha} \nonumber\\
& & + \sin{\left(\alpha y \right)} \nonumber\\
& & - \cos{\left(\log{\left(y \right)} \right)}
\end{IEEEeqnarray}
Notes
=====
All optional parameters from ``latex`` can also be used.
"""
# Based on code from https://github.com/sympy/sympy/issues/3001
l = LatexPrinter(**settings)
if environment == "eqnarray":
result = r'\begin{eqnarray}' + '\n'
first_term = '& = &'
nonumber = r'\nonumber'
end_term = '\n\\end{eqnarray}'
doubleet = True
elif environment == "IEEEeqnarray":
result = r'\begin{IEEEeqnarray}{rCl}' + '\n'
first_term = '& = &'
nonumber = r'\nonumber'
end_term = '\n\\end{IEEEeqnarray}'
doubleet = True
elif environment == "align*":
result = r'\begin{align*}' + '\n'
first_term = '= &'
nonumber = ''
end_term = '\n\\end{align*}'
doubleet = False
else:
raise ValueError("Unknown environment: {}".format(environment))
dots = ''
if use_dots:
dots=r'\dots'
terms = rhs.as_ordered_terms()
n_terms = len(terms)
term_count = 1
for i in range(n_terms):
term = terms[i]
term_start = ''
term_end = ''
sign = '+'
if term_count > terms_per_line:
if doubleet:
term_start = '& & '
else:
term_start = '& '
term_count = 1
if term_count == terms_per_line:
# End of line
if i < n_terms-1:
# There are terms remaining
term_end = dots + nonumber + r'\\' + '\n'
else:
term_end = ''
if term.as_ordered_factors()[0] == -1:
term = -1*term
sign = r'-'
if i == 0: # beginning
if sign == '+':
sign = ''
result += r'{:s} {:s}{:s} {:s} {:s}'.format(l.doprint(lhs),
first_term, sign, l.doprint(term), term_end)
else:
result += r'{:s}{:s} {:s} {:s}'.format(term_start, sign,
l.doprint(term), term_end)
term_count += 1
result += end_term
return result
|
0da82ef731f8bba722e818a5fd0144d60b4c3433b29b085cff3d9c256e38f143 | """Printing subsystem driver
SymPy's printing system works the following way: any expression can be
passed to a designated Printer, which is then responsible for returning an
adequate representation of that expression.
**The basic concept is the following:**
1. Let the object print itself if it knows how.
2. Take the best fitting method defined in the printer.
3. As fall-back use the emptyPrinter method for the printer.
Which Method is Responsible for Printing?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The whole printing process is started by calling ``.doprint(expr)`` on the
printer that you want to use. This method looks for an appropriate method
that can print the given expression in the style that the printer defines.
While looking for the method, it follows these steps:
1. **Let the object print itself if it knows how.**
The printer looks for a specific method in every object. The name of that method
depends on the specific printer and is defined under ``Printer.printmethod``.
For example, StrPrinter calls ``_sympystr`` and LatexPrinter calls ``_latex``.
Look at the documentation of the printer that you want to use.
The name of the method is specified there.
This was the original way of doing printing in sympy. Every class had
its own latex, mathml, str and repr methods, but it turned out that it
is hard to produce a high quality printer if all the methods are spread
out that far. Therefore all printing code was combined into the different
printers, which works great for built-in sympy objects, but not as well
for user defined classes, where it is inconvenient to patch the
printers.
2. **Take the best fitting method defined in the printer.**
The printer loops through expr classes (class + its bases), and tries
to dispatch the work to ``_print_<EXPR_CLASS>``
e.g., suppose we have the following class hierarchy::
Basic
|
Atom
|
Number
|
Rational
then, for ``expr=Rational(...)``, the Printer will try
to call printer methods in the order as shown in the figure below::
p._print(expr)
|
|-- p._print_Rational(expr)
|
|-- p._print_Number(expr)
|
|-- p._print_Atom(expr)
|
`-- p._print_Basic(expr)
if ``._print_Rational`` method exists in the printer, then it is called,
and the result is returned back. Otherwise, the printer tries to call
``._print_Number`` and so on.
3. **As a fall-back use the emptyPrinter method for the printer.**
As fall-back ``self.emptyPrinter`` will be called with the expression. If
not defined in the Printer subclass this will be the same as ``str(expr)``.
.. _printer_example:
Example of Custom Printer
^^^^^^^^^^^^^^^^^^^^^^^^^
In the example below, we have a printer which prints the derivative of a function
in a shorter form.
.. code-block:: python
from sympy import Symbol
from sympy.printing.latex import LatexPrinter, print_latex
from sympy.core.function import UndefinedFunction, Function
class MyLatexPrinter(LatexPrinter):
\"\"\"Print derivative of a function of symbols in a shorter form.
\"\"\"
def _print_Derivative(self, expr):
function, *vars = expr.args
if not isinstance(type(function), UndefinedFunction) or \\
not all(isinstance(i, Symbol) for i in vars):
return super()._print_Derivative(expr)
# If you want the printer to work correctly for nested
# expressions then use self._print() instead of str() or latex().
# See the example of nested modulo below in the custom printing
# method section.
return "{}_{{{}}}".format(
self._print(Symbol(function.func.__name__)),
''.join(self._print(i) for i in vars))
def print_my_latex(expr):
\"\"\" Most of the printers define their own wrappers for print().
These wrappers usually take printer settings. Our printer does not have
any settings.
\"\"\"
print(MyLatexPrinter().doprint(expr))
y = Symbol("y")
x = Symbol("x")
f = Function("f")
expr = f(x, y).diff(x, y)
# Print the expression using the normal latex printer and our custom
# printer.
print_latex(expr)
print_my_latex(expr)
The output of the code above is::
\\frac{\\partial^{2}}{\\partial x\\partial y} f{\\left(x,y \\right)}
f_{xy}
.. _printer_method_example:
Example of Custom Printing Method
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In the example below, the latex printing of the modulo operator is modified.
This is done by overriding the method ``_latex`` of ``Mod``.
>>> from sympy import Symbol, Mod, Integer
>>> from sympy.printing.latex import print_latex
>>> # Always use printer._print()
>>> class ModOp(Mod):
... def _latex(self, printer):
... a, b = [printer._print(i) for i in self.args]
... return r"\\operatorname{Mod}{\\left( %s,%s \\right)}" % (a,b)
Comparing the output of our custom operator to the builtin one:
>>> x = Symbol('x')
>>> m = Symbol('m')
>>> print_latex(Mod(x, m))
x\\bmod{m}
>>> print_latex(ModOp(x, m))
\\operatorname{Mod}{\\left( x,m \\right)}
Common mistakes
~~~~~~~~~~~~~~~
It's important to always use ``self._print(obj)`` to print subcomponents of
an expression when customizing a printer. Mistakes include:
1. Using ``self.doprint(obj)`` instead:
>>> # This example does not work properly, as only the outermost call may use
>>> # doprint.
>>> class ModOpModeWrong(Mod):
... def _latex(self, printer):
... a, b = [printer.doprint(i) for i in self.args]
... return r"\\operatorname{Mod}{\\left( %s,%s \\right)}" % (a,b)
This fails when the `mode` argument is passed to the printer:
>>> print_latex(ModOp(x, m), mode='inline') # ok
$\\operatorname{Mod}{\\left( x,m \\right)}$
>>> print_latex(ModOpModeWrong(x, m), mode='inline') # bad
$\\operatorname{Mod}{\\left( $x$,$m$ \\right)}$
2. Using ``str(obj)`` instead:
>>> class ModOpNestedWrong(Mod):
... def _latex(self, printer):
... a, b = [str(i) for i in self.args]
... return r"\\operatorname{Mod}{\\left( %s,%s \\right)}" % (a,b)
This fails on nested objects:
>>> # Nested modulo.
>>> print_latex(ModOp(ModOp(x, m), Integer(7))) # ok
\\operatorname{Mod}{\\left( \\operatorname{Mod}{\\left( x,m \\right)},7 \\right)}
>>> print_latex(ModOpNestedWrong(ModOpNestedWrong(x, m), Integer(7))) # bad
\\operatorname{Mod}{\\left( ModOpNestedWrong(x, m),7 \\right)}
3. Using ``LatexPrinter()._print(obj)`` instead.
>>> from sympy.printing.latex import LatexPrinter
>>> class ModOpSettingsWrong(Mod):
... def _latex(self, printer):
... a, b = [LatexPrinter()._print(i) for i in self.args]
... return r"\\operatorname{Mod}{\\left( %s,%s \\right)}" % (a,b)
This causes all the settings to be discarded in the subobjects. As an
example, the ``full_prec`` setting which shows floats to full precision is
ignored:
>>> from sympy import Float
>>> print_latex(ModOp(Float(1) * x, m), full_prec=True) # ok
\\operatorname{Mod}{\\left( 1.00000000000000 x,m \\right)}
>>> print_latex(ModOpSettingsWrong(Float(1) * x, m), full_prec=True) # bad
\\operatorname{Mod}{\\left( 1.0 x,m \\right)}
"""
from typing import Any, Dict, Type
import inspect
from contextlib import contextmanager
from functools import cmp_to_key, update_wrapper
from sympy import Basic, Add
from sympy.core.core import BasicMeta
from sympy.core.function import AppliedUndef, UndefinedFunction, Function
@contextmanager
def printer_context(printer, **kwargs):
original = printer._context.copy()
try:
printer._context.update(kwargs)
yield
finally:
printer._context = original
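# A minimal sketch of how ``printer_context`` can be used from a custom
# printing method: the keyword arguments become temporary entries in
# ``printer._context`` for the duration of the ``with`` block and are restored
# afterwards.  ``_print_MyWrapper`` and the ``inside_wrapper`` flag are
# hypothetical names used only for illustration.
#
#     def _print_MyWrapper(self, expr):
#         with printer_context(self, inside_wrapper=True):
#             # nested prints can check self._context.get('inside_wrapper')
#             return "wrap(%s)" % self._print(expr.args[0])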
class Printer:
""" Generic printer
Its job is to provide infrastructure for implementing new printers easily.
If you want to define your custom Printer or your custom printing method
for your custom class then see the example above: printer_example_ .
"""
_global_settings = {} # type: Dict[str, Any]
_default_settings = {} # type: Dict[str, Any]
printmethod = None # type: str
@classmethod
def _get_initial_settings(cls):
settings = cls._default_settings.copy()
for key, val in cls._global_settings.items():
if key in cls._default_settings:
settings[key] = val
return settings
def __init__(self, settings=None):
self._str = str
self._settings = self._get_initial_settings()
self._context = dict() # mutable during printing
if settings is not None:
self._settings.update(settings)
if len(self._settings) > len(self._default_settings):
for key in self._settings:
if key not in self._default_settings:
raise TypeError("Unknown setting '%s'." % key)
# _print_level is the number of times self._print() was recursively
# called. See StrPrinter._print_Float() for an example of usage
self._print_level = 0
@classmethod
def set_global_settings(cls, **settings):
"""Set system-wide printing settings. """
for key, val in settings.items():
if val is not None:
cls._global_settings[key] = val
@property
def order(self):
if 'order' in self._settings:
return self._settings['order']
else:
raise AttributeError("No order defined.")
def doprint(self, expr):
"""Returns printer's representation for expr (as a string)"""
return self._str(self._print(expr))
def _print(self, expr, **kwargs):
"""Internal dispatcher
Tries the following concepts to print an expression:
1. Let the object print itself if it knows how.
2. Take the best fitting method defined in the printer.
3. As fall-back use the emptyPrinter method for the printer.
"""
self._print_level += 1
try:
# If the printer defines a name for a printing method
# (Printer.printmethod) and the object knows for itself how it
# should be printed, use that method.
if (self.printmethod and hasattr(expr, self.printmethod)
and not isinstance(expr, BasicMeta)):
return getattr(expr, self.printmethod)(self, **kwargs)
# See if the class of expr is known, or if one of its super
# classes is known, and use that print function
# Exception: ignore the subclasses of Undefined, so that, e.g.,
# Function('gamma') does not get dispatched to _print_gamma
classes = type(expr).__mro__
if AppliedUndef in classes:
classes = classes[classes.index(AppliedUndef):]
if UndefinedFunction in classes:
classes = classes[classes.index(UndefinedFunction):]
# Another exception: if someone subclasses a known function, e.g.,
# gamma, and changes the name, then ignore _print_gamma
if Function in classes:
i = classes.index(Function)
classes = tuple(c for c in classes[:i] if \
c.__name__ == classes[0].__name__ or \
c.__name__.endswith("Base")) + classes[i:]
for cls in classes:
printmethod = '_print_' + cls.__name__
if hasattr(self, printmethod):
return getattr(self, printmethod)(expr, **kwargs)
# Unknown object, fall back to the emptyPrinter.
return self.emptyPrinter(expr)
finally:
self._print_level -= 1
def emptyPrinter(self, expr):
return str(expr)
def _as_ordered_terms(self, expr, order=None):
"""A compatibility function for ordering terms in Add. """
order = order or self.order
if order == 'old':
return sorted(Add.make_args(expr), key=cmp_to_key(Basic._compare_pretty))
elif order == 'none':
return list(expr.args)
else:
return expr.as_ordered_terms(order=order)
class _PrintFunction:
"""
Function wrapper to replace ``**settings`` in the signature with printer defaults
"""
def __init__(self, f, print_cls: Type[Printer]):
# find all the non-setting arguments
params = list(inspect.signature(f).parameters.values())
assert params.pop(-1).kind == inspect.Parameter.VAR_KEYWORD
self.__other_params = params
self.__print_cls = print_cls
update_wrapper(self, f)
def __repr__(self) -> str:
return repr(self.__wrapped__) # type:ignore
def __call__(self, *args, **kwargs):
return self.__wrapped__(*args, **kwargs)
@property
def __signature__(self) -> inspect.Signature:
settings = self.__print_cls._get_initial_settings()
return inspect.Signature(
parameters=self.__other_params + [
inspect.Parameter(k, inspect.Parameter.KEYWORD_ONLY, default=v)
for k, v in settings.items()
],
return_annotation=self.__wrapped__.__annotations__.get('return', inspect.Signature.empty) # type:ignore
)
def print_function(print_cls):
""" A decorator to replace kwargs with the printer settings in __signature__ """
def decorator(f):
return _PrintFunction(f, print_cls)
return decorator
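# A minimal sketch of how ``print_function`` is applied to a top-level
# printing helper: the trailing ``**settings`` in the wrapped function is
# replaced, in ``inspect.signature``, by the printer's default settings as
# keyword-only parameters.  ``MyPrinter`` is a hypothetical Printer subclass
# with a populated ``_default_settings``.
#
#     @print_function(MyPrinter)
#     def my_print(expr, **settings):
#         return MyPrinter(settings).doprint(expr)
#
#     # inspect.signature(my_print) now lists MyPrinter's defaults explicitly
#     # instead of an opaque ``**settings``.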
|
27dc6e30be2b6370cd1eece613d2cc7aae83e2bbe428f8569882ba3cfb9b7f9a | from sympy.core.containers import Tuple
from types import FunctionType
class TableForm:
r"""
Create a nice table representation of data.
Examples
========
>>> from sympy import TableForm
>>> t = TableForm([[5, 7], [4, 2], [10, 3]])
>>> print(t)
5 7
4 2
10 3
You can use SymPy's printing system to produce tables in any
format (ascii, latex, html, ...).
>>> print(t.as_latex())
\begin{tabular}{l l}
$5$ & $7$ \\
$4$ & $2$ \\
$10$ & $3$ \\
\end{tabular}
"""
def __init__(self, data, **kwarg):
"""
Creates a TableForm.
Parameters:
data ...
2D data to be put into the table; data can be
given as a Matrix
headings ...
gives the labels for rows and columns:
Can be a single argument that applies to both
dimensions:
- None ... no labels
- "automatic" ... labels are 1, 2, 3, ...
Can be a list of labels for rows and columns:
The labels for each dimension can be given
as None, "automatic", or [l1, l2, ...] e.g.
["automatic", None] will number the rows
[default: None]
alignments ...
alignment of the columns with:
- "left" or "<"
- "center" or "^"
- "right" or ">"
When given as a single value, the value is used for
all columns. The row headings (if given) will be
right justified unless an explicit alignment is
given for it and all other columns.
[default: "left"]
formats ...
a list of format strings or functions that accept
3 arguments (entry, row number, col number) and
return a string for the table entry. (If a function
returns None then the _print method will be used.)
wipe_zeros ...
Don't show zeros in the table.
[default: True]
pad ...
the string to use to indicate a missing value (e.g.
elements that are None or those that are missing
from the end of a row; any row that is shorter
than the rest is assumed to have missing values).
When None, nothing will be shown for values that
are missing from the end of a row; values that are
None, however, will be shown.
[default: None]
Examples
========
>>> from sympy import TableForm, Symbol
>>> TableForm([[5, 7], [4, 2], [10, 3]])
5 7
4 2
10 3
>>> TableForm([list('.'*i) for i in range(1, 4)], headings='automatic')
| 1 2 3
---------
1 | .
2 | . .
3 | . . .
>>> TableForm([[Symbol('.'*(j if not i%2 else 1)) for i in range(3)]
... for j in range(4)], alignments='rcl')
.
. . .
.. . ..
... . ...
"""
from sympy import Symbol, S, Matrix
from sympy.core.sympify import SympifyError
# We only support 2D data. Check the consistency:
if isinstance(data, Matrix):
data = data.tolist()
_h = len(data)
# fill out any short lines
pad = kwarg.get('pad', None)
ok_None = False
if pad is None:
pad = " "
ok_None = True
pad = Symbol(pad)
_w = max(len(line) for line in data)
for i, line in enumerate(data):
if len(line) != _w:
line.extend([pad]*(_w - len(line)))
for j, lj in enumerate(line):
if lj is None:
if not ok_None:
lj = pad
else:
try:
lj = S(lj)
except SympifyError:
lj = Symbol(str(lj))
line[j] = lj
data[i] = line
_lines = Tuple(*data)
headings = kwarg.get("headings", [None, None])
if headings == "automatic":
_headings = [range(1, _h + 1), range(1, _w + 1)]
else:
h1, h2 = headings
if h1 == "automatic":
h1 = range(1, _h + 1)
if h2 == "automatic":
h2 = range(1, _w + 1)
_headings = [h1, h2]
allow = ('l', 'r', 'c')
alignments = kwarg.get("alignments", "l")
def _std_align(a):
a = a.strip().lower()
if len(a) > 1:
return {'left': 'l', 'right': 'r', 'center': 'c'}.get(a, a)
else:
return {'<': 'l', '>': 'r', '^': 'c'}.get(a, a)
std_align = _std_align(alignments)
if std_align in allow:
_alignments = [std_align]*_w
else:
_alignments = []
for a in alignments:
std_align = _std_align(a)
_alignments.append(std_align)
if std_align not in ('l', 'r', 'c'):
raise ValueError('alignment "%s" unrecognized' %
alignments)
if _headings[0] and len(_alignments) == _w + 1:
_head_align = _alignments[0]
_alignments = _alignments[1:]
else:
_head_align = 'r'
if len(_alignments) != _w:
raise ValueError(
'wrong number of alignments: expected %s but got %s' %
(_w, len(_alignments)))
_column_formats = kwarg.get("formats", [None]*_w)
_wipe_zeros = kwarg.get("wipe_zeros", True)
self._w = _w
self._h = _h
self._lines = _lines
self._headings = _headings
self._head_align = _head_align
self._alignments = _alignments
self._column_formats = _column_formats
self._wipe_zeros = _wipe_zeros
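# A small illustrative sketch (values chosen arbitrarily) of the ``formats``
# keyword handled above: each column may get either a %-style format string
# or a callable taking (entry, row, column); returning None from the callable
# falls back to the default printer.  The formatted strings are used by the
# LaTeX output.
#
#     TableForm([[1.5, 2], [3.25, 4]],
#               formats=[lambda x, i, j: "%.2f" % x, "%d"])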
def __repr__(self):
from .str import sstr
return sstr(self, order=None)
def __str__(self):
from .str import sstr
return sstr(self, order=None)
def as_matrix(self):
"""Returns the data of the table in Matrix form.
Examples
========
>>> from sympy import TableForm
>>> t = TableForm([[5, 7], [4, 2], [10, 3]], headings='automatic')
>>> t
| 1 2
--------
1 | 5 7
2 | 4 2
3 | 10 3
>>> t.as_matrix()
Matrix([
[ 5, 7],
[ 4, 2],
[10, 3]])
"""
from sympy import Matrix
return Matrix(self._lines)
def as_str(self):
# XXX obsolete ?
return str(self)
def as_latex(self):
from .latex import latex
return latex(self)
def _sympystr(self, p):
"""
Returns the string representation of 'self'.
Examples
========
>>> from sympy import TableForm
>>> t = TableForm([[5, 7], [4, 2], [10, 3]])
>>> s = t.as_str()
"""
column_widths = [0] * self._w
lines = []
for line in self._lines:
new_line = []
for i in range(self._w):
# Format the item somehow if needed:
s = str(line[i])
if self._wipe_zeros and (s == "0"):
s = " "
w = len(s)
if w > column_widths[i]:
column_widths[i] = w
new_line.append(s)
lines.append(new_line)
# Check heading:
if self._headings[0]:
self._headings[0] = [str(x) for x in self._headings[0]]
_head_width = max([len(x) for x in self._headings[0]])
if self._headings[1]:
new_line = []
for i in range(self._w):
# Format the item somehow if needed:
s = str(self._headings[1][i])
w = len(s)
if w > column_widths[i]:
column_widths[i] = w
new_line.append(s)
self._headings[1] = new_line
format_str = []
def _align(align, w):
return '%%%s%ss' % (
("-" if align == "l" else ""),
str(w))
format_str = [_align(align, w) for align, w in
zip(self._alignments, column_widths)]
if self._headings[0]:
format_str.insert(0, _align(self._head_align, _head_width))
format_str.insert(1, '|')
format_str = ' '.join(format_str) + '\n'
s = []
if self._headings[1]:
d = self._headings[1]
if self._headings[0]:
d = [""] + d
first_line = format_str % tuple(d)
s.append(first_line)
s.append("-" * (len(first_line) - 1) + "\n")
for i, line in enumerate(lines):
d = [l if self._alignments[j] != 'c' else
l.center(column_widths[j]) for j, l in enumerate(line)]
if self._headings[0]:
l = self._headings[0][i]
l = (l if self._head_align != 'c' else
l.center(_head_width))
d = [l] + d
s.append(format_str % tuple(d))
return ''.join(s)[:-1] # don't include trailing newline
def _latex(self, printer):
"""
Returns the string representation of 'self'.
"""
# Check heading:
if self._headings[1]:
new_line = []
for i in range(self._w):
# Format the item somehow if needed:
new_line.append(str(self._headings[1][i]))
self._headings[1] = new_line
alignments = []
if self._headings[0]:
self._headings[0] = [str(x) for x in self._headings[0]]
alignments = [self._head_align]
alignments.extend(self._alignments)
s = r"\begin{tabular}{" + " ".join(alignments) + "}\n"
if self._headings[1]:
d = self._headings[1]
if self._headings[0]:
d = [""] + d
first_line = " & ".join(d) + r" \\" + "\n"
s += first_line
s += r"\hline" + "\n"
for i, line in enumerate(self._lines):
d = []
for j, x in enumerate(line):
if self._wipe_zeros and (x in (0, "0")):
d.append(" ")
continue
f = self._column_formats[j]
if f:
if isinstance(f, FunctionType):
v = f(x, i, j)
if v is None:
v = printer._print(x)
else:
v = f % x
d.append(v)
else:
v = printer._print(x)
d.append("$%s$" % v)
if self._headings[0]:
d = [self._headings[0][i]] + d
s += " & ".join(d) + r" \\" + "\n"
s += r"\end{tabular}"
return s
|
b1d1f05f9d617f22ecc79254ccc8b7d1b06f6e1c1f8c62d34e38ebc1f6858667 | """
C code printer
The C89CodePrinter & C99CodePrinter convert single sympy expressions into
single C expressions, using the functions defined in math.h where possible.
A complete code generator, which uses ccode extensively, can be found in
sympy.utilities.codegen. The codegen module can be used to generate complete
source code files that are compilable without further modifications.
"""
from typing import Any, Dict, Tuple
from functools import wraps
from itertools import chain
from sympy.core import S
from sympy.codegen.ast import (
Assignment, Pointer, Variable, Declaration, Type,
real, complex_, integer, bool_, float32, float64, float80,
complex64, complex128, intc, value_const, pointer_const,
int8, int16, int32, int64, uint8, uint16, uint32, uint64, untyped,
none
)
from sympy.printing.codeprinter import CodePrinter, requires
from sympy.printing.precedence import precedence, PRECEDENCE
from sympy.sets.fancysets import Range
# These are defined in the other file so we can avoid importing sympy.codegen
# from the top-level 'import sympy'. Export them here as well.
from sympy.printing.codeprinter import ccode, print_ccode # noqa:F401
# dictionary mapping sympy function to (argument_conditions, C_function).
# Used in C89CodePrinter._print_Function(self)
known_functions_C89 = {
"Abs": [(lambda x: not x.is_integer, "fabs"), (lambda x: x.is_integer, "abs")],
"sin": "sin",
"cos": "cos",
"tan": "tan",
"asin": "asin",
"acos": "acos",
"atan": "atan",
"atan2": "atan2",
"exp": "exp",
"log": "log",
"sinh": "sinh",
"cosh": "cosh",
"tanh": "tanh",
"floor": "floor",
"ceiling": "ceil",
}
known_functions_C99 = dict(known_functions_C89, **{
'exp2': 'exp2',
'expm1': 'expm1',
'log10': 'log10',
'log2': 'log2',
'log1p': 'log1p',
'Cbrt': 'cbrt',
'hypot': 'hypot',
'fma': 'fma',
'loggamma': 'lgamma',
'erfc': 'erfc',
'Max': 'fmax',
'Min': 'fmin',
"asinh": "asinh",
"acosh": "acosh",
"atanh": "atanh",
"erf": "erf",
"gamma": "tgamma",
})
# These are the core reserved words in the C language. Taken from:
# http://en.cppreference.com/w/c/keyword
reserved_words = [
'auto', 'break', 'case', 'char', 'const', 'continue', 'default', 'do',
'double', 'else', 'enum', 'extern', 'float', 'for', 'goto', 'if', 'int',
'long', 'register', 'return', 'short', 'signed', 'sizeof', 'static',
'struct', 'entry', # never standardized, we'll leave it here anyway
'switch', 'typedef', 'union', 'unsigned', 'void', 'volatile', 'while'
]
reserved_words_c99 = ['inline', 'restrict']
def get_math_macros():
""" Returns a dictionary with math-related macros from math.h/cmath
Note that these macros are not strictly required by the C/C++-standard.
For MSVC they are enabled by defining "_USE_MATH_DEFINES" (preferably
via a compilation flag).
Returns
=======
Dictionary mapping sympy expressions to strings (macro names)
"""
from sympy.codegen.cfunctions import log2, Sqrt
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.miscellaneous import sqrt
return {
S.Exp1: 'M_E',
log2(S.Exp1): 'M_LOG2E',
1/log(2): 'M_LOG2E',
log(2): 'M_LN2',
log(10): 'M_LN10',
S.Pi: 'M_PI',
S.Pi/2: 'M_PI_2',
S.Pi/4: 'M_PI_4',
1/S.Pi: 'M_1_PI',
2/S.Pi: 'M_2_PI',
2/sqrt(S.Pi): 'M_2_SQRTPI',
2/Sqrt(S.Pi): 'M_2_SQRTPI',
sqrt(2): 'M_SQRT2',
Sqrt(2): 'M_SQRT2',
1/sqrt(2): 'M_SQRT1_2',
1/Sqrt(2): 'M_SQRT1_2'
}
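# Illustrative usage of the macros above (comment only): the printers defined
# below consult ``math_macros`` before falling back to numeric literals, so
# with the default settings
#
#     from sympy import pi
#     C89CodePrinter().doprint(pi)    # -> 'M_PI'
#
# and aliasing ``real`` to ``float80`` appends the 'l' suffix, giving 'M_PIl'.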
def _as_macro_if_defined(meth):
""" Decorator for printer methods
When a printer method is decorated using this decorator, the expression to be
printed is first looked up in the attribute ``math_macros``; if present, the
macro name from ``math_macros`` is printed, followed by a type suffix for the type
``real``. E.g. printing ``sympy.pi`` would print ``M_PIl`` if real is mapped to float80.
"""
@wraps(meth)
def _meth_wrapper(self, expr, **kwargs):
if expr in self.math_macros:
return '%s%s' % (self.math_macros[expr], self._get_math_macro_suffix(real))
else:
return meth(self, expr, **kwargs)
return _meth_wrapper
class C89CodePrinter(CodePrinter):
"""A printer to convert python expressions to strings of c code"""
printmethod = "_ccode"
language = "C"
standard = "C89"
reserved_words = set(reserved_words)
_default_settings = {
'order': None,
'full_prec': 'auto',
'precision': 17,
'user_functions': {},
'human': True,
'allow_unknown_functions': False,
'contract': True,
'dereference': set(),
'error_on_reserved': False,
'reserved_word_suffix': '_',
} # type: Dict[str, Any]
type_aliases = {
real: float64,
complex_: complex128,
integer: intc
}
type_mappings = {
real: 'double',
intc: 'int',
float32: 'float',
float64: 'double',
integer: 'int',
bool_: 'bool',
int8: 'int8_t',
int16: 'int16_t',
int32: 'int32_t',
int64: 'int64_t',
uint8: 'uint8_t',
uint16: 'uint16_t',
uint32: 'uint32_t',
uint64: 'uint64_t',
} # type: Dict[Type, Any]
type_headers = {
bool_: {'stdbool.h'},
int8: {'stdint.h'},
int16: {'stdint.h'},
int32: {'stdint.h'},
int64: {'stdint.h'},
uint8: {'stdint.h'},
uint16: {'stdint.h'},
uint32: {'stdint.h'},
uint64: {'stdint.h'},
}
# Macros needed to be defined when using a Type
type_macros = {} # type: Dict[Type, Tuple[str, ...]]
type_func_suffixes = {
float32: 'f',
float64: '',
float80: 'l'
}
type_literal_suffixes = {
float32: 'F',
float64: '',
float80: 'L'
}
type_math_macro_suffixes = {
float80: 'l'
}
math_macros = None
_ns = '' # namespace, C++ uses 'std::'
# known_functions-dict to copy
_kf = known_functions_C89 # type: Dict[str, Any]
def __init__(self, settings=None):
settings = settings or {}
if self.math_macros is None:
self.math_macros = settings.pop('math_macros', get_math_macros())
self.type_aliases = dict(chain(self.type_aliases.items(),
settings.pop('type_aliases', {}).items()))
self.type_mappings = dict(chain(self.type_mappings.items(),
settings.pop('type_mappings', {}).items()))
self.type_headers = dict(chain(self.type_headers.items(),
settings.pop('type_headers', {}).items()))
self.type_macros = dict(chain(self.type_macros.items(),
settings.pop('type_macros', {}).items()))
self.type_func_suffixes = dict(chain(self.type_func_suffixes.items(),
settings.pop('type_func_suffixes', {}).items()))
self.type_literal_suffixes = dict(chain(self.type_literal_suffixes.items(),
settings.pop('type_literal_suffixes', {}).items()))
self.type_math_macro_suffixes = dict(chain(self.type_math_macro_suffixes.items(),
settings.pop('type_math_macro_suffixes', {}).items()))
super().__init__(settings)
self.known_functions = dict(self._kf, **settings.get('user_functions', {}))
self._dereference = set(settings.get('dereference', []))
self.headers = set()
self.libraries = set()
self.macros = set()
def _rate_index_position(self, p):
return p*5
def _get_statement(self, codestring):
""" Get code string as a statement - i.e. ending with a semicolon. """
return codestring if codestring.endswith(';') else codestring + ';'
def _get_comment(self, text):
return "// {}".format(text)
def _declare_number_const(self, name, value):
type_ = self.type_aliases[real]
var = Variable(name, type=type_, value=value.evalf(type_.decimal_dig), attrs={value_const})
decl = Declaration(var)
return self._get_statement(self._print(decl))
def _format_code(self, lines):
return self.indent_code(lines)
def _traverse_matrix_indices(self, mat):
rows, cols = mat.shape
return ((i, j) for i in range(rows) for j in range(cols))
@_as_macro_if_defined
def _print_Mul(self, expr, **kwargs):
return super()._print_Mul(expr, **kwargs)
@_as_macro_if_defined
def _print_Pow(self, expr):
if "Pow" in self.known_functions:
return self._print_Function(expr)
PREC = precedence(expr)
suffix = self._get_func_suffix(real)
if expr.exp == -1:
literal_suffix = self._get_literal_suffix(real)
return '1.0%s/%s' % (literal_suffix, self.parenthesize(expr.base, PREC))
elif expr.exp == 0.5:
return '%ssqrt%s(%s)' % (self._ns, suffix, self._print(expr.base))
elif expr.exp == S.One/3 and self.standard != 'C89':
return '%scbrt%s(%s)' % (self._ns, suffix, self._print(expr.base))
else:
return '%spow%s(%s, %s)' % (self._ns, suffix, self._print(expr.base),
self._print(expr.exp))
def _print_Mod(self, expr):
num, den = expr.args
if num.is_integer and den.is_integer:
return "(({}) % ({}))".format(self._print(num), self._print(den))
else:
return self._print_math_func(expr, known='fmod')
def _print_Rational(self, expr):
p, q = int(expr.p), int(expr.q)
suffix = self._get_literal_suffix(real)
return '%d.0%s/%d.0%s' % (p, suffix, q, suffix)
def _print_Indexed(self, expr):
# calculate index for 1d array
offset = getattr(expr.base, 'offset', S.Zero)
strides = getattr(expr.base, 'strides', None)
indices = expr.indices
if strides is None or isinstance(strides, str):
dims = expr.shape
shift = S.One
temp = tuple()
if strides == 'C' or strides is None:
traversal = reversed(range(expr.rank))
indices = indices[::-1]
elif strides == 'F':
traversal = range(expr.rank)
for i in traversal:
temp += (shift,)
shift *= dims[i]
strides = temp
flat_index = sum([x[0]*x[1] for x in zip(indices, strides)]) + offset
return "%s[%s]" % (self._print(expr.base.label),
self._print(flat_index))
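# Worked example for the index flattening above (comment only): for a
# row-major ('C' strides, the default) 2-D IndexedBase ``A`` of shape (n, m),
# the effective strides for indices (i, j) are (m, 1), so ``A[i, j]`` is
# emitted as the flat access ``A[m*i + j]`` (term order may vary with the
# printer's Add ordering).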
def _print_Idx(self, expr):
return self._print(expr.label)
@_as_macro_if_defined
def _print_NumberSymbol(self, expr):
return super()._print_NumberSymbol(expr)
def _print_Infinity(self, expr):
return 'HUGE_VAL'
def _print_NegativeInfinity(self, expr):
return '-HUGE_VAL'
def _print_Piecewise(self, expr):
if expr.args[-1].cond != True:
# We need the last conditional to be a True, otherwise the resulting
# function may not return a result.
raise ValueError("All Piecewise expressions must contain an "
"(expr, True) statement to be used as a default "
"condition. Without one, the generated "
"expression may not evaluate to anything under "
"some condition.")
lines = []
if expr.has(Assignment):
for i, (e, c) in enumerate(expr.args):
if i == 0:
lines.append("if (%s) {" % self._print(c))
elif i == len(expr.args) - 1 and c == True:
lines.append("else {")
else:
lines.append("else if (%s) {" % self._print(c))
code0 = self._print(e)
lines.append(code0)
lines.append("}")
return "\n".join(lines)
else:
# The piecewise was used in an expression, need to do inline
# operators. This has the downside that inline operators will
# not work for statements that span multiple lines (Matrix or
# Indexed expressions).
ecpairs = ["((%s) ? (\n%s\n)\n" % (self._print(c),
self._print(e))
for e, c in expr.args[:-1]]
last_line = ": (\n%s\n)" % self._print(expr.args[-1].expr)
return ": ".join(ecpairs) + last_line + " ".join([")"*len(ecpairs)])
def _print_ITE(self, expr):
from sympy.functions import Piecewise
_piecewise = Piecewise((expr.args[1], expr.args[0]), (expr.args[2], True))
return self._print(_piecewise)
def _print_MatrixElement(self, expr):
return "{}[{}]".format(self.parenthesize(expr.parent, PRECEDENCE["Atom"],
strict=True), expr.j + expr.i*expr.parent.shape[1])
def _print_Symbol(self, expr):
name = super()._print_Symbol(expr)
if expr in self._settings['dereference']:
return '(*{})'.format(name)
else:
return name
def _print_Relational(self, expr):
lhs_code = self._print(expr.lhs)
rhs_code = self._print(expr.rhs)
op = expr.rel_op
return "{} {} {}".format(lhs_code, op, rhs_code)
def _print_sinc(self, expr):
from sympy.functions.elementary.trigonometric import sin
from sympy.core.relational import Ne
from sympy.functions import Piecewise
_piecewise = Piecewise(
(sin(expr.args[0]) / expr.args[0], Ne(expr.args[0], 0)), (1, True))
return self._print(_piecewise)
def _print_For(self, expr):
target = self._print(expr.target)
if isinstance(expr.iterable, Range):
start, stop, step = expr.iterable.args
else:
raise NotImplementedError("Only iterable currently supported is Range")
body = self._print(expr.body)
return ('for ({target} = {start}; {target} < {stop}; {target} += '
'{step}) {{\n{body}\n}}').format(target=target, start=start,
stop=stop, step=step, body=body)
def _print_sign(self, func):
return '((({0}) > 0) - (({0}) < 0))'.format(self._print(func.args[0]))
def _print_Max(self, expr):
if "Max" in self.known_functions:
return self._print_Function(expr)
def inner_print_max(args): # The more natural abstraction of creating
if len(args) == 1: # and printing smaller Max objects is slow
return self._print(args[0]) # when there are many arguments.
half = len(args) // 2
return "((%(a)s > %(b)s) ? %(a)s : %(b)s)" % {
'a': inner_print_max(args[:half]),
'b': inner_print_max(args[half:])
}
return inner_print_max(expr.args)
def _print_Min(self, expr):
if "Min" in self.known_functions:
return self._print_Function(expr)
def inner_print_min(args): # The more natural abstraction of creating
if len(args) == 1: # and printing smaller Min objects is slow
return self._print(args[0]) # when there are many arguments.
half = len(args) // 2
return "((%(a)s < %(b)s) ? %(a)s : %(b)s)" % {
'a': inner_print_min(args[:half]),
'b': inner_print_min(args[half:])
}
return inner_print_min(expr.args)
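# Worked example (comment only): C89 has no fmax/fmin, so Max(a, b, c) is
# expanded into nested ternaries, e.g.
# ``((a > ((b > c) ? b : c)) ? a : ((b > c) ? b : c))``; halving the argument
# list keeps the nesting depth logarithmic in the number of arguments.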
def indent_code(self, code):
"""Accepts a string of code or a list of code lines"""
if isinstance(code, str):
code_lines = self.indent_code(code.splitlines(True))
return ''.join(code_lines)
tab = " "
inc_token = ('{', '(', '{\n', '(\n')
dec_token = ('}', ')')
code = [line.lstrip(' \t') for line in code]
increase = [int(any(map(line.endswith, inc_token))) for line in code]
decrease = [int(any(map(line.startswith, dec_token))) for line in code]
pretty = []
level = 0
for n, line in enumerate(code):
if line == '' or line == '\n':
pretty.append(line)
continue
level -= decrease[n]
pretty.append("%s%s" % (tab*level, line))
level += increase[n]
return pretty
def _get_func_suffix(self, type_):
return self.type_func_suffixes[self.type_aliases.get(type_, type_)]
def _get_literal_suffix(self, type_):
return self.type_literal_suffixes[self.type_aliases.get(type_, type_)]
def _get_math_macro_suffix(self, type_):
alias = self.type_aliases.get(type_, type_)
dflt = self.type_math_macro_suffixes.get(alias, '')
return self.type_math_macro_suffixes.get(type_, dflt)
def _print_Type(self, type_):
self.headers.update(self.type_headers.get(type_, set()))
self.macros.update(self.type_macros.get(type_, set()))
return self._print(self.type_mappings.get(type_, type_.name))
def _print_Declaration(self, decl):
from sympy.codegen.cnodes import restrict
var = decl.variable
val = var.value
if var.type == untyped:
raise ValueError("C does not support untyped variables")
if isinstance(var, Pointer):
result = '{vc}{t} *{pc} {r}{s}'.format(
vc='const ' if value_const in var.attrs else '',
t=self._print(var.type),
pc=' const' if pointer_const in var.attrs else '',
r='restrict ' if restrict in var.attrs else '',
s=self._print(var.symbol)
)
elif isinstance(var, Variable):
result = '{vc}{t} {s}'.format(
vc='const ' if value_const in var.attrs else '',
t=self._print(var.type),
s=self._print(var.symbol)
)
else:
raise NotImplementedError("Unknown type of var: %s" % type(var))
if val != None: # Must be "!= None", cannot be "is not None"
result += ' = %s' % self._print(val)
return result
def _print_Float(self, flt):
type_ = self.type_aliases.get(real, real)
self.macros.update(self.type_macros.get(type_, set()))
suffix = self._get_literal_suffix(type_)
num = str(flt.evalf(type_.decimal_dig))
if 'e' not in num and '.' not in num:
num += '.0'
num_parts = num.split('e')
num_parts[0] = num_parts[0].rstrip('0')
if num_parts[0].endswith('.'):
num_parts[0] += '0'
return 'e'.join(num_parts) + suffix
@requires(headers={'stdbool.h'})
def _print_BooleanTrue(self, expr):
return 'true'
@requires(headers={'stdbool.h'})
def _print_BooleanFalse(self, expr):
return 'false'
def _print_Element(self, elem):
if elem.strides == None: # Must be "== None", cannot be "is None"
if elem.offset != None: # Must be "!= None", cannot be "is not None"
raise ValueError("Expected strides when offset is given")
idxs = ']['.join(map(lambda arg: self._print(arg),
elem.indices))
else:
global_idx = sum([i*s for i, s in zip(elem.indices, elem.strides)])
if elem.offset != None: # Must be "!= None", cannot be "is not None"
global_idx += elem.offset
idxs = self._print(global_idx)
return "{symb}[{idxs}]".format(
symb=self._print(elem.symbol),
idxs=idxs
)
def _print_CodeBlock(self, expr):
""" Elements of code blocks printed as statements. """
return '\n'.join([self._get_statement(self._print(i)) for i in expr.args])
def _print_While(self, expr):
return 'while ({condition}) {{\n{body}\n}}'.format(**expr.kwargs(
apply=lambda arg: self._print(arg)))
def _print_Scope(self, expr):
return '{\n%s\n}' % self._print_CodeBlock(expr.body)
@requires(headers={'stdio.h'})
def _print_Print(self, expr):
return 'printf({fmt}, {pargs})'.format(
fmt=self._print(expr.format_string),
pargs=', '.join(map(lambda arg: self._print(arg), expr.print_args))
)
def _print_FunctionPrototype(self, expr):
pars = ', '.join(map(lambda arg: self._print(Declaration(arg)),
expr.parameters))
return "%s %s(%s)" % (
tuple(map(lambda arg: self._print(arg),
(expr.return_type, expr.name))) + (pars,)
)
def _print_FunctionDefinition(self, expr):
return "%s%s" % (self._print_FunctionPrototype(expr),
self._print_Scope(expr))
def _print_Return(self, expr):
arg, = expr.args
return 'return %s' % self._print(arg)
def _print_CommaOperator(self, expr):
return '(%s)' % ', '.join(map(lambda arg: self._print(arg), expr.args))
def _print_Label(self, expr):
if expr.body == none:
return '%s:' % str(expr.name)
if len(expr.body.args) == 1:
return '%s:\n%s' % (str(expr.name), self._print_CodeBlock(expr.body))
return '%s:\n{\n%s\n}' % (str(expr.name), self._print_CodeBlock(expr.body))
def _print_goto(self, expr):
return 'goto %s' % expr.label.name
def _print_PreIncrement(self, expr):
arg, = expr.args
return '++(%s)' % self._print(arg)
def _print_PostIncrement(self, expr):
arg, = expr.args
return '(%s)++' % self._print(arg)
def _print_PreDecrement(self, expr):
arg, = expr.args
return '--(%s)' % self._print(arg)
def _print_PostDecrement(self, expr):
arg, = expr.args
return '(%s)--' % self._print(arg)
def _print_struct(self, expr):
return "%(keyword)s %(name)s {\n%(lines)s}" % dict(
keyword=expr.__class__.__name__, name=expr.name, lines=';\n'.join(
[self._print(decl) for decl in expr.declarations] + [''])
)
def _print_BreakToken(self, _):
return 'break'
def _print_ContinueToken(self, _):
return 'continue'
_print_union = _print_struct
class C99CodePrinter(C89CodePrinter):
standard = 'C99'
reserved_words = set(reserved_words + reserved_words_c99)
type_mappings=dict(chain(C89CodePrinter.type_mappings.items(), {
complex64: 'float complex',
complex128: 'double complex',
}.items()))
type_headers = dict(chain(C89CodePrinter.type_headers.items(), {
complex64: {'complex.h'},
complex128: {'complex.h'}
}.items()))
# known_functions-dict to copy
_kf = known_functions_C99 # type: Dict[str, Any]
# functions with versions with 'f' and 'l' suffixes:
_prec_funcs = ('fabs fmod remainder remquo fma fmax fmin fdim nan exp exp2'
' expm1 log log10 log2 log1p pow sqrt cbrt hypot sin cos tan'
' asin acos atan atan2 sinh cosh tanh asinh acosh atanh erf'
' erfc tgamma lgamma ceil floor trunc round nearbyint rint'
' frexp ldexp modf scalbn ilogb logb nextafter copysign').split()
def _print_Infinity(self, expr):
return 'INFINITY'
def _print_NegativeInfinity(self, expr):
return '-INFINITY'
def _print_NaN(self, expr):
return 'NAN'
# tgamma was already covered by 'known_functions' dict
@requires(headers={'math.h'}, libraries={'m'})
@_as_macro_if_defined
def _print_math_func(self, expr, nest=False, known=None):
if known is None:
known = self.known_functions[expr.__class__.__name__]
if not isinstance(known, str):
for cb, name in known:
if cb(*expr.args):
known = name
break
else:
raise ValueError("No matching printer")
try:
return known(self, *expr.args)
except TypeError:
suffix = self._get_func_suffix(real) if self._ns + known in self._prec_funcs else ''
if nest:
args = self._print(expr.args[0])
if len(expr.args) > 1:
paren_pile = ''
for curr_arg in expr.args[1:-1]:
paren_pile += ')'
args += ', {ns}{name}{suffix}({next}'.format(
ns=self._ns,
name=known,
suffix=suffix,
next = self._print(curr_arg)
)
args += ', %s%s' % (
self._print(expr.func(expr.args[-1])),
paren_pile
)
else:
args = ', '.join(map(lambda arg: self._print(arg), expr.args))
return '{ns}{name}{suffix}({args})'.format(
ns=self._ns,
name=known,
suffix=suffix,
args=args
)
def _print_Max(self, expr):
return self._print_math_func(expr, nest=True)
def _print_Min(self, expr):
return self._print_math_func(expr, nest=True)
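# Worked example (comment only): with ``nest=True`` a variadic Max/Min is
# emitted as nested two-argument calls, e.g. Max(x, y, z) becomes
# ``fmax(x, fmax(y, z))`` (with an 'f' or 'l' suffix when ``real`` is aliased
# to float32 or float80).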
def _get_loop_opening_ending(self, indices):
open_lines = []
close_lines = []
loopstart = "for (int %(var)s=%(start)s; %(var)s<%(end)s; %(var)s++){" # C99
for i in indices:
# C arrays start at 0 and end at dimension-1
open_lines.append(loopstart % {
'var': self._print(i.label),
'start': self._print(i.lower),
'end': self._print(i.upper + 1)})
close_lines.append("}")
return open_lines, close_lines
for k in ('Abs Sqrt exp exp2 expm1 log log10 log2 log1p Cbrt hypot fma'
' loggamma sin cos tan asin acos atan atan2 sinh cosh tanh asinh acosh '
'atanh erf erfc loggamma gamma ceiling floor').split():
setattr(C99CodePrinter, '_print_%s' % k, C99CodePrinter._print_math_func)
class C11CodePrinter(C99CodePrinter):
@requires(headers={'stdalign.h'})
def _print_alignof(self, expr):
arg, = expr.args
return 'alignof(%s)' % self._print(arg)
c_code_printers = {
'c89': C89CodePrinter,
'c99': C99CodePrinter,
'c11': C11CodePrinter
}
|
7a26db2a5cd10577fbe4e1bb04202e46545654d23dcdd57393af4fff8a7d9a0d | from .pycode import (
PythonCodePrinter,
MpmathPrinter, # MpmathPrinter is imported for backward compatibility
NumPyPrinter # NumPyPrinter is imported for backward compatibility
)
from sympy.utilities import default_sort_key
__all__ = [
'PythonCodePrinter',
'MpmathPrinter',
'NumPyPrinter',
'LambdaPrinter',
'lambdarepr',
]
class LambdaPrinter(PythonCodePrinter):
"""
This printer converts expressions into strings that can be used by
lambdify.
"""
printmethod = "_lambdacode"
def _print_And(self, expr):
result = ['(']
for arg in sorted(expr.args, key=default_sort_key):
result.extend(['(', self._print(arg), ')'])
result.append(' and ')
result = result[:-1]
result.append(')')
return ''.join(result)
def _print_Or(self, expr):
result = ['(']
for arg in sorted(expr.args, key=default_sort_key):
result.extend(['(', self._print(arg), ')'])
result.append(' or ')
result = result[:-1]
result.append(')')
return ''.join(result)
def _print_Not(self, expr):
result = ['(', 'not (', self._print(expr.args[0]), '))']
return ''.join(result)
def _print_BooleanTrue(self, expr):
return "True"
def _print_BooleanFalse(self, expr):
return "False"
def _print_ITE(self, expr):
result = [
'((', self._print(expr.args[1]),
') if (', self._print(expr.args[0]),
') else (', self._print(expr.args[2]), '))'
]
return ''.join(result)
def _print_NumberSymbol(self, expr):
return str(expr)
def _print_Pow(self, expr, **kwargs):
# XXX Temporary workaround. Should python math printer be
# isolated from PythonCodePrinter?
return super(PythonCodePrinter, self)._print_Pow(expr, **kwargs)
# numexpr works by altering the string passed to numexpr.evaluate
# rather than by populating a namespace. Thus a special printer...
class NumExprPrinter(LambdaPrinter):
# key, value pairs correspond to sympy name and numexpr name
# functions not appearing in this dict will raise a TypeError
printmethod = "_numexprcode"
_numexpr_functions = {
'sin' : 'sin',
'cos' : 'cos',
'tan' : 'tan',
'asin': 'arcsin',
'acos': 'arccos',
'atan': 'arctan',
'atan2' : 'arctan2',
'sinh' : 'sinh',
'cosh' : 'cosh',
'tanh' : 'tanh',
'asinh': 'arcsinh',
'acosh': 'arccosh',
'atanh': 'arctanh',
'ln' : 'log',
'log': 'log',
'exp': 'exp',
'sqrt' : 'sqrt',
'Abs' : 'abs',
'conjugate' : 'conj',
'im' : 'imag',
're' : 'real',
'where' : 'where',
'complex' : 'complex',
'contains' : 'contains',
}
def _print_ImaginaryUnit(self, expr):
return '1j'
def _print_seq(self, seq, delimiter=', '):
# simplified _print_seq taken from pretty.py
s = [self._print(item) for item in seq]
if s:
return delimiter.join(s)
else:
return ""
def _print_Function(self, e):
func_name = e.func.__name__
nstr = self._numexpr_functions.get(func_name, None)
if nstr is None:
# check for implemented_function
if hasattr(e, '_imp_'):
return "(%s)" % self._print(e._imp_(*e.args))
else:
raise TypeError("numexpr does not support function '%s'" %
func_name)
return "%s(%s)" % (nstr, self._print_seq(e.args))
def _print_Piecewise(self, expr):
"Piecewise function printer"
exprs = [self._print(arg.expr) for arg in expr.args]
conds = [self._print(arg.cond) for arg in expr.args]
# If [default_value, True] is a (expr, cond) sequence in a Piecewise object
# it will behave the same as passing the 'default' kwarg to select()
# *as long as* it is the last element in expr.args.
# If this is not the case, it may be triggered prematurely.
ans = []
parenthesis_count = 0
is_last_cond_True = False
for cond, expr in zip(conds, exprs):
if cond == 'True':
ans.append(expr)
is_last_cond_True = True
break
else:
ans.append('where(%s, %s, ' % (cond, expr))
parenthesis_count += 1
if not is_last_cond_True:
# simplest way to put a nan but raises
# 'RuntimeWarning: invalid value encountered in log'
ans.append('log(-1)')
return ''.join(ans) + ')' * parenthesis_count
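# Worked example (comment only): Piecewise((x, x > 0), (0, True)) is turned
# into nested where() calls understood by numexpr, roughly
# ``where(x > 0, x, 0)``; without a final (expr, True) branch a trailing
# ``log(-1)`` is appended so the otherwise-undefined case evaluates to nan.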
def blacklisted(self, expr):
raise TypeError("numexpr cannot be used with %s" %
expr.__class__.__name__)
# blacklist all Matrix printing
_print_SparseMatrix = \
_print_MutableSparseMatrix = \
_print_ImmutableSparseMatrix = \
_print_Matrix = \
_print_DenseMatrix = \
_print_MutableDenseMatrix = \
_print_ImmutableMatrix = \
_print_ImmutableDenseMatrix = \
blacklisted
# blacklist some python expressions
_print_list = \
_print_tuple = \
_print_Tuple = \
_print_dict = \
_print_Dict = \
blacklisted
def doprint(self, expr):
lstr = super().doprint(expr)
return "evaluate('%s', truediv=True)" % lstr
for k in NumExprPrinter._numexpr_functions:
setattr(NumExprPrinter, '_print_%s' % k, NumExprPrinter._print_Function)
def lambdarepr(expr, **settings):
"""
Returns a string usable for lambdifying.
"""
return LambdaPrinter(settings).doprint(expr)
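# Illustrative usage (comment only): boolean operators come out as Python
# keywords rather than SymPy's ``&``/``|``, so for symbols x and y,
# lambdarepr(And(x > 0, y < 1)) gives something like
# ``((x > 0) and (y < 1))``, which eval()/lambdify can consume directly.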
|
b446d20e585feb73df58019aa6d696abfc62d6fe5ab2a73a72dd820b2a82ccc8 | """
Mathematica code printer
"""
from typing import Any, Dict, Set, Tuple
from sympy.core import Basic, Expr, Float
from sympy.printing.codeprinter import CodePrinter
from sympy.printing.precedence import precedence
# Used in MCodePrinter._print_Function(self)
known_functions = {
"exp": [(lambda x: True, "Exp")],
"log": [(lambda x: True, "Log")],
"sin": [(lambda x: True, "Sin")],
"cos": [(lambda x: True, "Cos")],
"tan": [(lambda x: True, "Tan")],
"cot": [(lambda x: True, "Cot")],
"sec": [(lambda x: True, "Sec")],
"csc": [(lambda x: True, "Csc")],
"asin": [(lambda x: True, "ArcSin")],
"acos": [(lambda x: True, "ArcCos")],
"atan": [(lambda x: True, "ArcTan")],
"acot": [(lambda x: True, "ArcCot")],
"asec": [(lambda x: True, "ArcSec")],
"acsc": [(lambda x: True, "ArcCsc")],
"atan2": [(lambda *x: True, "ArcTan")],
"sinh": [(lambda x: True, "Sinh")],
"cosh": [(lambda x: True, "Cosh")],
"tanh": [(lambda x: True, "Tanh")],
"coth": [(lambda x: True, "Coth")],
"sech": [(lambda x: True, "Sech")],
"csch": [(lambda x: True, "Csch")],
"asinh": [(lambda x: True, "ArcSinh")],
"acosh": [(lambda x: True, "ArcCosh")],
"atanh": [(lambda x: True, "ArcTanh")],
"acoth": [(lambda x: True, "ArcCoth")],
"asech": [(lambda x: True, "ArcSech")],
"acsch": [(lambda x: True, "ArcCsch")],
"conjugate": [(lambda x: True, "Conjugate")],
"Max": [(lambda *x: True, "Max")],
"Min": [(lambda *x: True, "Min")],
"erf": [(lambda x: True, "Erf")],
"erf2": [(lambda *x: True, "Erf")],
"erfc": [(lambda x: True, "Erfc")],
"erfi": [(lambda x: True, "Erfi")],
"erfinv": [(lambda x: True, "InverseErf")],
"erfcinv": [(lambda x: True, "InverseErfc")],
"erf2inv": [(lambda *x: True, "InverseErf")],
"expint": [(lambda *x: True, "ExpIntegralE")],
"Ei": [(lambda x: True, "ExpIntegralEi")],
"fresnelc": [(lambda x: True, "FresnelC")],
"fresnels": [(lambda x: True, "FresnelS")],
"gamma": [(lambda x: True, "Gamma")],
"uppergamma": [(lambda *x: True, "Gamma")],
"polygamma": [(lambda *x: True, "PolyGamma")],
"loggamma": [(lambda x: True, "LogGamma")],
"beta": [(lambda *x: True, "Beta")],
"Ci": [(lambda x: True, "CosIntegral")],
"Si": [(lambda x: True, "SinIntegral")],
"Chi": [(lambda x: True, "CoshIntegral")],
"Shi": [(lambda x: True, "SinhIntegral")],
"li": [(lambda x: True, "LogIntegral")],
"factorial": [(lambda x: True, "Factorial")],
"factorial2": [(lambda x: True, "Factorial2")],
"subfactorial": [(lambda x: True, "Subfactorial")],
"catalan": [(lambda x: True, "CatalanNumber")],
"harmonic": [(lambda *x: True, "HarmonicNumber")],
"RisingFactorial": [(lambda *x: True, "Pochhammer")],
"FallingFactorial": [(lambda *x: True, "FactorialPower")],
"laguerre": [(lambda *x: True, "LaguerreL")],
"assoc_laguerre": [(lambda *x: True, "LaguerreL")],
"hermite": [(lambda *x: True, "HermiteH")],
"jacobi": [(lambda *x: True, "JacobiP")],
"gegenbauer": [(lambda *x: True, "GegenbauerC")],
"chebyshevt": [(lambda *x: True, "ChebyshevT")],
"chebyshevu": [(lambda *x: True, "ChebyshevU")],
"legendre": [(lambda *x: True, "LegendreP")],
"assoc_legendre": [(lambda *x: True, "LegendreP")],
"mathieuc": [(lambda *x: True, "MathieuC")],
"mathieus": [(lambda *x: True, "MathieuS")],
"mathieucprime": [(lambda *x: True, "MathieuCPrime")],
"mathieusprime": [(lambda *x: True, "MathieuSPrime")],
"stieltjes": [(lambda x: True, "StieltjesGamma")],
"elliptic_e": [(lambda *x: True, "EllipticE")],
"elliptic_f": [(lambda *x: True, "EllipticE")],
"elliptic_k": [(lambda x: True, "EllipticK")],
"elliptic_pi": [(lambda *x: True, "EllipticPi")],
"zeta": [(lambda *x: True, "Zeta")],
"besseli": [(lambda *x: True, "BesselI")],
"besselj": [(lambda *x: True, "BesselJ")],
"besselk": [(lambda *x: True, "BesselK")],
"bessely": [(lambda *x: True, "BesselY")],
"hankel1": [(lambda *x: True, "HankelH1")],
"hankel2": [(lambda *x: True, "HankelH2")],
"airyai": [(lambda x: True, "AiryAi")],
"airybi": [(lambda x: True, "AiryBi")],
"airyaiprime": [(lambda x: True, "AiryAiPrime")],
"airybiprime": [(lambda x: True, "AiryBiPrime")],
"polylog": [(lambda *x: True, "PolyLog")],
"lerchphi": [(lambda *x: True, "LerchPhi")],
"gcd": [(lambda *x: True, "GCD")],
"lcm": [(lambda *x: True, "LCM")],
"jn": [(lambda *x: True, "SphericalBesselJ")],
"yn": [(lambda *x: True, "SphericalBesselY")],
"hyper": [(lambda *x: True, "HypergeometricPFQ")],
"meijerg": [(lambda *x: True, "MeijerG")],
"appellf1": [(lambda *x: True, "AppellF1")],
"DiracDelta": [(lambda x: True, "DiracDelta")],
"Heaviside": [(lambda x: True, "HeavisideTheta")],
"KroneckerDelta": [(lambda *x: True, "KroneckerDelta")],
}
class MCodePrinter(CodePrinter):
"""A printer to convert python expressions to
strings of the Wolfram's Mathematica code
"""
printmethod = "_mcode"
language = "Wolfram Language"
_default_settings = {
'order': None,
'full_prec': 'auto',
'precision': 15,
'user_functions': {},
'human': True,
'allow_unknown_functions': False,
} # type: Dict[str, Any]
_number_symbols = set() # type: Set[Tuple[Expr, Float]]
_not_supported = set() # type: Set[Basic]
def __init__(self, settings={}):
"""Register function mappings supplied by user"""
CodePrinter.__init__(self, settings)
self.known_functions = dict(known_functions)
userfuncs = settings.get('user_functions', {}).copy()
for k, v in userfuncs.items():
if not isinstance(v, list):
userfuncs[k] = [(lambda *x: True, v)]
self.known_functions.update(userfuncs)
def _format_code(self, lines):
return lines
def _print_Pow(self, expr):
PREC = precedence(expr)
return '%s^%s' % (self.parenthesize(expr.base, PREC),
self.parenthesize(expr.exp, PREC))
def _print_Mul(self, expr):
PREC = precedence(expr)
c, nc = expr.args_cnc()
res = super()._print_Mul(expr.func(*c))
if nc:
res += '*'
res += '**'.join(self.parenthesize(a, PREC) for a in nc)
return res
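# Worked example (comment only): ``**`` is Mathematica's NonCommutativeMultiply
# operator, so for noncommutative symbols A and B an expression like 2*A*B is
# printed as ``2*A**B``; the commutative factors go through the ordinary Mul
# printer and the noncommutative tail is joined with ``**``.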
def _print_Relational(self, expr):
lhs_code = self._print(expr.lhs)
rhs_code = self._print(expr.rhs)
op = expr.rel_op
return "{} {} {}".format(lhs_code, op, rhs_code)
# Primitive numbers
def _print_Zero(self, expr):
return '0'
def _print_One(self, expr):
return '1'
def _print_NegativeOne(self, expr):
return '-1'
def _print_Half(self, expr):
return '1/2'
def _print_ImaginaryUnit(self, expr):
return 'I'
# Infinity and invalid numbers
def _print_Infinity(self, expr):
return 'Infinity'
def _print_NegativeInfinity(self, expr):
return '-Infinity'
def _print_ComplexInfinity(self, expr):
return 'ComplexInfinity'
def _print_NaN(self, expr):
return 'Indeterminate'
# Mathematical constants
def _print_Exp1(self, expr):
return 'E'
def _print_Pi(self, expr):
return 'Pi'
def _print_GoldenRatio(self, expr):
return 'GoldenRatio'
def _print_TribonacciConstant(self, expr):
expanded = expr.expand(func=True)
PREC = precedence(expr)
return self.parenthesize(expanded, PREC)
def _print_EulerGamma(self, expr):
return 'EulerGamma'
def _print_Catalan(self, expr):
return 'Catalan'
def _print_list(self, expr):
return '{' + ', '.join(self.doprint(a) for a in expr) + '}'
_print_tuple = _print_list
_print_Tuple = _print_list
def _print_ImmutableDenseMatrix(self, expr):
return self.doprint(expr.tolist())
def _print_ImmutableSparseMatrix(self, expr):
from sympy.core.compatibility import default_sort_key
def print_rule(pos, val):
return '{} -> {}'.format(
self.doprint((pos[0]+1, pos[1]+1)), self.doprint(val))
def print_data():
items = sorted(expr._smat.items(), key=default_sort_key)
return '{' + \
', '.join(print_rule(k, v) for k, v in items) + \
'}'
def print_dims():
return self.doprint(expr.shape)
return 'SparseArray[{}, {}]'.format(print_data(), print_dims())
def _print_ImmutableDenseNDimArray(self, expr):
return self.doprint(expr.tolist())
def _print_ImmutableSparseNDimArray(self, expr):
def print_string_list(string_list):
return '{' + ', '.join(a for a in string_list) + '}'
def to_mathematica_index(*args):
"""Helper function to change Python style indexing to
Pathematica indexing.
Python indexing (0, 1 ... n-1)
-> Mathematica indexing (1, 2 ... n)
"""
return tuple(i + 1 for i in args)
def print_rule(pos, val):
"""Helper function to print a rule of Mathematica"""
return '{} -> {}'.format(self.doprint(pos), self.doprint(val))
def print_data():
"""Helper function to print data part of Mathematica
sparse array.
It uses the fourth notation ``SparseArray[data,{d1,d2,...}]``
from
https://reference.wolfram.com/language/ref/SparseArray.html
``data`` must be formatted with rule.
"""
return print_string_list(
[print_rule(
to_mathematica_index(*(expr._get_tuple_index(key))),
value)
for key, value in sorted(expr._sparse_array.items())]
)
def print_dims():
"""Helper function to print dimensions part of Mathematica
sparse array.
It uses the fourth notation ``SparseArray[data,{d1,d2,...}]``
from
https://reference.wolfram.com/language/ref/SparseArray.html
"""
return self.doprint(expr.shape)
return 'SparseArray[{}, {}]'.format(print_data(), print_dims())
def _print_Function(self, expr):
if expr.func.__name__ in self.known_functions:
cond_mfunc = self.known_functions[expr.func.__name__]
for cond, mfunc in cond_mfunc:
if cond(*expr.args):
return "%s[%s]" % (mfunc, self.stringify(expr.args, ", "))
elif (expr.func.__name__ in self._rewriteable_functions and
self._rewriteable_functions[expr.func.__name__] in self.known_functions):
# Simple rewrite to supported function possible
return self._print(expr.rewrite(self._rewriteable_functions[expr.func.__name__]))
return expr.func.__name__ + "[%s]" % self.stringify(expr.args, ", ")
_print_MinMaxBase = _print_Function
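    # SymPy's LambertW(x, k) takes the branch index second, whereas Wolfram's
    # ProductLog[k, x] takes it first, hence the swapped argument order below.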
def _print_LambertW(self, expr):
if len(expr.args) == 1:
return "ProductLog[{}]".format(self._print(expr.args[0]))
return "ProductLog[{}, {}]".format(
self._print(expr.args[1]), self._print(expr.args[0]))
def _print_Integral(self, expr):
if len(expr.variables) == 1 and not expr.limits[0][1:]:
args = [expr.args[0], expr.variables[0]]
else:
args = expr.args
return "Hold[Integrate[" + ', '.join(self.doprint(a) for a in args) + "]]"
def _print_Sum(self, expr):
return "Hold[Sum[" + ', '.join(self.doprint(a) for a in expr.args) + "]]"
def _print_Derivative(self, expr):
dexpr = expr.expr
dvars = [i[0] if i[1] == 1 else i for i in expr.variable_count]
return "Hold[D[" + ', '.join(self.doprint(a) for a in [dexpr] + dvars) + "]]"
def _get_comment(self, text):
return "(* {} *)".format(text)
def mathematica_code(expr, **settings):
r"""Converts an expr to a string of the Wolfram Mathematica code
Examples
========
>>> from sympy import mathematica_code as mcode, symbols, sin
>>> x = symbols('x')
>>> mcode(sin(x).series(x).removeO())
'(1/120)*x^5 - 1/6*x^3 + x'
"""
return MCodePrinter(settings).doprint(expr)
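

# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative addition, not part of the upstream
# module).  It exercises the ``user_functions`` hook registered in
# ``MCodePrinter.__init__`` and the fall-through branch of ``_print_Function``;
# the commented outputs follow from the code above but are assumptions rather
# than verified doctests.
if __name__ == '__main__':
    from sympy import Function, LambertW, symbols

    x = symbols('x')
    f = Function('f')  # hypothetical user function, not a SymPy builtin

    # Unknown functions are printed verbatim as ``name[args]`` ...
    print(mathematica_code(f(x)))                               # f[x]
    # ... unless a user mapping routes them to a Wolfram-side name.
    print(mathematica_code(f(x), user_functions={'f': 'MyF'}))  # MyF[x]
    # LambertW maps to ProductLog (branch index printed first when present).
    print(mathematica_code(LambertW(x)))                        # ProductLog[x]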
|
4c4d60548399b611a0a8fafcf738e5d0db6fb629d78253cd25a227b2c8e67c28 | """
Fortran code printer
The FCodePrinter converts single sympy expressions into single Fortran
expressions, using the functions defined in the Fortran 77 standard where
possible. Some useful pointers to Fortran can be found on wikipedia:
https://en.wikipedia.org/wiki/Fortran
Most of the code below is based on the "Professional Programmer\'s Guide to
Fortran77" by Clive G. Page:
http://www.star.le.ac.uk/~cgp/prof77.html
Fortran is a case-insensitive language. This might cause trouble because
SymPy is case sensitive. So, fcode adds underscores to variable names when
it is necessary to make them different for Fortran.
"""
from typing import Dict, Any
from collections import defaultdict
from itertools import chain
import string
from sympy.codegen.ast import (
Assignment, Declaration, Pointer, value_const,
float32, float64, float80, complex64, complex128, int8, int16, int32,
int64, intc, real, integer, bool_, complex_
)
from sympy.codegen.fnodes import (
allocatable, isign, dsign, cmplx, merge, literal_dp, elemental, pure,
intent_in, intent_out, intent_inout
)
from sympy.core import S, Add, N, Float, Symbol
from sympy.core.function import Function
from sympy.core.relational import Eq
from sympy.sets import Range
from sympy.printing.codeprinter import CodePrinter
from sympy.printing.precedence import precedence, PRECEDENCE
from sympy.printing.printer import printer_context
# These are defined in the other file so we can avoid importing sympy.codegen
# from the top-level 'import sympy'. Export them here as well.
from sympy.printing.codeprinter import fcode, print_fcode # noqa:F401
known_functions = {
"sin": "sin",
"cos": "cos",
"tan": "tan",
"asin": "asin",
"acos": "acos",
"atan": "atan",
"atan2": "atan2",
"sinh": "sinh",
"cosh": "cosh",
"tanh": "tanh",
"log": "log",
"exp": "exp",
"erf": "erf",
"Abs": "abs",
"conjugate": "conjg",
"Max": "max",
"Min": "min",
}
class FCodePrinter(CodePrinter):
"""A printer to convert sympy expressions to strings of Fortran code"""
printmethod = "_fcode"
language = "Fortran"
type_aliases = {
integer: int32,
real: float64,
complex_: complex128,
}
type_mappings = {
intc: 'integer(c_int)',
float32: 'real*4', # real(kind(0.e0))
float64: 'real*8', # real(kind(0.d0))
float80: 'real*10', # real(kind(????))
complex64: 'complex*8',
complex128: 'complex*16',
int8: 'integer*1',
int16: 'integer*2',
int32: 'integer*4',
int64: 'integer*8',
bool_: 'logical'
}
type_modules = {
intc: {'iso_c_binding': 'c_int'}
}
_default_settings = {
'order': None,
'full_prec': 'auto',
'precision': 17,
'user_functions': {},
'human': True,
'allow_unknown_functions': False,
'source_format': 'fixed',
'contract': True,
'standard': 77,
'name_mangling' : True,
} # type: Dict[str, Any]
_operators = {
'and': '.and.',
'or': '.or.',
'xor': '.neqv.',
'equivalent': '.eqv.',
'not': '.not. ',
}
_relationals = {
'!=': '/=',
}
def __init__(self, settings=None):
if not settings:
settings = {}
self.mangled_symbols = {} # Dict showing mapping of all words
self.used_name = []
self.type_aliases = dict(chain(self.type_aliases.items(),
settings.pop('type_aliases', {}).items()))
self.type_mappings = dict(chain(self.type_mappings.items(),
settings.pop('type_mappings', {}).items()))
super().__init__(settings)
self.known_functions = dict(known_functions)
userfuncs = settings.get('user_functions', {})
self.known_functions.update(userfuncs)
# leading columns depend on fixed or free format
standards = {66, 77, 90, 95, 2003, 2008}
if self._settings['standard'] not in standards:
raise ValueError("Unknown Fortran standard: %s" % self._settings[
'standard'])
self.module_uses = defaultdict(set) # e.g.: use iso_c_binding, only: c_int
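    # Fixed-form Fortran reserves columns 1-6: a 'C' in column 1 marks a
    # comment and a character in column 6 marks a continuation line, which is
    # what the leads below encode.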
@property
def _lead(self):
if self._settings['source_format'] == 'fixed':
return {'code': " ", 'cont': " @ ", 'comment': "C "}
elif self._settings['source_format'] == 'free':
return {'code': "", 'cont': " ", 'comment': "! "}
else:
raise ValueError("Unknown source format: %s" % self._settings['source_format'])
def _print_Symbol(self, expr):
if self._settings['name_mangling'] == True:
if expr not in self.mangled_symbols:
name = expr.name
while name.lower() in self.used_name:
name += '_'
self.used_name.append(name.lower())
if name == expr.name:
self.mangled_symbols[expr] = expr
else:
self.mangled_symbols[expr] = Symbol(name)
expr = expr.xreplace(self.mangled_symbols)
name = super()._print_Symbol(expr)
return name
def _rate_index_position(self, p):
return -p*5
def _get_statement(self, codestring):
return codestring
def _get_comment(self, text):
return "! {}".format(text)
def _declare_number_const(self, name, value):
return "parameter ({} = {})".format(name, self._print(value))
def _print_NumberSymbol(self, expr):
# A Number symbol that is not implemented here or with _printmethod
# is registered and evaluated
self._number_symbols.add((expr, Float(expr.evalf(self._settings['precision']))))
return str(expr)
def _format_code(self, lines):
return self._wrap_fortran(self.indent_code(lines))
def _traverse_matrix_indices(self, mat):
rows, cols = mat.shape
return ((i, j) for j in range(cols) for i in range(rows))
def _get_loop_opening_ending(self, indices):
open_lines = []
close_lines = []
for i in indices:
# fortran arrays start at 1 and end at dimension
var, start, stop = map(self._print,
[i.label, i.lower + 1, i.upper + 1])
open_lines.append("do %s = %s, %s" % (var, start, stop))
close_lines.append("end do")
return open_lines, close_lines
def _print_sign(self, expr):
from sympy import Abs
arg, = expr.args
if arg.is_integer:
new_expr = merge(0, isign(1, arg), Eq(arg, 0))
elif (arg.is_complex or arg.is_infinite):
new_expr = merge(cmplx(literal_dp(0), literal_dp(0)), arg/Abs(arg), Eq(Abs(arg), literal_dp(0)))
else:
new_expr = merge(literal_dp(0), dsign(literal_dp(1), arg), Eq(arg, literal_dp(0)))
return self._print(new_expr)
def _print_Piecewise(self, expr):
if expr.args[-1].cond != True:
# We need the last conditional to be a True, otherwise the resulting
# function may not return a result.
raise ValueError("All Piecewise expressions must contain an "
"(expr, True) statement to be used as a default "
"condition. Without one, the generated "
"expression may not evaluate to anything under "
"some condition.")
lines = []
if expr.has(Assignment):
for i, (e, c) in enumerate(expr.args):
if i == 0:
lines.append("if (%s) then" % self._print(c))
elif i == len(expr.args) - 1 and c == True:
lines.append("else")
else:
lines.append("else if (%s) then" % self._print(c))
lines.append(self._print(e))
lines.append("end if")
return "\n".join(lines)
elif self._settings["standard"] >= 95:
# Only supported in F95 and newer:
# The piecewise was used in an expression, need to do inline
# operators. This has the downside that inline operators will
# not work for statements that span multiple lines (Matrix or
# Indexed expressions).
pattern = "merge({T}, {F}, {COND})"
code = self._print(expr.args[-1].expr)
terms = list(expr.args[:-1])
while terms:
e, c = terms.pop()
expr = self._print(e)
cond = self._print(c)
code = pattern.format(T=expr, F=code, COND=cond)
return code
else:
# `merge` is not supported prior to F95
raise NotImplementedError("Using Piecewise as an expression using "
"inline operators is not supported in "
"standards earlier than Fortran95.")
def _print_MatrixElement(self, expr):
return "{}({}, {})".format(self.parenthesize(expr.parent,
PRECEDENCE["Atom"], strict=True), expr.i + 1, expr.j + 1)
def _print_Add(self, expr):
# purpose: print complex numbers nicely in Fortran.
# collect the purely real and purely imaginary parts:
pure_real = []
pure_imaginary = []
mixed = []
for arg in expr.args:
if arg.is_number and arg.is_real:
pure_real.append(arg)
elif arg.is_number and arg.is_imaginary:
pure_imaginary.append(arg)
else:
mixed.append(arg)
if pure_imaginary:
if mixed:
PREC = precedence(expr)
term = Add(*mixed)
t = self._print(term)
if t.startswith('-'):
sign = "-"
t = t[1:]
else:
sign = "+"
if precedence(term) < PREC:
t = "(%s)" % t
return "cmplx(%s,%s) %s %s" % (
self._print(Add(*pure_real)),
self._print(-S.ImaginaryUnit*Add(*pure_imaginary)),
sign, t,
)
else:
return "cmplx(%s,%s)" % (
self._print(Add(*pure_real)),
self._print(-S.ImaginaryUnit*Add(*pure_imaginary)),
)
else:
return CodePrinter._print_Add(self, expr)
def _print_Function(self, expr):
# All constant function args are evaluated as floats
prec = self._settings['precision']
args = [N(a, prec) for a in expr.args]
eval_expr = expr.func(*args)
if not isinstance(eval_expr, Function):
return self._print(eval_expr)
else:
return CodePrinter._print_Function(self, expr.func(*args))
def _print_Mod(self, expr):
# NOTE : Fortran has the functions mod() and modulo(). modulo() behaves
# the same wrt to the sign of the arguments as Python and SymPy's
# modulus computations (% and Mod()) but is not available in Fortran 66
# or Fortran 77, thus we raise an error.
if self._settings['standard'] in [66, 77]:
msg = ("Python % operator and SymPy's Mod() function are not "
"supported by Fortran 66 or 77 standards.")
raise NotImplementedError(msg)
else:
x, y = expr.args
return " modulo({}, {})".format(self._print(x), self._print(y))
def _print_ImaginaryUnit(self, expr):
# purpose: print complex numbers nicely in Fortran.
return "cmplx(0,1)"
def _print_int(self, expr):
return str(expr)
def _print_Mul(self, expr):
# purpose: print complex numbers nicely in Fortran.
if expr.is_number and expr.is_imaginary:
return "cmplx(0,%s)" % (
self._print(-S.ImaginaryUnit*expr)
)
else:
return CodePrinter._print_Mul(self, expr)
def _print_Pow(self, expr):
PREC = precedence(expr)
if expr.exp == -1:
return '%s/%s' % (
self._print(literal_dp(1)),
self.parenthesize(expr.base, PREC)
)
elif expr.exp == 0.5:
if expr.base.is_integer:
# Fortran intrinsic sqrt() does not accept integer argument
if expr.base.is_Number:
return 'sqrt(%s.0d0)' % self._print(expr.base)
else:
return 'sqrt(dble(%s))' % self._print(expr.base)
else:
return 'sqrt(%s)' % self._print(expr.base)
else:
return CodePrinter._print_Pow(self, expr)
def _print_Rational(self, expr):
p, q = int(expr.p), int(expr.q)
return "%d.0d0/%d.0d0" % (p, q)
def _print_Float(self, expr):
printed = CodePrinter._print_Float(self, expr)
e = printed.find('e')
if e > -1:
return "%sd%s" % (printed[:e], printed[e + 1:])
return "%sd0" % printed
def _print_Relational(self, expr):
lhs_code = self._print(expr.lhs)
rhs_code = self._print(expr.rhs)
op = expr.rel_op
op = op if op not in self._relationals else self._relationals[op]
return "{} {} {}".format(lhs_code, op, rhs_code)
def _print_Indexed(self, expr):
inds = [ self._print(i) for i in expr.indices ]
return "%s(%s)" % (self._print(expr.base.label), ", ".join(inds))
def _print_Idx(self, expr):
return self._print(expr.label)
def _print_AugmentedAssignment(self, expr):
lhs_code = self._print(expr.lhs)
rhs_code = self._print(expr.rhs)
return self._get_statement("{0} = {0} {1} {2}".format(
*map(lambda arg: self._print(arg),
[lhs_code, expr.binop, rhs_code])))
def _print_sum_(self, sm):
params = self._print(sm.array)
if sm.dim != None: # Must use '!= None', cannot use 'is not None'
params += ', ' + self._print(sm.dim)
if sm.mask != None: # Must use '!= None', cannot use 'is not None'
params += ', mask=' + self._print(sm.mask)
return '%s(%s)' % (sm.__class__.__name__.rstrip('_'), params)
def _print_product_(self, prod):
return self._print_sum_(prod)
def _print_Do(self, do):
excl = ['concurrent']
if do.step == 1:
excl.append('step')
step = ''
else:
step = ', {step}'
return (
'do {concurrent}{counter} = {first}, {last}'+step+'\n'
'{body}\n'
'end do\n'
).format(
concurrent='concurrent ' if do.concurrent else '',
**do.kwargs(apply=lambda arg: self._print(arg), exclude=excl)
)
def _print_ImpliedDoLoop(self, idl):
step = '' if idl.step == 1 else ', {step}'
return ('({expr}, {counter} = {first}, {last}'+step+')').format(
**idl.kwargs(apply=lambda arg: self._print(arg))
)
def _print_For(self, expr):
target = self._print(expr.target)
if isinstance(expr.iterable, Range):
start, stop, step = expr.iterable.args
else:
raise NotImplementedError("Only iterable currently supported is Range")
body = self._print(expr.body)
return ('do {target} = {start}, {stop}, {step}\n'
'{body}\n'
'end do').format(target=target, start=start, stop=stop,
step=step, body=body)
def _print_Type(self, type_):
type_ = self.type_aliases.get(type_, type_)
type_str = self.type_mappings.get(type_, type_.name)
module_uses = self.type_modules.get(type_)
if module_uses:
            for k, v in module_uses.items():
self.module_uses[k].add(v)
return type_str
def _print_Element(self, elem):
return '{symbol}({idxs})'.format(
symbol=self._print(elem.symbol),
idxs=', '.join(map(lambda arg: self._print(arg), elem.indices))
)
def _print_Extent(self, ext):
return str(ext)
def _print_Declaration(self, expr):
var = expr.variable
val = var.value
dim = var.attr_params('dimension')
intents = [intent in var.attrs for intent in (intent_in, intent_out, intent_inout)]
if intents.count(True) == 0:
intent = ''
elif intents.count(True) == 1:
intent = ', intent(%s)' % ['in', 'out', 'inout'][intents.index(True)]
else:
raise ValueError("Multiple intents specified for %s" % self)
if isinstance(var, Pointer):
raise NotImplementedError("Pointers are not available by default in Fortran.")
if self._settings["standard"] >= 90:
result = '{t}{vc}{dim}{intent}{alloc} :: {s}'.format(
t=self._print(var.type),
vc=', parameter' if value_const in var.attrs else '',
dim=', dimension(%s)' % ', '.join(map(lambda arg: self._print(arg), dim)) if dim else '',
intent=intent,
alloc=', allocatable' if allocatable in var.attrs else '',
s=self._print(var.symbol)
)
if val != None: # Must be "!= None", cannot be "is not None"
result += ' = %s' % self._print(val)
else:
if value_const in var.attrs or val:
raise NotImplementedError("F77 init./parameter statem. req. multiple lines.")
result = ' '.join(map(lambda arg: self._print(arg), [var.type, var.symbol]))
return result
def _print_Infinity(self, expr):
return '(huge(%s) + 1)' % self._print(literal_dp(0))
def _print_While(self, expr):
return 'do while ({condition})\n{body}\nend do'.format(**expr.kwargs(
apply=lambda arg: self._print(arg)))
def _print_BooleanTrue(self, expr):
return '.true.'
def _print_BooleanFalse(self, expr):
return '.false.'
def _pad_leading_columns(self, lines):
result = []
for line in lines:
if line.startswith('!'):
result.append(self._lead['comment'] + line[1:].lstrip())
else:
result.append(self._lead['code'] + line)
return result
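    # Fixed-form statements must not extend past column 72, hence the
    # hard-coded split positions below; free form instead continues lines
    # with a trailing '&'.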
def _wrap_fortran(self, lines):
"""Wrap long Fortran lines
Argument:
lines -- a list of lines (without \\n character)
A comment line is split at white space. Code lines are split with a more
complex rule to give nice results.
"""
# routine to find split point in a code line
my_alnum = set("_+-." + string.digits + string.ascii_letters)
my_white = set(" \t()")
def split_pos_code(line, endpos):
if len(line) <= endpos:
return len(line)
pos = endpos
split = lambda pos: \
(line[pos] in my_alnum and line[pos - 1] not in my_alnum) or \
(line[pos] not in my_alnum and line[pos - 1] in my_alnum) or \
(line[pos] in my_white and line[pos - 1] not in my_white) or \
(line[pos] not in my_white and line[pos - 1] in my_white)
while not split(pos):
pos -= 1
if pos == 0:
return endpos
return pos
# split line by line and add the split lines to result
result = []
if self._settings['source_format'] == 'free':
trailing = ' &'
else:
trailing = ''
for line in lines:
if line.startswith(self._lead['comment']):
# comment line
if len(line) > 72:
pos = line.rfind(" ", 6, 72)
if pos == -1:
pos = 72
hunk = line[:pos]
line = line[pos:].lstrip()
result.append(hunk)
while line:
pos = line.rfind(" ", 0, 66)
if pos == -1 or len(line) < 66:
pos = 66
hunk = line[:pos]
line = line[pos:].lstrip()
result.append("%s%s" % (self._lead['comment'], hunk))
else:
result.append(line)
elif line.startswith(self._lead['code']):
# code line
pos = split_pos_code(line, 72)
hunk = line[:pos].rstrip()
line = line[pos:].lstrip()
if line:
hunk += trailing
result.append(hunk)
while line:
pos = split_pos_code(line, 65)
hunk = line[:pos].rstrip()
line = line[pos:].lstrip()
if line:
hunk += trailing
result.append("%s%s" % (self._lead['cont'], hunk))
else:
result.append(line)
return result
def indent_code(self, code):
"""Accepts a string of code or a list of code lines"""
if isinstance(code, str):
code_lines = self.indent_code(code.splitlines(True))
return ''.join(code_lines)
free = self._settings['source_format'] == 'free'
code = [ line.lstrip(' \t') for line in code ]
inc_keyword = ('do ', 'if(', 'if ', 'do\n', 'else', 'program', 'interface')
dec_keyword = ('end do', 'enddo', 'end if', 'endif', 'else', 'end program', 'end interface')
increase = [ int(any(map(line.startswith, inc_keyword)))
for line in code ]
decrease = [ int(any(map(line.startswith, dec_keyword)))
for line in code ]
continuation = [ int(any(map(line.endswith, ['&', '&\n'])))
for line in code ]
level = 0
cont_padding = 0
tabwidth = 3
new_code = []
for i, line in enumerate(code):
if line == '' or line == '\n':
new_code.append(line)
continue
level -= decrease[i]
if free:
padding = " "*(level*tabwidth + cont_padding)
else:
padding = " "*level*tabwidth
line = "%s%s" % (padding, line)
if not free:
line = self._pad_leading_columns([line])[0]
new_code.append(line)
if continuation[i]:
cont_padding = 2*tabwidth
else:
cont_padding = 0
level += increase[i]
if not free:
return self._wrap_fortran(new_code)
return new_code
def _print_GoTo(self, goto):
if goto.expr: # computed goto
return "go to ({labels}), {expr}".format(
labels=', '.join(map(lambda arg: self._print(arg), goto.labels)),
expr=self._print(goto.expr)
)
else:
lbl, = goto.labels
return "go to %s" % self._print(lbl)
def _print_Program(self, prog):
return (
"program {name}\n"
"{body}\n"
"end program\n"
).format(**prog.kwargs(apply=lambda arg: self._print(arg)))
def _print_Module(self, mod):
return (
"module {name}\n"
"{declarations}\n"
"\ncontains\n\n"
"{definitions}\n"
"end module\n"
).format(**mod.kwargs(apply=lambda arg: self._print(arg)))
def _print_Stream(self, strm):
if strm.name == 'stdout' and self._settings["standard"] >= 2003:
self.module_uses['iso_c_binding'].add('stdint=>input_unit')
return 'input_unit'
elif strm.name == 'stderr' and self._settings["standard"] >= 2003:
self.module_uses['iso_c_binding'].add('stdint=>error_unit')
return 'error_unit'
else:
if strm.name == 'stdout':
return '*'
else:
return strm.name
def _print_Print(self, ps):
if ps.format_string != None: # Must be '!= None', cannot be 'is not None'
fmt = self._print(ps.format_string)
else:
fmt = "*"
return "print {fmt}, {iolist}".format(fmt=fmt, iolist=', '.join(
map(lambda arg: self._print(arg), ps.print_args)))
def _print_Return(self, rs):
arg, = rs.args
return "{result_name} = {arg}".format(
result_name=self._context.get('result_name', 'sympy_result'),
arg=self._print(arg)
)
def _print_FortranReturn(self, frs):
arg, = frs.args
if arg:
return 'return %s' % self._print(arg)
else:
return 'return'
def _head(self, entity, fp, **kwargs):
bind_C_params = fp.attr_params('bind_C')
if bind_C_params is None:
bind = ''
else:
bind = ' bind(C, name="%s")' % bind_C_params[0] if bind_C_params else ' bind(C)'
result_name = self._settings.get('result_name', None)
return (
"{entity}{name}({arg_names}){result}{bind}\n"
"{arg_declarations}"
).format(
entity=entity,
name=self._print(fp.name),
arg_names=', '.join([self._print(arg.symbol) for arg in fp.parameters]),
result=(' result(%s)' % result_name) if result_name else '',
bind=bind,
arg_declarations='\n'.join(map(lambda arg: self._print(Declaration(arg)), fp.parameters))
)
def _print_FunctionPrototype(self, fp):
entity = "{} function ".format(self._print(fp.return_type))
return (
"interface\n"
"{function_head}\n"
"end function\n"
"end interface"
).format(function_head=self._head(entity, fp))
def _print_FunctionDefinition(self, fd):
if elemental in fd.attrs:
prefix = 'elemental '
elif pure in fd.attrs:
prefix = 'pure '
else:
prefix = ''
entity = "{} function ".format(self._print(fd.return_type))
with printer_context(self, result_name=fd.name):
return (
"{prefix}{function_head}\n"
"{body}\n"
"end function\n"
).format(
prefix=prefix,
function_head=self._head(entity, fd),
body=self._print(fd.body)
)
def _print_Subroutine(self, sub):
return (
'{subroutine_head}\n'
'{body}\n'
'end subroutine\n'
).format(
subroutine_head=self._head('subroutine ', sub),
body=self._print(sub.body)
)
def _print_SubroutineCall(self, scall):
return 'call {name}({args})'.format(
name=self._print(scall.name),
args=', '.join(map(lambda arg: self._print(arg), scall.subroutine_args))
)
def _print_use_rename(self, rnm):
return "%s => %s" % tuple(map(lambda arg: self._print(arg), rnm.args))
def _print_use(self, use):
result = 'use %s' % self._print(use.namespace)
if use.rename != None: # Must be '!= None', cannot be 'is not None'
result += ', ' + ', '.join([self._print(rnm) for rnm in use.rename])
if use.only != None: # Must be '!= None', cannot be 'is not None'
result += ', only: ' + ', '.join([self._print(nly) for nly in use.only])
return result
def _print_BreakToken(self, _):
return 'exit'
def _print_ContinueToken(self, _):
return 'cycle'
def _print_ArrayConstructor(self, ac):
fmtstr = "[%s]" if self._settings["standard"] >= 2003 else '(/%s/)'
return fmtstr % ', '.join(map(lambda arg: self._print(arg), ac.elements))
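

# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative addition, not part of the upstream
# module).  ``fcode`` is the convenience wrapper re-exported from
# sympy.printing.codeprinter near the top of this file; the setting names come
# from ``_default_settings`` above.  Rendered strings in the comments are
# loose expectations, not asserted outputs.
if __name__ == '__main__':
    from sympy import Piecewise, sqrt, symbols

    x = symbols('x')

    # Default settings: fixed-form F77, so lines carry the six-column lead
    # defined in ``_lead`` and half-integer powers become sqrt calls.
    print(fcode(sqrt(x)))            # roughly '      sqrt(x)'
    print(fcode(x**2 + x))
    # Free-form F95 permits the inline ``merge`` used by ``_print_Piecewise``,
    # e.g. something like 'merge(x, 0, x > 0)'.
    print(fcode(Piecewise((x, x > 0), (0, True)),
                standard=95, source_format='free'))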
|