"""Tools and arithmetics for monomials of distributed polynomials. """
from itertools import combinations_with_replacement, product
from textwrap import dedent
from sympy.core import Mul, S, Tuple, sympify
from sympy.polys.polyerrors import ExactQuotientFailed
from sympy.polys.polyutils import PicklableWithSlots, dict_from_expr
from sympy.utilities import public
from sympy.utilities.iterables import is_sequence, iterable
@public
def itermonomials(variables, max_degrees, min_degrees=None):
r"""
``max_degrees`` and ``min_degrees`` are either both integers or both lists.
Unless otherwise specified, ``min_degrees`` is either ``0`` or
``[0, ..., 0]``.
A generator of all monomials ``monom`` is returned, such that
either
``min_degree <= total_degree(monom) <= max_degree``,
or
``min_degrees[i] <= degree_list(monom)[i] <= max_degrees[i]``,
for all ``i``.
Case I. ``max_degrees`` and ``min_degrees`` are both integers
=============================================================
Given a set of variables $V$, a max_degree $N$ and a min_degree $M$,
generate a set of monomials of degree less than or equal to $N$ and greater
than or equal to $M$. The total number of monomials in commutative
variables is huge and is given by the following formula if $M = 0$:
.. math::
\frac{(\#V + N)!}{\#V! N!}
For example, if we would like to generate a dense polynomial of
total degree $N = 50$ and $M = 0$, which is the worst case, in 5
variables, assuming that exponents and all coefficients are 32-bit long
and stored in an array, we would need almost 80 GiB of memory! Fortunately
most polynomials that we will encounter are sparse.
Consider monomials in commutative variables $x$ and $y$
and non-commutative variables $a$ and $b$::
>>> from sympy import symbols
>>> from sympy.polys.monomials import itermonomials
>>> from sympy.polys.orderings import monomial_key
>>> from sympy.abc import x, y
>>> sorted(itermonomials([x, y], 2), key=monomial_key('grlex', [y, x]))
[1, x, y, x**2, x*y, y**2]
>>> sorted(itermonomials([x, y], 3), key=monomial_key('grlex', [y, x]))
[1, x, y, x**2, x*y, y**2, x**3, x**2*y, x*y**2, y**3]
>>> a, b = symbols('a, b', commutative=False)
>>> set(itermonomials([a, b, x], 2))
{1, a, a**2, b, b**2, x, x**2, a*b, b*a, x*a, x*b}
>>> sorted(itermonomials([x, y], 2, 1), key=monomial_key('grlex', [y, x]))
[x, y, x**2, x*y, y**2]
Case II. ``max_degrees`` and ``min_degrees`` are both lists
===========================================================
If ``max_degrees = [d_1, ..., d_n]`` and
``min_degrees = [e_1, ..., e_n]``, the number of monomials generated
is:
.. math::
(d_1 - e_1 + 1) (d_2 - e_2 + 1) \cdots (d_n - e_n + 1)
Let us generate all monomials ``monom`` in variables $x$ and $y$
such that ``[1, 2][i] <= degree_list(monom)[i] <= [2, 4][i]``,
``i = 0, 1`` ::
>>> from sympy import symbols
>>> from sympy.polys.monomials import itermonomials
>>> from sympy.polys.orderings import monomial_key
>>> from sympy.abc import x, y
>>> sorted(itermonomials([x, y], [2, 4], [1, 2]), reverse=True, key=monomial_key('lex', [x, y]))
[x**2*y**4, x**2*y**3, x**2*y**2, x*y**4, x*y**3, x*y**2]
"""
n = len(variables)
if is_sequence(max_degrees):
if len(max_degrees) != n:
raise ValueError('Argument sizes do not match')
if min_degrees is None:
min_degrees = [0]*n
elif not is_sequence(min_degrees):
raise ValueError('min_degrees is not a list')
else:
if len(min_degrees) != n:
raise ValueError('Argument sizes do not match')
if any(i < 0 for i in min_degrees):
raise ValueError("min_degrees cannot contain negative numbers")
total_degree = False
else:
max_degree = max_degrees
if max_degree < 0:
raise ValueError("max_degrees cannot be negative")
if min_degrees is None:
min_degree = 0
else:
if min_degrees < 0:
raise ValueError("min_degrees cannot be negative")
min_degree = min_degrees
total_degree = True
if total_degree:
if min_degree > max_degree:
return
if not variables or max_degree == 0:
yield S.One
return
# Force to list in case of passed tuple or other incompatible collection
variables = list(variables) + [S.One]
if all(variable.is_commutative for variable in variables):
monomials_list_comm = []
for item in combinations_with_replacement(variables, max_degree):
powers = dict()
for variable in variables:
powers[variable] = 0
for variable in item:
if variable != 1:
powers[variable] += 1
if sum(powers.values()) >= min_degree:
monomials_list_comm.append(Mul(*item))
yield from set(monomials_list_comm)
else:
monomials_list_non_comm = []
for item in product(variables, repeat=max_degree):
powers = dict()
for variable in variables:
powers[variable] = 0
for variable in item:
if variable != 1:
powers[variable] += 1
if sum(powers.values()) >= min_degree:
monomials_list_non_comm.append(Mul(*item))
yield from set(monomials_list_non_comm)
else:
if any(min_degrees[i] > max_degrees[i] for i in range(n)):
raise ValueError('min_degrees[i] must be <= max_degrees[i] for all i')
power_lists = []
for var, min_d, max_d in zip(variables, min_degrees, max_degrees):
power_lists.append([var**i for i in range(min_d, max_d + 1)])
for powers in product(*power_lists):
yield Mul(*powers)
def monomial_count(V, N):
r"""
Computes the number of monomials.
The number of monomials is given by the following formula:
.. math::
\frac{(\#V + N)!}{\#V! N!}
where `N` is a total degree and `V` is a set of variables.
Examples
========
>>> from sympy.polys.monomials import itermonomials, monomial_count
>>> from sympy.polys.orderings import monomial_key
>>> from sympy.abc import x, y
>>> monomial_count(2, 2)
6
>>> M = list(itermonomials([x, y], 2))
>>> sorted(M, key=monomial_key('grlex', [y, x]))
[1, x, y, x**2, x*y, y**2]
>>> len(M)
6
"""
from sympy.functions.combinatorial.factorials import factorial
return factorial(V + N) / factorial(V) / factorial(N)
def monomial_mul(A, B):
"""
Multiplication of tuples representing monomials.
Examples
========
Let's multiply `x**3*y**4*z` by `x*y**2`::
>>> from sympy.polys.monomials import monomial_mul
>>> monomial_mul((3, 4, 1), (1, 2, 0))
(4, 6, 1)
which gives `x**4*y**5*z`.
"""
return tuple([ a + b for a, b in zip(A, B) ])
def monomial_div(A, B):
"""
Division of tuples representing monomials.
Examples
========
Let's divide `x**3*y**4*z` by `x*y**2`::
>>> from sympy.polys.monomials import monomial_div
>>> monomial_div((3, 4, 1), (1, 2, 0))
(2, 2, 1)
which gives `x**2*y**2*z`. However::
>>> monomial_div((3, 4, 1), (1, 2, 2)) is None
True
`x*y**2*z**2` does not divide `x**3*y**4*z`.
"""
C = monomial_ldiv(A, B)
if all(c >= 0 for c in C):
return tuple(C)
else:
return None
def monomial_ldiv(A, B):
"""
Division of tuples representing monomials.
Examples
========
Let's divide `x**3*y**4*z` by `x*y**2`::
>>> from sympy.polys.monomials import monomial_ldiv
>>> monomial_ldiv((3, 4, 1), (1, 2, 0))
(2, 2, 1)
which gives `x**2*y**2*z`.
>>> monomial_ldiv((3, 4, 1), (1, 2, 2))
(2, 2, -1)
which gives `x**2*y**2*z**-1`.
"""
return tuple([ a - b for a, b in zip(A, B) ])
def monomial_pow(A, n):
"""Return the n-th pow of the monomial. """
return tuple([ a*n for a in A ])
def monomial_gcd(A, B):
"""
Greatest common divisor of tuples representing monomials.
Examples
========
Let's compute the GCD of `x*y**4*z` and `x**3*y**2`::
>>> from sympy.polys.monomials import monomial_gcd
>>> monomial_gcd((1, 4, 1), (3, 2, 0))
(1, 2, 0)
which gives `x*y**2`.
"""
return tuple([ min(a, b) for a, b in zip(A, B) ])
def monomial_lcm(A, B):
"""
Least common multiple of tuples representing monomials.
Examples
========
Let's compute the LCM of `x*y**4*z` and `x**3*y**2`::
>>> from sympy.polys.monomials import monomial_lcm
>>> monomial_lcm((1, 4, 1), (3, 2, 0))
(3, 4, 1)
which gives `x**3*y**4*z`.
"""
return tuple([ max(a, b) for a, b in zip(A, B) ])
def monomial_divides(A, B):
"""
Does there exist a monomial X such that XA == B?
Examples
========
>>> from sympy.polys.monomials import monomial_divides
>>> monomial_divides((1, 2), (3, 4))
True
>>> monomial_divides((1, 2), (0, 2))
False
"""
return all(a <= b for a, b in zip(A, B))
def monomial_max(*monoms):
"""
Returns maximal degree for each variable in a set of monomials.
Examples
========
Consider monomials `x**3*y**4*z**5`, `y**5*z` and `x**6*y**3*z**9`.
We wish to find out what is the maximal degree for each of `x`, `y`
and `z` variables::
>>> from sympy.polys.monomials import monomial_max
>>> monomial_max((3,4,5), (0,5,1), (6,3,9))
(6, 5, 9)
"""
M = list(monoms[0])
for N in monoms[1:]:
for i, n in enumerate(N):
M[i] = max(M[i], n)
return tuple(M)
def monomial_min(*monoms):
"""
Returns minimal degree for each variable in a set of monomials.
Examples
========
Consider monomials `x**3*y**4*z**5`, `y**5*z` and `x**6*y**3*z**9`.
We wish to find out what is the minimal degree for each of `x`, `y`
and `z` variables::
>>> from sympy.polys.monomials import monomial_min
>>> monomial_min((3,4,5), (0,5,1), (6,3,9))
(0, 3, 1)
"""
M = list(monoms[0])
for N in monoms[1:]:
for i, n in enumerate(N):
M[i] = min(M[i], n)
return tuple(M)
def monomial_deg(M):
"""
Returns the total degree of a monomial.
Examples
========
The total degree of `xy^2` is 3:
>>> from sympy.polys.monomials import monomial_deg
>>> monomial_deg((1, 2))
3
"""
return sum(M)
def term_div(a, b, domain):
"""Division of two terms in over a ring/field. """
a_lm, a_lc = a
b_lm, b_lc = b
monom = monomial_div(a_lm, b_lm)
if domain.is_Field:
if monom is not None:
return monom, domain.quo(a_lc, b_lc)
else:
return None
else:
if not (monom is None or a_lc % b_lc):
return monom, domain.quo(a_lc, b_lc)
else:
return None
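# Illustrative sketch (not part of the original module): term_div works on
# (monomial, coefficient) pairs; over a ring such as ZZ the coefficient quotient
# must be exact. The ZZ import and values below are assumptions of this sketch:
#
#     from sympy.polys.domains import ZZ
#     term_div(((3, 4), ZZ(6)), ((1, 2), ZZ(3)), ZZ)   # -> ((2, 2), 2)
#     term_div(((3, 4), ZZ(5)), ((1, 2), ZZ(3)), ZZ)   # -> None, 5 is not divisible by 3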
class MonomialOps:
"""Code generator of fast monomial arithmetic functions. """
def __init__(self, ngens):
self.ngens = ngens
def _build(self, code, name):
ns = {}
exec(code, ns)
return ns[name]
def _vars(self, name):
return [ "%s%s" % (name, i) for i in range(self.ngens) ]
def mul(self):
name = "monomial_mul"
template = dedent("""\
def %(name)s(A, B):
(%(A)s,) = A
(%(B)s,) = B
return (%(AB)s,)
""")
A = self._vars("a")
B = self._vars("b")
AB = [ "%s + %s" % (a, b) for a, b in zip(A, B) ]
code = template % dict(name=name, A=", ".join(A), B=", ".join(B), AB=", ".join(AB))
return self._build(code, name)
def pow(self):
name = "monomial_pow"
template = dedent("""\
def %(name)s(A, k):
(%(A)s,) = A
return (%(Ak)s,)
""")
A = self._vars("a")
Ak = [ "%s*k" % a for a in A ]
code = template % dict(name=name, A=", ".join(A), Ak=", ".join(Ak))
return self._build(code, name)
def mulpow(self):
name = "monomial_mulpow"
template = dedent("""\
def %(name)s(A, B, k):
(%(A)s,) = A
(%(B)s,) = B
return (%(ABk)s,)
""")
A = self._vars("a")
B = self._vars("b")
ABk = [ "%s + %s*k" % (a, b) for a, b in zip(A, B) ]
code = template % dict(name=name, A=", ".join(A), B=", ".join(B), ABk=", ".join(ABk))
return self._build(code, name)
def ldiv(self):
name = "monomial_ldiv"
template = dedent("""\
def %(name)s(A, B):
(%(A)s,) = A
(%(B)s,) = B
return (%(AB)s,)
""")
A = self._vars("a")
B = self._vars("b")
AB = [ "%s - %s" % (a, b) for a, b in zip(A, B) ]
code = template % dict(name=name, A=", ".join(A), B=", ".join(B), AB=", ".join(AB))
return self._build(code, name)
def div(self):
name = "monomial_div"
template = dedent("""\
def %(name)s(A, B):
(%(A)s,) = A
(%(B)s,) = B
%(RAB)s
return (%(R)s,)
""")
A = self._vars("a")
B = self._vars("b")
RAB = [ "r%(i)s = a%(i)s - b%(i)s\n if r%(i)s < 0: return None" % dict(i=i) for i in range(self.ngens) ]
R = self._vars("r")
code = template % dict(name=name, A=", ".join(A), B=", ".join(B), RAB="\n ".join(RAB), R=", ".join(R))
return self._build(code, name)
def lcm(self):
name = "monomial_lcm"
template = dedent("""\
def %(name)s(A, B):
(%(A)s,) = A
(%(B)s,) = B
return (%(AB)s,)
""")
A = self._vars("a")
B = self._vars("b")
AB = [ "%s if %s >= %s else %s" % (a, a, b, b) for a, b in zip(A, B) ]
code = template % dict(name=name, A=", ".join(A), B=", ".join(B), AB=", ".join(AB))
return self._build(code, name)
def gcd(self):
name = "monomial_gcd"
template = dedent("""\
def %(name)s(A, B):
(%(A)s,) = A
(%(B)s,) = B
return (%(AB)s,)
""")
A = self._vars("a")
B = self._vars("b")
AB = [ "%s if %s <= %s else %s" % (a, a, b, b) for a, b in zip(A, B) ]
code = template % dict(name=name, A=", ".join(A), B=", ".join(B), AB=", ".join(AB))
return self._build(code, name)
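# Illustrative sketch (not part of the original module): MonomialOps builds
# tuple-arithmetic functions specialized to a fixed number of generators via
# exec(), avoiding a per-call loop over exponents. The values below are assumptions:
#
#     ops = MonomialOps(3)
#     mul = ops.mul()
#     mul((1, 2, 3), (0, 1, 4))      # -> (1, 3, 7)
#     div = ops.div()
#     div((1, 2, 3), (0, 3, 1))      # -> None, since 2 - 3 < 0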
@public
class Monomial(PicklableWithSlots):
"""Class representing a monomial, i.e. a product of powers. """
__slots__ = ('exponents', 'gens')
def __init__(self, monom, gens=None):
if not iterable(monom):
rep, gens = dict_from_expr(sympify(monom), gens=gens)
if len(rep) == 1 and list(rep.values())[0] == 1:
monom = list(rep.keys())[0]
else:
raise ValueError("Expected a monomial got {}".format(monom))
self.exponents = tuple(map(int, monom))
self.gens = gens
def rebuild(self, exponents, gens=None):
return self.__class__(exponents, gens or self.gens)
def __len__(self):
return len(self.exponents)
def __iter__(self):
return iter(self.exponents)
def __getitem__(self, item):
return self.exponents[item]
def __hash__(self):
return hash((self.__class__.__name__, self.exponents, self.gens))
def __str__(self):
if self.gens:
return "*".join([ "%s**%s" % (gen, exp) for gen, exp in zip(self.gens, self.exponents) ])
else:
return "%s(%s)" % (self.__class__.__name__, self.exponents)
def as_expr(self, *gens):
"""Convert a monomial instance to a SymPy expression. """
gens = gens or self.gens
if not gens:
raise ValueError(
"Cannot convert %s to an expression without generators" % self)
return Mul(*[ gen**exp for gen, exp in zip(gens, self.exponents) ])
def __eq__(self, other):
if isinstance(other, Monomial):
exponents = other.exponents
elif isinstance(other, (tuple, Tuple)):
exponents = other
else:
return False
return self.exponents == exponents
def __ne__(self, other):
return not self == other
def __mul__(self, other):
if isinstance(other, Monomial):
exponents = other.exponents
elif isinstance(other, (tuple, Tuple)):
exponents = other
else:
raise NotImplementedError
return self.rebuild(monomial_mul(self.exponents, exponents))
def __truediv__(self, other):
if isinstance(other, Monomial):
exponents = other.exponents
elif isinstance(other, (tuple, Tuple)):
exponents = other
else:
raise NotImplementedError
result = monomial_div(self.exponents, exponents)
if result is not None:
return self.rebuild(result)
else:
raise ExactQuotientFailed(self, Monomial(other))
__floordiv__ = __truediv__
def __pow__(self, other):
n = int(other)
if not n:
return self.rebuild([0]*len(self))
elif n > 0:
exponents = self.exponents
for i in range(1, n):
exponents = monomial_mul(exponents, self.exponents)
return self.rebuild(exponents)
else:
raise ValueError("a non-negative integer expected, got %s" % other)
def gcd(self, other):
"""Greatest common divisor of monomials. """
if isinstance(other, Monomial):
exponents = other.exponents
elif isinstance(other, (tuple, Tuple)):
exponents = other
else:
raise TypeError(
"an instance of Monomial class expected, got %s" % other)
return self.rebuild(monomial_gcd(self.exponents, exponents))
def lcm(self, other):
"""Least common multiple of monomials. """
if isinstance(other, Monomial):
exponents = other.exponents
elif isinstance(other, (tuple, Tuple)):
exponents = other
else:
raise TypeError(
"an instance of Monomial class expected, got %s" % other)
return self.rebuild(monomial_lcm(self.exponents, exponents))
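# Illustrative sketch (not part of the original module): Monomial wraps an
# exponent tuple together with optional generators. The symbols below come from
# sympy.abc and are assumptions of this sketch:
#
#     from sympy.abc import x, y
#     m = Monomial(x**2*y, (x, y))
#     m.exponents                                 # -> (2, 1)
#     (m * Monomial((1, 3), (x, y))).as_expr()    # -> x**3*y**4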
"""Sparse rational function fields. """
from typing import Any, Dict as tDict
from functools import reduce
from operator import add, mul, lt, le, gt, ge
from sympy.core.expr import Expr
from sympy.core.mod import Mod
from sympy.core.numbers import Exp1
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.core.sympify import CantSympify, sympify
from sympy.functions.elementary.exponential import ExpBase
from sympy.polys.domains.domainelement import DomainElement
from sympy.polys.domains.fractionfield import FractionField
from sympy.polys.domains.polynomialring import PolynomialRing
from sympy.polys.constructor import construct_domain
from sympy.polys.orderings import lex
from sympy.polys.polyerrors import CoercionFailed
from sympy.polys.polyoptions import build_options
from sympy.polys.polyutils import _parallel_dict_from_expr
from sympy.polys.rings import PolyElement
from sympy.printing.defaults import DefaultPrinting
from sympy.utilities import public
from sympy.utilities.iterables import is_sequence
from sympy.utilities.magic import pollute
@public
def field(symbols, domain, order=lex):
"""Construct new rational function field returning (field, x1, ..., xn). """
_field = FracField(symbols, domain, order)
return (_field,) + _field.gens
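# Illustrative sketch (not part of the original module): the tuple returned by
# field() unpacks into the field followed by its generators. The QQ domain below
# is an assumption of this sketch:
#
#     from sympy.polys.domains import QQ
#     F, x, y = field("x,y", QQ)
#     (x**2 + y)/(x - 1)              # a FracElement of F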
@public
def xfield(symbols, domain, order=lex):
"""Construct new rational function field returning (field, (x1, ..., xn)). """
_field = FracField(symbols, domain, order)
return (_field, _field.gens)
@public
def vfield(symbols, domain, order=lex):
"""Construct new rational function field and inject generators into global namespace. """
_field = FracField(symbols, domain, order)
pollute([ sym.name for sym in _field.symbols ], _field.gens)
return _field
@public
def sfield(exprs, *symbols, **options):
"""Construct a field deriving generators and domain
from options and input expressions.
Parameters
==========
exprs : :py:class:`~.Expr` or sequence of :py:class:`~.Expr` (sympifiable)
symbols : sequence of :py:class:`~.Symbol`/:py:class:`~.Expr`
options : keyword arguments understood by :py:class:`~.Options`
Examples
========
>>> from sympy import exp, log, symbols, sfield
>>> x = symbols("x")
>>> K, f = sfield((x*log(x) + 4*x**2)*exp(1/x + log(x)/3)/x**2)
>>> K
Rational function field in x, exp(1/x), log(x), x**(1/3) over ZZ with lex order
>>> f
(4*x**2*(exp(1/x)) + x*(exp(1/x))*(log(x)))/((x**(1/3))**5)
"""
single = False
if not is_sequence(exprs):
exprs, single = [exprs], True
exprs = list(map(sympify, exprs))
opt = build_options(symbols, options)
numdens = []
for expr in exprs:
numdens.extend(expr.as_numer_denom())
reps, opt = _parallel_dict_from_expr(numdens, opt)
if opt.domain is None:
# NOTE: this is inefficient because construct_domain() automatically
# performs conversion to the target domain. It shouldn't do this.
coeffs = sum([list(rep.values()) for rep in reps], [])
opt.domain, _ = construct_domain(coeffs, opt=opt)
_field = FracField(opt.gens, opt.domain, opt.order)
fracs = []
for i in range(0, len(reps), 2):
fracs.append(_field(tuple(reps[i:i+2])))
if single:
return (_field, fracs[0])
else:
return (_field, fracs)
_field_cache = {} # type: tDict[Any, Any]
class FracField(DefaultPrinting):
"""Multivariate distributed rational function field. """
def __new__(cls, symbols, domain, order=lex):
from sympy.polys.rings import PolyRing
ring = PolyRing(symbols, domain, order)
symbols = ring.symbols
ngens = ring.ngens
domain = ring.domain
order = ring.order
_hash_tuple = (cls.__name__, symbols, ngens, domain, order)
obj = _field_cache.get(_hash_tuple)
if obj is None:
obj = object.__new__(cls)
obj._hash_tuple = _hash_tuple
obj._hash = hash(_hash_tuple)
obj.ring = ring
obj.dtype = type("FracElement", (FracElement,), {"field": obj})
obj.symbols = symbols
obj.ngens = ngens
obj.domain = domain
obj.order = order
obj.zero = obj.dtype(ring.zero)
obj.one = obj.dtype(ring.one)
obj.gens = obj._gens()
for symbol, generator in zip(obj.symbols, obj.gens):
if isinstance(symbol, Symbol):
name = symbol.name
if not hasattr(obj, name):
setattr(obj, name, generator)
_field_cache[_hash_tuple] = obj
return obj
def _gens(self):
"""Return a list of polynomial generators. """
return tuple([ self.dtype(gen) for gen in self.ring.gens ])
def __getnewargs__(self):
return (self.symbols, self.domain, self.order)
def __hash__(self):
return self._hash
def index(self, gen):
if isinstance(gen, self.dtype):
return self.ring.index(gen.to_poly())
else:
raise ValueError("expected a %s, got %s instead" % (self.dtype,gen))
def __eq__(self, other):
return isinstance(other, FracField) and \
(self.symbols, self.ngens, self.domain, self.order) == \
(other.symbols, other.ngens, other.domain, other.order)
def __ne__(self, other):
return not self == other
def raw_new(self, numer, denom=None):
return self.dtype(numer, denom)
def new(self, numer, denom=None):
if denom is None: denom = self.ring.one
numer, denom = numer.cancel(denom)
return self.raw_new(numer, denom)
def domain_new(self, element):
return self.domain.convert(element)
def ground_new(self, element):
try:
return self.new(self.ring.ground_new(element))
except CoercionFailed:
domain = self.domain
if not domain.is_Field and domain.has_assoc_Field:
ring = self.ring
ground_field = domain.get_field()
element = ground_field.convert(element)
numer = ring.ground_new(ground_field.numer(element))
denom = ring.ground_new(ground_field.denom(element))
return self.raw_new(numer, denom)
else:
raise
def field_new(self, element):
if isinstance(element, FracElement):
if self == element.field:
return element
if isinstance(self.domain, FractionField) and \
self.domain.field == element.field:
return self.ground_new(element)
elif isinstance(self.domain, PolynomialRing) and \
self.domain.ring.to_field() == element.field:
return self.ground_new(element)
else:
raise NotImplementedError("conversion")
elif isinstance(element, PolyElement):
denom, numer = element.clear_denoms()
if isinstance(self.domain, PolynomialRing) and \
numer.ring == self.domain.ring:
numer = self.ring.ground_new(numer)
elif isinstance(self.domain, FractionField) and \
numer.ring == self.domain.field.to_ring():
numer = self.ring.ground_new(numer)
else:
numer = numer.set_ring(self.ring)
denom = self.ring.ground_new(denom)
return self.raw_new(numer, denom)
elif isinstance(element, tuple) and len(element) == 2:
numer, denom = list(map(self.ring.ring_new, element))
return self.new(numer, denom)
elif isinstance(element, str):
raise NotImplementedError("parsing")
elif isinstance(element, Expr):
return self.from_expr(element)
else:
return self.ground_new(element)
__call__ = field_new
def _rebuild_expr(self, expr, mapping):
domain = self.domain
powers = tuple((gen, gen.as_base_exp()) for gen in mapping.keys()
if gen.is_Pow or isinstance(gen, ExpBase))
def _rebuild(expr):
generator = mapping.get(expr)
if generator is not None:
return generator
elif expr.is_Add:
return reduce(add, list(map(_rebuild, expr.args)))
elif expr.is_Mul:
return reduce(mul, list(map(_rebuild, expr.args)))
elif expr.is_Pow or isinstance(expr, (ExpBase, Exp1)):
b, e = expr.as_base_exp()
# look for bg**eg whose integer power may be b**e
for gen, (bg, eg) in powers:
if bg == b and Mod(e, eg) == 0:
return mapping.get(gen)**int(e/eg)
if e.is_Integer and e is not S.One:
return _rebuild(b)**int(e)
try:
return domain.convert(expr)
except CoercionFailed:
if not domain.is_Field and domain.has_assoc_Field:
return domain.get_field().convert(expr)
else:
raise
return _rebuild(sympify(expr))
def from_expr(self, expr):
mapping = dict(list(zip(self.symbols, self.gens)))
try:
frac = self._rebuild_expr(expr, mapping)
except CoercionFailed:
raise ValueError("expected an expression convertible to a rational function in %s, got %s" % (self, expr))
else:
return self.field_new(frac)
def to_domain(self):
return FractionField(self)
def to_ring(self):
from sympy.polys.rings import PolyRing
return PolyRing(self.symbols, self.domain, self.order)
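# Illustrative sketch (not part of the original module): from_expr() maps plain
# SymPy expressions onto field elements via the symbol -> generator mapping. The
# QQ domain and sympy.abc symbols below are assumptions of this sketch:
#
#     from sympy.abc import x, y
#     from sympy.polys.domains import QQ
#     F, X, Y = field("x,y", QQ)
#     F.from_expr((x**2 + y)/(x - y))   # -> (x**2 + y)/(x - y) as a FracElement of F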
class FracElement(DomainElement, DefaultPrinting, CantSympify):
"""Element of multivariate distributed rational function field. """
def __init__(self, numer, denom=None):
if denom is None:
denom = self.field.ring.one
elif not denom:
raise ZeroDivisionError("zero denominator")
self.numer = numer
self.denom = denom
def raw_new(f, numer, denom):
return f.__class__(numer, denom)
def new(f, numer, denom):
return f.raw_new(*numer.cancel(denom))
def to_poly(f):
if f.denom != 1:
raise ValueError("f.denom should be 1")
return f.numer
def parent(self):
return self.field.to_domain()
def __getnewargs__(self):
return (self.field, self.numer, self.denom)
_hash = None
def __hash__(self):
_hash = self._hash
if _hash is None:
self._hash = _hash = hash((self.field, self.numer, self.denom))
return _hash
def copy(self):
return self.raw_new(self.numer.copy(), self.denom.copy())
def set_field(self, new_field):
if self.field == new_field:
return self
else:
new_ring = new_field.ring
numer = self.numer.set_ring(new_ring)
denom = self.denom.set_ring(new_ring)
return new_field.new(numer, denom)
def as_expr(self, *symbols):
return self.numer.as_expr(*symbols)/self.denom.as_expr(*symbols)
def __eq__(f, g):
if isinstance(g, FracElement) and f.field == g.field:
return f.numer == g.numer and f.denom == g.denom
else:
return f.numer == g and f.denom == f.field.ring.one
def __ne__(f, g):
return not f == g
def __bool__(f):
return bool(f.numer)
def sort_key(self):
return (self.denom.sort_key(), self.numer.sort_key())
def _cmp(f1, f2, op):
if isinstance(f2, f1.field.dtype):
return op(f1.sort_key(), f2.sort_key())
else:
return NotImplemented
def __lt__(f1, f2):
return f1._cmp(f2, lt)
def __le__(f1, f2):
return f1._cmp(f2, le)
def __gt__(f1, f2):
return f1._cmp(f2, gt)
def __ge__(f1, f2):
return f1._cmp(f2, ge)
def __pos__(f):
"""Negate all coefficients in ``f``. """
return f.raw_new(f.numer, f.denom)
def __neg__(f):
"""Negate all coefficients in ``f``. """
return f.raw_new(-f.numer, f.denom)
def _extract_ground(self, element):
domain = self.field.domain
try:
element = domain.convert(element)
except CoercionFailed:
if not domain.is_Field and domain.has_assoc_Field:
ground_field = domain.get_field()
try:
element = ground_field.convert(element)
except CoercionFailed:
pass
else:
return -1, ground_field.numer(element), ground_field.denom(element)
return 0, None, None
else:
return 1, element, None
def __add__(f, g):
"""Add rational functions ``f`` and ``g``. """
field = f.field
if not g:
return f
elif not f:
return g
elif isinstance(g, field.dtype):
if f.denom == g.denom:
return f.new(f.numer + g.numer, f.denom)
else:
return f.new(f.numer*g.denom + f.denom*g.numer, f.denom*g.denom)
elif isinstance(g, field.ring.dtype):
return f.new(f.numer + f.denom*g, f.denom)
else:
if isinstance(g, FracElement):
if isinstance(field.domain, FractionField) and field.domain.field == g.field:
pass
elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field:
return g.__radd__(f)
else:
return NotImplemented
elif isinstance(g, PolyElement):
if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring:
pass
else:
return g.__radd__(f)
return f.__radd__(g)
def __radd__(f, c):
if isinstance(c, f.field.ring.dtype):
return f.new(f.numer + f.denom*c, f.denom)
op, g_numer, g_denom = f._extract_ground(c)
if op == 1:
return f.new(f.numer + f.denom*g_numer, f.denom)
elif not op:
return NotImplemented
else:
return f.new(f.numer*g_denom + f.denom*g_numer, f.denom*g_denom)
def __sub__(f, g):
"""Subtract rational functions ``f`` and ``g``. """
field = f.field
if not g:
return f
elif not f:
return -g
elif isinstance(g, field.dtype):
if f.denom == g.denom:
return f.new(f.numer - g.numer, f.denom)
else:
return f.new(f.numer*g.denom - f.denom*g.numer, f.denom*g.denom)
elif isinstance(g, field.ring.dtype):
return f.new(f.numer - f.denom*g, f.denom)
else:
if isinstance(g, FracElement):
if isinstance(field.domain, FractionField) and field.domain.field == g.field:
pass
elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field:
return g.__rsub__(f)
else:
return NotImplemented
elif isinstance(g, PolyElement):
if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring:
pass
else:
return g.__rsub__(f)
op, g_numer, g_denom = f._extract_ground(g)
if op == 1:
return f.new(f.numer - f.denom*g_numer, f.denom)
elif not op:
return NotImplemented
else:
return f.new(f.numer*g_denom - f.denom*g_numer, f.denom*g_denom)
def __rsub__(f, c):
if isinstance(c, f.field.ring.dtype):
return f.new(-f.numer + f.denom*c, f.denom)
op, g_numer, g_denom = f._extract_ground(c)
if op == 1:
return f.new(-f.numer + f.denom*g_numer, f.denom)
elif not op:
return NotImplemented
else:
return f.new(-f.numer*g_denom + f.denom*g_numer, f.denom*g_denom)
def __mul__(f, g):
"""Multiply rational functions ``f`` and ``g``. """
field = f.field
if not f or not g:
return field.zero
elif isinstance(g, field.dtype):
return f.new(f.numer*g.numer, f.denom*g.denom)
elif isinstance(g, field.ring.dtype):
return f.new(f.numer*g, f.denom)
else:
if isinstance(g, FracElement):
if isinstance(field.domain, FractionField) and field.domain.field == g.field:
pass
elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field:
return g.__rmul__(f)
else:
return NotImplemented
elif isinstance(g, PolyElement):
if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring:
pass
else:
return g.__rmul__(f)
return f.__rmul__(g)
def __rmul__(f, c):
if isinstance(c, f.field.ring.dtype):
return f.new(f.numer*c, f.denom)
op, g_numer, g_denom = f._extract_ground(c)
if op == 1:
return f.new(f.numer*g_numer, f.denom)
elif not op:
return NotImplemented
else:
return f.new(f.numer*g_numer, f.denom*g_denom)
def __truediv__(f, g):
"""Computes quotient of fractions ``f`` and ``g``. """
field = f.field
if not g:
raise ZeroDivisionError
elif isinstance(g, field.dtype):
return f.new(f.numer*g.denom, f.denom*g.numer)
elif isinstance(g, field.ring.dtype):
return f.new(f.numer, f.denom*g)
else:
if isinstance(g, FracElement):
if isinstance(field.domain, FractionField) and field.domain.field == g.field:
pass
elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field:
return g.__rtruediv__(f)
else:
return NotImplemented
elif isinstance(g, PolyElement):
if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring:
pass
else:
return g.__rtruediv__(f)
op, g_numer, g_denom = f._extract_ground(g)
if op == 1:
return f.new(f.numer, f.denom*g_numer)
elif not op:
return NotImplemented
else:
return f.new(f.numer*g_denom, f.denom*g_numer)
def __rtruediv__(f, c):
if not f:
raise ZeroDivisionError
elif isinstance(c, f.field.ring.dtype):
return f.new(f.denom*c, f.numer)
op, g_numer, g_denom = f._extract_ground(c)
if op == 1:
return f.new(f.denom*g_numer, f.numer)
elif not op:
return NotImplemented
else:
return f.new(f.denom*g_numer, f.numer*g_denom)
def __pow__(f, n):
"""Raise ``f`` to a non-negative power ``n``. """
if n >= 0:
return f.raw_new(f.numer**n, f.denom**n)
elif not f:
raise ZeroDivisionError
else:
return f.raw_new(f.denom**-n, f.numer**-n)
def diff(f, x):
"""Computes partial derivative in ``x``.
Examples
========
>>> from sympy.polys.fields import field
>>> from sympy.polys.domains import ZZ
>>> _, x, y, z = field("x,y,z", ZZ)
>>> ((x**2 + y)/(z + 1)).diff(x)
2*x/(z + 1)
"""
x = x.to_poly()
return f.new(f.numer.diff(x)*f.denom - f.numer*f.denom.diff(x), f.denom**2)
def __call__(f, *values):
if 0 < len(values) <= f.field.ngens:
return f.evaluate(list(zip(f.field.gens, values)))
else:
raise ValueError("expected at least 1 and at most %s values, got %s" % (f.field.ngens, len(values)))
def evaluate(f, x, a=None):
if isinstance(x, list) and a is None:
x = [ (X.to_poly(), a) for X, a in x ]
numer, denom = f.numer.evaluate(x), f.denom.evaluate(x)
else:
x = x.to_poly()
numer, denom = f.numer.evaluate(x, a), f.denom.evaluate(x, a)
field = numer.ring.to_field()
return field.new(numer, denom)
def subs(f, x, a=None):
if isinstance(x, list) and a is None:
x = [ (X.to_poly(), a) for X, a in x ]
numer, denom = f.numer.subs(x), f.denom.subs(x)
else:
x = x.to_poly()
numer, denom = f.numer.subs(x, a), f.denom.subs(x, a)
return f.new(numer, denom)
def compose(f, x, a=None):
raise NotImplementedError
"""Algorithms for partial fraction decomposition of rational functions. """
from sympy.core import S, Add, sympify, Function, Lambda, Dummy
from sympy.core.traversal import preorder_traversal
from sympy.polys import Poly, RootSum, cancel, factor
from sympy.polys.polyerrors import PolynomialError
from sympy.polys.polyoptions import allowed_flags, set_defaults
from sympy.polys.polytools import parallel_poly_from_expr
from sympy.utilities import numbered_symbols, take, xthreaded, public
@xthreaded
@public
def apart(f, x=None, full=False, **options):
"""
Compute partial fraction decomposition of a rational function.
Given a rational function ``f``, computes the partial fraction
decomposition of ``f``. Two algorithms are available: one is based on the
undetermined coefficients method, the other is Bronstein's full partial
fraction decomposition algorithm.
The undetermined coefficients method (selected by ``full=False``) uses
polynomial factorization (and therefore accepts the same options as
factor) for the denominator. By default it works over the rational
numbers, so decomposition of denominators with non-rational roots
(e.g. irrational or complex roots) is not supported by default (see the
options of factor).
Bronstein's algorithm can be selected by using ``full=True`` and allows a
decomposition of denominators with non-rational roots. A human-readable
result can be obtained via ``doit()`` (see examples below).
Examples
========
>>> from sympy.polys.partfrac import apart
>>> from sympy.abc import x, y
By default, using the undetermined coefficients method:
>>> apart(y/(x + 2)/(x + 1), x)
-y/(x + 2) + y/(x + 1)
The undetermined coefficients method does not provide a result when the
denominator's roots are not rational:
>>> apart(y/(x**2 + x + 1), x)
y/(x**2 + x + 1)
You can choose Bronstein's algorithm by setting ``full=True``:
>>> apart(y/(x**2 + x + 1), x, full=True)
RootSum(_w**2 + _w + 1, Lambda(_a, (-2*_a*y/3 - y/3)/(-_a + x)))
Calling ``doit()`` yields a human-readable result:
>>> apart(y/(x**2 + x + 1), x, full=True).doit()
(-y/3 - 2*y*(-1/2 - sqrt(3)*I/2)/3)/(x + 1/2 + sqrt(3)*I/2) + (-y/3 -
2*y*(-1/2 + sqrt(3)*I/2)/3)/(x + 1/2 - sqrt(3)*I/2)
See Also
========
apart_list, assemble_partfrac_list
"""
allowed_flags(options, [])
f = sympify(f)
if f.is_Atom:
return f
else:
P, Q = f.as_numer_denom()
_options = options.copy()
options = set_defaults(options, extension=True)
try:
(P, Q), opt = parallel_poly_from_expr((P, Q), x, **options)
except PolynomialError as msg:
if f.is_commutative:
raise PolynomialError(msg)
# non-commutative
if f.is_Mul:
c, nc = f.args_cnc(split_1=False)
nc = f.func(*nc)
if c:
c = apart(f.func._from_args(c), x=x, full=full, **_options)
return c*nc
else:
return nc
elif f.is_Add:
c = []
nc = []
for i in f.args:
if i.is_commutative:
c.append(i)
else:
try:
nc.append(apart(i, x=x, full=full, **_options))
except NotImplementedError:
nc.append(i)
return apart(f.func(*c), x=x, full=full, **_options) + f.func(*nc)
else:
reps = []
pot = preorder_traversal(f)
next(pot)
for e in pot:
try:
reps.append((e, apart(e, x=x, full=full, **_options)))
pot.skip() # this was handled successfully
except NotImplementedError:
pass
return f.xreplace(dict(reps))
if P.is_multivariate:
fc = f.cancel()
if fc != f:
return apart(fc, x=x, full=full, **_options)
raise NotImplementedError(
"multivariate partial fraction decomposition")
common, P, Q = P.cancel(Q)
poly, P = P.div(Q, auto=True)
P, Q = P.rat_clear_denoms(Q)
if Q.degree() <= 1:
partial = P/Q
else:
if not full:
partial = apart_undetermined_coeffs(P, Q)
else:
partial = apart_full_decomposition(P, Q)
terms = S.Zero
for term in Add.make_args(partial):
if term.has(RootSum):
terms += term
else:
terms += factor(term)
return common*(poly.as_expr() + terms)
def apart_undetermined_coeffs(P, Q):
"""Partial fractions via method of undetermined coefficients. """
X = numbered_symbols(cls=Dummy)
partial, symbols = [], []
_, factors = Q.factor_list()
for f, k in factors:
n, q = f.degree(), Q
for i in range(1, k + 1):
coeffs, q = take(X, n), q.quo(f)
partial.append((coeffs, q, f, i))
symbols.extend(coeffs)
dom = Q.get_domain().inject(*symbols)
F = Poly(0, Q.gen, domain=dom)
for i, (coeffs, q, f, k) in enumerate(partial):
h = Poly(coeffs, Q.gen, domain=dom)
partial[i] = (h, f, k)
q = q.set_domain(dom)
F += h*q
system, result = [], S.Zero
for (k,), coeff in F.terms():
system.append(coeff - P.nth(k))
from sympy.solvers import solve
solution = solve(system, symbols)
for h, f, k in partial:
h = h.as_expr().subs(solution)
result += h/f.as_expr()**k
return result
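# Illustrative sketch (not part of the original module): the undetermined
# coefficients step works on Poly numerator/denominator pairs. The inputs below
# are assumptions of this sketch:
#
#     from sympy import Poly
#     from sympy.abc import x
#     apart_undetermined_coeffs(Poly(1, x), Poly(x**2 + x, x))
#     # -> an expression equivalent to 1/x - 1/(x + 1)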
def apart_full_decomposition(P, Q):
"""
Bronstein's full partial fraction decomposition algorithm.
Given a univariate rational function ``f``, performing only GCD
operations over the algebraic closure of the initial ground domain
of definition, compute full partial fraction decomposition with
fractions having linear denominators.
Note that no factorization of the initial denominator of ``f`` is
performed. The final decomposition is formed in terms of a sum of
:class:`RootSum` instances.
References
==========
.. [1] [Bronstein93]_
"""
return assemble_partfrac_list(apart_list(P/Q, P.gens[0]))
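# Illustrative sketch (not part of the original module): the full decomposition
# is expressed through RootSum terms over the denominator's roots. The inputs
# below are assumptions of this sketch:
#
#     from sympy import Poly
#     from sympy.abc import x
#     apart_full_decomposition(Poly(1, x), Poly(x**2 - 1, x))
#     # -> a RootSum expression equivalent to 1/(2*(x - 1)) - 1/(2*(x + 1))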
@public
def apart_list(f, x=None, dummies=None, **options):
"""
Compute partial fraction decomposition of a rational function
and return the result in structured form.
Given a rational function ``f`` compute the partial fraction decomposition
of ``f``. Only Bronstein's full partial fraction decomposition algorithm
is supported by this method. The return value is highly structured and
perfectly suited for further algorithmic treatment rather than being
human-readable. The function returns a tuple holding three elements:
* The first item is the common coefficient, free of the variable `x` used
for decomposition. (It is an element of the base field `K`.)
* The second item is the polynomial part of the decomposition. This can be
the zero polynomial. (It is an element of `K[x]`.)
* The third part itself is a list of quadruples. Each quadruple
has the following elements in this order:
- The (not necessarily irreducible) polynomial `D` whose roots `w_i` appear
in the linear denominator of a bunch of related fraction terms. (This item
can also be a list of explicit roots. However, at the moment ``apart_list``
never returns a result this way, but the related ``assemble_partfrac_list``
function accepts this format as input.)
- The numerator of the fraction, written as a function of the root `w`
- The linear denominator of the fraction *excluding its power exponent*,
written as a function of the root `w`.
- The power to which the denominator has to be raised.
One can always rebuild a plain expression by using the function ``assemble_partfrac_list``.
Examples
========
A first example:
>>> from sympy.polys.partfrac import apart_list, assemble_partfrac_list
>>> from sympy.abc import x, t
>>> f = (2*x**3 - 2*x) / (x**2 - 2*x + 1)
>>> pfd = apart_list(f)
>>> pfd
(1,
Poly(2*x + 4, x, domain='ZZ'),
[(Poly(_w - 1, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1)])
>>> assemble_partfrac_list(pfd)
2*x + 4 + 4/(x - 1)
Second example:
>>> f = (-2*x - 2*x**2) / (3*x**2 - 6*x)
>>> pfd = apart_list(f)
>>> pfd
(-1,
Poly(2/3, x, domain='QQ'),
[(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 2), Lambda(_a, -_a + x), 1)])
>>> assemble_partfrac_list(pfd)
-2/3 - 2/(x - 2)
Another example, showing symbolic parameters:
>>> pfd = apart_list(t/(x**2 + x + t), x)
>>> pfd
(1,
Poly(0, x, domain='ZZ[t]'),
[(Poly(_w**2 + _w + t, _w, domain='ZZ[t]'),
Lambda(_a, -2*_a*t/(4*t - 1) - t/(4*t - 1)),
Lambda(_a, -_a + x),
1)])
>>> assemble_partfrac_list(pfd)
RootSum(_w**2 + _w + t, Lambda(_a, (-2*_a*t/(4*t - 1) - t/(4*t - 1))/(-_a + x)))
This example is taken from Bronstein's original paper:
>>> f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2)
>>> pfd = apart_list(f)
>>> pfd
(1,
Poly(0, x, domain='ZZ'),
[(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1),
(Poly(_w**2 - 1, _w, domain='ZZ'), Lambda(_a, -3*_a - 6), Lambda(_a, -_a + x), 2),
(Poly(_w + 1, _w, domain='ZZ'), Lambda(_a, -4), Lambda(_a, -_a + x), 1)])
>>> assemble_partfrac_list(pfd)
-4/(x + 1) - 3/(x + 1)**2 - 9/(x - 1)**2 + 4/(x - 2)
See also
========
apart, assemble_partfrac_list
References
==========
.. [1] [Bronstein93]_
"""
allowed_flags(options, [])
f = sympify(f)
if f.is_Atom:
return f
else:
P, Q = f.as_numer_denom()
options = set_defaults(options, extension=True)
(P, Q), opt = parallel_poly_from_expr((P, Q), x, **options)
if P.is_multivariate:
raise NotImplementedError(
"multivariate partial fraction decomposition")
common, P, Q = P.cancel(Q)
poly, P = P.div(Q, auto=True)
P, Q = P.rat_clear_denoms(Q)
polypart = poly
if dummies is None:
def dummies(name):
d = Dummy(name)
while True:
yield d
dummies = dummies("w")
rationalpart = apart_list_full_decomposition(P, Q, dummies)
return (common, polypart, rationalpart)
def apart_list_full_decomposition(P, Q, dummygen):
"""
Bronstein's full partial fraction decomposition algorithm.
Given a univariate rational function ``f``, performing only GCD
operations over the algebraic closure of the initial ground domain
of definition, compute full partial fraction decomposition with
fractions having linear denominators.
Note that no factorization of the initial denominator of ``f`` is
performed. The final decomposition is formed in terms of a sum of
:class:`RootSum` instances.
References
==========
.. [1] [Bronstein93]_
"""
f, x, U = P/Q, P.gen, []
u = Function('u')(x)
a = Dummy('a')
partial = []
for d, n in Q.sqf_list_include(all=True):
b = d.as_expr()
U += [ u.diff(x, n - 1) ]
h = cancel(f*b**n) / u**n
H, subs = [h], []
for j in range(1, n):
H += [ H[-1].diff(x) / j ]
for j in range(1, n + 1):
subs += [ (U[j - 1], b.diff(x, j) / j) ]
for j in range(0, n):
P, Q = cancel(H[j]).as_numer_denom()
for i in range(0, j + 1):
P = P.subs(*subs[j - i])
Q = Q.subs(*subs[0])
P = Poly(P, x)
Q = Poly(Q, x)
G = P.gcd(d)
D = d.quo(G)
B, g = Q.half_gcdex(D)
b = (P * B.quo(g)).rem(D)
Dw = D.subs(x, next(dummygen))
numer = Lambda(a, b.as_expr().subs(x, a))
denom = Lambda(a, (x - a))
exponent = n-j
partial.append((Dw, numer, denom, exponent))
return partial
@public
def assemble_partfrac_list(partial_list):
r"""Reassemble a full partial fraction decomposition
from a structured result obtained by the function ``apart_list``.
Examples
========
This example is taken from Bronstein's original paper:
>>> from sympy.polys.partfrac import apart_list, assemble_partfrac_list
>>> from sympy.abc import x
>>> f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2)
>>> pfd = apart_list(f)
>>> pfd
(1,
Poly(0, x, domain='ZZ'),
[(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1),
(Poly(_w**2 - 1, _w, domain='ZZ'), Lambda(_a, -3*_a - 6), Lambda(_a, -_a + x), 2),
(Poly(_w + 1, _w, domain='ZZ'), Lambda(_a, -4), Lambda(_a, -_a + x), 1)])
>>> assemble_partfrac_list(pfd)
-4/(x + 1) - 3/(x + 1)**2 - 9/(x - 1)**2 + 4/(x - 2)
If we happen to know some roots we can provide them easily inside the structure:
>>> pfd = apart_list(2/(x**2-2))
>>> pfd
(1,
Poly(0, x, domain='ZZ'),
[(Poly(_w**2 - 2, _w, domain='ZZ'),
Lambda(_a, _a/2),
Lambda(_a, -_a + x),
1)])
>>> pfda = assemble_partfrac_list(pfd)
>>> pfda
RootSum(_w**2 - 2, Lambda(_a, _a/(-_a + x)))/2
>>> pfda.doit()
-sqrt(2)/(2*(x + sqrt(2))) + sqrt(2)/(2*(x - sqrt(2)))
>>> from sympy import Dummy, Poly, Lambda, sqrt
>>> a = Dummy("a")
>>> pfd = (1, Poly(0, x, domain='ZZ'), [([sqrt(2),-sqrt(2)], Lambda(a, a/2), Lambda(a, -a + x), 1)])
>>> assemble_partfrac_list(pfd)
-sqrt(2)/(2*(x + sqrt(2))) + sqrt(2)/(2*(x - sqrt(2)))
See Also
========
apart, apart_list
"""
# Common factor
common = partial_list[0]
# Polynomial part
polypart = partial_list[1]
pfd = polypart.as_expr()
# Rational parts
for r, nf, df, ex in partial_list[2]:
if isinstance(r, Poly):
# Assemble in case the roots are given implicitly by a polynomial
an, nu = nf.variables, nf.expr
ad, de = df.variables, df.expr
# Hack to make dummies equal because Lambda created new Dummies
de = de.subs(ad[0], an[0])
func = Lambda(tuple(an), nu/de**ex)
pfd += RootSum(r, func, auto=False, quadratic=False)
else:
# Assemble in case the roots are given explicitly by a list of algebraic numbers
for root in r:
pfd += nf(root)/df(root)**ex
return common*pfd
"""Real and complex root isolation and refinement algorithms. """
from sympy.polys.densearith import (
dup_neg, dup_rshift, dup_rem)
from sympy.polys.densebasic import (
dup_LC, dup_TC, dup_degree,
dup_strip, dup_reverse,
dup_convert,
dup_terms_gcd)
from sympy.polys.densetools import (
dup_clear_denoms,
dup_mirror, dup_scale, dup_shift,
dup_transform,
dup_diff,
dup_eval, dmp_eval_in,
dup_sign_variations,
dup_real_imag)
from sympy.polys.factortools import (
dup_factor_list)
from sympy.polys.polyerrors import (
RefinementFailed,
DomainError)
from sympy.polys.sqfreetools import (
dup_sqf_part, dup_sqf_list)
def dup_sturm(f, K):
"""
Computes the Sturm sequence of ``f`` in ``F[x]``.
Given a univariate, square-free polynomial ``f(x)`` returns the
associated Sturm sequence ``f_0(x), ..., f_n(x)`` defined by::
f_0(x), f_1(x) = f(x), f'(x)
f_n = -rem(f_{n-2}(x), f_{n-1}(x))
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> R.dup_sturm(x**3 - 2*x**2 + x - 3)
[x**3 - 2*x**2 + x - 3, 3*x**2 - 4*x + 1, 2/9*x + 25/9, -2079/4]
References
==========
.. [1] [Davenport88]_
"""
if not K.is_Field:
raise DomainError("Cannot compute Sturm sequence over %s" % K)
f = dup_sqf_part(f, K)
sturm = [f, dup_diff(f, 1, K)]
while sturm[-1]:
s = dup_rem(sturm[-2], sturm[-1], K)
sturm.append(dup_neg(s, K))
return sturm[:-1]
def dup_root_upper_bound(f, K):
"""Compute the LMQ upper bound for the positive roots of `f`;
LMQ (Local Max Quadratic) was developed by Akritas-Strzebonski-Vigklas.
References
==========
.. [1] Alkiviadis G. Akritas: "Linear and Quadratic Complexity Bounds on the
Values of the Positive Roots of Polynomials"
Journal of Universal Computer Science, Vol. 15, No. 3, 523-537, 2009.
"""
n, P = len(f), []
t = n * [K.one]
if dup_LC(f, K) < 0:
f = dup_neg(f, K)
f = list(reversed(f))
for i in range(0, n):
if f[i] >= 0:
continue
a, QL = K.log(-f[i], 2), []
for j in range(i + 1, n):
if f[j] <= 0:
continue
q = t[j] + a - K.log(f[j], 2)
QL.append([q // (j - i), j])
if not QL:
continue
q = min(QL)
t[q[1]] = t[q[1]] + 1
P.append(q[0])
if not P:
return None
else:
return K.get_field()(2)**(max(P) + 1)
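# Illustrative sketch (not part of the original module): dense polynomials are
# coefficient lists, highest degree first. For f = x**2 - 3 the LMQ bound is a
# power of two exceeding the positive root sqrt(3). The ZZ domain below is an
# assumption of this sketch:
#
#     from sympy.polys.domains import ZZ
#     dup_root_upper_bound([ZZ(1), ZZ(0), ZZ(-3)], ZZ)
#     # -> an element of QQ, a power of 2 bounding sqrt(3) from above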
def dup_root_lower_bound(f, K):
"""Compute the LMQ lower bound for the positive roots of `f`;
LMQ (Local Max Quadratic) was developed by Akritas-Strzebonski-Vigklas.
References
==========
.. [1] Alkiviadis G. Akritas: "Linear and Quadratic Complexity Bounds on the
Values of the Positive Roots of Polynomials"
Journal of Universal Computer Science, Vol. 15, No. 3, 523-537, 2009.
"""
bound = dup_root_upper_bound(dup_reverse(f), K)
if bound is not None:
return 1/bound
else:
return None
def _mobius_from_interval(I, field):
"""Convert an open interval to a Mobius transform. """
s, t = I
a, c = field.numer(s), field.denom(s)
b, d = field.numer(t), field.denom(t)
return a, b, c, d
def _mobius_to_interval(M, field):
"""Convert a Mobius transform to an open interval. """
a, b, c, d = M
s, t = field(a, c), field(b, d)
if s <= t:
return (s, t)
else:
return (t, s)
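# Illustrative sketch (not part of the original module): the two helpers are
# inverse to each other; (a, b, c, d) encodes the endpoints a/c and b/d. The QQ
# field below is an assumption of this sketch:
#
#     from sympy.polys.domains import QQ
#     M = _mobius_from_interval((QQ(1, 2), QQ(3)), QQ)   # -> (1, 3, 2, 1)
#     _mobius_to_interval(M, QQ)                          # -> (1/2, 3)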
def dup_step_refine_real_root(f, M, K, fast=False):
"""One step of positive real root refinement algorithm. """
a, b, c, d = M
if a == b and c == d:
return f, (a, b, c, d)
A = dup_root_lower_bound(f, K)
if A is not None:
A = K(int(A))
else:
A = K.zero
if fast and A > 16:
f = dup_scale(f, A, K)
a, c, A = A*a, A*c, K.one
if A >= K.one:
f = dup_shift(f, A, K)
b, d = A*a + b, A*c + d
if not dup_eval(f, K.zero, K):
return f, (b, b, d, d)
f, g = dup_shift(f, K.one, K), f
a1, b1, c1, d1 = a, a + b, c, c + d
if not dup_eval(f, K.zero, K):
return f, (b1, b1, d1, d1)
k = dup_sign_variations(f, K)
if k == 1:
a, b, c, d = a1, b1, c1, d1
else:
f = dup_shift(dup_reverse(g), K.one, K)
if not dup_eval(f, K.zero, K):
f = dup_rshift(f, 1, K)
a, b, c, d = b, a + b, d, c + d
return f, (a, b, c, d)
def dup_inner_refine_real_root(f, M, K, eps=None, steps=None, disjoint=None, fast=False, mobius=False):
"""Refine a positive root of `f` given a Mobius transform or an interval. """
F = K.get_field()
if len(M) == 2:
a, b, c, d = _mobius_from_interval(M, F)
else:
a, b, c, d = M
while not c:
f, (a, b, c, d) = dup_step_refine_real_root(f, (a, b, c,
d), K, fast=fast)
if eps is not None and steps is not None:
for i in range(0, steps):
if abs(F(a, c) - F(b, d)) >= eps:
f, (a, b, c, d) = dup_step_refine_real_root(f, (a, b, c, d), K, fast=fast)
else:
break
else:
if eps is not None:
while abs(F(a, c) - F(b, d)) >= eps:
f, (a, b, c, d) = dup_step_refine_real_root(f, (a, b, c, d), K, fast=fast)
if steps is not None:
for i in range(0, steps):
f, (a, b, c, d) = dup_step_refine_real_root(f, (a, b, c, d), K, fast=fast)
if disjoint is not None:
while True:
u, v = _mobius_to_interval((a, b, c, d), F)
if v <= disjoint or disjoint <= u:
break
else:
f, (a, b, c, d) = dup_step_refine_real_root(f, (a, b, c, d), K, fast=fast)
if not mobius:
return _mobius_to_interval((a, b, c, d), F)
else:
return f, (a, b, c, d)
def dup_outer_refine_real_root(f, s, t, K, eps=None, steps=None, disjoint=None, fast=False):
"""Refine a positive root of `f` given an interval `(s, t)`. """
a, b, c, d = _mobius_from_interval((s, t), K.get_field())
f = dup_transform(f, dup_strip([a, b]),
dup_strip([c, d]), K)
if dup_sign_variations(f, K) != 1:
raise RefinementFailed("there should be exactly one root in (%s, %s) interval" % (s, t))
return dup_inner_refine_real_root(f, (a, b, c, d), K, eps=eps, steps=steps, disjoint=disjoint, fast=fast)
def dup_refine_real_root(f, s, t, K, eps=None, steps=None, disjoint=None, fast=False):
"""Refine real root's approximating interval to the given precision. """
if K.is_QQ:
(_, f), K = dup_clear_denoms(f, K, convert=True), K.get_ring()
elif not K.is_ZZ:
raise DomainError("real root refinement not supported over %s" % K)
if s == t:
return (s, t)
if s > t:
s, t = t, s
negative = False
if s < 0:
if t <= 0:
f, s, t, negative = dup_mirror(f, K), -t, -s, True
else:
raise ValueError("Cannot refine a real root in (%s, %s)" % (s, t))
if negative and disjoint is not None:
if disjoint < 0:
disjoint = -disjoint
else:
disjoint = None
s, t = dup_outer_refine_real_root(
f, s, t, K, eps=eps, steps=steps, disjoint=disjoint, fast=fast)
if negative:
return (-t, -s)
else:
return ( s, t)
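# Illustrative sketch (not part of the original module): refine the positive
# root of x**2 - 2 inside (1, 2) down to a width below 1/100. The domains and
# values below are assumptions of this sketch:
#
#     from sympy.polys.domains import ZZ, QQ
#     dup_refine_real_root([ZZ(1), ZZ(0), ZZ(-2)], QQ(1), QQ(2), ZZ, eps=QQ(1, 100))
#     # -> a pair (s, t) of rationals with s <= sqrt(2) <= t and t - s < 1/100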
def dup_inner_isolate_real_roots(f, K, eps=None, fast=False):
"""Internal function for isolation positive roots up to given precision.
References
==========
.. [1] Alkiviadis G. Akritas and Adam W. Strzebonski: A Comparative
Study of Two Real Root Isolation Methods. Nonlinear Analysis:
Modelling and Control, Vol. 10, No. 4, 297-304, 2005.
.. [2] Alkiviadis G. Akritas, Adam W. Strzebonski and Panagiotis S.
Vigklas: Improving the Performance of the Continued Fractions
Method Using New Bounds of Positive Roots. Nonlinear Analysis:
Modelling and Control, Vol. 13, No. 3, 265-279, 2008.
"""
a, b, c, d = K.one, K.zero, K.zero, K.one
k = dup_sign_variations(f, K)
if k == 0:
return []
if k == 1:
roots = [dup_inner_refine_real_root(
f, (a, b, c, d), K, eps=eps, fast=fast, mobius=True)]
else:
roots, stack = [], [(a, b, c, d, f, k)]
while stack:
a, b, c, d, f, k = stack.pop()
A = dup_root_lower_bound(f, K)
if A is not None:
A = K(int(A))
else:
A = K.zero
if fast and A > 16:
f = dup_scale(f, A, K)
a, c, A = A*a, A*c, K.one
if A >= K.one:
f = dup_shift(f, A, K)
b, d = A*a + b, A*c + d
if not dup_TC(f, K):
roots.append((f, (b, b, d, d)))
f = dup_rshift(f, 1, K)
k = dup_sign_variations(f, K)
if k == 0:
continue
if k == 1:
roots.append(dup_inner_refine_real_root(
f, (a, b, c, d), K, eps=eps, fast=fast, mobius=True))
continue
f1 = dup_shift(f, K.one, K)
a1, b1, c1, d1, r = a, a + b, c, c + d, 0
if not dup_TC(f1, K):
roots.append((f1, (b1, b1, d1, d1)))
f1, r = dup_rshift(f1, 1, K), 1
k1 = dup_sign_variations(f1, K)
k2 = k - k1 - r
a2, b2, c2, d2 = b, a + b, d, c + d
if k2 > 1:
f2 = dup_shift(dup_reverse(f), K.one, K)
if not dup_TC(f2, K):
f2 = dup_rshift(f2, 1, K)
k2 = dup_sign_variations(f2, K)
else:
f2 = None
if k1 < k2:
a1, a2, b1, b2 = a2, a1, b2, b1
c1, c2, d1, d2 = c2, c1, d2, d1
f1, f2, k1, k2 = f2, f1, k2, k1
if not k1:
continue
if f1 is None:
f1 = dup_shift(dup_reverse(f), K.one, K)
if not dup_TC(f1, K):
f1 = dup_rshift(f1, 1, K)
if k1 == 1:
roots.append(dup_inner_refine_real_root(
f1, (a1, b1, c1, d1), K, eps=eps, fast=fast, mobius=True))
else:
stack.append((a1, b1, c1, d1, f1, k1))
if not k2:
continue
if f2 is None:
f2 = dup_shift(dup_reverse(f), K.one, K)
if not dup_TC(f2, K):
f2 = dup_rshift(f2, 1, K)
if k2 == 1:
roots.append(dup_inner_refine_real_root(
f2, (a2, b2, c2, d2), K, eps=eps, fast=fast, mobius=True))
else:
stack.append((a2, b2, c2, d2, f2, k2))
return roots
def _discard_if_outside_interval(f, M, inf, sup, K, negative, fast, mobius):
"""Discard an isolating interval if outside ``(inf, sup)``. """
F = K.get_field()
while True:
u, v = _mobius_to_interval(M, F)
if negative:
u, v = -v, -u
if (inf is None or u >= inf) and (sup is None or v <= sup):
if not mobius:
return u, v
else:
return f, M
elif (sup is not None and u > sup) or (inf is not None and v < inf):
return None
else:
f, M = dup_step_refine_real_root(f, M, K, fast=fast)
def dup_inner_isolate_positive_roots(f, K, eps=None, inf=None, sup=None, fast=False, mobius=False):
"""Iteratively compute disjoint positive root isolation intervals. """
if sup is not None and sup < 0:
return []
roots = dup_inner_isolate_real_roots(f, K, eps=eps, fast=fast)
F, results = K.get_field(), []
if inf is not None or sup is not None:
for f, M in roots:
result = _discard_if_outside_interval(f, M, inf, sup, K, False, fast, mobius)
if result is not None:
results.append(result)
elif not mobius:
for f, M in roots:
u, v = _mobius_to_interval(M, F)
results.append((u, v))
else:
results = roots
return results
def dup_inner_isolate_negative_roots(f, K, inf=None, sup=None, eps=None, fast=False, mobius=False):
"""Iteratively compute disjoint negative root isolation intervals. """
if inf is not None and inf >= 0:
return []
roots = dup_inner_isolate_real_roots(dup_mirror(f, K), K, eps=eps, fast=fast)
F, results = K.get_field(), []
if inf is not None or sup is not None:
for f, M in roots:
result = _discard_if_outside_interval(f, M, inf, sup, K, True, fast, mobius)
if result is not None:
results.append(result)
elif not mobius:
for f, M in roots:
u, v = _mobius_to_interval(M, F)
results.append((-v, -u))
else:
results = roots
return results
def _isolate_zero(f, K, inf, sup, basis=False, sqf=False):
"""Handle special case of CF algorithm when ``f`` is homogeneous. """
j, f = dup_terms_gcd(f, K)
if j > 0:
F = K.get_field()
if (inf is None or inf <= 0) and (sup is None or 0 <= sup):
if not sqf:
if not basis:
return [((F.zero, F.zero), j)], f
else:
return [((F.zero, F.zero), j, [K.one, K.zero])], f
else:
return [(F.zero, F.zero)], f
return [], f
def dup_isolate_real_roots_sqf(f, K, eps=None, inf=None, sup=None, fast=False, blackbox=False):
"""Isolate real roots of a square-free polynomial using the Vincent-Akritas-Strzebonski (VAS) CF approach.
References
==========
.. [1] Alkiviadis G. Akritas and Adam W. Strzebonski: A Comparative
Study of Two Real Root Isolation Methods. Nonlinear Analysis:
Modelling and Control, Vol. 10, No. 4, 297-304, 2005.
.. [2] Alkiviadis G. Akritas, Adam W. Strzebonski and Panagiotis S.
Vigklas: Improving the Performance of the Continued Fractions
Method Using New Bounds of Positive Roots. Nonlinear Analysis:
Modelling and Control, Vol. 13, No. 3, 265-279, 2008.
"""
if K.is_QQ:
(_, f), K = dup_clear_denoms(f, K, convert=True), K.get_ring()
elif not K.is_ZZ:
raise DomainError("isolation of real roots not supported over %s" % K)
if dup_degree(f) <= 0:
return []
I_zero, f = _isolate_zero(f, K, inf, sup, basis=False, sqf=True)
I_neg = dup_inner_isolate_negative_roots(f, K, eps=eps, inf=inf, sup=sup, fast=fast)
I_pos = dup_inner_isolate_positive_roots(f, K, eps=eps, inf=inf, sup=sup, fast=fast)
roots = sorted(I_neg + I_zero + I_pos)
if not blackbox:
return roots
else:
return [ RealInterval((a, b), f, K) for (a, b) in roots ]
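# Illustrative sketch (not part of the original module): for x**2 - 2 the two
# real roots are separated into disjoint rational intervals. The ZZ domain below
# is an assumption of this sketch:
#
#     from sympy.polys.domains import ZZ
#     dup_isolate_real_roots_sqf([ZZ(1), ZZ(0), ZZ(-2)], ZZ)
#     # -> disjoint intervals such as [(-2, -1), (1, 2)], one per real root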
def dup_isolate_real_roots(f, K, eps=None, inf=None, sup=None, basis=False, fast=False):
"""Isolate real roots using Vincent-Akritas-Strzebonski (VAS) continued fractions approach.
References
==========
.. [1] Alkiviadis G. Akritas and Adam W. Strzebonski: A Comparative
Study of Two Real Root Isolation Methods. Nonlinear Analysis:
Modelling and Control, Vol. 10, No. 4, 297-304, 2005.
.. [2] Alkiviadis G. Akritas, Adam W. Strzebonski and Panagiotis S.
Vigklas: Improving the Performance of the Continued Fractions
Method Using New Bounds of Positive Roots.
Nonlinear Analysis: Modelling and Control, Vol. 13, No. 3, 265-279, 2008.
"""
if K.is_QQ:
(_, f), K = dup_clear_denoms(f, K, convert=True), K.get_ring()
elif not K.is_ZZ:
raise DomainError("isolation of real roots not supported over %s" % K)
if dup_degree(f) <= 0:
return []
I_zero, f = _isolate_zero(f, K, inf, sup, basis=basis, sqf=False)
_, factors = dup_sqf_list(f, K)
if len(factors) == 1:
((f, k),) = factors
I_neg = dup_inner_isolate_negative_roots(f, K, eps=eps, inf=inf, sup=sup, fast=fast)
I_pos = dup_inner_isolate_positive_roots(f, K, eps=eps, inf=inf, sup=sup, fast=fast)
I_neg = [ ((u, v), k) for u, v in I_neg ]
I_pos = [ ((u, v), k) for u, v in I_pos ]
else:
I_neg, I_pos = _real_isolate_and_disjoin(factors, K,
eps=eps, inf=inf, sup=sup, basis=basis, fast=fast)
return sorted(I_neg + I_zero + I_pos)
def dup_isolate_real_roots_list(polys, K, eps=None, inf=None, sup=None, strict=False, basis=False, fast=False):
"""Isolate real roots of a list of square-free polynomial using Vincent-Akritas-Strzebonski (VAS) CF approach.
References
==========
.. [1] Alkiviadis G. Akritas and Adam W. Strzebonski: A Comparative
Study of Two Real Root Isolation Methods. Nonlinear Analysis:
Modelling and Control, Vol. 10, No. 4, 297-304, 2005.
.. [2] Alkiviadis G. Akritas, Adam W. Strzebonski and Panagiotis S.
Vigklas: Improving the Performance of the Continued Fractions
Method Using New Bounds of Positive Roots.
Nonlinear Analysis: Modelling and Control, Vol. 13, No. 3, 265-279, 2008.
"""
if K.is_QQ:
K, F, polys = K.get_ring(), K, polys[:]
for i, p in enumerate(polys):
polys[i] = dup_clear_denoms(p, F, K, convert=True)[1]
elif not K.is_ZZ:
raise DomainError("isolation of real roots not supported over %s" % K)
zeros, factors_dict = False, {}
if (inf is None or inf <= 0) and (sup is None or 0 <= sup):
zeros, zero_indices = True, {}
for i, p in enumerate(polys):
j, p = dup_terms_gcd(p, K)
if zeros and j > 0:
zero_indices[i] = j
for f, k in dup_factor_list(p, K)[1]:
f = tuple(f)
if f not in factors_dict:
factors_dict[f] = {i: k}
else:
factors_dict[f][i] = k
factors_list = []
for f, indices in factors_dict.items():
factors_list.append((list(f), indices))
I_neg, I_pos = _real_isolate_and_disjoin(factors_list, K, eps=eps,
inf=inf, sup=sup, strict=strict, basis=basis, fast=fast)
F = K.get_field()
if not zeros or not zero_indices:
I_zero = []
else:
if not basis:
I_zero = [((F.zero, F.zero), zero_indices)]
else:
I_zero = [((F.zero, F.zero), zero_indices, [K.one, K.zero])]
return sorted(I_neg + I_zero + I_pos)
def _disjoint_p(M, N, strict=False):
"""Check if Mobius transforms define disjoint intervals. """
a1, b1, c1, d1 = M
a2, b2, c2, d2 = N
a1d1, b1c1 = a1*d1, b1*c1
a2d2, b2c2 = a2*d2, b2*c2
if a1d1 == b1c1 and a2d2 == b2c2:
return True
if a1d1 > b1c1:
a1, c1, b1, d1 = b1, d1, a1, c1
if a2d2 > b2c2:
a2, c2, b2, d2 = b2, d2, a2, c2
if not strict:
return a2*d1 >= c2*b1 or b2*c1 <= d2*a1
else:
return a2*d1 > c2*b1 or b2*c1 < d2*a1
def _real_isolate_and_disjoin(factors, K, eps=None, inf=None, sup=None, strict=False, basis=False, fast=False):
"""Isolate real roots of a list of polynomials and disjoin intervals. """
I_pos, I_neg = [], []
for i, (f, k) in enumerate(factors):
for F, M in dup_inner_isolate_positive_roots(f, K, eps=eps, inf=inf, sup=sup, fast=fast, mobius=True):
I_pos.append((F, M, k, f))
for G, N in dup_inner_isolate_negative_roots(f, K, eps=eps, inf=inf, sup=sup, fast=fast, mobius=True):
I_neg.append((G, N, k, f))
for i, (f, M, k, F) in enumerate(I_pos):
for j, (g, N, m, G) in enumerate(I_pos[i + 1:]):
while not _disjoint_p(M, N, strict=strict):
f, M = dup_inner_refine_real_root(f, M, K, steps=1, fast=fast, mobius=True)
g, N = dup_inner_refine_real_root(g, N, K, steps=1, fast=fast, mobius=True)
I_pos[i + j + 1] = (g, N, m, G)
I_pos[i] = (f, M, k, F)
for i, (f, M, k, F) in enumerate(I_neg):
for j, (g, N, m, G) in enumerate(I_neg[i + 1:]):
while not _disjoint_p(M, N, strict=strict):
f, M = dup_inner_refine_real_root(f, M, K, steps=1, fast=fast, mobius=True)
g, N = dup_inner_refine_real_root(g, N, K, steps=1, fast=fast, mobius=True)
I_neg[i + j + 1] = (g, N, m, G)
I_neg[i] = (f, M, k, F)
if strict:
for i, (f, M, k, F) in enumerate(I_neg):
if not M[0]:
while not M[0]:
f, M = dup_inner_refine_real_root(f, M, K, steps=1, fast=fast, mobius=True)
I_neg[i] = (f, M, k, F)
break
for j, (g, N, m, G) in enumerate(I_pos):
if not N[0]:
while not N[0]:
g, N = dup_inner_refine_real_root(g, N, K, steps=1, fast=fast, mobius=True)
I_pos[j] = (g, N, m, G)
break
field = K.get_field()
I_neg = [ (_mobius_to_interval(M, field), k, f) for (_, M, k, f) in I_neg ]
I_pos = [ (_mobius_to_interval(M, field), k, f) for (_, M, k, f) in I_pos ]
if not basis:
I_neg = [ ((-v, -u), k) for ((u, v), k, _) in I_neg ]
I_pos = [ (( u, v), k) for ((u, v), k, _) in I_pos ]
else:
I_neg = [ ((-v, -u), k, f) for ((u, v), k, f) in I_neg ]
I_pos = [ (( u, v), k, f) for ((u, v), k, f) in I_pos ]
return I_neg, I_pos
def dup_count_real_roots(f, K, inf=None, sup=None):
"""Returns the number of distinct real roots of ``f`` in ``[inf, sup]``. """
if dup_degree(f) <= 0:
return 0
if not K.is_Field:
R, K = K, K.get_field()
f = dup_convert(f, R, K)
sturm = dup_sturm(f, K)
if inf is None:
signs_inf = dup_sign_variations([ dup_LC(s, K)*(-1)**dup_degree(s) for s in sturm ], K)
else:
signs_inf = dup_sign_variations([ dup_eval(s, inf, K) for s in sturm ], K)
if sup is None:
signs_sup = dup_sign_variations([ dup_LC(s, K) for s in sturm ], K)
else:
signs_sup = dup_sign_variations([ dup_eval(s, sup, K) for s in sturm ], K)
count = abs(signs_inf - signs_sup)
if inf is not None and not dup_eval(f, inf, K):
count += 1
return count
OO = 'OO' # Origin of (re, im) coordinate system
Q1 = 'Q1' # Quadrant #1 (++): re > 0 and im > 0
Q2 = 'Q2' # Quadrant #2 (-+): re < 0 and im > 0
Q3 = 'Q3' # Quadrant #3 (--): re < 0 and im < 0
Q4 = 'Q4' # Quadrant #4 (+-): re > 0 and im < 0
A1 = 'A1' # Axis #1 (+0): re > 0 and im = 0
A2 = 'A2' # Axis #2 (0+): re = 0 and im > 0
A3 = 'A3' # Axis #3 (-0): re < 0 and im = 0
A4 = 'A4' # Axis #4 (0-): re = 0 and im < 0
_rules_simple = {
# Q --> Q (same) => no change
(Q1, Q1): 0,
(Q2, Q2): 0,
(Q3, Q3): 0,
(Q4, Q4): 0,
# A -- CCW --> Q => +1/4 (CCW)
(A1, Q1): 1,
(A2, Q2): 1,
(A3, Q3): 1,
(A4, Q4): 1,
# A -- CW --> Q => -1/4 (CCW)
(A1, Q4): 2,
(A2, Q1): 2,
(A3, Q2): 2,
(A4, Q3): 2,
# Q -- CCW --> A => +1/4 (CCW)
(Q1, A2): 3,
(Q2, A3): 3,
(Q3, A4): 3,
(Q4, A1): 3,
# Q -- CW --> A => -1/4 (CCW)
(Q1, A1): 4,
(Q2, A2): 4,
(Q3, A3): 4,
(Q4, A4): 4,
# Q -- CCW --> Q => +1/2 (CCW)
(Q1, Q2): +5,
(Q2, Q3): +5,
(Q3, Q4): +5,
(Q4, Q1): +5,
# Q -- CW --> Q => -1/2 (CW)
(Q1, Q4): -5,
(Q2, Q1): -5,
(Q3, Q2): -5,
(Q4, Q3): -5,
}
_rules_ambiguous = {
# A -- CCW --> Q => { +1/4 (CCW), -9/4 (CW) }
(A1, OO, Q1): -1,
(A2, OO, Q2): -1,
(A3, OO, Q3): -1,
(A4, OO, Q4): -1,
# A -- CW --> Q => { -1/4 (CCW), +7/4 (CW) }
(A1, OO, Q4): -2,
(A2, OO, Q1): -2,
(A3, OO, Q2): -2,
(A4, OO, Q3): -2,
# Q -- CCW --> A => { +1/4 (CCW), -9/4 (CW) }
(Q1, OO, A2): -3,
(Q2, OO, A3): -3,
(Q3, OO, A4): -3,
(Q4, OO, A1): -3,
# Q -- CW --> A => { -1/4 (CCW), +7/4 (CW) }
(Q1, OO, A1): -4,
(Q2, OO, A2): -4,
(Q3, OO, A3): -4,
(Q4, OO, A4): -4,
# A -- OO --> A => { +1 (CCW), -1 (CW) }
(A1, A3): 7,
(A2, A4): 7,
(A3, A1): 7,
(A4, A2): 7,
(A1, OO, A3): 7,
(A2, OO, A4): 7,
(A3, OO, A1): 7,
(A4, OO, A2): 7,
# Q -- DIA --> Q => { +1 (CCW), -1 (CW) }
(Q1, Q3): 8,
(Q2, Q4): 8,
(Q3, Q1): 8,
(Q4, Q2): 8,
(Q1, OO, Q3): 8,
(Q2, OO, Q4): 8,
(Q3, OO, Q1): 8,
(Q4, OO, Q2): 8,
# A --- R ---> A => { +1/2 (CCW), -3/2 (CW) }
(A1, A2): 9,
(A2, A3): 9,
(A3, A4): 9,
(A4, A1): 9,
(A1, OO, A2): 9,
(A2, OO, A3): 9,
(A3, OO, A4): 9,
(A4, OO, A1): 9,
# A --- L ---> A => { +3/2 (CCW), -1/2 (CW) }
(A1, A4): 10,
(A2, A1): 10,
(A3, A2): 10,
(A4, A3): 10,
(A1, OO, A4): 10,
(A2, OO, A1): 10,
(A3, OO, A2): 10,
(A4, OO, A3): 10,
# Q --- 1 ---> A => { +3/4 (CCW), -5/4 (CW) }
(Q1, A3): 11,
(Q2, A4): 11,
(Q3, A1): 11,
(Q4, A2): 11,
(Q1, OO, A3): 11,
(Q2, OO, A4): 11,
(Q3, OO, A1): 11,
(Q4, OO, A2): 11,
# Q --- 2 ---> A => { +5/4 (CCW), -3/4 (CW) }
(Q1, A4): 12,
(Q2, A1): 12,
(Q3, A2): 12,
(Q4, A3): 12,
(Q1, OO, A4): 12,
(Q2, OO, A1): 12,
(Q3, OO, A2): 12,
(Q4, OO, A3): 12,
# A --- 1 ---> Q => { +5/4 (CCW), -3/4 (CW) }
(A1, Q3): 13,
(A2, Q4): 13,
(A3, Q1): 13,
(A4, Q2): 13,
(A1, OO, Q3): 13,
(A2, OO, Q4): 13,
(A3, OO, Q1): 13,
(A4, OO, Q2): 13,
# A --- 2 ---> Q => { +3/4 (CCW), -5/4 (CW) }
(A1, Q2): 14,
(A2, Q3): 14,
(A3, Q4): 14,
(A4, Q1): 14,
(A1, OO, Q2): 14,
(A2, OO, Q3): 14,
(A3, OO, Q4): 14,
(A4, OO, Q1): 14,
# Q --> OO --> Q => { +1/2 (CCW), -3/2 (CW) }
(Q1, OO, Q2): 15,
(Q2, OO, Q3): 15,
(Q3, OO, Q4): 15,
(Q4, OO, Q1): 15,
# Q --> OO --> Q => { +3/2 (CCW), -1/2 (CW) }
(Q1, OO, Q4): 16,
(Q2, OO, Q1): 16,
(Q3, OO, Q2): 16,
(Q4, OO, Q3): 16,
# A --> OO --> A => { +2 (CCW), 0 (CW) }
(A1, OO, A1): 17,
(A2, OO, A2): 17,
(A3, OO, A3): 17,
(A4, OO, A4): 17,
# Q --> OO --> Q => { +2 (CCW), 0 (CW) }
(Q1, OO, Q1): 18,
(Q2, OO, Q2): 18,
(Q3, OO, Q3): 18,
(Q4, OO, Q4): 18,
}
_values = {
0: [( 0, 1)],
1: [(+1, 4)],
2: [(-1, 4)],
3: [(+1, 4)],
4: [(-1, 4)],
-1: [(+9, 4), (+1, 4)],
-2: [(+7, 4), (-1, 4)],
-3: [(+9, 4), (+1, 4)],
-4: [(+7, 4), (-1, 4)],
+5: [(+1, 2)],
-5: [(-1, 2)],
7: [(+1, 1), (-1, 1)],
8: [(+1, 1), (-1, 1)],
9: [(+1, 2), (-3, 2)],
10: [(+3, 2), (-1, 2)],
11: [(+3, 4), (-5, 4)],
12: [(+5, 4), (-3, 4)],
13: [(+5, 4), (-3, 4)],
14: [(+3, 4), (-5, 4)],
15: [(+1, 2), (-3, 2)],
16: [(+3, 2), (-1, 2)],
17: [(+2, 1), ( 0, 1)],
18: [(+2, 1), ( 0, 1)],
}
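# NOTE (interpretation): the tuples above are fractions (numerator,
# denominator) of pi, i.e. candidate changes of arg(f(z)) along the
# corresponding boundary transition, so one full counterclockwise turn
# around a root contributes 2. Ambiguous rules list two candidates;
# ``_traverse_quadrants`` picks between them depending on whether the
# edge or corner is excluded, and ``_winding_number`` sums the chosen
# values over the whole rectangle boundary and divides by 2 to obtain
# the number of enclosed roots.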
def _classify_point(re, im):
"""Return the half-axis (or origin) on which (re, im) point is located. """
if not re and not im:
return OO
if not re:
if im > 0:
return A2
else:
return A4
elif not im:
if re > 0:
return A1
else:
return A3
def _intervals_to_quadrants(intervals, f1, f2, s, t, F):
"""Generate a sequence of extended quadrants from a list of critical points. """
if not intervals:
return []
Q = []
if not f1:
(a, b), _, _ = intervals[0]
if a == b == s:
if len(intervals) == 1:
if dup_eval(f2, t, F) > 0:
return [OO, A2]
else:
return [OO, A4]
else:
(a, _), _, _ = intervals[1]
if dup_eval(f2, (s + a)/2, F) > 0:
Q.extend([OO, A2])
f2_sgn = +1
else:
Q.extend([OO, A4])
f2_sgn = -1
intervals = intervals[1:]
else:
if dup_eval(f2, s, F) > 0:
Q.append(A2)
f2_sgn = +1
else:
Q.append(A4)
f2_sgn = -1
for (a, _), indices, _ in intervals:
Q.append(OO)
if indices[1] % 2 == 1:
f2_sgn = -f2_sgn
if a != t:
if f2_sgn > 0:
Q.append(A2)
else:
Q.append(A4)
return Q
if not f2:
(a, b), _, _ = intervals[0]
if a == b == s:
if len(intervals) == 1:
if dup_eval(f1, t, F) > 0:
return [OO, A1]
else:
return [OO, A3]
else:
(a, _), _, _ = intervals[1]
if dup_eval(f1, (s + a)/2, F) > 0:
Q.extend([OO, A1])
f1_sgn = +1
else:
Q.extend([OO, A3])
f1_sgn = -1
intervals = intervals[1:]
else:
if dup_eval(f1, s, F) > 0:
Q.append(A1)
f1_sgn = +1
else:
Q.append(A3)
f1_sgn = -1
for (a, _), indices, _ in intervals:
Q.append(OO)
if indices[0] % 2 == 1:
f1_sgn = -f1_sgn
if a != t:
if f1_sgn > 0:
Q.append(A1)
else:
Q.append(A3)
return Q
re = dup_eval(f1, s, F)
im = dup_eval(f2, s, F)
if not re or not im:
Q.append(_classify_point(re, im))
if len(intervals) == 1:
re = dup_eval(f1, t, F)
im = dup_eval(f2, t, F)
else:
(a, _), _, _ = intervals[1]
re = dup_eval(f1, (s + a)/2, F)
im = dup_eval(f2, (s + a)/2, F)
intervals = intervals[1:]
if re > 0:
f1_sgn = +1
else:
f1_sgn = -1
if im > 0:
f2_sgn = +1
else:
f2_sgn = -1
sgn = {
(+1, +1): Q1,
(-1, +1): Q2,
(-1, -1): Q3,
(+1, -1): Q4,
}
Q.append(sgn[(f1_sgn, f2_sgn)])
for (a, b), indices, _ in intervals:
if a == b:
re = dup_eval(f1, a, F)
im = dup_eval(f2, a, F)
cls = _classify_point(re, im)
if cls is not None:
Q.append(cls)
if 0 in indices:
if indices[0] % 2 == 1:
f1_sgn = -f1_sgn
if 1 in indices:
if indices[1] % 2 == 1:
f2_sgn = -f2_sgn
if not (a == b and b == t):
Q.append(sgn[(f1_sgn, f2_sgn)])
return Q
def _traverse_quadrants(Q_L1, Q_L2, Q_L3, Q_L4, exclude=None):
"""Transform sequences of quadrants to a sequence of rules. """
if exclude is True:
edges = [1, 1, 0, 0]
corners = {
(0, 1): 1,
(1, 2): 1,
(2, 3): 0,
(3, 0): 1,
}
else:
edges = [0, 0, 0, 0]
corners = {
(0, 1): 0,
(1, 2): 0,
(2, 3): 0,
(3, 0): 0,
}
if exclude is not None and exclude is not True:
exclude = set(exclude)
for i, edge in enumerate(['S', 'E', 'N', 'W']):
if edge in exclude:
edges[i] = 1
for i, corner in enumerate(['SW', 'SE', 'NE', 'NW']):
if corner in exclude:
corners[((i - 1) % 4, i)] = 1
QQ, rules = [Q_L1, Q_L2, Q_L3, Q_L4], []
for i, Q in enumerate(QQ):
if not Q:
continue
if Q[-1] == OO:
Q = Q[:-1]
if Q[0] == OO:
j, Q = (i - 1) % 4, Q[1:]
qq = (QQ[j][-2], OO, Q[0])
if qq in _rules_ambiguous:
rules.append((_rules_ambiguous[qq], corners[(j, i)]))
else:
raise NotImplementedError("3 element rule (corner): " + str(qq))
q1, k = Q[0], 1
while k < len(Q):
q2, k = Q[k], k + 1
if q2 != OO:
qq = (q1, q2)
if qq in _rules_simple:
rules.append((_rules_simple[qq], 0))
elif qq in _rules_ambiguous:
rules.append((_rules_ambiguous[qq], edges[i]))
else:
raise NotImplementedError("2 element rule (inside): " + str(qq))
else:
qq, k = (q1, q2, Q[k]), k + 1
if qq in _rules_ambiguous:
rules.append((_rules_ambiguous[qq], edges[i]))
else:
raise NotImplementedError("3 element rule (edge): " + str(qq))
q1 = qq[-1]
return rules
def _reverse_intervals(intervals):
"""Reverse intervals for traversal from right to left and from top to bottom. """
return [ ((b, a), indices, f) for (a, b), indices, f in reversed(intervals) ]
def _winding_number(T, field):
"""Compute the winding number of the input polynomial, i.e. the number of roots. """
return int(sum([ field(*_values[t][i]) for t, i in T ]) / field(2))
def dup_count_complex_roots(f, K, inf=None, sup=None, exclude=None):
"""Count all roots in [u + v*I, s + t*I] rectangle using Collins-Krandick algorithm. """
if not K.is_ZZ and not K.is_QQ:
raise DomainError("complex root counting is not supported over %s" % K)
if K.is_ZZ:
R, F = K, K.get_field()
else:
R, F = K.get_ring(), K
f = dup_convert(f, K, F)
if inf is None or sup is None:
_, lc = dup_degree(f), abs(dup_LC(f, F))
B = 2*max([ F.quo(abs(c), lc) for c in f ])
if inf is None:
(u, v) = (-B, -B)
else:
(u, v) = inf
if sup is None:
(s, t) = (+B, +B)
else:
(s, t) = sup
f1, f2 = dup_real_imag(f, F)
f1L1F = dmp_eval_in(f1, v, 1, 1, F)
f2L1F = dmp_eval_in(f2, v, 1, 1, F)
_, f1L1R = dup_clear_denoms(f1L1F, F, R, convert=True)
_, f2L1R = dup_clear_denoms(f2L1F, F, R, convert=True)
f1L2F = dmp_eval_in(f1, s, 0, 1, F)
f2L2F = dmp_eval_in(f2, s, 0, 1, F)
_, f1L2R = dup_clear_denoms(f1L2F, F, R, convert=True)
_, f2L2R = dup_clear_denoms(f2L2F, F, R, convert=True)
f1L3F = dmp_eval_in(f1, t, 1, 1, F)
f2L3F = dmp_eval_in(f2, t, 1, 1, F)
_, f1L3R = dup_clear_denoms(f1L3F, F, R, convert=True)
_, f2L3R = dup_clear_denoms(f2L3F, F, R, convert=True)
f1L4F = dmp_eval_in(f1, u, 0, 1, F)
f2L4F = dmp_eval_in(f2, u, 0, 1, F)
_, f1L4R = dup_clear_denoms(f1L4F, F, R, convert=True)
_, f2L4R = dup_clear_denoms(f2L4F, F, R, convert=True)
S_L1 = [f1L1R, f2L1R]
S_L2 = [f1L2R, f2L2R]
S_L3 = [f1L3R, f2L3R]
S_L4 = [f1L4R, f2L4R]
I_L1 = dup_isolate_real_roots_list(S_L1, R, inf=u, sup=s, fast=True, basis=True, strict=True)
I_L2 = dup_isolate_real_roots_list(S_L2, R, inf=v, sup=t, fast=True, basis=True, strict=True)
I_L3 = dup_isolate_real_roots_list(S_L3, R, inf=u, sup=s, fast=True, basis=True, strict=True)
I_L4 = dup_isolate_real_roots_list(S_L4, R, inf=v, sup=t, fast=True, basis=True, strict=True)
I_L3 = _reverse_intervals(I_L3)
I_L4 = _reverse_intervals(I_L4)
Q_L1 = _intervals_to_quadrants(I_L1, f1L1F, f2L1F, u, s, F)
Q_L2 = _intervals_to_quadrants(I_L2, f1L2F, f2L2F, v, t, F)
Q_L3 = _intervals_to_quadrants(I_L3, f1L3F, f2L3F, s, u, F)
Q_L4 = _intervals_to_quadrants(I_L4, f1L4F, f2L4F, t, v, F)
T = _traverse_quadrants(Q_L1, Q_L2, Q_L3, Q_L4, exclude=exclude)
return _winding_number(T, F)
def _vertical_bisection(N, a, b, I, Q, F1, F2, f1, f2, F):
"""Vertical bisection step in Collins-Krandick root isolation algorithm. """
(u, v), (s, t) = a, b
I_L1, I_L2, I_L3, I_L4 = I
Q_L1, Q_L2, Q_L3, Q_L4 = Q
f1L1F, f1L2F, f1L3F, f1L4F = F1
f2L1F, f2L2F, f2L3F, f2L4F = F2
x = (u + s) / 2
f1V = dmp_eval_in(f1, x, 0, 1, F)
f2V = dmp_eval_in(f2, x, 0, 1, F)
I_V = dup_isolate_real_roots_list([f1V, f2V], F, inf=v, sup=t, fast=True, strict=True, basis=True)
I_L1_L, I_L1_R = [], []
I_L2_L, I_L2_R = I_V, I_L2
I_L3_L, I_L3_R = [], []
I_L4_L, I_L4_R = I_L4, _reverse_intervals(I_V)
for I in I_L1:
(a, b), indices, h = I
if a == b:
if a == x:
I_L1_L.append(I)
I_L1_R.append(I)
elif a < x:
I_L1_L.append(I)
else:
I_L1_R.append(I)
else:
if b <= x:
I_L1_L.append(I)
elif a >= x:
I_L1_R.append(I)
else:
a, b = dup_refine_real_root(h, a, b, F.get_ring(), disjoint=x, fast=True)
if b <= x:
I_L1_L.append(((a, b), indices, h))
if a >= x:
I_L1_R.append(((a, b), indices, h))
for I in I_L3:
(b, a), indices, h = I
if a == b:
if a == x:
I_L3_L.append(I)
I_L3_R.append(I)
elif a < x:
I_L3_L.append(I)
else:
I_L3_R.append(I)
else:
if b <= x:
I_L3_L.append(I)
elif a >= x:
I_L3_R.append(I)
else:
a, b = dup_refine_real_root(h, a, b, F.get_ring(), disjoint=x, fast=True)
if b <= x:
I_L3_L.append(((b, a), indices, h))
if a >= x:
I_L3_R.append(((b, a), indices, h))
Q_L1_L = _intervals_to_quadrants(I_L1_L, f1L1F, f2L1F, u, x, F)
Q_L2_L = _intervals_to_quadrants(I_L2_L, f1V, f2V, v, t, F)
Q_L3_L = _intervals_to_quadrants(I_L3_L, f1L3F, f2L3F, x, u, F)
Q_L4_L = Q_L4
Q_L1_R = _intervals_to_quadrants(I_L1_R, f1L1F, f2L1F, x, s, F)
Q_L2_R = Q_L2
Q_L3_R = _intervals_to_quadrants(I_L3_R, f1L3F, f2L3F, s, x, F)
Q_L4_R = _intervals_to_quadrants(I_L4_R, f1V, f2V, t, v, F)
T_L = _traverse_quadrants(Q_L1_L, Q_L2_L, Q_L3_L, Q_L4_L, exclude=True)
T_R = _traverse_quadrants(Q_L1_R, Q_L2_R, Q_L3_R, Q_L4_R, exclude=True)
N_L = _winding_number(T_L, F)
N_R = _winding_number(T_R, F)
I_L = (I_L1_L, I_L2_L, I_L3_L, I_L4_L)
Q_L = (Q_L1_L, Q_L2_L, Q_L3_L, Q_L4_L)
I_R = (I_L1_R, I_L2_R, I_L3_R, I_L4_R)
Q_R = (Q_L1_R, Q_L2_R, Q_L3_R, Q_L4_R)
F1_L = (f1L1F, f1V, f1L3F, f1L4F)
F2_L = (f2L1F, f2V, f2L3F, f2L4F)
F1_R = (f1L1F, f1L2F, f1L3F, f1V)
F2_R = (f2L1F, f2L2F, f2L3F, f2V)
a, b = (u, v), (x, t)
c, d = (x, v), (s, t)
D_L = (N_L, a, b, I_L, Q_L, F1_L, F2_L)
D_R = (N_R, c, d, I_R, Q_R, F1_R, F2_R)
return D_L, D_R
def _horizontal_bisection(N, a, b, I, Q, F1, F2, f1, f2, F):
"""Horizontal bisection step in Collins-Krandick root isolation algorithm. """
(u, v), (s, t) = a, b
I_L1, I_L2, I_L3, I_L4 = I
Q_L1, Q_L2, Q_L3, Q_L4 = Q
f1L1F, f1L2F, f1L3F, f1L4F = F1
f2L1F, f2L2F, f2L3F, f2L4F = F2
y = (v + t) / 2
f1H = dmp_eval_in(f1, y, 1, 1, F)
f2H = dmp_eval_in(f2, y, 1, 1, F)
I_H = dup_isolate_real_roots_list([f1H, f2H], F, inf=u, sup=s, fast=True, strict=True, basis=True)
I_L1_B, I_L1_U = I_L1, I_H
I_L2_B, I_L2_U = [], []
I_L3_B, I_L3_U = _reverse_intervals(I_H), I_L3
I_L4_B, I_L4_U = [], []
for I in I_L2:
(a, b), indices, h = I
if a == b:
if a == y:
I_L2_B.append(I)
I_L2_U.append(I)
elif a < y:
I_L2_B.append(I)
else:
I_L2_U.append(I)
else:
if b <= y:
I_L2_B.append(I)
elif a >= y:
I_L2_U.append(I)
else:
a, b = dup_refine_real_root(h, a, b, F.get_ring(), disjoint=y, fast=True)
if b <= y:
I_L2_B.append(((a, b), indices, h))
if a >= y:
I_L2_U.append(((a, b), indices, h))
for I in I_L4:
(b, a), indices, h = I
if a == b:
if a == y:
I_L4_B.append(I)
I_L4_U.append(I)
elif a < y:
I_L4_B.append(I)
else:
I_L4_U.append(I)
else:
if b <= y:
I_L4_B.append(I)
elif a >= y:
I_L4_U.append(I)
else:
a, b = dup_refine_real_root(h, a, b, F.get_ring(), disjoint=y, fast=True)
if b <= y:
I_L4_B.append(((b, a), indices, h))
if a >= y:
I_L4_U.append(((b, a), indices, h))
Q_L1_B = Q_L1
Q_L2_B = _intervals_to_quadrants(I_L2_B, f1L2F, f2L2F, v, y, F)
Q_L3_B = _intervals_to_quadrants(I_L3_B, f1H, f2H, s, u, F)
Q_L4_B = _intervals_to_quadrants(I_L4_B, f1L4F, f2L4F, y, v, F)
Q_L1_U = _intervals_to_quadrants(I_L1_U, f1H, f2H, u, s, F)
Q_L2_U = _intervals_to_quadrants(I_L2_U, f1L2F, f2L2F, y, t, F)
Q_L3_U = Q_L3
Q_L4_U = _intervals_to_quadrants(I_L4_U, f1L4F, f2L4F, t, y, F)
T_B = _traverse_quadrants(Q_L1_B, Q_L2_B, Q_L3_B, Q_L4_B, exclude=True)
T_U = _traverse_quadrants(Q_L1_U, Q_L2_U, Q_L3_U, Q_L4_U, exclude=True)
N_B = _winding_number(T_B, F)
N_U = _winding_number(T_U, F)
I_B = (I_L1_B, I_L2_B, I_L3_B, I_L4_B)
Q_B = (Q_L1_B, Q_L2_B, Q_L3_B, Q_L4_B)
I_U = (I_L1_U, I_L2_U, I_L3_U, I_L4_U)
Q_U = (Q_L1_U, Q_L2_U, Q_L3_U, Q_L4_U)
F1_B = (f1L1F, f1L2F, f1H, f1L4F)
F2_B = (f2L1F, f2L2F, f2H, f2L4F)
F1_U = (f1H, f1L2F, f1L3F, f1L4F)
F2_U = (f2H, f2L2F, f2L3F, f2L4F)
a, b = (u, v), (s, y)
c, d = (u, y), (s, t)
D_B = (N_B, a, b, I_B, Q_B, F1_B, F2_B)
D_U = (N_U, c, d, I_U, Q_U, F1_U, F2_U)
return D_B, D_U
def _depth_first_select(rectangles):
"""Find a rectangle of minimum area for bisection. """
min_area, j = None, None
for i, (_, (u, v), (s, t), _, _, _, _) in enumerate(rectangles):
area = (s - u)*(t - v)
if min_area is None or area < min_area:
min_area, j = area, i
return rectangles.pop(j)
def _rectangle_small_p(a, b, eps):
"""Return ``True`` if the given rectangle is small enough. """
(u, v), (s, t) = a, b
if eps is not None:
return s - u < eps and t - v < eps
else:
return True
def dup_isolate_complex_roots_sqf(f, K, eps=None, inf=None, sup=None, blackbox=False):
"""Isolate complex roots of a square-free polynomial using Collins-Krandick algorithm. """
if not K.is_ZZ and not K.is_QQ:
raise DomainError("isolation of complex roots is not supported over %s" % K)
if dup_degree(f) <= 0:
return []
if K.is_ZZ:
F = K.get_field()
else:
F = K
f = dup_convert(f, K, F)
lc = abs(dup_LC(f, F))
B = 2*max([ F.quo(abs(c), lc) for c in f ])
(u, v), (s, t) = (-B, F.zero), (B, B)
if inf is not None:
u = inf
if sup is not None:
s = sup
if v < 0 or t <= v or s <= u:
raise ValueError("not a valid complex isolation rectangle")
f1, f2 = dup_real_imag(f, F)
f1L1 = dmp_eval_in(f1, v, 1, 1, F)
f2L1 = dmp_eval_in(f2, v, 1, 1, F)
f1L2 = dmp_eval_in(f1, s, 0, 1, F)
f2L2 = dmp_eval_in(f2, s, 0, 1, F)
f1L3 = dmp_eval_in(f1, t, 1, 1, F)
f2L3 = dmp_eval_in(f2, t, 1, 1, F)
f1L4 = dmp_eval_in(f1, u, 0, 1, F)
f2L4 = dmp_eval_in(f2, u, 0, 1, F)
S_L1 = [f1L1, f2L1]
S_L2 = [f1L2, f2L2]
S_L3 = [f1L3, f2L3]
S_L4 = [f1L4, f2L4]
I_L1 = dup_isolate_real_roots_list(S_L1, F, inf=u, sup=s, fast=True, strict=True, basis=True)
I_L2 = dup_isolate_real_roots_list(S_L2, F, inf=v, sup=t, fast=True, strict=True, basis=True)
I_L3 = dup_isolate_real_roots_list(S_L3, F, inf=u, sup=s, fast=True, strict=True, basis=True)
I_L4 = dup_isolate_real_roots_list(S_L4, F, inf=v, sup=t, fast=True, strict=True, basis=True)
I_L3 = _reverse_intervals(I_L3)
I_L4 = _reverse_intervals(I_L4)
Q_L1 = _intervals_to_quadrants(I_L1, f1L1, f2L1, u, s, F)
Q_L2 = _intervals_to_quadrants(I_L2, f1L2, f2L2, v, t, F)
Q_L3 = _intervals_to_quadrants(I_L3, f1L3, f2L3, s, u, F)
Q_L4 = _intervals_to_quadrants(I_L4, f1L4, f2L4, t, v, F)
T = _traverse_quadrants(Q_L1, Q_L2, Q_L3, Q_L4)
N = _winding_number(T, F)
if not N:
return []
I = (I_L1, I_L2, I_L3, I_L4)
Q = (Q_L1, Q_L2, Q_L3, Q_L4)
F1 = (f1L1, f1L2, f1L3, f1L4)
F2 = (f2L1, f2L2, f2L3, f2L4)
rectangles, roots = [(N, (u, v), (s, t), I, Q, F1, F2)], []
while rectangles:
N, (u, v), (s, t), I, Q, F1, F2 = _depth_first_select(rectangles)
if s - u > t - v:
D_L, D_R = _vertical_bisection(N, (u, v), (s, t), I, Q, F1, F2, f1, f2, F)
N_L, a, b, I_L, Q_L, F1_L, F2_L = D_L
N_R, c, d, I_R, Q_R, F1_R, F2_R = D_R
if N_L >= 1:
if N_L == 1 and _rectangle_small_p(a, b, eps):
roots.append(ComplexInterval(a, b, I_L, Q_L, F1_L, F2_L, f1, f2, F))
else:
rectangles.append(D_L)
if N_R >= 1:
if N_R == 1 and _rectangle_small_p(c, d, eps):
roots.append(ComplexInterval(c, d, I_R, Q_R, F1_R, F2_R, f1, f2, F))
else:
rectangles.append(D_R)
else:
D_B, D_U = _horizontal_bisection(N, (u, v), (s, t), I, Q, F1, F2, f1, f2, F)
N_B, a, b, I_B, Q_B, F1_B, F2_B = D_B
N_U, c, d, I_U, Q_U, F1_U, F2_U = D_U
if N_B >= 1:
if N_B == 1 and _rectangle_small_p(a, b, eps):
roots.append(ComplexInterval(
a, b, I_B, Q_B, F1_B, F2_B, f1, f2, F))
else:
rectangles.append(D_B)
if N_U >= 1:
if N_U == 1 and _rectangle_small_p(c, d, eps):
roots.append(ComplexInterval(
c, d, I_U, Q_U, F1_U, F2_U, f1, f2, F))
else:
rectangles.append(D_U)
_roots, roots = sorted(roots, key=lambda r: (r.ax, r.ay)), []
for root in _roots:
roots.extend([root.conjugate(), root])
if blackbox:
return roots
else:
return [ r.as_tuple() for r in roots ]
def dup_isolate_all_roots_sqf(f, K, eps=None, inf=None, sup=None, fast=False, blackbox=False):
"""Isolate real and complex roots of a square-free polynomial ``f``. """
return (
dup_isolate_real_roots_sqf( f, K, eps=eps, inf=inf, sup=sup, fast=fast, blackbox=blackbox),
dup_isolate_complex_roots_sqf(f, K, eps=eps, inf=inf, sup=sup, blackbox=blackbox))
def dup_isolate_all_roots(f, K, eps=None, inf=None, sup=None, fast=False):
"""Isolate real and complex roots of a non-square-free polynomial ``f``. """
if not K.is_ZZ and not K.is_QQ:
raise DomainError("isolation of real and complex roots is not supported over %s" % K)
_, factors = dup_sqf_list(f, K)
if len(factors) == 1:
((f, k),) = factors
real_part, complex_part = dup_isolate_all_roots_sqf(
f, K, eps=eps, inf=inf, sup=sup, fast=fast)
real_part = [ ((a, b), k) for (a, b) in real_part ]
complex_part = [ ((a, b), k) for (a, b) in complex_part ]
return real_part, complex_part
else:
raise NotImplementedError("only trivial square-free polynomials are supported")
class RealInterval:
"""A fully qualified representation of a real isolation interval. """
def __init__(self, data, f, dom):
"""Initialize new real interval with complete information. """
if len(data) == 2:
s, t = data
self.neg = False
if s < 0:
if t <= 0:
f, s, t, self.neg = dup_mirror(f, dom), -t, -s, True
else:
raise ValueError("Cannot refine a real root in (%s, %s)" % (s, t))
a, b, c, d = _mobius_from_interval((s, t), dom.get_field())
f = dup_transform(f, dup_strip([a, b]),
dup_strip([c, d]), dom)
self.mobius = a, b, c, d
else:
self.mobius = data[:-1]
self.neg = data[-1]
self.f, self.dom = f, dom
@property
def func(self):
return RealInterval
@property
def args(self):
i = self
return (i.mobius + (i.neg,), i.f, i.dom)
def __eq__(self, other):
if type(other) != type(self):
return False
return self.args == other.args
@property
def a(self):
"""Return the position of the left end. """
field = self.dom.get_field()
a, b, c, d = self.mobius
if not self.neg:
if a*d < b*c:
return field(a, c)
return field(b, d)
else:
if a*d > b*c:
return -field(a, c)
return -field(b, d)
@property
def b(self):
"""Return the position of the right end. """
was = self.neg
self.neg = not was
rv = -self.a
self.neg = was
return rv
@property
def dx(self):
"""Return width of the real isolating interval. """
return self.b - self.a
@property
def center(self):
"""Return the center of the real isolating interval. """
return (self.a + self.b)/2
def as_tuple(self):
"""Return tuple representation of real isolating interval. """
return (self.a, self.b)
def __repr__(self):
return "(%s, %s)" % (self.a, self.b)
def is_disjoint(self, other):
"""Return ``True`` if two isolation intervals are disjoint. """
if isinstance(other, RealInterval):
return (self.b < other.a or other.b < self.a)
assert isinstance(other, ComplexInterval)
return (self.b < other.ax or other.bx < self.a
or other.ay*other.by > 0)
def _inner_refine(self):
"""Internal one step real root refinement procedure. """
if self.mobius is None:
return self
f, mobius = dup_inner_refine_real_root(
self.f, self.mobius, self.dom, steps=1, mobius=True)
return RealInterval(mobius + (self.neg,), f, self.dom)
def refine_disjoint(self, other):
"""Refine an isolating interval until it is disjoint with another one. """
expr = self
while not expr.is_disjoint(other):
expr, other = expr._inner_refine(), other._inner_refine()
return expr, other
def refine_size(self, dx):
"""Refine an isolating interval until it is of sufficiently small size. """
expr = self
while not (expr.dx < dx):
expr = expr._inner_refine()
return expr
def refine_step(self, steps=1):
"""Perform several steps of real root refinement algorithm. """
expr = self
for _ in range(steps):
expr = expr._inner_refine()
return expr
def refine(self):
"""Perform one step of real root refinement algorithm. """
return self._inner_refine()
class ComplexInterval:
"""A fully qualified representation of a complex isolation interval.
The printed form is shown as (ax, bx) x (ay, by) where (ax, ay)
and (bx, by) are the coordinates of the southwest and northeast
corners of the interval's rectangle, respectively.
Examples
========
>>> from sympy import CRootOf, S
>>> from sympy.abc import x
>>> CRootOf.clear_cache() # for doctest reproducibility
>>> root = CRootOf(x**10 - 2*x + 3, 9)
>>> i = root._get_interval(); i
(3/64, 3/32) x (9/8, 75/64)
The real part of the root lies within the range [0, 3/4] while
the imaginary part lies within the range [9/8, 3/2]:
>>> root.n(3)
0.0766 + 1.14*I
The width of the ranges in the x and y directions on the complex
plane are:
>>> i.dx, i.dy
(3/64, 3/64)
The center of the range is
>>> i.center
(9/128, 147/128)
The northeast coordinate of the rectangle bounding the root in the
complex plane is given by attribute b and the x and y components
are accessed by bx and by:
>>> i.b, i.bx, i.by
((3/32, 75/64), 3/32, 75/64)
The southwest coordinate is similarly given by i.a
>>> i.a, i.ax, i.ay
((3/64, 9/8), 3/64, 9/8)
Although the interval prints to show only the real and imaginary
range of the root, all the information of the underlying root
is contained as properties of the interval.
For example, an interval with a nonpositive imaginary range is
considered to be the conjugate. Since the y values of ``i`` are in the
positive range [9/8, 75/64] shown above, it is not the conjugate:
>>> i.conj
False
The conjugate's interval is
>>> ic = i.conjugate(); ic
(3/64, 3/32) x (-75/64, -9/8)
NOTE: the values printed still represent the x and y range
in which the root -- conjugate, in this case -- is located,
but the underlying a and b values of a root and its conjugate
are the same:
>>> assert i.a == ic.a and i.b == ic.b
What changes are the reported coordinates of the bounding rectangle:
>>> (i.ax, i.ay), (i.bx, i.by)
((3/64, 9/8), (3/32, 75/64))
>>> (ic.ax, ic.ay), (ic.bx, ic.by)
((3/64, -75/64), (3/32, -9/8))
The interval can be refined once:
>>> i # for reference, this is the current interval
(3/64, 3/32) x (9/8, 75/64)
>>> i.refine()
(3/64, 3/32) x (9/8, 147/128)
Several refinement steps can be taken:
>>> i.refine_step(2) # 2 steps
(9/128, 3/32) x (9/8, 147/128)
It is also possible to refine to a given tolerance:
>>> tol = min(i.dx, i.dy)/2
>>> i.refine_size(tol)
(9/128, 21/256) x (9/8, 291/256)
A disjoint interval is one whose bounding rectangle does not
overlap with another. An interval, necessarily, is not disjoint with
itself, but any interval is disjoint with a conjugate since the
conjugate rectangle will always be in the lower half of the complex
plane and the non-conjugate in the upper half:
>>> i.is_disjoint(i), i.is_disjoint(i.conjugate())
(False, True)
The following interval j is not disjoint from i:
>>> close = CRootOf(x**10 - 2*x + 300/S(101), 9)
>>> j = close._get_interval(); j
(75/1616, 75/808) x (225/202, 1875/1616)
>>> i.is_disjoint(j)
False
The two can be made disjoint, however:
>>> newi, newj = i.refine_disjoint(j)
>>> newi
(39/512, 159/2048) x (2325/2048, 4653/4096)
>>> newj
(3975/51712, 2025/25856) x (29325/25856, 117375/103424)
Even though the real ranges overlap, the imaginary do not, so
the roots have been resolved as distinct. Intervals are disjoint
when either the real or imaginary component of the intervals is
distinct. In the case above, the real components have not been
resolved (so we do not know, yet, which root has the smaller real
part) but the imaginary part of ``close`` is larger than ``root``:
>>> close.n(3)
0.0771 + 1.13*I
>>> root.n(3)
0.0766 + 1.14*I
"""
def __init__(self, a, b, I, Q, F1, F2, f1, f2, dom, conj=False):
"""Initialize new complex interval with complete information. """
# a and b are the SW and NE corner of the bounding interval,
# (ax, ay) and (bx, by), respectively, for the NON-CONJUGATE
# root (the one with the positive imaginary part); when working
# with the conjugate, the a and b value are still non-negative
# but the ay, by are reversed and have opposite sign
self.a, self.b = a, b
self.I, self.Q = I, Q
self.f1, self.F1 = f1, F1
self.f2, self.F2 = f2, F2
self.dom = dom
self.conj = conj
@property
def func(self):
return ComplexInterval
@property
def args(self):
i = self
return (i.a, i.b, i.I, i.Q, i.F1, i.F2, i.f1, i.f2, i.dom, i.conj)
def __eq__(self, other):
if type(other) != type(self):
return False
return self.args == other.args
@property
def ax(self):
"""Return ``x`` coordinate of south-western corner. """
return self.a[0]
@property
def ay(self):
"""Return ``y`` coordinate of south-western corner. """
if not self.conj:
return self.a[1]
else:
return -self.b[1]
@property
def bx(self):
"""Return ``x`` coordinate of north-eastern corner. """
return self.b[0]
@property
def by(self):
"""Return ``y`` coordinate of north-eastern corner. """
if not self.conj:
return self.b[1]
else:
return -self.a[1]
@property
def dx(self):
"""Return width of the complex isolating interval. """
return self.b[0] - self.a[0]
@property
def dy(self):
"""Return height of the complex isolating interval. """
return self.b[1] - self.a[1]
@property
def center(self):
"""Return the center of the complex isolating interval. """
return ((self.ax + self.bx)/2, (self.ay + self.by)/2)
def as_tuple(self):
"""Return tuple representation of the complex isolating
interval's SW and NE corners, respectively. """
return ((self.ax, self.ay), (self.bx, self.by))
def __repr__(self):
return "(%s, %s) x (%s, %s)" % (self.ax, self.bx, self.ay, self.by)
def conjugate(self):
"""This complex interval really is located in lower half-plane. """
return ComplexInterval(self.a, self.b, self.I, self.Q,
self.F1, self.F2, self.f1, self.f2, self.dom, conj=True)
def is_disjoint(self, other):
"""Return ``True`` if two isolation intervals are disjoint. """
if isinstance(other, RealInterval):
return other.is_disjoint(self)
if self.conj != other.conj: # above and below real axis
return True
re_distinct = (self.bx < other.ax or other.bx < self.ax)
if re_distinct:
return True
im_distinct = (self.by < other.ay or other.by < self.ay)
return im_distinct
def _inner_refine(self):
"""Internal one step complex root refinement procedure. """
(u, v), (s, t) = self.a, self.b
I, Q = self.I, self.Q
f1, F1 = self.f1, self.F1
f2, F2 = self.f2, self.F2
dom = self.dom
if s - u > t - v:
D_L, D_R = _vertical_bisection(1, (u, v), (s, t), I, Q, F1, F2, f1, f2, dom)
if D_L[0] == 1:
_, a, b, I, Q, F1, F2 = D_L
else:
_, a, b, I, Q, F1, F2 = D_R
else:
D_B, D_U = _horizontal_bisection(1, (u, v), (s, t), I, Q, F1, F2, f1, f2, dom)
if D_B[0] == 1:
_, a, b, I, Q, F1, F2 = D_B
else:
_, a, b, I, Q, F1, F2 = D_U
return ComplexInterval(a, b, I, Q, F1, F2, f1, f2, dom, self.conj)
def refine_disjoint(self, other):
"""Refine an isolating interval until it is disjoint with another one. """
expr = self
while not expr.is_disjoint(other):
expr, other = expr._inner_refine(), other._inner_refine()
return expr, other
def refine_size(self, dx, dy=None):
"""Refine an isolating interval until it is of sufficiently small size. """
if dy is None:
dy = dx
expr = self
while not (expr.dx < dx and expr.dy < dy):
expr = expr._inner_refine()
return expr
def refine_step(self, steps=1):
"""Perform several steps of complex root refinement algorithm. """
expr = self
for _ in range(steps):
expr = expr._inner_refine()
return expr
def refine(self):
"""Perform one step of complex root refinement algorithm. """
return self._inner_refine()
|
49b5d300f255a4edb8d54abe0d6d1b42e3eb014a2789c6e301389c6a65c67676 | """Efficient functions for generating orthogonal polynomials. """
from sympy.core.symbol import Dummy
from sympy.polys.constructor import construct_domain
from sympy.polys.densearith import (
dup_mul, dup_mul_ground, dup_lshift, dup_sub, dup_add
)
from sympy.polys.domains import ZZ, QQ
from sympy.polys.polyclasses import DMP
from sympy.polys.polytools import Poly, PurePoly
from sympy.utilities import public
def dup_jacobi(n, a, b, K):
"""Low-level implementation of Jacobi polynomials. """
seq = [[K.one], [(a + b + K(2))/K(2), (a - b)/K(2)]]
for i in range(2, n + 1):
den = K(i)*(a + b + i)*(a + b + K(2)*i - K(2))
f0 = (a + b + K(2)*i - K.one) * (a*a - b*b) / (K(2)*den)
f1 = (a + b + K(2)*i - K.one) * (a + b + K(2)*i - K(2)) * (a + b + K(2)*i) / (K(2)*den)
f2 = (a + i - K.one)*(b + i - K.one)*(a + b + K(2)*i) / den
p0 = dup_mul_ground(seq[-1], f0, K)
p1 = dup_mul_ground(dup_lshift(seq[-1], 1, K), f1, K)
p2 = dup_mul_ground(seq[-2], f2, K)
seq.append(dup_sub(dup_add(p0, p1, K), p2, K))
return seq[n]
@public
def jacobi_poly(n, a, b, x=None, polys=False):
"""Generates Jacobi polynomial of degree `n` in `x`.
Parameters
==========
n : int
`n` decides the degree of the polynomial
a
First parameter of the Jacobi polynomial; it also determines the
minimal domain for the list of coefficients.
b
Second parameter of the Jacobi polynomial; it also determines the
minimal domain for the list of coefficients.
x : optional
polys : bool, optional
``polys=True`` returns a Poly object, otherwise
(default) returns an expression.
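Examples
========
With ``a = b = 0`` the Jacobi polynomial reduces to the Legendre
polynomial, for instance:
>>> from sympy.polys.orthopolys import jacobi_poly
>>> from sympy.abc import x
>>> jacobi_poly(2, 0, 0, x)
3*x**2/2 - 1/2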
"""
if n < 0:
raise ValueError("Cannot generate Jacobi polynomial of degree %s" % n)
K, v = construct_domain([a, b], field=True)
poly = DMP(dup_jacobi(int(n), v[0], v[1], K), K)
if x is not None:
poly = Poly.new(poly, x)
else:
poly = PurePoly.new(poly, Dummy('x'))
return poly if polys else poly.as_expr()
def dup_gegenbauer(n, a, K):
"""Low-level implementation of Gegenbauer polynomials. """
seq = [[K.one], [K(2)*a, K.zero]]
for i in range(2, n + 1):
f1 = K(2) * (i + a - K.one) / i
f2 = (i + K(2)*a - K(2)) / i
p1 = dup_mul_ground(dup_lshift(seq[-1], 1, K), f1, K)
p2 = dup_mul_ground(seq[-2], f2, K)
seq.append(dup_sub(p1, p2, K))
return seq[n]
def gegenbauer_poly(n, a, x=None, polys=False):
"""Generates Gegenbauer polynomial of degree `n` in `x`.
Parameters
==========
n : int
`n` decides the degree of the polynomial
x : optional
a
Parameter of the Gegenbauer polynomial; it also decides the minimal
domain for the list of coefficients.
polys : bool, optional
``polys=True`` returns a Poly object, otherwise
(default) returns an expression.
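Examples
========
With ``a = 1`` the Gegenbauer polynomial coincides with the Chebyshev
polynomial of the second kind, for instance:
>>> from sympy.polys.orthopolys import gegenbauer_poly
>>> from sympy.abc import x
>>> gegenbauer_poly(2, 1, x)
4*x**2 - 1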
"""
if n < 0:
raise ValueError(
"Cannot generate Gegenbauer polynomial of degree %s" % n)
K, a = construct_domain(a, field=True)
poly = DMP(dup_gegenbauer(int(n), a, K), K)
if x is not None:
poly = Poly.new(poly, x)
else:
poly = PurePoly.new(poly, Dummy('x'))
return poly if polys else poly.as_expr()
def dup_chebyshevt(n, K):
"""Low-level implementation of Chebyshev polynomials of the 1st kind. """
seq = [[K.one], [K.one, K.zero]]
for i in range(2, n + 1):
a = dup_mul_ground(dup_lshift(seq[-1], 1, K), K(2), K)
seq.append(dup_sub(a, seq[-2], K))
return seq[n]
@public
def chebyshevt_poly(n, x=None, polys=False):
"""Generates Chebyshev polynomial of the first kind of degree `n` in `x`.
Parameters
==========
n : int
`n` decides the degree of the polynomial
x : optional
polys : bool, optional
``polys=True`` returns a Poly object, otherwise
(default) returns an expression.
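Examples
========
For instance, for ``n = 4``:
>>> from sympy.polys.orthopolys import chebyshevt_poly
>>> from sympy.abc import x
>>> chebyshevt_poly(4, x)
8*x**4 - 8*x**2 + 1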
"""
if n < 0:
raise ValueError(
"Cannot generate 1st kind Chebyshev polynomial of degree %s" % n)
poly = DMP(dup_chebyshevt(int(n), ZZ), ZZ)
if x is not None:
poly = Poly.new(poly, x)
else:
poly = PurePoly.new(poly, Dummy('x'))
return poly if polys else poly.as_expr()
def dup_chebyshevu(n, K):
"""Low-level implementation of Chebyshev polynomials of the 2nd kind. """
seq = [[K.one], [K(2), K.zero]]
for i in range(2, n + 1):
a = dup_mul_ground(dup_lshift(seq[-1], 1, K), K(2), K)
seq.append(dup_sub(a, seq[-2], K))
return seq[n]
@public
def chebyshevu_poly(n, x=None, polys=False):
"""Generates Chebyshev polynomial of the second kind of degree `n` in `x`.
Parameters
==========
n : int
`n` decides the degree of the polynomial
x : optional
polys : bool, optional
``polys=True`` returns a Poly object, otherwise
(default) returns an expression.
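Examples
========
For instance, for ``n = 3``:
>>> from sympy.polys.orthopolys import chebyshevu_poly
>>> from sympy.abc import x
>>> chebyshevu_poly(3, x)
8*x**3 - 4*x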
"""
if n < 0:
raise ValueError(
"Cannot generate 2nd kind Chebyshev polynomial of degree %s" % n)
poly = DMP(dup_chebyshevu(int(n), ZZ), ZZ)
if x is not None:
poly = Poly.new(poly, x)
else:
poly = PurePoly.new(poly, Dummy('x'))
return poly if polys else poly.as_expr()
def dup_hermite(n, K):
"""Low-level implementation of Hermite polynomials. """
seq = [[K.one], [K(2), K.zero]]
for i in range(2, n + 1):
a = dup_lshift(seq[-1], 1, K)
b = dup_mul_ground(seq[-2], K(i - 1), K)
c = dup_mul_ground(dup_sub(a, b, K), K(2), K)
seq.append(c)
return seq[n]
@public
def hermite_poly(n, x=None, polys=False):
"""Generates Hermite polynomial of degree `n` in `x`.
Parameters
==========
n : int
`n` decides the degree of the polynomial
x : optional
polys : bool, optional
``polys=True`` returns a Poly object, otherwise
(default) returns an expression.
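Examples
========
For instance, for ``n = 3``:
>>> from sympy.polys.orthopolys import hermite_poly
>>> from sympy.abc import x
>>> hermite_poly(3, x)
8*x**3 - 12*x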
"""
if n < 0:
raise ValueError("Cannot generate Hermite polynomial of degree %s" % n)
poly = DMP(dup_hermite(int(n), ZZ), ZZ)
if x is not None:
poly = Poly.new(poly, x)
else:
poly = PurePoly.new(poly, Dummy('x'))
return poly if polys else poly.as_expr()
def dup_legendre(n, K):
"""Low-level implementation of Legendre polynomials. """
seq = [[K.one], [K.one, K.zero]]
for i in range(2, n + 1):
a = dup_mul_ground(dup_lshift(seq[-1], 1, K), K(2*i - 1, i), K)
b = dup_mul_ground(seq[-2], K(i - 1, i), K)
seq.append(dup_sub(a, b, K))
return seq[n]
@public
def legendre_poly(n, x=None, polys=False):
"""Generates Legendre polynomial of degree `n` in `x`.
Parameters
==========
n : int
`n` decides the degree of the polynomial
x : optional
polys : bool, optional
``polys=True`` returns a Poly object, otherwise
(default) returns an expression.
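Examples
========
For instance, for ``n = 3``:
>>> from sympy.polys.orthopolys import legendre_poly
>>> from sympy.abc import x
>>> legendre_poly(3, x)
5*x**3/2 - 3*x/2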
"""
if n < 0:
raise ValueError("Cannot generate Legendre polynomial of degree %s" % n)
poly = DMP(dup_legendre(int(n), QQ), QQ)
if x is not None:
poly = Poly.new(poly, x)
else:
poly = PurePoly.new(poly, Dummy('x'))
return poly if polys else poly.as_expr()
def dup_laguerre(n, alpha, K):
"""Low-level implementation of Laguerre polynomials. """
seq = [[K.zero], [K.one]]
for i in range(1, n + 1):
a = dup_mul(seq[-1], [-K.one/i, alpha/i + K(2*i - 1)/i], K)
b = dup_mul_ground(seq[-2], alpha/i + K(i - 1)/i, K)
seq.append(dup_sub(a, b, K))
return seq[-1]
@public
def laguerre_poly(n, x=None, alpha=None, polys=False):
"""Generates Laguerre polynomial of degree `n` in `x`.
Parameters
==========
n : int
`n` decides the degree of the polynomial
x : optional
alpha
Parameter of the generalized Laguerre polynomial (defaults to ``0``);
it also decides the minimal domain for the list of coefficients.
polys : bool, optional
``polys=True`` returns a Poly object, otherwise
(default) returns an expression.
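Examples
========
For instance, for ``n = 3`` and the default ``alpha = 0``:
>>> from sympy.polys.orthopolys import laguerre_poly
>>> from sympy.abc import x
>>> laguerre_poly(3, x)
-x**3/6 + 3*x**2/2 - 3*x + 1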
"""
if n < 0:
raise ValueError("Cannot generate Laguerre polynomial of degree %s" % n)
if alpha is not None:
K, alpha = construct_domain(
alpha, field=True) # XXX: ground_field=True
else:
K, alpha = QQ, QQ(0)
poly = DMP(dup_laguerre(int(n), alpha, K), K)
if x is not None:
poly = Poly.new(poly, x)
else:
poly = PurePoly.new(poly, Dummy('x'))
return poly if polys else poly.as_expr()
def dup_spherical_bessel_fn(n, K):
""" Low-level implementation of fn(n, x) """
seq = [[K.one], [K.one, K.zero]]
for i in range(2, n + 1):
a = dup_mul_ground(dup_lshift(seq[-1], 1, K), K(2*i - 1), K)
seq.append(dup_sub(a, seq[-2], K))
return dup_lshift(seq[n], 1, K)
def dup_spherical_bessel_fn_minus(n, K):
""" Low-level implementation of fn(-n, x) """
seq = [[K.one, K.zero], [K.zero]]
for i in range(2, n + 1):
a = dup_mul_ground(dup_lshift(seq[-1], 1, K), K(3 - 2*i), K)
seq.append(dup_sub(a, seq[-2], K))
return seq[n]
def spherical_bessel_fn(n, x=None, polys=False):
"""
Coefficients for the spherical Bessel functions.
Those are only needed in the jn() function.
The coefficients are calculated from:
fn(0, z) = 1/z
fn(1, z) = 1/z**2
fn(n-1, z) + fn(n+1, z) == (2*n+1)/z * fn(n, z)
Parameters
==========
n : int
`n` decides the degree of the polynomial
x : optional
polys : bool, optional
``polys=True`` returns a Poly object, otherwise
(default) returns an expression.
Examples
========
>>> from sympy.polys.orthopolys import spherical_bessel_fn as fn
>>> from sympy import Symbol
>>> z = Symbol("z")
>>> fn(1, z)
z**(-2)
>>> fn(2, z)
-1/z + 3/z**3
>>> fn(3, z)
-6/z**2 + 15/z**4
>>> fn(4, z)
1/z - 45/z**3 + 105/z**5
"""
if n < 0:
dup = dup_spherical_bessel_fn_minus(-int(n), ZZ)
else:
dup = dup_spherical_bessel_fn(int(n), ZZ)
poly = DMP(dup, ZZ)
if x is not None:
poly = Poly.new(poly, 1/x)
else:
poly = PurePoly.new(poly, 1/Dummy('x'))
return poly if polys else poly.as_expr()
|
67850e1d5e3a27ad639f9e154211a35809597ed6ff43f80609725e87c5d00590 | """
This module contains functions for two multivariate resultants. These
are:
- Dixon's resultant.
- Macaulay's resultant.
Multivariate resultants are used to identify whether a multivariate
system has common roots. That is when the resultant is equal to zero.
"""
from sympy.core.mul import (Mul, prod)
from sympy.matrices.dense import (Matrix, diag)
from sympy.polys.polytools import (Poly, degree_list, rem)
from sympy.simplify.simplify import simplify
from sympy.tensor.indexed import IndexedBase
from sympy.polys.monomials import itermonomials, monomial_deg
from sympy.polys.orderings import monomial_key
from sympy.polys.polytools import poly_from_expr, total_degree
from sympy.functions.combinatorial.factorials import binomial
from itertools import combinations_with_replacement
from sympy.utilities.exceptions import SymPyDeprecationWarning
class DixonResultant():
"""
A class for retrieving the Dixon's resultant of a multivariate
system.
Examples
========
>>> from sympy import symbols
>>> from sympy.polys.multivariate_resultants import DixonResultant
>>> x, y = symbols('x, y')
>>> p = x + y
>>> q = x ** 2 + y ** 3
>>> h = x ** 2 + y
>>> dixon = DixonResultant(variables=[x, y], polynomials=[p, q, h])
>>> poly = dixon.get_dixon_polynomial()
>>> matrix = dixon.get_dixon_matrix(polynomial=poly)
>>> matrix
Matrix([
[ 0, 0, -1, 0, -1],
[ 0, -1, 0, -1, 0],
[-1, 0, 1, 0, 0],
[ 0, -1, 0, 0, 1],
[-1, 0, 0, 1, 0]])
>>> matrix.det()
0
See Also
========
Notebook in examples: sympy/example/notebooks.
References
==========
.. [1] [Kapur1994]_
.. [2] [Palancz08]_
"""
def __init__(self, polynomials, variables):
"""
A class that takes two lists, a list of polynomials and a list of
variables, and computes the Dixon matrix of the multivariate system.
Parameters
==========
polynomials : list of polynomials
A list of m polynomials in n variables
variables: list
A list of all n variables
"""
self.polynomials = polynomials
self.variables = variables
self.n = len(self.variables)
self.m = len(self.polynomials)
a = IndexedBase("alpha")
# A list of n alpha variables (the replacing variables)
self.dummy_variables = [a[i] for i in range(self.n)]
# A list of the d_max of each variable.
self._max_degrees = [max(degree_list(poly)[i] for poly in self.polynomials)
for i in range(self.n)]
@property
def max_degrees(self):
SymPyDeprecationWarning(feature="max_degrees",
issue=17763,
deprecated_since_version="1.5").warn()
return self._max_degrees
def get_dixon_polynomial(self):
r"""
Returns
=======
dixon_polynomial: polynomial
Dixon's polynomial is calculated as:
delta = Delta(A) / ((x_1 - a_1) ... (x_n - a_n)) where,
A = |p_1(x_1,... x_n), ..., p_n(x_1,... x_n)|
|p_1(a_1,... x_n), ..., p_n(a_1,... x_n)|
|... , ..., ...|
|p_1(a_1,... a_n), ..., p_n(a_1,... a_n)|
"""
if self.m != (self.n + 1):
raise ValueError('Dixon resultant requires the number of polynomials to be one more than the number of variables.')
# First row
rows = [self.polynomials]
temp = list(self.variables)
for idx in range(self.n):
temp[idx] = self.dummy_variables[idx]
substitution = {var: t for var, t in zip(self.variables, temp)}
rows.append([f.subs(substitution) for f in self.polynomials])
A = Matrix(rows)
terms = zip(self.variables, self.dummy_variables)
product_of_differences = Mul(*[a - b for a, b in terms])
dixon_polynomial = (A.det() / product_of_differences).factor()
return poly_from_expr(dixon_polynomial, self.dummy_variables)[0]
def get_upper_degree(self):
SymPyDeprecationWarning(feature="get_upper_degree",
useinstead="get_max_degrees",
issue=17763,
deprecated_since_version="1.5").warn()
list_of_products = [self.variables[i] ** self._max_degrees[i]
for i in range(self.n)]
product = prod(list_of_products)
product = Poly(product).monoms()
return monomial_deg(*product)
def get_max_degrees(self, polynomial):
r"""
Returns a list of the maximum degree of each variable appearing
in the coefficients of the Dixon polynomial. The coefficients are
viewed as polys in $x_1, x_2, \dots, x_n$.
"""
deg_lists = [degree_list(Poly(poly, self.variables))
for poly in polynomial.coeffs()]
max_degrees = [max(degs) for degs in zip(*deg_lists)]
return max_degrees
def get_dixon_matrix(self, polynomial):
r"""
Construct the Dixon matrix from the coefficients of polynomial
\alpha. Each coefficient is viewed as a polynomial of x_1, ...,
x_n.
"""
max_degrees = self.get_max_degrees(polynomial)
# list of column headers of the Dixon matrix.
monomials = itermonomials(self.variables, max_degrees)
monomials = sorted(monomials, reverse=True,
key=monomial_key('lex', self.variables))
dixon_matrix = Matrix([[Poly(c, *self.variables).coeff_monomial(m)
for m in monomials]
for c in polynomial.coeffs()])
# remove columns if needed
if dixon_matrix.shape[0] != dixon_matrix.shape[1]:
keep = [column for column in range(dixon_matrix.shape[-1])
if any(element != 0 for element
in dixon_matrix[:, column])]
dixon_matrix = dixon_matrix[:, keep]
return dixon_matrix
def KSY_precondition(self, matrix):
"""
Test for the validity of the Kapur-Saxena-Yang precondition.
The precondition requires that the column corresponding to the
monomial 1 = x_1 ^ 0 * x_2 ^ 0 * ... * x_n ^ 0 is not a linear
combination of the remaining ones. In SymPy notation this is
the last column. For the precondition to hold the last non-zero
row of the rref matrix should be of the form [0, 0, ..., 1].
"""
if matrix.is_zero_matrix:
return False
m, n = matrix.shape
# simplify the matrix and keep only its non-zero rows
matrix = simplify(matrix.rref()[0])
rows = [i for i in range(m) if any(matrix[i, j] != 0 for j in range(n))]
matrix = matrix[rows,:]
condition = Matrix([[0]*(n-1) + [1]])
if matrix[-1,:] == condition:
return True
else:
return False
def delete_zero_rows_and_columns(self, matrix):
"""Remove the zero rows and columns of the matrix."""
rows = [
i for i in range(matrix.rows) if not matrix.row(i).is_zero_matrix]
cols = [
j for j in range(matrix.cols) if not matrix.col(j).is_zero_matrix]
return matrix[rows, cols]
def product_leading_entries(self, matrix):
"""Calculate the product of the leading entries of the matrix."""
res = 1
for row in range(matrix.rows):
for el in matrix.row(row):
if el != 0:
res = res * el
break
return res
def get_KSY_Dixon_resultant(self, matrix):
"""Calculate the Kapur-Saxena-Yang approach to the Dixon Resultant."""
matrix = self.delete_zero_rows_and_columns(matrix)
_, U, _ = matrix.LUdecomposition()
matrix = self.delete_zero_rows_and_columns(simplify(U))
return self.product_leading_entries(matrix)
class MacaulayResultant():
"""
A class for calculating the Macaulay resultant. Note that the
polynomials must be homogenized and their coefficients must be
given as symbols.
Examples
========
>>> from sympy import symbols
>>> from sympy.polys.multivariate_resultants import MacaulayResultant
>>> x, y, z = symbols('x, y, z')
>>> a_0, a_1, a_2 = symbols('a_0, a_1, a_2')
>>> b_0, b_1, b_2 = symbols('b_0, b_1, b_2')
>>> c_0, c_1, c_2,c_3, c_4 = symbols('c_0, c_1, c_2, c_3, c_4')
>>> f = a_0 * y - a_1 * x + a_2 * z
>>> g = b_1 * x ** 2 + b_0 * y ** 2 - b_2 * z ** 2
>>> h = c_0 * y * z ** 2 - c_1 * x ** 3 + c_2 * x ** 2 * z - c_3 * x * z ** 2 + c_4 * z ** 3
>>> mac = MacaulayResultant(polynomials=[f, g, h], variables=[x, y, z])
>>> mac.monomial_set
[x**4, x**3*y, x**3*z, x**2*y**2, x**2*y*z, x**2*z**2, x*y**3,
x*y**2*z, x*y*z**2, x*z**3, y**4, y**3*z, y**2*z**2, y*z**3, z**4]
>>> matrix = mac.get_matrix()
>>> submatrix = mac.get_submatrix(matrix)
>>> submatrix
Matrix([
[-a_1, a_0, a_2, 0],
[ 0, -a_1, 0, 0],
[ 0, 0, -a_1, 0],
[ 0, 0, 0, -a_1]])
See Also
========
Notebook in examples: sympy/example/notebooks.
References
==========
.. [1] [Bruce97]_
.. [2] [Stiller96]_
"""
def __init__(self, polynomials, variables):
"""
Parameters
==========
variables: list
A list of all n variables
polynomials : list of SymPy polynomials
A list of m polynomials in the n variables
"""
self.polynomials = polynomials
self.variables = variables
self.n = len(variables)
# A list of the d_max of each polynomial.
self.degrees = [total_degree(poly, *self.variables) for poly
in self.polynomials]
self.degree_m = self._get_degree_m()
self.monomials_size = self.get_size()
# The set T of all possible monomials of degree degree_m
self.monomial_set = self.get_monomials_of_certain_degree(self.degree_m)
def _get_degree_m(self):
r"""
Returns
=======
degree_m: int
The degree_m is calculated as 1 + \sum_1 ^ n (d_i - 1),
where d_i is the degree of the i polynomial
"""
return 1 + sum(d - 1 for d in self.degrees)
def get_size(self):
r"""
Returns
=======
size: int
The size of set T. Set T is the set of all possible
monomials of the n variables for degree equal to the
degree_m
"""
return binomial(self.degree_m + self.n - 1, self.n - 1)
def get_monomials_of_certain_degree(self, degree):
"""
Returns
=======
monomials: list
A list of monomials of a certain degree.
"""
monomials = [Mul(*monomial) for monomial
in combinations_with_replacement(self.variables,
degree)]
return sorted(monomials, reverse=True,
key=monomial_key('lex', self.variables))
def get_row_coefficients(self):
"""
Returns
=======
row_coefficients: list
The row coefficients of Macaulay's matrix
"""
row_coefficients = []
divisible = []
for i in range(self.n):
if i == 0:
degree = self.degree_m - self.degrees[i]
monomial = self.get_monomials_of_certain_degree(degree)
row_coefficients.append(monomial)
else:
divisible.append(self.variables[i - 1] **
self.degrees[i - 1])
degree = self.degree_m - self.degrees[i]
poss_rows = self.get_monomials_of_certain_degree(degree)
for div in divisible:
for p in poss_rows:
if rem(p, div) == 0:
poss_rows = [item for item in poss_rows
if item != p]
row_coefficients.append(poss_rows)
return row_coefficients
def get_matrix(self):
"""
Returns
=======
macaulay_matrix: Matrix
The Macaulay numerator matrix
"""
rows = []
row_coefficients = self.get_row_coefficients()
for i in range(self.n):
for multiplier in row_coefficients[i]:
coefficients = []
poly = Poly(self.polynomials[i] * multiplier,
*self.variables)
for mono in self.monomial_set:
coefficients.append(poly.coeff_monomial(mono))
rows.append(coefficients)
macaulay_matrix = Matrix(rows)
return macaulay_matrix
def get_reduced_nonreduced(self):
r"""
Returns
=======
reduced: list
A list of the reduced monomials
non_reduced: list
A list of the monomials that are not reduced
Definition
==========
A polynomial is said to be reduced in x_i, if its degree (the
maximum degree of its monomials) in x_i is less than d_i. A
polynomial that is reduced in all variables but one is said
simply to be reduced.
"""
divisible = []
for m in self.monomial_set:
temp = []
for i, v in enumerate(self.variables):
temp.append(bool(total_degree(m, v) >= self.degrees[i]))
divisible.append(temp)
reduced = [i for i, r in enumerate(divisible)
if sum(r) < self.n - 1]
non_reduced = [i for i, r in enumerate(divisible)
if sum(r) >= self.n - 1]
return reduced, non_reduced
def get_submatrix(self, matrix):
r"""
Returns
=======
macaulay_submatrix: Matrix
The Macaulay denominator matrix. Columns that are non-reduced are kept.
Rows which contain one of the a_{i}s are dropped. a_{i}s
are the coefficients of x_i ^ {d_i}.
"""
reduced, non_reduced = self.get_reduced_nonreduced()
# if reduced == [], then det(matrix) should be 1
if reduced == []:
return diag([1])
# reduced != []
reduction_set = [v ** self.degrees[i] for i, v
in enumerate(self.variables)]
ais = [self.polynomials[i].coeff(reduction_set[i])
for i in range(self.n)]
reduced_matrix = matrix[:, reduced]
keep = []
for row in range(reduced_matrix.rows):
check = [ai in reduced_matrix[row, :] for ai in ais]
if True not in check:
keep.append(row)
return matrix[keep, non_reduced]
|
11f39006bb8995b1b2356a050e4cff541d8417664842966b8a6601a400943b99 | """Useful utilities for higher level polynomial classes. """
from sympy.core import (S, Add, Mul, Pow, Eq, Expr,
expand_mul, expand_multinomial)
from sympy.core.exprtools import decompose_power, decompose_power_rat
from sympy.polys.polyerrors import PolynomialError, GeneratorsError
from sympy.polys.polyoptions import build_options
import re
_gens_order = {
'a': 301, 'b': 302, 'c': 303, 'd': 304,
'e': 305, 'f': 306, 'g': 307, 'h': 308,
'i': 309, 'j': 310, 'k': 311, 'l': 312,
'm': 313, 'n': 314, 'o': 315, 'p': 216,
'q': 217, 'r': 218, 's': 219, 't': 220,
'u': 221, 'v': 222, 'w': 223, 'x': 124,
'y': 125, 'z': 126,
}
_max_order = 1000
_re_gen = re.compile(r"^(.*?)(\d*)$", re.MULTILINE)
def _nsort(roots, separated=False):
"""Sort the numerical roots putting the real roots first, then sorting
according to real and imaginary parts. If ``separated`` is True, then
the real and imaginary roots will be returned in two lists, respectively.
This routine tries to avoid issue 6137 by separating the roots into real
and imaginary parts before evaluation. In addition, the sorting will raise
an error if any computation cannot be done with precision.
"""
if not all(r.is_number for r in roots):
raise NotImplementedError
# see issue 6137:
# get the real part of the evaluated real and imaginary parts of each root
key = [[i.n(2).as_real_imag()[0] for i in r.as_real_imag()] for r in roots]
# make sure the parts were computed with precision
if len(roots) > 1 and any(i._prec == 1 for k in key for i in k):
raise NotImplementedError("could not compute root with precision")
# insert a key to indicate if the root has an imaginary part
key = [(1 if i else 0, r, i) for r, i in key]
key = sorted(zip(key, roots))
# return the real and imaginary roots separately if desired
if separated:
r = []
i = []
for (im, _, _), v in key:
if im:
i.append(v)
else:
r.append(v)
return r, i
_, roots = zip(*key)
return list(roots)
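# Illustrative sketch (not a doctest): _nsort puts real roots before complex
# ones and orders each group by real, then imaginary part. Assuming SymPy
# number inputs:
#     from sympy import I, S, sqrt
#     _nsort([1 + I, sqrt(2), S(-1)])                 # -> [-1, sqrt(2), 1 + I]
#     _nsort([1 + I, sqrt(2), S(-1)], separated=True) # -> ([-1, sqrt(2)], [1 + I])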
def _sort_gens(gens, **args):
"""Sort generators in a reasonably intelligent way. """
opt = build_options(args)
gens_order, wrt = {}, None
if opt is not None:
gens_order, wrt = {}, opt.wrt
for i, gen in enumerate(opt.sort):
gens_order[gen] = i + 1
def order_key(gen):
gen = str(gen)
if wrt is not None:
try:
return (-len(wrt) + wrt.index(gen), gen, 0)
except ValueError:
pass
name, index = _re_gen.match(gen).groups()
if index:
index = int(index)
else:
index = 0
try:
return ( gens_order[name], name, index)
except KeyError:
pass
try:
return (_gens_order[name], name, index)
except KeyError:
pass
return (_max_order, name, index)
try:
gens = sorted(gens, key=order_key)
except TypeError: # pragma: no cover
pass
return tuple(gens)
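# Illustrative sketch (not a doctest): with default options, generators are
# ordered by the _gens_order table above (so x, y, z come before a, b, c, ...)
# and a trailing numeric suffix breaks ties:
#     from sympy.abc import a, y
#     from sympy import symbols
#     x1, x2 = symbols('x1 x2')
#     _sort_gens([a, x2, y, x1])          # -> (x1, x2, y, a)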
def _unify_gens(f_gens, g_gens):
"""Unify generators in a reasonably intelligent way. """
f_gens = list(f_gens)
g_gens = list(g_gens)
if f_gens == g_gens:
return tuple(f_gens)
gens, common, k = [], [], 0
for gen in f_gens:
if gen in g_gens:
common.append(gen)
for i, gen in enumerate(g_gens):
if gen in common:
g_gens[i], k = common[k], k + 1
for gen in common:
i = f_gens.index(gen)
gens.extend(f_gens[:i])
f_gens = f_gens[i + 1:]
i = g_gens.index(gen)
gens.extend(g_gens[:i])
g_gens = g_gens[i + 1:]
gens.append(gen)
gens.extend(f_gens)
gens.extend(g_gens)
return tuple(gens)
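# Illustrative sketch (not a doctest): common generators keep their relative
# position and the remaining ones are merged around them:
#     from sympy.abc import x, y, z
#     _unify_gens([x, y], [y, z])         # -> (x, y, z)
#     _unify_gens([x, z], [y, z])         # -> (x, y, z)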
def _analyze_gens(gens):
"""Support for passing generators as `*gens` and `[gens]`. """
if len(gens) == 1 and hasattr(gens[0], '__iter__'):
return tuple(gens[0])
else:
return tuple(gens)
def _sort_factors(factors, **args):
"""Sort low-level factors in increasing 'complexity' order. """
def order_if_multiple_key(factor):
(f, n) = factor
return (len(f), n, f)
def order_no_multiple_key(f):
return (len(f), f)
if args.get('multiple', True):
return sorted(factors, key=order_if_multiple_key)
else:
return sorted(factors, key=order_no_multiple_key)
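# Illustrative sketch (not a doctest): factors given as
# (coefficient-list, multiplicity) pairs are ordered by length of the
# coefficient list, then multiplicity, then the coefficients themselves:
#     _sort_factors([([1, 2, 3], 1), ([1, 2], 2)])
#     # -> [([1, 2], 2), ([1, 2, 3], 1)]
#     _sort_factors([[1, 2, 3], [1, 2]], multiple=False)
#     # -> [[1, 2], [1, 2, 3]]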
illegal = [S.NaN, S.Infinity, S.NegativeInfinity, S.ComplexInfinity]
illegal_types = [type(obj) for obj in illegal]
finf = [float(i) for i in illegal[1:3]]
def _not_a_coeff(expr):
"""Do not treat NaN and infinities as valid polynomial coefficients. """
if type(expr) in illegal_types or expr in finf:
return True
if type(expr) is float and float(expr) != expr:
return True # nan
    return  # could be a valid coefficient
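# Illustrative sketch (not a doctest): only NaN and the infinities are
# rejected here; anything else falls through (returns None) and may still be
# treated as a coefficient:
#     from sympy import S
#     _not_a_coeff(S.NaN)                 # -> True
#     _not_a_coeff(S.Infinity)            # -> True
#     _not_a_coeff(S(3))                  # -> None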
def _parallel_dict_from_expr_if_gens(exprs, opt):
"""Transform expressions into a multinomial form given generators. """
k, indices = len(opt.gens), {}
for i, g in enumerate(opt.gens):
indices[g] = i
polys = []
for expr in exprs:
poly = {}
if expr.is_Equality:
expr = expr.lhs - expr.rhs
for term in Add.make_args(expr):
coeff, monom = [], [0]*k
for factor in Mul.make_args(term):
if not _not_a_coeff(factor) and factor.is_Number:
coeff.append(factor)
else:
try:
if opt.series is False:
base, exp = decompose_power(factor)
if exp < 0:
exp, base = -exp, Pow(base, -S.One)
else:
base, exp = decompose_power_rat(factor)
monom[indices[base]] = exp
except KeyError:
if not factor.free_symbols.intersection(opt.gens):
coeff.append(factor)
else:
raise PolynomialError("%s contains an element of "
"the set of generators." % factor)
monom = tuple(monom)
if monom in poly:
poly[monom] += Mul(*coeff)
else:
poly[monom] = Mul(*coeff)
polys.append(poly)
return polys, opt.gens
def _parallel_dict_from_expr_no_gens(exprs, opt):
"""Transform expressions into a multinomial form and figure out generators. """
if opt.domain is not None:
def _is_coeff(factor):
return factor in opt.domain
elif opt.extension is True:
def _is_coeff(factor):
return factor.is_algebraic
elif opt.greedy is not False:
def _is_coeff(factor):
return factor is S.ImaginaryUnit
else:
def _is_coeff(factor):
return factor.is_number
gens, reprs = set(), []
for expr in exprs:
terms = []
if expr.is_Equality:
expr = expr.lhs - expr.rhs
for term in Add.make_args(expr):
coeff, elements = [], {}
for factor in Mul.make_args(term):
if not _not_a_coeff(factor) and (factor.is_Number or _is_coeff(factor)):
coeff.append(factor)
else:
if opt.series is False:
base, exp = decompose_power(factor)
if exp < 0:
exp, base = -exp, Pow(base, -S.One)
else:
base, exp = decompose_power_rat(factor)
elements[base] = elements.setdefault(base, 0) + exp
gens.add(base)
terms.append((coeff, elements))
reprs.append(terms)
gens = _sort_gens(gens, opt=opt)
k, indices = len(gens), {}
for i, g in enumerate(gens):
indices[g] = i
polys = []
for terms in reprs:
poly = {}
for coeff, term in terms:
monom = [0]*k
for base, exp in term.items():
monom[indices[base]] = exp
monom = tuple(monom)
if monom in poly:
poly[monom] += Mul(*coeff)
else:
poly[monom] = Mul(*coeff)
polys.append(poly)
return polys, tuple(gens)
def _dict_from_expr_if_gens(expr, opt):
"""Transform an expression into a multinomial form given generators. """
(poly,), gens = _parallel_dict_from_expr_if_gens((expr,), opt)
return poly, gens
def _dict_from_expr_no_gens(expr, opt):
"""Transform an expression into a multinomial form and figure out generators. """
(poly,), gens = _parallel_dict_from_expr_no_gens((expr,), opt)
return poly, gens
def parallel_dict_from_expr(exprs, **args):
"""Transform expressions into a multinomial form. """
reps, opt = _parallel_dict_from_expr(exprs, build_options(args))
return reps, opt.gens
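# Illustrative sketch (not a doctest): each expression becomes a dict mapping
# exponent tuples to coefficients, over a common tuple of generators:
#     from sympy.abc import x, y
#     parallel_dict_from_expr([x**2 + y, 3*x*y])
#     # -> ([{(2, 0): 1, (0, 1): 1}, {(1, 1): 3}], (x, y))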
def _parallel_dict_from_expr(exprs, opt):
"""Transform expressions into a multinomial form. """
if opt.expand is not False:
exprs = [ expr.expand() for expr in exprs ]
if any(expr.is_commutative is False for expr in exprs):
raise PolynomialError('non-commutative expressions are not supported')
if opt.gens:
reps, gens = _parallel_dict_from_expr_if_gens(exprs, opt)
else:
reps, gens = _parallel_dict_from_expr_no_gens(exprs, opt)
return reps, opt.clone({'gens': gens})
def dict_from_expr(expr, **args):
"""Transform an expression into a multinomial form. """
rep, opt = _dict_from_expr(expr, build_options(args))
return rep, opt.gens
def _dict_from_expr(expr, opt):
"""Transform an expression into a multinomial form. """
if expr.is_commutative is False:
raise PolynomialError('non-commutative expressions are not supported')
def _is_expandable_pow(expr):
return (expr.is_Pow and expr.exp.is_positive and expr.exp.is_Integer
and expr.base.is_Add)
if opt.expand is not False:
if not isinstance(expr, (Expr, Eq)):
raise PolynomialError('expression must be of type Expr')
expr = expr.expand()
# TODO: Integrate this into expand() itself
while any(_is_expandable_pow(i) or i.is_Mul and
any(_is_expandable_pow(j) for j in i.args) for i in
Add.make_args(expr)):
expr = expand_multinomial(expr)
while any(i.is_Mul and any(j.is_Add for j in i.args) for i in Add.make_args(expr)):
expr = expand_mul(expr)
if opt.gens:
rep, gens = _dict_from_expr_if_gens(expr, opt)
else:
rep, gens = _dict_from_expr_no_gens(expr, opt)
return rep, opt.clone({'gens': gens})
def expr_from_dict(rep, *gens):
"""Convert a multinomial form into an expression. """
result = []
for monom, coeff in rep.items():
term = [coeff]
for g, m in zip(gens, monom):
if m:
term.append(Pow(g, m))
result.append(Mul(*term))
return Add(*result)
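# Illustrative sketch (not a doctest): the inverse direction, rebuilding an
# expression from the exponent-tuple representation:
#     from sympy.abc import x, y
#     expr_from_dict({(2, 0): 3, (0, 1): 5}, x, y)    # -> 3*x**2 + 5*y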
parallel_dict_from_basic = parallel_dict_from_expr
dict_from_basic = dict_from_expr
basic_from_dict = expr_from_dict
def _dict_reorder(rep, gens, new_gens):
"""Reorder levels using dict representation. """
gens = list(gens)
monoms = rep.keys()
coeffs = rep.values()
new_monoms = [ [] for _ in range(len(rep)) ]
used_indices = set()
for gen in new_gens:
try:
j = gens.index(gen)
used_indices.add(j)
for M, new_M in zip(monoms, new_monoms):
new_M.append(M[j])
except ValueError:
for new_M in new_monoms:
new_M.append(0)
for i, _ in enumerate(gens):
if i not in used_indices:
for monom in monoms:
if monom[i]:
raise GeneratorsError("unable to drop generators")
return map(tuple, new_monoms), coeffs
class PicklableWithSlots:
"""
    Mixin class that allows pickling objects with ``__slots__``.
Examples
========
First define a class that mixes :class:`PicklableWithSlots` in::
>>> from sympy.polys.polyutils import PicklableWithSlots
>>> class Some(PicklableWithSlots):
... __slots__ = ('foo', 'bar')
...
... def __init__(self, foo, bar):
... self.foo = foo
... self.bar = bar
    To make :mod:`pickle` happy in doctests we have to use these hacks::
>>> import builtins
>>> builtins.Some = Some
>>> from sympy.polys import polyutils
>>> polyutils.Some = Some
    Next let's see if we can create an instance, pickle it and unpickle::
>>> some = Some('abc', 10)
>>> some.foo, some.bar
('abc', 10)
>>> from pickle import dumps, loads
>>> some2 = loads(dumps(some))
>>> some2.foo, some2.bar
('abc', 10)
"""
__slots__ = ()
def __getstate__(self, cls=None):
if cls is None:
# This is the case for the instance that gets pickled
cls = self.__class__
d = {}
# Get all data that should be stored from super classes
for c in cls.__bases__:
if hasattr(c, "__getstate__"):
d.update(c.__getstate__(self, c))
# Get all information that should be stored from cls and return the dict
for name in cls.__slots__:
if hasattr(self, name):
d[name] = getattr(self, name)
return d
def __setstate__(self, d):
# All values that were pickled are now assigned to a fresh instance
for name, value in d.items():
try:
setattr(self, name, value)
except AttributeError: # This is needed in cases like Rational :> Half
pass
class IntegerPowerable:
r"""
Mixin class for classes that define a `__mul__` method, and want to be
raised to integer powers in the natural way that follows. Implements
powering via binary expansion, for efficiency.
By default, only integer powers $\geq 2$ are supported. To support the
first, zeroth, or negative powers, override the corresponding methods,
`_first_power`, `_zeroth_power`, `_negative_power`, below.
"""
def __pow__(self, e, modulo=None):
if e < 2:
try:
if e == 1:
return self._first_power()
elif e == 0:
return self._zeroth_power()
else:
return self._negative_power(e, modulo=modulo)
except NotImplementedError:
return NotImplemented
else:
bits = [int(d) for d in reversed(bin(e)[2:])]
n = len(bits)
p = self
first = True
for i in range(n):
if bits[i]:
if first:
r = p
first = False
else:
r *= p
if modulo is not None:
r %= modulo
if i < n - 1:
p *= p
if modulo is not None:
p %= modulo
return r
def _negative_power(self, e, modulo=None):
"""
Compute inverse of self, then raise that to the abs(e) power.
For example, if the class has an `inv()` method,
return self.inv() ** abs(e) % modulo
"""
raise NotImplementedError
def _zeroth_power(self):
"""Return unity element of algebraic struct to which self belongs."""
raise NotImplementedError
def _first_power(self):
"""Return a copy of self."""
raise NotImplementedError
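# Illustrative sketch (hypothetical class, not part of this module): a minimal
# user of IntegerPowerable only needs __mul__ and, if desired, the
# _zeroth_power/_first_power hooks; powering then proceeds by binary expansion.
#
#     class ModInt(IntegerPowerable):
#         def __init__(self, value, modulus):
#             self.value, self.modulus = value % modulus, modulus
#         def __mul__(self, other):
#             return ModInt(self.value * other.value, self.modulus)
#         def _zeroth_power(self):
#             return ModInt(1, self.modulus)
#         def _first_power(self):
#             return ModInt(self.value, self.modulus)
#
#     (ModInt(3, 7) ** 5).value    # -> 5, since 3**5 % 7 == 5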
|
8242eed18bec489d9bd3800e2bb315feb8e9d66e9ec4b91da8294e8eb6c16e8a | """Tools for manipulation of rational expressions. """
from sympy.core import Basic, Add, sympify
from sympy.core.exprtools import gcd_terms
from sympy.utilities import public
from sympy.utilities.iterables import iterable
@public
def together(expr, deep=False, fraction=True):
"""
Denest and combine rational expressions using symbolic methods.
This function takes an expression or a container of expressions
and puts it (them) together by denesting and combining rational
subexpressions. No heroic measures are taken to minimize degree
of the resulting numerator and denominator. To obtain completely
reduced expression use :func:`~.cancel`. However, :func:`~.together`
can preserve as much as possible of the structure of the input
expression in the output (no expansion is performed).
A wide variety of objects can be put together including lists,
tuples, sets, relational objects, integrals and others. It is
also possible to transform interior of function applications,
by setting ``deep`` flag to ``True``.
By definition, :func:`~.together` is a complement to :func:`~.apart`,
so ``apart(together(expr))`` should return expr unchanged. Note
however, that :func:`~.together` uses only symbolic methods, so
it might be necessary to use :func:`~.cancel` to perform algebraic
simplification and minimize degree of the numerator and denominator.
Examples
========
>>> from sympy import together, exp
>>> from sympy.abc import x, y, z
>>> together(1/x + 1/y)
(x + y)/(x*y)
>>> together(1/x + 1/y + 1/z)
(x*y + x*z + y*z)/(x*y*z)
>>> together(1/(x*y) + 1/y**2)
(x + y)/(x*y**2)
>>> together(1/(1 + 1/x) + 1/(1 + 1/y))
(x*(y + 1) + y*(x + 1))/((x + 1)*(y + 1))
>>> together(exp(1/x + 1/y))
exp(1/y + 1/x)
>>> together(exp(1/x + 1/y), deep=True)
exp((x + y)/(x*y))
>>> together(1/exp(x) + 1/(x*exp(x)))
(x + 1)*exp(-x)/x
>>> together(1/exp(2*x) + 1/(x*exp(3*x)))
(x*exp(x) + 1)*exp(-3*x)/x
"""
def _together(expr):
if isinstance(expr, Basic):
if expr.is_Atom or (expr.is_Function and not deep):
return expr
elif expr.is_Add:
return gcd_terms(list(map(_together, Add.make_args(expr))), fraction=fraction)
elif expr.is_Pow:
base = _together(expr.base)
if deep:
exp = _together(expr.exp)
else:
exp = expr.exp
return expr.__class__(base, exp)
else:
return expr.__class__(*[ _together(arg) for arg in expr.args ])
elif iterable(expr):
return expr.__class__([ _together(ex) for ex in expr ])
return expr
return _together(sympify(expr))
|
022e174279a921aa587fbcef533ae90a86c4429a467b625f578ed493a273a800 | """Euclidean algorithms, GCDs, LCMs and polynomial remainder sequences. """
from sympy.ntheory import nextprime
from sympy.polys.densearith import (
dup_sub_mul,
dup_neg, dmp_neg,
dmp_add,
dmp_sub,
dup_mul, dmp_mul,
dmp_pow,
dup_div, dmp_div,
dup_rem,
dup_quo, dmp_quo,
dup_prem, dmp_prem,
dup_mul_ground, dmp_mul_ground,
dmp_mul_term,
dup_quo_ground, dmp_quo_ground,
dup_max_norm, dmp_max_norm)
from sympy.polys.densebasic import (
dup_strip, dmp_raise,
dmp_zero, dmp_one, dmp_ground,
dmp_one_p, dmp_zero_p,
dmp_zeros,
dup_degree, dmp_degree, dmp_degree_in,
dup_LC, dmp_LC, dmp_ground_LC,
dmp_multi_deflate, dmp_inflate,
dup_convert, dmp_convert,
dmp_apply_pairs)
from sympy.polys.densetools import (
dup_clear_denoms, dmp_clear_denoms,
dup_diff, dmp_diff,
dup_eval, dmp_eval, dmp_eval_in,
dup_trunc, dmp_ground_trunc,
dup_monic, dmp_ground_monic,
dup_primitive, dmp_ground_primitive,
dup_extract, dmp_ground_extract)
from sympy.polys.galoistools import (
gf_int, gf_crt)
from sympy.polys.polyconfig import query
from sympy.polys.polyerrors import (
MultivariatePolynomialError,
HeuristicGCDFailed,
HomomorphismFailed,
NotInvertible,
DomainError)
def dup_half_gcdex(f, g, K):
"""
Half extended Euclidean algorithm in `F[x]`.
Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
>>> g = x**3 + x**2 - 4*x - 4
>>> R.dup_half_gcdex(f, g)
(-1/5*x + 3/5, x + 1)
"""
if not K.is_Field:
raise DomainError("Cannot compute half extended GCD over %s" % K)
a, b = [K.one], []
while g:
q, r = dup_div(f, g, K)
f, g = g, r
a, b = b, dup_sub_mul(a, q, b, K)
a = dup_quo_ground(a, dup_LC(f, K), K)
f = dup_monic(f, K)
return a, f
def dmp_half_gcdex(f, g, u, K):
"""
Half extended Euclidean algorithm in `F[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
"""
if not u:
return dup_half_gcdex(f, g, K)
else:
raise MultivariatePolynomialError(f, g)
def dup_gcdex(f, g, K):
"""
Extended Euclidean algorithm in `F[x]`.
Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
>>> g = x**3 + x**2 - 4*x - 4
>>> R.dup_gcdex(f, g)
(-1/5*x + 3/5, 1/5*x**2 - 6/5*x + 2, x + 1)
"""
s, h = dup_half_gcdex(f, g, K)
F = dup_sub_mul(h, s, f, K)
t = dup_quo(F, g, K)
return s, t, h
def dmp_gcdex(f, g, u, K):
"""
Extended Euclidean algorithm in `F[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
"""
if not u:
return dup_gcdex(f, g, K)
else:
raise MultivariatePolynomialError(f, g)
def dup_invert(f, g, K):
"""
Compute multiplicative inverse of `f` modulo `g` in `F[x]`.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> f = x**2 - 1
>>> g = 2*x - 1
>>> h = x - 1
>>> R.dup_invert(f, g)
-4/3
>>> R.dup_invert(f, h)
Traceback (most recent call last):
...
NotInvertible: zero divisor
"""
s, h = dup_half_gcdex(f, g, K)
if h == [K.one]:
return dup_rem(s, g, K)
else:
raise NotInvertible("zero divisor")
def dmp_invert(f, g, u, K):
"""
Compute multiplicative inverse of `f` modulo `g` in `F[X]`.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
"""
if not u:
return dup_invert(f, g, K)
else:
raise MultivariatePolynomialError(f, g)
def dup_euclidean_prs(f, g, K):
"""
Euclidean polynomial remainder sequence (PRS) in `K[x]`.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5
>>> g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21
>>> prs = R.dup_euclidean_prs(f, g)
>>> prs[0]
x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5
>>> prs[1]
3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21
>>> prs[2]
-5/9*x**4 + 1/9*x**2 - 1/3
>>> prs[3]
-117/25*x**2 - 9*x + 441/25
>>> prs[4]
233150/19773*x - 102500/6591
>>> prs[5]
-1288744821/543589225
"""
prs = [f, g]
h = dup_rem(f, g, K)
while h:
prs.append(h)
f, g = g, h
h = dup_rem(f, g, K)
return prs
def dmp_euclidean_prs(f, g, u, K):
"""
Euclidean polynomial remainder sequence (PRS) in `K[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
"""
if not u:
return dup_euclidean_prs(f, g, K)
else:
raise MultivariatePolynomialError(f, g)
def dup_primitive_prs(f, g, K):
"""
Primitive polynomial remainder sequence (PRS) in `K[x]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5
>>> g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21
>>> prs = R.dup_primitive_prs(f, g)
>>> prs[0]
x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5
>>> prs[1]
3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21
>>> prs[2]
-5*x**4 + x**2 - 3
>>> prs[3]
13*x**2 + 25*x - 49
>>> prs[4]
4663*x - 6150
>>> prs[5]
1
"""
prs = [f, g]
_, h = dup_primitive(dup_prem(f, g, K), K)
while h:
prs.append(h)
f, g = g, h
_, h = dup_primitive(dup_prem(f, g, K), K)
return prs
def dmp_primitive_prs(f, g, u, K):
"""
Primitive polynomial remainder sequence (PRS) in `K[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
"""
if not u:
return dup_primitive_prs(f, g, K)
else:
raise MultivariatePolynomialError(f, g)
def dup_inner_subresultants(f, g, K):
"""
Subresultant PRS algorithm in `K[x]`.
Computes the subresultant polynomial remainder sequence (PRS)
and the non-zero scalar subresultants of `f` and `g`.
By [1] Thm. 3, these are the constants '-c' (- to optimize
computation of sign).
The first subdeterminant is set to 1 by convention to match
the polynomial and the scalar subdeterminants.
If 'deg(f) < deg(g)', the subresultants of '(g,f)' are computed.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_inner_subresultants(x**2 + 1, x**2 - 1)
([x**2 + 1, x**2 - 1, -2], [1, 1, 4])
References
==========
.. [1] W.S. Brown, The Subresultant PRS Algorithm.
ACM Transaction of Mathematical Software 4 (1978) 237-249
"""
n = dup_degree(f)
m = dup_degree(g)
if n < m:
f, g = g, f
n, m = m, n
if not f:
return [], []
if not g:
return [f], [K.one]
R = [f, g]
d = n - m
b = (-K.one)**(d + 1)
h = dup_prem(f, g, K)
h = dup_mul_ground(h, b, K)
lc = dup_LC(g, K)
c = lc**d
# Conventional first scalar subdeterminant is 1
S = [K.one, c]
c = -c
while h:
k = dup_degree(h)
R.append(h)
f, g, m, d = g, h, k, m - k
b = -lc * c**d
h = dup_prem(f, g, K)
h = dup_quo_ground(h, b, K)
lc = dup_LC(g, K)
if d > 1: # abnormal case
q = c**(d - 1)
c = K.quo((-lc)**d, q)
else:
c = -lc
S.append(-c)
return R, S
def dup_subresultants(f, g, K):
"""
Computes subresultant PRS of two polynomials in `K[x]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_subresultants(x**2 + 1, x**2 - 1)
[x**2 + 1, x**2 - 1, -2]
"""
return dup_inner_subresultants(f, g, K)[0]
def dup_prs_resultant(f, g, K):
"""
Resultant algorithm in `K[x]` using subresultant PRS.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_prs_resultant(x**2 + 1, x**2 - 1)
(4, [x**2 + 1, x**2 - 1, -2])
"""
if not f or not g:
return (K.zero, [])
R, S = dup_inner_subresultants(f, g, K)
if dup_degree(R[-1]) > 0:
return (K.zero, R)
return S[-1], R
def dup_resultant(f, g, K, includePRS=False):
"""
Computes resultant of two polynomials in `K[x]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_resultant(x**2 + 1, x**2 - 1)
4
"""
if includePRS:
return dup_prs_resultant(f, g, K)
return dup_prs_resultant(f, g, K)[0]
def dmp_inner_subresultants(f, g, u, K):
"""
Subresultant PRS algorithm in `K[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 3*x**2*y - y**3 - 4
>>> g = x**2 + x*y**3 - 9
>>> a = 3*x*y**4 + y**3 - 27*y + 4
>>> b = -3*y**10 - 12*y**7 + y**6 - 54*y**4 + 8*y**3 + 729*y**2 - 216*y + 16
>>> prs = [f, g, a, b]
>>> sres = [[1], [1], [3, 0, 0, 0, 0], [-3, 0, 0, -12, 1, 0, -54, 8, 729, -216, 16]]
>>> R.dmp_inner_subresultants(f, g) == (prs, sres)
True
"""
if not u:
return dup_inner_subresultants(f, g, K)
n = dmp_degree(f, u)
m = dmp_degree(g, u)
if n < m:
f, g = g, f
n, m = m, n
if dmp_zero_p(f, u):
return [], []
v = u - 1
if dmp_zero_p(g, u):
return [f], [dmp_ground(K.one, v)]
R = [f, g]
d = n - m
b = dmp_pow(dmp_ground(-K.one, v), d + 1, v, K)
h = dmp_prem(f, g, u, K)
h = dmp_mul_term(h, b, 0, u, K)
lc = dmp_LC(g, K)
c = dmp_pow(lc, d, v, K)
S = [dmp_ground(K.one, v), c]
c = dmp_neg(c, v, K)
while not dmp_zero_p(h, u):
k = dmp_degree(h, u)
R.append(h)
f, g, m, d = g, h, k, m - k
b = dmp_mul(dmp_neg(lc, v, K),
dmp_pow(c, d, v, K), v, K)
h = dmp_prem(f, g, u, K)
h = [ dmp_quo(ch, b, v, K) for ch in h ]
lc = dmp_LC(g, K)
if d > 1:
p = dmp_pow(dmp_neg(lc, v, K), d, v, K)
q = dmp_pow(c, d - 1, v, K)
c = dmp_quo(p, q, v, K)
else:
c = dmp_neg(lc, v, K)
S.append(dmp_neg(c, v, K))
return R, S
def dmp_subresultants(f, g, u, K):
"""
Computes subresultant PRS of two polynomials in `K[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 3*x**2*y - y**3 - 4
>>> g = x**2 + x*y**3 - 9
>>> a = 3*x*y**4 + y**3 - 27*y + 4
>>> b = -3*y**10 - 12*y**7 + y**6 - 54*y**4 + 8*y**3 + 729*y**2 - 216*y + 16
>>> R.dmp_subresultants(f, g) == [f, g, a, b]
True
"""
return dmp_inner_subresultants(f, g, u, K)[0]
def dmp_prs_resultant(f, g, u, K):
"""
Resultant algorithm in `K[X]` using subresultant PRS.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 3*x**2*y - y**3 - 4
>>> g = x**2 + x*y**3 - 9
>>> a = 3*x*y**4 + y**3 - 27*y + 4
>>> b = -3*y**10 - 12*y**7 + y**6 - 54*y**4 + 8*y**3 + 729*y**2 - 216*y + 16
>>> res, prs = R.dmp_prs_resultant(f, g)
>>> res == b # resultant has n-1 variables
False
>>> res == b.drop(x)
True
>>> prs == [f, g, a, b]
True
"""
if not u:
return dup_prs_resultant(f, g, K)
if dmp_zero_p(f, u) or dmp_zero_p(g, u):
return (dmp_zero(u - 1), [])
R, S = dmp_inner_subresultants(f, g, u, K)
if dmp_degree(R[-1], u) > 0:
return (dmp_zero(u - 1), R)
return S[-1], R
def dmp_zz_modular_resultant(f, g, p, u, K):
"""
Compute resultant of `f` and `g` modulo a prime `p`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = x + y + 2
>>> g = 2*x*y + x + 3
>>> R.dmp_zz_modular_resultant(f, g, 5)
-2*y**2 + 1
"""
if not u:
return gf_int(dup_prs_resultant(f, g, K)[0] % p, p)
v = u - 1
n = dmp_degree(f, u)
m = dmp_degree(g, u)
N = dmp_degree_in(f, 1, u)
M = dmp_degree_in(g, 1, u)
B = n*M + m*N
D, a = [K.one], -K.one
r = dmp_zero(v)
while dup_degree(D) <= B:
while True:
a += K.one
if a == p:
raise HomomorphismFailed('no luck')
F = dmp_eval_in(f, gf_int(a, p), 1, u, K)
if dmp_degree(F, v) == n:
G = dmp_eval_in(g, gf_int(a, p), 1, u, K)
if dmp_degree(G, v) == m:
break
R = dmp_zz_modular_resultant(F, G, p, v, K)
e = dmp_eval(r, a, v, K)
if not v:
R = dup_strip([R])
e = dup_strip([e])
else:
R = [R]
e = [e]
d = K.invert(dup_eval(D, a, K), p)
d = dup_mul_ground(D, d, K)
d = dmp_raise(d, v, 0, K)
c = dmp_mul(d, dmp_sub(R, e, v, K), v, K)
r = dmp_add(r, c, v, K)
r = dmp_ground_trunc(r, p, v, K)
D = dup_mul(D, [K.one, -a], K)
D = dup_trunc(D, p, K)
return r
def _collins_crt(r, R, P, p, K):
"""Wrapper of CRT for Collins's resultant algorithm. """
return gf_int(gf_crt([r, R], [P, p], K), P*p)
def dmp_zz_collins_resultant(f, g, u, K):
"""
Collins's modular resultant algorithm in `Z[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = x + y + 2
>>> g = 2*x*y + x + 3
>>> R.dmp_zz_collins_resultant(f, g)
-2*y**2 - 5*y + 1
"""
n = dmp_degree(f, u)
m = dmp_degree(g, u)
if n < 0 or m < 0:
return dmp_zero(u - 1)
A = dmp_max_norm(f, u, K)
B = dmp_max_norm(g, u, K)
a = dmp_ground_LC(f, u, K)
b = dmp_ground_LC(g, u, K)
v = u - 1
B = K(2)*K.factorial(K(n + m))*A**m*B**n
r, p, P = dmp_zero(v), K.one, K.one
while P <= B:
p = K(nextprime(p))
while not (a % p) or not (b % p):
p = K(nextprime(p))
F = dmp_ground_trunc(f, p, u, K)
G = dmp_ground_trunc(g, p, u, K)
try:
R = dmp_zz_modular_resultant(F, G, p, u, K)
except HomomorphismFailed:
continue
if K.is_one(P):
r = R
else:
r = dmp_apply_pairs(r, R, _collins_crt, (P, p, K), v, K)
P *= p
return r
def dmp_qq_collins_resultant(f, g, u, K0):
"""
Collins's modular resultant algorithm in `Q[X]`.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x,y = ring("x,y", QQ)
>>> f = QQ(1,2)*x + y + QQ(2,3)
>>> g = 2*x*y + x + 3
>>> R.dmp_qq_collins_resultant(f, g)
-2*y**2 - 7/3*y + 5/6
"""
n = dmp_degree(f, u)
m = dmp_degree(g, u)
if n < 0 or m < 0:
return dmp_zero(u - 1)
K1 = K0.get_ring()
cf, f = dmp_clear_denoms(f, u, K0, K1)
cg, g = dmp_clear_denoms(g, u, K0, K1)
f = dmp_convert(f, u, K0, K1)
g = dmp_convert(g, u, K0, K1)
r = dmp_zz_collins_resultant(f, g, u, K1)
r = dmp_convert(r, u - 1, K1, K0)
c = K0.convert(cf**m * cg**n, K1)
return dmp_quo_ground(r, c, u - 1, K0)
def dmp_resultant(f, g, u, K, includePRS=False):
"""
Computes resultant of two polynomials in `K[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 3*x**2*y - y**3 - 4
>>> g = x**2 + x*y**3 - 9
>>> R.dmp_resultant(f, g)
-3*y**10 - 12*y**7 + y**6 - 54*y**4 + 8*y**3 + 729*y**2 - 216*y + 16
"""
if not u:
return dup_resultant(f, g, K, includePRS=includePRS)
if includePRS:
return dmp_prs_resultant(f, g, u, K)
if K.is_Field:
if K.is_QQ and query('USE_COLLINS_RESULTANT'):
return dmp_qq_collins_resultant(f, g, u, K)
else:
if K.is_ZZ and query('USE_COLLINS_RESULTANT'):
return dmp_zz_collins_resultant(f, g, u, K)
return dmp_prs_resultant(f, g, u, K)[0]
def dup_discriminant(f, K):
"""
Computes discriminant of a polynomial in `K[x]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_discriminant(x**2 + 2*x + 3)
-8
"""
d = dup_degree(f)
if d <= 0:
return K.zero
else:
s = (-1)**((d*(d - 1)) // 2)
c = dup_LC(f, K)
r = dup_resultant(f, dup_diff(f, 1, K), K)
return K.quo(r, c*K(s))
def dmp_discriminant(f, u, K):
"""
Computes discriminant of a polynomial in `K[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y,z,t = ring("x,y,z,t", ZZ)
>>> R.dmp_discriminant(x**2*y + x*z + t)
-4*y*t + z**2
"""
if not u:
return dup_discriminant(f, K)
d, v = dmp_degree(f, u), u - 1
if d <= 0:
return dmp_zero(v)
else:
s = (-1)**((d*(d - 1)) // 2)
c = dmp_LC(f, K)
r = dmp_resultant(f, dmp_diff(f, 1, u, K), u, K)
c = dmp_mul_ground(c, K(s), v, K)
return dmp_quo(r, c, v, K)
def _dup_rr_trivial_gcd(f, g, K):
"""Handle trivial cases in GCD algorithm over a ring. """
if not (f or g):
return [], [], []
elif not f:
if K.is_nonnegative(dup_LC(g, K)):
return g, [], [K.one]
else:
return dup_neg(g, K), [], [-K.one]
elif not g:
if K.is_nonnegative(dup_LC(f, K)):
return f, [K.one], []
else:
return dup_neg(f, K), [-K.one], []
return None
def _dup_ff_trivial_gcd(f, g, K):
"""Handle trivial cases in GCD algorithm over a field. """
if not (f or g):
return [], [], []
elif not f:
return dup_monic(g, K), [], [dup_LC(g, K)]
elif not g:
return dup_monic(f, K), [dup_LC(f, K)], []
else:
return None
def _dmp_rr_trivial_gcd(f, g, u, K):
"""Handle trivial cases in GCD algorithm over a ring. """
zero_f = dmp_zero_p(f, u)
zero_g = dmp_zero_p(g, u)
if_contain_one = dmp_one_p(f, u, K) or dmp_one_p(g, u, K)
if zero_f and zero_g:
return tuple(dmp_zeros(3, u, K))
elif zero_f:
if K.is_nonnegative(dmp_ground_LC(g, u, K)):
return g, dmp_zero(u), dmp_one(u, K)
else:
return dmp_neg(g, u, K), dmp_zero(u), dmp_ground(-K.one, u)
elif zero_g:
if K.is_nonnegative(dmp_ground_LC(f, u, K)):
return f, dmp_one(u, K), dmp_zero(u)
else:
return dmp_neg(f, u, K), dmp_ground(-K.one, u), dmp_zero(u)
elif if_contain_one:
return dmp_one(u, K), f, g
elif query('USE_SIMPLIFY_GCD'):
return _dmp_simplify_gcd(f, g, u, K)
else:
return None
def _dmp_ff_trivial_gcd(f, g, u, K):
"""Handle trivial cases in GCD algorithm over a field. """
zero_f = dmp_zero_p(f, u)
zero_g = dmp_zero_p(g, u)
if zero_f and zero_g:
return tuple(dmp_zeros(3, u, K))
elif zero_f:
return (dmp_ground_monic(g, u, K),
dmp_zero(u),
dmp_ground(dmp_ground_LC(g, u, K), u))
elif zero_g:
return (dmp_ground_monic(f, u, K),
dmp_ground(dmp_ground_LC(f, u, K), u),
dmp_zero(u))
elif query('USE_SIMPLIFY_GCD'):
return _dmp_simplify_gcd(f, g, u, K)
else:
return None
def _dmp_simplify_gcd(f, g, u, K):
"""Try to eliminate `x_0` from GCD computation in `K[X]`. """
df = dmp_degree(f, u)
dg = dmp_degree(g, u)
if df > 0 and dg > 0:
return None
if not (df or dg):
F = dmp_LC(f, K)
G = dmp_LC(g, K)
else:
if not df:
F = dmp_LC(f, K)
G = dmp_content(g, u, K)
else:
F = dmp_content(f, u, K)
G = dmp_LC(g, K)
v = u - 1
h = dmp_gcd(F, G, v, K)
cff = [ dmp_quo(cf, h, v, K) for cf in f ]
cfg = [ dmp_quo(cg, h, v, K) for cg in g ]
return [h], cff, cfg
def dup_rr_prs_gcd(f, g, K):
"""
Computes polynomial GCD using subresultants over a ring.
    Returns ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, ``cff = quo(f, h)``,
and ``cfg = quo(g, h)``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_rr_prs_gcd(x**2 - 1, x**2 - 3*x + 2)
(x - 1, x + 1, x - 2)
"""
result = _dup_rr_trivial_gcd(f, g, K)
if result is not None:
return result
fc, F = dup_primitive(f, K)
gc, G = dup_primitive(g, K)
c = K.gcd(fc, gc)
h = dup_subresultants(F, G, K)[-1]
_, h = dup_primitive(h, K)
c *= K.canonical_unit(dup_LC(h, K))
h = dup_mul_ground(h, c, K)
cff = dup_quo(f, h, K)
cfg = dup_quo(g, h, K)
return h, cff, cfg
def dup_ff_prs_gcd(f, g, K):
"""
Computes polynomial GCD using subresultants over a field.
    Returns ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, ``cff = quo(f, h)``,
and ``cfg = quo(g, h)``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> R.dup_ff_prs_gcd(x**2 - 1, x**2 - 3*x + 2)
(x - 1, x + 1, x - 2)
"""
result = _dup_ff_trivial_gcd(f, g, K)
if result is not None:
return result
h = dup_subresultants(f, g, K)[-1]
h = dup_monic(h, K)
cff = dup_quo(f, h, K)
cfg = dup_quo(g, h, K)
return h, cff, cfg
def dmp_rr_prs_gcd(f, g, u, K):
"""
Computes polynomial GCD using subresultants over a ring.
    Returns ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, ``cff = quo(f, h)``,
and ``cfg = quo(g, h)``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y, = ring("x,y", ZZ)
>>> f = x**2 + 2*x*y + y**2
>>> g = x**2 + x*y
>>> R.dmp_rr_prs_gcd(f, g)
(x + y, x + y, x)
"""
if not u:
return dup_rr_prs_gcd(f, g, K)
result = _dmp_rr_trivial_gcd(f, g, u, K)
if result is not None:
return result
fc, F = dmp_primitive(f, u, K)
gc, G = dmp_primitive(g, u, K)
h = dmp_subresultants(F, G, u, K)[-1]
c, _, _ = dmp_rr_prs_gcd(fc, gc, u - 1, K)
if K.is_negative(dmp_ground_LC(h, u, K)):
h = dmp_neg(h, u, K)
_, h = dmp_primitive(h, u, K)
h = dmp_mul_term(h, c, 0, u, K)
cff = dmp_quo(f, h, u, K)
cfg = dmp_quo(g, h, u, K)
return h, cff, cfg
def dmp_ff_prs_gcd(f, g, u, K):
"""
Computes polynomial GCD using subresultants over a field.
    Returns ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, ``cff = quo(f, h)``,
and ``cfg = quo(g, h)``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x,y, = ring("x,y", QQ)
>>> f = QQ(1,2)*x**2 + x*y + QQ(1,2)*y**2
>>> g = x**2 + x*y
>>> R.dmp_ff_prs_gcd(f, g)
(x + y, 1/2*x + 1/2*y, x)
"""
if not u:
return dup_ff_prs_gcd(f, g, K)
result = _dmp_ff_trivial_gcd(f, g, u, K)
if result is not None:
return result
fc, F = dmp_primitive(f, u, K)
gc, G = dmp_primitive(g, u, K)
h = dmp_subresultants(F, G, u, K)[-1]
c, _, _ = dmp_ff_prs_gcd(fc, gc, u - 1, K)
_, h = dmp_primitive(h, u, K)
h = dmp_mul_term(h, c, 0, u, K)
h = dmp_ground_monic(h, u, K)
cff = dmp_quo(f, h, u, K)
cfg = dmp_quo(g, h, u, K)
return h, cff, cfg
HEU_GCD_MAX = 6
def _dup_zz_gcd_interpolate(h, x, K):
"""Interpolate polynomial GCD from integer GCD. """
f = []
while h:
g = h % x
if g > x // 2:
g -= x
f.insert(0, g)
h = (h - g) // x
return f
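# Illustrative sketch (not a doctest): the integer GCD is read off as a
# balanced base-x expansion; with x = 4 the integer 21 = 1*4**2 + 1*4 + 1
# interpolates back to the dense coefficient list of x**2 + x + 1:
#     from sympy.polys.domains import ZZ
#     _dup_zz_gcd_interpolate(ZZ(21), ZZ(4), ZZ)      # -> [1, 1, 1]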
def dup_zz_heu_gcd(f, g, K):
"""
Heuristic polynomial GCD in `Z[x]`.
Given univariate polynomials `f` and `g` in `Z[x]`, returns
their GCD and cofactors, i.e. polynomials ``h``, ``cff`` and ``cfg``
such that::
h = gcd(f, g), cff = quo(f, h) and cfg = quo(g, h)
The algorithm is purely heuristic which means it may fail to compute
the GCD. This will be signaled by raising an exception. In this case
you will need to switch to another GCD method.
The algorithm computes the polynomial GCD by evaluating polynomials
f and g at certain points and computing (fast) integer GCD of those
evaluations. The polynomial GCD is recovered from the integer image
by interpolation. The final step is to verify if the result is the
correct GCD. This gives cofactors as a side effect.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_zz_heu_gcd(x**2 - 1, x**2 - 3*x + 2)
(x - 1, x + 1, x - 2)
References
==========
.. [1] [Liao95]_
"""
result = _dup_rr_trivial_gcd(f, g, K)
if result is not None:
return result
df = dup_degree(f)
dg = dup_degree(g)
gcd, f, g = dup_extract(f, g, K)
if df == 0 or dg == 0:
return [gcd], f, g
f_norm = dup_max_norm(f, K)
g_norm = dup_max_norm(g, K)
B = K(2*min(f_norm, g_norm) + 29)
x = max(min(B, 99*K.sqrt(B)),
2*min(f_norm // abs(dup_LC(f, K)),
g_norm // abs(dup_LC(g, K))) + 2)
for i in range(0, HEU_GCD_MAX):
ff = dup_eval(f, x, K)
gg = dup_eval(g, x, K)
if ff and gg:
h = K.gcd(ff, gg)
cff = ff // h
cfg = gg // h
h = _dup_zz_gcd_interpolate(h, x, K)
h = dup_primitive(h, K)[1]
cff_, r = dup_div(f, h, K)
if not r:
cfg_, r = dup_div(g, h, K)
if not r:
h = dup_mul_ground(h, gcd, K)
return h, cff_, cfg_
cff = _dup_zz_gcd_interpolate(cff, x, K)
h, r = dup_div(f, cff, K)
if not r:
cfg_, r = dup_div(g, h, K)
if not r:
h = dup_mul_ground(h, gcd, K)
return h, cff, cfg_
cfg = _dup_zz_gcd_interpolate(cfg, x, K)
h, r = dup_div(g, cfg, K)
if not r:
cff_, r = dup_div(f, h, K)
if not r:
h = dup_mul_ground(h, gcd, K)
return h, cff_, cfg
x = 73794*x * K.sqrt(K.sqrt(x)) // 27011
raise HeuristicGCDFailed('no luck')
def _dmp_zz_gcd_interpolate(h, x, v, K):
"""Interpolate polynomial GCD from integer GCD. """
f = []
while not dmp_zero_p(h, v):
g = dmp_ground_trunc(h, x, v, K)
f.insert(0, g)
h = dmp_sub(h, g, v, K)
h = dmp_quo_ground(h, x, v, K)
if K.is_negative(dmp_ground_LC(f, v + 1, K)):
return dmp_neg(f, v + 1, K)
else:
return f
def dmp_zz_heu_gcd(f, g, u, K):
"""
Heuristic polynomial GCD in `Z[X]`.
    Given multivariate polynomials `f` and `g` in `Z[X]`, returns
their GCD and cofactors, i.e. polynomials ``h``, ``cff`` and ``cfg``
such that::
h = gcd(f, g), cff = quo(f, h) and cfg = quo(g, h)
The algorithm is purely heuristic which means it may fail to compute
the GCD. This will be signaled by raising an exception. In this case
you will need to switch to another GCD method.
The algorithm computes the polynomial GCD by evaluating polynomials
f and g at certain points and computing (fast) integer GCD of those
evaluations. The polynomial GCD is recovered from the integer image
by interpolation. The evaluation process reduces f and g variable by
variable into a large integer. The final step is to verify if the
interpolated polynomial is the correct GCD. This gives cofactors of
the input polynomials as a side effect.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y, = ring("x,y", ZZ)
>>> f = x**2 + 2*x*y + y**2
>>> g = x**2 + x*y
>>> R.dmp_zz_heu_gcd(f, g)
(x + y, x + y, x)
References
==========
.. [1] [Liao95]_
"""
if not u:
return dup_zz_heu_gcd(f, g, K)
result = _dmp_rr_trivial_gcd(f, g, u, K)
if result is not None:
return result
gcd, f, g = dmp_ground_extract(f, g, u, K)
f_norm = dmp_max_norm(f, u, K)
g_norm = dmp_max_norm(g, u, K)
B = K(2*min(f_norm, g_norm) + 29)
x = max(min(B, 99*K.sqrt(B)),
2*min(f_norm // abs(dmp_ground_LC(f, u, K)),
g_norm // abs(dmp_ground_LC(g, u, K))) + 2)
for i in range(0, HEU_GCD_MAX):
ff = dmp_eval(f, x, u, K)
gg = dmp_eval(g, x, u, K)
v = u - 1
if not (dmp_zero_p(ff, v) or dmp_zero_p(gg, v)):
h, cff, cfg = dmp_zz_heu_gcd(ff, gg, v, K)
h = _dmp_zz_gcd_interpolate(h, x, v, K)
h = dmp_ground_primitive(h, u, K)[1]
cff_, r = dmp_div(f, h, u, K)
if dmp_zero_p(r, u):
cfg_, r = dmp_div(g, h, u, K)
if dmp_zero_p(r, u):
h = dmp_mul_ground(h, gcd, u, K)
return h, cff_, cfg_
cff = _dmp_zz_gcd_interpolate(cff, x, v, K)
h, r = dmp_div(f, cff, u, K)
if dmp_zero_p(r, u):
cfg_, r = dmp_div(g, h, u, K)
if dmp_zero_p(r, u):
h = dmp_mul_ground(h, gcd, u, K)
return h, cff, cfg_
cfg = _dmp_zz_gcd_interpolate(cfg, x, v, K)
h, r = dmp_div(g, cfg, u, K)
if dmp_zero_p(r, u):
cff_, r = dmp_div(f, h, u, K)
if dmp_zero_p(r, u):
h = dmp_mul_ground(h, gcd, u, K)
return h, cff_, cfg
x = 73794*x * K.sqrt(K.sqrt(x)) // 27011
raise HeuristicGCDFailed('no luck')
def dup_qq_heu_gcd(f, g, K0):
"""
Heuristic polynomial GCD in `Q[x]`.
    Returns ``(h, cff, cfg)`` such that ``h = gcd(f, g)``,
``cff = quo(f, h)``, and ``cfg = quo(g, h)``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> f = QQ(1,2)*x**2 + QQ(7,4)*x + QQ(3,2)
>>> g = QQ(1,2)*x**2 + x
>>> R.dup_qq_heu_gcd(f, g)
(x + 2, 1/2*x + 3/4, 1/2*x)
"""
result = _dup_ff_trivial_gcd(f, g, K0)
if result is not None:
return result
K1 = K0.get_ring()
cf, f = dup_clear_denoms(f, K0, K1)
cg, g = dup_clear_denoms(g, K0, K1)
f = dup_convert(f, K0, K1)
g = dup_convert(g, K0, K1)
h, cff, cfg = dup_zz_heu_gcd(f, g, K1)
h = dup_convert(h, K1, K0)
c = dup_LC(h, K0)
h = dup_monic(h, K0)
cff = dup_convert(cff, K1, K0)
cfg = dup_convert(cfg, K1, K0)
cff = dup_mul_ground(cff, K0.quo(c, cf), K0)
cfg = dup_mul_ground(cfg, K0.quo(c, cg), K0)
return h, cff, cfg
def dmp_qq_heu_gcd(f, g, u, K0):
"""
Heuristic polynomial GCD in `Q[X]`.
    Returns ``(h, cff, cfg)`` such that ``h = gcd(f, g)``,
``cff = quo(f, h)``, and ``cfg = quo(g, h)``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x,y, = ring("x,y", QQ)
>>> f = QQ(1,4)*x**2 + x*y + y**2
>>> g = QQ(1,2)*x**2 + x*y
>>> R.dmp_qq_heu_gcd(f, g)
(x + 2*y, 1/4*x + 1/2*y, 1/2*x)
"""
result = _dmp_ff_trivial_gcd(f, g, u, K0)
if result is not None:
return result
K1 = K0.get_ring()
cf, f = dmp_clear_denoms(f, u, K0, K1)
cg, g = dmp_clear_denoms(g, u, K0, K1)
f = dmp_convert(f, u, K0, K1)
g = dmp_convert(g, u, K0, K1)
h, cff, cfg = dmp_zz_heu_gcd(f, g, u, K1)
h = dmp_convert(h, u, K1, K0)
c = dmp_ground_LC(h, u, K0)
h = dmp_ground_monic(h, u, K0)
cff = dmp_convert(cff, u, K1, K0)
cfg = dmp_convert(cfg, u, K1, K0)
cff = dmp_mul_ground(cff, K0.quo(c, cf), u, K0)
cfg = dmp_mul_ground(cfg, K0.quo(c, cg), u, K0)
return h, cff, cfg
def dup_inner_gcd(f, g, K):
"""
Computes polynomial GCD and cofactors of `f` and `g` in `K[x]`.
    Returns ``(h, cff, cfg)`` such that ``h = gcd(f, g)``,
``cff = quo(f, h)``, and ``cfg = quo(g, h)``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_inner_gcd(x**2 - 1, x**2 - 3*x + 2)
(x - 1, x + 1, x - 2)
"""
if not K.is_Exact:
try:
exact = K.get_exact()
except DomainError:
return [K.one], f, g
f = dup_convert(f, K, exact)
g = dup_convert(g, K, exact)
h, cff, cfg = dup_inner_gcd(f, g, exact)
h = dup_convert(h, exact, K)
cff = dup_convert(cff, exact, K)
cfg = dup_convert(cfg, exact, K)
return h, cff, cfg
elif K.is_Field:
if K.is_QQ and query('USE_HEU_GCD'):
try:
return dup_qq_heu_gcd(f, g, K)
except HeuristicGCDFailed:
pass
return dup_ff_prs_gcd(f, g, K)
else:
if K.is_ZZ and query('USE_HEU_GCD'):
try:
return dup_zz_heu_gcd(f, g, K)
except HeuristicGCDFailed:
pass
return dup_rr_prs_gcd(f, g, K)
def _dmp_inner_gcd(f, g, u, K):
"""Helper function for `dmp_inner_gcd()`. """
if not K.is_Exact:
try:
exact = K.get_exact()
except DomainError:
return dmp_one(u, K), f, g
f = dmp_convert(f, u, K, exact)
g = dmp_convert(g, u, K, exact)
h, cff, cfg = _dmp_inner_gcd(f, g, u, exact)
h = dmp_convert(h, u, exact, K)
cff = dmp_convert(cff, u, exact, K)
cfg = dmp_convert(cfg, u, exact, K)
return h, cff, cfg
elif K.is_Field:
if K.is_QQ and query('USE_HEU_GCD'):
try:
return dmp_qq_heu_gcd(f, g, u, K)
except HeuristicGCDFailed:
pass
return dmp_ff_prs_gcd(f, g, u, K)
else:
if K.is_ZZ and query('USE_HEU_GCD'):
try:
return dmp_zz_heu_gcd(f, g, u, K)
except HeuristicGCDFailed:
pass
return dmp_rr_prs_gcd(f, g, u, K)
def dmp_inner_gcd(f, g, u, K):
"""
Computes polynomial GCD and cofactors of `f` and `g` in `K[X]`.
    Returns ``(h, cff, cfg)`` such that ``h = gcd(f, g)``,
``cff = quo(f, h)``, and ``cfg = quo(g, h)``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y, = ring("x,y", ZZ)
>>> f = x**2 + 2*x*y + y**2
>>> g = x**2 + x*y
>>> R.dmp_inner_gcd(f, g)
(x + y, x + y, x)
"""
if not u:
return dup_inner_gcd(f, g, K)
J, (f, g) = dmp_multi_deflate((f, g), u, K)
h, cff, cfg = _dmp_inner_gcd(f, g, u, K)
return (dmp_inflate(h, J, u, K),
dmp_inflate(cff, J, u, K),
dmp_inflate(cfg, J, u, K))
def dup_gcd(f, g, K):
"""
Computes polynomial GCD of `f` and `g` in `K[x]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_gcd(x**2 - 1, x**2 - 3*x + 2)
x - 1
"""
return dup_inner_gcd(f, g, K)[0]
def dmp_gcd(f, g, u, K):
"""
Computes polynomial GCD of `f` and `g` in `K[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y, = ring("x,y", ZZ)
>>> f = x**2 + 2*x*y + y**2
>>> g = x**2 + x*y
>>> R.dmp_gcd(f, g)
x + y
"""
return dmp_inner_gcd(f, g, u, K)[0]
def dup_rr_lcm(f, g, K):
"""
Computes polynomial LCM over a ring in `K[x]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_rr_lcm(x**2 - 1, x**2 - 3*x + 2)
x**3 - 2*x**2 - x + 2
"""
fc, f = dup_primitive(f, K)
gc, g = dup_primitive(g, K)
c = K.lcm(fc, gc)
h = dup_quo(dup_mul(f, g, K),
dup_gcd(f, g, K), K)
return dup_mul_ground(h, c, K)
def dup_ff_lcm(f, g, K):
"""
Computes polynomial LCM over a field in `K[x]`.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> f = QQ(1,2)*x**2 + QQ(7,4)*x + QQ(3,2)
>>> g = QQ(1,2)*x**2 + x
>>> R.dup_ff_lcm(f, g)
x**3 + 7/2*x**2 + 3*x
"""
h = dup_quo(dup_mul(f, g, K),
dup_gcd(f, g, K), K)
return dup_monic(h, K)
def dup_lcm(f, g, K):
"""
Computes polynomial LCM of `f` and `g` in `K[x]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_lcm(x**2 - 1, x**2 - 3*x + 2)
x**3 - 2*x**2 - x + 2
"""
if K.is_Field:
return dup_ff_lcm(f, g, K)
else:
return dup_rr_lcm(f, g, K)
def dmp_rr_lcm(f, g, u, K):
"""
Computes polynomial LCM over a ring in `K[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y, = ring("x,y", ZZ)
>>> f = x**2 + 2*x*y + y**2
>>> g = x**2 + x*y
>>> R.dmp_rr_lcm(f, g)
x**3 + 2*x**2*y + x*y**2
"""
fc, f = dmp_ground_primitive(f, u, K)
gc, g = dmp_ground_primitive(g, u, K)
c = K.lcm(fc, gc)
h = dmp_quo(dmp_mul(f, g, u, K),
dmp_gcd(f, g, u, K), u, K)
return dmp_mul_ground(h, c, u, K)
def dmp_ff_lcm(f, g, u, K):
"""
Computes polynomial LCM over a field in `K[X]`.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x,y, = ring("x,y", QQ)
>>> f = QQ(1,4)*x**2 + x*y + y**2
>>> g = QQ(1,2)*x**2 + x*y
>>> R.dmp_ff_lcm(f, g)
x**3 + 4*x**2*y + 4*x*y**2
"""
h = dmp_quo(dmp_mul(f, g, u, K),
dmp_gcd(f, g, u, K), u, K)
return dmp_ground_monic(h, u, K)
def dmp_lcm(f, g, u, K):
"""
Computes polynomial LCM of `f` and `g` in `K[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y, = ring("x,y", ZZ)
>>> f = x**2 + 2*x*y + y**2
>>> g = x**2 + x*y
>>> R.dmp_lcm(f, g)
x**3 + 2*x**2*y + x*y**2
"""
if not u:
return dup_lcm(f, g, K)
if K.is_Field:
return dmp_ff_lcm(f, g, u, K)
else:
return dmp_rr_lcm(f, g, u, K)
def dmp_content(f, u, K):
"""
Returns GCD of multivariate coefficients.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y, = ring("x,y", ZZ)
>>> R.dmp_content(2*x*y + 6*x + 4*y + 12)
2*y + 6
"""
cont, v = dmp_LC(f, K), u - 1
if dmp_zero_p(f, u):
return cont
for c in f[1:]:
cont = dmp_gcd(cont, c, v, K)
if dmp_one_p(cont, v, K):
break
if K.is_negative(dmp_ground_LC(cont, v, K)):
return dmp_neg(cont, v, K)
else:
return cont
def dmp_primitive(f, u, K):
"""
Returns multivariate content and a primitive polynomial.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y, = ring("x,y", ZZ)
>>> R.dmp_primitive(2*x*y + 6*x + 4*y + 12)
(2*y + 6, x + 2)
"""
cont, v = dmp_content(f, u, K), u - 1
if dmp_zero_p(f, u) or dmp_one_p(cont, v, K):
return cont, f
else:
return cont, [ dmp_quo(c, cont, v, K) for c in f ]
def dup_cancel(f, g, K, include=True):
"""
Cancel common factors in a rational function `f/g`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_cancel(2*x**2 - 2, x**2 - 2*x + 1)
(2*x + 2, x - 1)
"""
return dmp_cancel(f, g, 0, K, include=include)
def dmp_cancel(f, g, u, K, include=True):
"""
Cancel common factors in a rational function `f/g`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_cancel(2*x**2 - 2, x**2 - 2*x + 1)
(2*x + 2, x - 1)
"""
K0 = None
if K.is_Field and K.has_assoc_Ring:
K0, K = K, K.get_ring()
cq, f = dmp_clear_denoms(f, u, K0, K, convert=True)
cp, g = dmp_clear_denoms(g, u, K0, K, convert=True)
else:
cp, cq = K.one, K.one
_, p, q = dmp_inner_gcd(f, g, u, K)
if K0 is not None:
_, cp, cq = K.cofactors(cp, cq)
p = dmp_convert(p, u, K, K0)
q = dmp_convert(q, u, K, K0)
K = K0
p_neg = K.is_negative(dmp_ground_LC(p, u, K))
q_neg = K.is_negative(dmp_ground_LC(q, u, K))
if p_neg and q_neg:
p, q = dmp_neg(p, u, K), dmp_neg(q, u, K)
elif p_neg:
cp, p = -cp, dmp_neg(p, u, K)
elif q_neg:
cp, q = -cp, dmp_neg(q, u, K)
if not include:
return cp, cq, p, q
p = dmp_mul_ground(p, cp, u, K)
q = dmp_mul_ground(q, cq, u, K)
return p, q
|
2861e609c1c1d920bc9682a45878c1d15bbda3d2fee28796e2e834c9a22d2cfe | """Tools for constructing domains for expressions. """
from sympy.core import sympify
from sympy.core.evalf import pure_complex
from sympy.core.sorting import ordered
from sympy.polys.domains import ZZ, QQ, ZZ_I, QQ_I, EX
from sympy.polys.domains.complexfield import ComplexField
from sympy.polys.domains.realfield import RealField
from sympy.polys.polyoptions import build_options
from sympy.polys.polyutils import parallel_dict_from_basic
from sympy.utilities import public
def _construct_simple(coeffs, opt):
"""Handle simple domains, e.g.: ZZ, QQ, RR and algebraic domains. """
rationals = floats = complexes = algebraics = False
float_numbers = []
if opt.extension is True:
is_algebraic = lambda coeff: coeff.is_number and coeff.is_algebraic
else:
is_algebraic = lambda coeff: False
for coeff in coeffs:
if coeff.is_Rational:
if not coeff.is_Integer:
rationals = True
elif coeff.is_Float:
if algebraics:
# there are both reals and algebraics -> EX
return False
else:
floats = True
float_numbers.append(coeff)
else:
is_complex = pure_complex(coeff)
if is_complex:
complexes = True
x, y = is_complex
if x.is_Rational and y.is_Rational:
if not (x.is_Integer and y.is_Integer):
rationals = True
continue
else:
floats = True
if x.is_Float:
float_numbers.append(x)
if y.is_Float:
float_numbers.append(y)
elif is_algebraic(coeff):
if floats:
# there are both algebraics and reals -> EX
return False
algebraics = True
else:
# this is a composite domain, e.g. ZZ[X], EX
return None
# Use the maximum precision of all coefficients for the RR or CC
# precision
max_prec = max(c._prec for c in float_numbers) if float_numbers else 53
if algebraics:
domain, result = _construct_algebraic(coeffs, opt)
else:
if floats and complexes:
domain = ComplexField(prec=max_prec)
elif floats:
domain = RealField(prec=max_prec)
elif rationals or opt.field:
domain = QQ_I if complexes else QQ
else:
domain = ZZ_I if complexes else ZZ
result = [domain.from_sympy(coeff) for coeff in coeffs]
return domain, result
def _construct_algebraic(coeffs, opt):
"""We know that coefficients are algebraic so construct the extension. """
from sympy.polys.numberfields import primitive_element
exts = set()
def build_trees(args):
trees = []
for a in args:
if a.is_Rational:
tree = ('Q', QQ.from_sympy(a))
elif a.is_Add:
tree = ('+', build_trees(a.args))
elif a.is_Mul:
tree = ('*', build_trees(a.args))
else:
tree = ('e', a)
exts.add(a)
trees.append(tree)
return trees
trees = build_trees(coeffs)
exts = list(ordered(exts))
g, span, H = primitive_element(exts, ex=True, polys=True)
root = sum([ s*ext for s, ext in zip(span, exts) ])
domain, g = QQ.algebraic_field((g, root)), g.rep.rep
exts_dom = [domain.dtype.from_list(h, g, QQ) for h in H]
exts_map = dict(zip(exts, exts_dom))
def convert_tree(tree):
op, args = tree
if op == 'Q':
return domain.dtype.from_list([args], g, QQ)
elif op == '+':
return sum((convert_tree(a) for a in args), domain.zero)
elif op == '*':
# return prod(convert(a) for a in args)
t = convert_tree(args[0])
for a in args[1:]:
t *= convert_tree(a)
return t
elif op == 'e':
return exts_map[args]
else:
raise RuntimeError
result = [convert_tree(tree) for tree in trees]
return domain, result
def _construct_composite(coeffs, opt):
"""Handle composite domains, e.g.: ZZ[X], QQ[X], ZZ(X), QQ(X). """
numers, denoms = [], []
for coeff in coeffs:
numer, denom = coeff.as_numer_denom()
numers.append(numer)
denoms.append(denom)
polys, gens = parallel_dict_from_basic(numers + denoms) # XXX: sorting
if not gens:
return None
if opt.composite is None:
if any(gen.is_number and gen.is_algebraic for gen in gens):
            return None # generators are number-like so it is better to use EX
all_symbols = set()
for gen in gens:
symbols = gen.free_symbols
if all_symbols & symbols:
return None # there could be algebraic relations between generators
else:
all_symbols |= symbols
n = len(gens)
k = len(polys)//2
numers = polys[:k]
denoms = polys[k:]
if opt.field:
fractions = True
else:
fractions, zeros = False, (0,)*n
for denom in denoms:
if len(denom) > 1 or zeros not in denom:
fractions = True
break
coeffs = set()
if not fractions:
for numer, denom in zip(numers, denoms):
denom = denom[zeros]
for monom, coeff in numer.items():
coeff /= denom
coeffs.add(coeff)
numer[monom] = coeff
else:
for numer, denom in zip(numers, denoms):
coeffs.update(list(numer.values()))
coeffs.update(list(denom.values()))
rationals = floats = complexes = False
float_numbers = []
for coeff in coeffs:
if coeff.is_Rational:
if not coeff.is_Integer:
rationals = True
elif coeff.is_Float:
floats = True
float_numbers.append(coeff)
else:
is_complex = pure_complex(coeff)
if is_complex is not None:
complexes = True
x, y = is_complex
if x.is_Rational and y.is_Rational:
if not (x.is_Integer and y.is_Integer):
rationals = True
else:
floats = True
if x.is_Float:
float_numbers.append(x)
if y.is_Float:
float_numbers.append(y)
max_prec = max(c._prec for c in float_numbers) if float_numbers else 53
if floats and complexes:
ground = ComplexField(prec=max_prec)
elif floats:
ground = RealField(prec=max_prec)
elif complexes:
if rationals:
ground = QQ_I
else:
ground = ZZ_I
elif rationals:
ground = QQ
else:
ground = ZZ
result = []
if not fractions:
domain = ground.poly_ring(*gens)
for numer in numers:
for monom, coeff in numer.items():
numer[monom] = ground.from_sympy(coeff)
result.append(domain(numer))
else:
domain = ground.frac_field(*gens)
for numer, denom in zip(numers, denoms):
for monom, coeff in numer.items():
numer[monom] = ground.from_sympy(coeff)
for monom, coeff in denom.items():
denom[monom] = ground.from_sympy(coeff)
result.append(domain((numer, denom)))
return domain, result
def _construct_expression(coeffs, opt):
"""The last resort case, i.e. use the expression domain. """
domain, result = EX, []
for coeff in coeffs:
result.append(domain.from_sympy(coeff))
return domain, result
@public
def construct_domain(obj, **args):
"""Construct a minimal domain for a list of expressions.
Explanation
===========
Given a list of normal SymPy expressions (of type :py:class:`~.Expr`)
``construct_domain`` will find a minimal :py:class:`~.Domain` that can
represent those expressions. The expressions will be converted to elements
of the domain and both the domain and the domain elements are returned.
Parameters
==========
obj: list or dict
The expressions to build a domain for.
**args: keyword arguments
Options that affect the choice of domain.
Returns
=======
(K, elements): Domain and list of domain elements
The domain K that can represent the expressions and the list or dict
of domain elements representing the same expressions as elements of K.
Examples
========
Given a list of :py:class:`~.Integer` ``construct_domain`` will return the
domain :ref:`ZZ` and a list of integers as elements of :ref:`ZZ`.
>>> from sympy import construct_domain, S
>>> expressions = [S(2), S(3), S(4)]
>>> K, elements = construct_domain(expressions)
>>> K
ZZ
>>> elements
[2, 3, 4]
>>> type(elements[0]) # doctest: +SKIP
<class 'int'>
>>> type(expressions[0])
<class 'sympy.core.numbers.Integer'>
If there are any :py:class:`~.Rational` then :ref:`QQ` is returned
instead.
>>> construct_domain([S(1)/2, S(3)/4])
(QQ, [1/2, 3/4])
If there are symbols then a polynomial ring :ref:`K[x]` is returned.
>>> from sympy import symbols
>>> x, y = symbols('x, y')
>>> construct_domain([2*x + 1, S(3)/4])
(QQ[x], [2*x + 1, 3/4])
>>> construct_domain([2*x + 1, y])
(ZZ[x,y], [2*x + 1, y])
If any symbols appear with negative powers then a rational function field
:ref:`K(x)` will be returned.
>>> construct_domain([y/x, x/(1 - y)])
(ZZ(x,y), [y/x, -x/(y - 1)])
Irrational algebraic numbers will result in the :ref:`EX` domain by
default. The keyword argument ``extension=True`` leads to the construction
of an algebraic number field :ref:`QQ(a)`.
>>> from sympy import sqrt
>>> construct_domain([sqrt(2)])
(EX, [EX(sqrt(2))])
>>> construct_domain([sqrt(2)], extension=True) # doctest: +SKIP
(QQ<sqrt(2)>, [ANP([1, 0], [1, 0, -2], QQ)])
See also
========
Domain
Expr
"""
opt = build_options(args)
if hasattr(obj, '__iter__'):
if isinstance(obj, dict):
if not obj:
monoms, coeffs = [], []
else:
monoms, coeffs = list(zip(*list(obj.items())))
else:
coeffs = obj
else:
coeffs = [obj]
coeffs = list(map(sympify, coeffs))
result = _construct_simple(coeffs, opt)
if result is not None:
if result is not False:
domain, coeffs = result
else:
domain, coeffs = _construct_expression(coeffs, opt)
else:
if opt.composite is False:
result = None
else:
result = _construct_composite(coeffs, opt)
if result is not None:
domain, coeffs = result
else:
domain, coeffs = _construct_expression(coeffs, opt)
if hasattr(obj, '__iter__'):
if isinstance(obj, dict):
return domain, dict(list(zip(monoms, coeffs)))
else:
return domain, coeffs
else:
return domain, coeffs[0]
|
116f7034daef682f9cdea5390e74ec99375456df5f207ebd409506713c11a1ce | """Polynomial factorization routines in characteristic zero. """
from sympy.polys.galoistools import (
gf_from_int_poly, gf_to_int_poly,
gf_lshift, gf_add_mul, gf_mul,
gf_div, gf_rem,
gf_gcdex,
gf_sqf_p,
gf_factor_sqf, gf_factor)
from sympy.polys.densebasic import (
dup_LC, dmp_LC, dmp_ground_LC,
dup_TC,
dup_convert, dmp_convert,
dup_degree, dmp_degree,
dmp_degree_in, dmp_degree_list,
dmp_from_dict,
dmp_zero_p,
dmp_one,
dmp_nest, dmp_raise,
dup_strip,
dmp_ground,
dup_inflate,
dmp_exclude, dmp_include,
dmp_inject, dmp_eject,
dup_terms_gcd, dmp_terms_gcd)
from sympy.polys.densearith import (
dup_neg, dmp_neg,
dup_add, dmp_add,
dup_sub, dmp_sub,
dup_mul, dmp_mul,
dup_sqr,
dmp_pow,
dup_div, dmp_div,
dup_quo, dmp_quo,
dmp_expand,
dmp_add_mul,
dup_sub_mul, dmp_sub_mul,
dup_lshift,
dup_max_norm, dmp_max_norm,
dup_l1_norm,
dup_mul_ground, dmp_mul_ground,
dup_quo_ground, dmp_quo_ground)
from sympy.polys.densetools import (
dup_clear_denoms, dmp_clear_denoms,
dup_trunc, dmp_ground_trunc,
dup_content,
dup_monic, dmp_ground_monic,
dup_primitive, dmp_ground_primitive,
dmp_eval_tail,
dmp_eval_in, dmp_diff_eval_in,
dmp_compose,
dup_shift, dup_mirror)
from sympy.polys.euclidtools import (
dmp_primitive,
dup_inner_gcd, dmp_inner_gcd)
from sympy.polys.sqfreetools import (
dup_sqf_p,
dup_sqf_norm, dmp_sqf_norm,
dup_sqf_part, dmp_sqf_part)
from sympy.polys.polyutils import _sort_factors
from sympy.polys.polyconfig import query
from sympy.polys.polyerrors import (
ExtraneousFactors, DomainError, CoercionFailed, EvaluationFailed)
from sympy.ntheory import nextprime, isprime, factorint
from sympy.utilities import subsets
from math import ceil as _ceil, log as _log
def dup_trial_division(f, factors, K):
"""
Determine multiplicities of factors for a univariate polynomial
using trial division.
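    Examples
    ========
    For illustration, with ``f = (x + 1)**2*(x + 2)**3`` (a sketch assuming
    the ``R.dup_*`` wrapper interface used by the other doctests in this
    module; the exact ordering of the result is not guaranteed):
    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> f = x**5 + 8*x**4 + 25*x**3 + 38*x**2 + 28*x + 8
    >>> R.dup_trial_division(f, [x + 1, x + 2])  # doctest: +SKIP
    [(x + 1, 2), (x + 2, 3)]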
"""
result = []
for factor in factors:
k = 0
while True:
q, r = dup_div(f, factor, K)
if not r:
f, k = q, k + 1
else:
break
result.append((factor, k))
return _sort_factors(result)
def dmp_trial_division(f, factors, u, K):
"""
Determine multiplicities of factors for a multivariate polynomial
using trial division.
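    Examples
    ========
    For illustration, with ``f = (x + y)**2*(x - y)`` (a sketch under the
    same assumptions as for ``dup_trial_division`` above):
    >>> from sympy.polys import ring, ZZ
    >>> R, x, y = ring("x,y", ZZ)
    >>> R.dmp_trial_division((x + y)**2*(x - y), [x + y, x - y])  # doctest: +SKIP
    [(x - y, 1), (x + y, 2)]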
"""
result = []
for factor in factors:
k = 0
while True:
q, r = dmp_div(f, factor, u, K)
if dmp_zero_p(r, u):
f, k = q, k + 1
else:
break
result.append((factor, k))
return _sort_factors(result)
def dup_zz_mignotte_bound(f, K):
"""
    The Knuth-Cohen variant of the Mignotte bound for
univariate polynomials in `K[x]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> f = x**3 + 14*x**2 + 56*x + 64
>>> R.dup_zz_mignotte_bound(f)
152
    By checking `factor(f)` we can see that the largest coefficient appearing
    in its factors is 8.
    Also consider the case where `f` is irreducible, for example `f = 2*x**2 + 3*x + 4`.
    To avoid a bug in these cases, we return the bound plus the max coefficient of `f`.
>>> f = 2*x**2 + 3*x + 4
>>> R.dup_zz_mignotte_bound(f)
6
    Lastly, to see the difference between the new and the old Mignotte bound
consider the irreducible polynomial::
>>> f = 87*x**7 + 4*x**6 + 80*x**5 + 17*x**4 + 9*x**3 + 12*x**2 + 49*x + 26
>>> R.dup_zz_mignotte_bound(f)
744
The new Mignotte bound is 744 whereas the old one (SymPy 1.5.1) is 1937664.
References
==========
    .. [1] [Abbott2013]_
"""
from sympy.functions.combinatorial.factorials import binomial
d = dup_degree(f)
delta = _ceil(d / 2)
delta2 = _ceil(delta / 2)
# euclidean-norm
eucl_norm = K.sqrt( sum( [cf**2 for cf in f] ) )
# biggest values of binomial coefficients (p. 538 of reference)
t1 = binomial(delta - 1, delta2)
t2 = binomial(delta - 1, delta2 - 1)
lc = K.abs(dup_LC(f, K)) # leading coefficient
bound = t1 * eucl_norm + t2 * lc # (p. 538 of reference)
bound += dup_max_norm(f, K) # add max coeff for irreducible polys
bound = _ceil(bound / 2) * 2 # round up to even integer
return bound
def dmp_zz_mignotte_bound(f, u, K):
"""Mignotte bound for multivariate polynomials in `K[X]`. """
a = dmp_max_norm(f, u, K)
b = abs(dmp_ground_LC(f, u, K))
n = sum(dmp_degree_list(f, u))
return K.sqrt(K(n + 1))*2**n*a*b
def dup_zz_hensel_step(m, f, g, h, s, t, K):
"""
One step in Hensel lifting in `Z[x]`.
Given positive integer `m` and `Z[x]` polynomials `f`, `g`, `h`, `s`
and `t` such that::
f = g*h (mod m)
s*g + t*h = 1 (mod m)
lc(f) is not a zero divisor (mod m)
lc(h) = 1
deg(f) = deg(g) + deg(h)
deg(s) < deg(h)
deg(t) < deg(g)
returns polynomials `G`, `H`, `S` and `T`, such that::
f = G*H (mod m**2)
S*G + T*H = 1 (mod m**2)
References
==========
.. [1] [Gathen99]_
"""
M = m**2
e = dup_sub_mul(f, g, h, K)
e = dup_trunc(e, M, K)
q, r = dup_div(dup_mul(s, e, K), h, K)
q = dup_trunc(q, M, K)
r = dup_trunc(r, M, K)
u = dup_add(dup_mul(t, e, K), dup_mul(q, g, K), K)
G = dup_trunc(dup_add(g, u, K), M, K)
H = dup_trunc(dup_add(h, r, K), M, K)
u = dup_add(dup_mul(s, G, K), dup_mul(t, H, K), K)
b = dup_trunc(dup_sub(u, [K.one], K), M, K)
c, d = dup_div(dup_mul(s, b, K), H, K)
c = dup_trunc(c, M, K)
d = dup_trunc(d, M, K)
u = dup_add(dup_mul(t, b, K), dup_mul(c, G, K), K)
S = dup_trunc(dup_sub(s, d, K), M, K)
T = dup_trunc(dup_sub(t, u, K), M, K)
return G, H, S, T
def dup_zz_hensel_lift(p, f, f_list, l, K):
r"""
Multifactor Hensel lifting in `Z[x]`.
Given a prime `p`, polynomial `f` over `Z[x]` such that `lc(f)`
is a unit modulo `p`, monic pair-wise coprime polynomials `f_i`
over `Z[x]` satisfying::
f = lc(f) f_1 ... f_r (mod p)
and a positive integer `l`, returns a list of monic polynomials
`F_1,\ F_2,\ \dots,\ F_r` satisfying::
f = lc(f) F_1 ... F_r (mod p**l)
F_i = f_i (mod p), i = 1..r
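    Examples
    ========
    A sketch of lifting the factorization of `x**4 - 1` from `Z_5` to
    `Z_{5**4}` (assuming the ``R.dup_*`` wrapper interface used by the other
    doctests in this module):
    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> f = x**4 - 1
    >>> R.dup_zz_hensel_lift(ZZ(5), f, [x - 1, x - 2, x + 2, x + 1], 4)  # doctest: +SKIP
    [x - 1, x - 182, x + 182, x + 1]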
References
==========
.. [1] [Gathen99]_
"""
r = len(f_list)
lc = dup_LC(f, K)
if r == 1:
F = dup_mul_ground(f, K.gcdex(lc, p**l)[0], K)
return [ dup_trunc(F, p**l, K) ]
m = p
k = r // 2
d = int(_ceil(_log(l, 2)))
g = gf_from_int_poly([lc], p)
for f_i in f_list[:k]:
g = gf_mul(g, gf_from_int_poly(f_i, p), p, K)
h = gf_from_int_poly(f_list[k], p)
for f_i in f_list[k + 1:]:
h = gf_mul(h, gf_from_int_poly(f_i, p), p, K)
s, t, _ = gf_gcdex(g, h, p, K)
g = gf_to_int_poly(g, p)
h = gf_to_int_poly(h, p)
s = gf_to_int_poly(s, p)
t = gf_to_int_poly(t, p)
for _ in range(1, d + 1):
(g, h, s, t), m = dup_zz_hensel_step(m, f, g, h, s, t, K), m**2
return dup_zz_hensel_lift(p, g, f_list[:k], l, K) \
+ dup_zz_hensel_lift(p, h, f_list[k:], l, K)
def _test_pl(fc, q, pl):
if q > pl // 2:
q = q - pl
if not q:
return True
return fc % q == 0
def dup_zz_zassenhaus(f, K):
"""Factor primitive square-free polynomials in `Z[x]`. """
n = dup_degree(f)
if n == 1:
return [f]
fc = f[-1]
A = dup_max_norm(f, K)
b = dup_LC(f, K)
B = int(abs(K.sqrt(K(n + 1))*2**n*A*b))
C = int((n + 1)**(2*n)*A**(2*n - 1))
gamma = int(_ceil(2*_log(C, 2)))
bound = int(2*gamma*_log(gamma))
a = []
    # choose a prime number `p` such that `f` is square-free in Z_p
# if there are many factors in Z_p, choose among a few different `p`
# the one with fewer factors
for px in range(3, bound + 1):
if not isprime(px) or b % px == 0:
continue
px = K.convert(px)
F = gf_from_int_poly(f, px)
if not gf_sqf_p(F, px, K):
continue
fsqfx = gf_factor_sqf(F, px, K)[1]
a.append((px, fsqfx))
if len(fsqfx) < 15 or len(a) > 4:
break
p, fsqf = min(a, key=lambda x: len(x[1]))
l = int(_ceil(_log(2*B + 1, p)))
modular = [gf_to_int_poly(ff, p) for ff in fsqf]
g = dup_zz_hensel_lift(p, f, modular, l, K)
sorted_T = range(len(g))
T = set(sorted_T)
factors, s = [], 1
pl = p**l
while 2*s <= len(T):
for S in subsets(sorted_T, s):
# lift the constant coefficient of the product `G` of the factors
            # in the subset `S`; if it does not divide `fc`, `G` does
# not divide the input polynomial
if b == 1:
q = 1
for i in S:
q = q*g[i][-1]
q = q % pl
if not _test_pl(fc, q, pl):
continue
else:
G = [b]
for i in S:
G = dup_mul(G, g[i], K)
G = dup_trunc(G, pl, K)
G = dup_primitive(G, K)[1]
q = G[-1]
if q and fc % q != 0:
continue
H = [b]
S = set(S)
T_S = T - S
if b == 1:
G = [b]
for i in S:
G = dup_mul(G, g[i], K)
G = dup_trunc(G, pl, K)
for i in T_S:
H = dup_mul(H, g[i], K)
H = dup_trunc(H, pl, K)
G_norm = dup_l1_norm(G, K)
H_norm = dup_l1_norm(H, K)
if G_norm*H_norm <= B:
T = T_S
sorted_T = [i for i in sorted_T if i not in S]
G = dup_primitive(G, K)[1]
f = dup_primitive(H, K)[1]
factors.append(G)
b = dup_LC(f, K)
break
else:
s += 1
return factors + [f]
def dup_zz_irreducible_p(f, K):
"""Test irreducibility using Eisenstein's criterion. """
lc = dup_LC(f, K)
tc = dup_TC(f, K)
e_fc = dup_content(f[1:], K)
if e_fc:
e_ff = factorint(int(e_fc))
for p in e_ff.keys():
if (lc % p) and (tc % p**2):
return True
def dup_cyclotomic_p(f, K, irreducible=False):
"""
Efficiently test if ``f`` is a cyclotomic polynomial.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> f = x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1
>>> R.dup_cyclotomic_p(f)
False
>>> g = x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1
>>> R.dup_cyclotomic_p(g)
True
"""
if K.is_QQ:
try:
K0, K = K, K.get_ring()
f = dup_convert(f, K0, K)
except CoercionFailed:
return False
elif not K.is_ZZ:
return False
lc = dup_LC(f, K)
tc = dup_TC(f, K)
if lc != 1 or (tc != -1 and tc != 1):
return False
if not irreducible:
coeff, factors = dup_factor_list(f, K)
if coeff != K.one or factors != [(f, 1)]:
return False
n = dup_degree(f)
g, h = [], []
for i in range(n, -1, -2):
g.insert(0, f[i])
for i in range(n - 1, -1, -2):
h.insert(0, f[i])
g = dup_sqr(dup_strip(g), K)
h = dup_sqr(dup_strip(h), K)
F = dup_sub(g, dup_lshift(h, 1, K), K)
if K.is_negative(dup_LC(F, K)):
F = dup_neg(F, K)
if F == f:
return True
g = dup_mirror(f, K)
if K.is_negative(dup_LC(g, K)):
g = dup_neg(g, K)
if F == g and dup_cyclotomic_p(g, K):
return True
G = dup_sqf_part(F, K)
if dup_sqr(G, K) == F and dup_cyclotomic_p(G, K):
return True
return False
def dup_zz_cyclotomic_poly(n, K):
"""Efficiently generate n-th cyclotomic polynomial. """
h = [K.one, -K.one]
for p, k in factorint(n).items():
h = dup_quo(dup_inflate(h, p, K), h, K)
h = dup_inflate(h, p**(k - 1), K)
return h
def _dup_cyclotomic_decompose(n, K):
H = [[K.one, -K.one]]
for p, k in factorint(n).items():
Q = [ dup_quo(dup_inflate(h, p, K), h, K) for h in H ]
H.extend(Q)
for i in range(1, k):
Q = [ dup_inflate(q, p, K) for q in Q ]
H.extend(Q)
return H
def dup_zz_cyclotomic_factor(f, K):
"""
Efficiently factor polynomials `x**n - 1` and `x**n + 1` in `Z[x]`.
Given a univariate polynomial `f` in `Z[x]` returns a list of factors
of `f`, provided that `f` is in the form `x**n - 1` or `x**n + 1` for
`n >= 1`. Otherwise returns None.
Factorization is performed using cyclotomic decomposition of `f`,
    which makes this method much faster than any other direct factorization
approach (e.g. Zassenhaus's).
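    Examples
    ========
    For illustration (the order of the returned factors is not guaranteed,
    and the ``R.dup_*`` wrapper interface is assumed):
    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_zz_cyclotomic_factor(x**2 - 1)  # doctest: +SKIP
    [x - 1, x + 1]
    >>> R.dup_zz_cyclotomic_factor(x**2 + 1)  # doctest: +SKIP
    [x**2 + 1]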
References
==========
.. [1] [Weisstein09]_
"""
lc_f, tc_f = dup_LC(f, K), dup_TC(f, K)
if dup_degree(f) <= 0:
return None
if lc_f != 1 or tc_f not in [-1, 1]:
return None
if any(bool(cf) for cf in f[1:-1]):
return None
n = dup_degree(f)
F = _dup_cyclotomic_decompose(n, K)
if not K.is_one(tc_f):
return F
else:
H = []
for h in _dup_cyclotomic_decompose(2*n, K):
if h not in F:
H.append(h)
return H
def dup_zz_factor_sqf(f, K):
"""Factor square-free (non-primitive) polynomials in `Z[x]`. """
cont, g = dup_primitive(f, K)
n = dup_degree(g)
if dup_LC(g, K) < 0:
cont, g = -cont, dup_neg(g, K)
if n <= 0:
return cont, []
elif n == 1:
return cont, [g]
if query('USE_IRREDUCIBLE_IN_FACTOR'):
if dup_zz_irreducible_p(g, K):
return cont, [g]
factors = None
if query('USE_CYCLOTOMIC_FACTOR'):
factors = dup_zz_cyclotomic_factor(g, K)
if factors is None:
factors = dup_zz_zassenhaus(g, K)
return cont, _sort_factors(factors, multiple=False)
def dup_zz_factor(f, K):
"""
Factor (non square-free) polynomials in `Z[x]`.
Given a univariate polynomial `f` in `Z[x]` computes its complete
factorization `f_1, ..., f_n` into irreducibles over integers::
f = content(f) f_1**k_1 ... f_n**k_n
The factorization is computed by reducing the input polynomial
into a primitive square-free polynomial and factoring it using
Zassenhaus algorithm. Trial division is used to recover the
multiplicities of factors.
The result is returned as a tuple consisting of::
        (content(f), [(f_1, k_1), ..., (f_n, k_n)])
Examples
========
Consider the polynomial `f = 2*x**4 - 2`::
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_zz_factor(2*x**4 - 2)
(2, [(x - 1, 1), (x + 1, 1), (x**2 + 1, 1)])
    As a result we obtain the following factorization::
f = 2 (x - 1) (x + 1) (x**2 + 1)
Note that this is a complete factorization over integers,
however over Gaussian integers we can factor the last term.
By default, polynomials `x**n - 1` and `x**n + 1` are factored
    using cyclotomic decomposition to speed up computations. To
disable this behaviour set cyclotomic=False.
References
==========
.. [1] [Gathen99]_
"""
cont, g = dup_primitive(f, K)
n = dup_degree(g)
if dup_LC(g, K) < 0:
cont, g = -cont, dup_neg(g, K)
if n <= 0:
return cont, []
elif n == 1:
return cont, [(g, 1)]
if query('USE_IRREDUCIBLE_IN_FACTOR'):
if dup_zz_irreducible_p(g, K):
return cont, [(g, 1)]
g = dup_sqf_part(g, K)
H = None
if query('USE_CYCLOTOMIC_FACTOR'):
H = dup_zz_cyclotomic_factor(g, K)
if H is None:
H = dup_zz_zassenhaus(g, K)
factors = dup_trial_division(f, H, K)
return cont, factors
def dmp_zz_wang_non_divisors(E, cs, ct, K):
"""Wang/EEZ: Compute a set of valid divisors. """
result = [ cs*ct ]
for q in E:
q = abs(q)
for r in reversed(result):
while r != 1:
r = K.gcd(r, q)
q = q // r
if K.is_one(q):
return None
result.append(q)
return result[1:]
def dmp_zz_wang_test_points(f, T, ct, A, u, K):
"""Wang/EEZ: Test evaluation points for suitability. """
if not dmp_eval_tail(dmp_LC(f, K), A, u - 1, K):
raise EvaluationFailed('no luck')
g = dmp_eval_tail(f, A, u, K)
if not dup_sqf_p(g, K):
raise EvaluationFailed('no luck')
c, h = dup_primitive(g, K)
if K.is_negative(dup_LC(h, K)):
c, h = -c, dup_neg(h, K)
v = u - 1
E = [ dmp_eval_tail(t, A, v, K) for t, _ in T ]
D = dmp_zz_wang_non_divisors(E, c, ct, K)
if D is not None:
return c, h, E
else:
raise EvaluationFailed('no luck')
def dmp_zz_wang_lead_coeffs(f, T, cs, E, H, A, u, K):
"""Wang/EEZ: Compute correct leading coefficients. """
C, J, v = [], [0]*len(E), u - 1
for h in H:
c = dmp_one(v, K)
d = dup_LC(h, K)*cs
for i in reversed(range(len(E))):
k, e, (t, _) = 0, E[i], T[i]
while not (d % e):
d, k = d//e, k + 1
if k != 0:
c, J[i] = dmp_mul(c, dmp_pow(t, k, v, K), v, K), 1
C.append(c)
if not all(J):
raise ExtraneousFactors # pragma: no cover
CC, HH = [], []
for c, h in zip(C, H):
d = dmp_eval_tail(c, A, v, K)
lc = dup_LC(h, K)
if K.is_one(cs):
cc = lc//d
else:
g = K.gcd(lc, d)
d, cc = d//g, lc//g
h, cs = dup_mul_ground(h, d, K), cs//d
c = dmp_mul_ground(c, cc, v, K)
CC.append(c)
HH.append(h)
if K.is_one(cs):
return f, HH, CC
CCC, HHH = [], []
for c, h in zip(CC, HH):
CCC.append(dmp_mul_ground(c, cs, v, K))
HHH.append(dmp_mul_ground(h, cs, 0, K))
f = dmp_mul_ground(f, cs**(len(H) - 1), u, K)
return f, HHH, CCC
def dup_zz_diophantine(F, m, p, K):
"""Wang/EEZ: Solve univariate Diophantine equations. """
if len(F) == 2:
a, b = F
f = gf_from_int_poly(a, p)
g = gf_from_int_poly(b, p)
s, t, G = gf_gcdex(g, f, p, K)
s = gf_lshift(s, m, K)
t = gf_lshift(t, m, K)
q, s = gf_div(s, f, p, K)
t = gf_add_mul(t, q, g, p, K)
s = gf_to_int_poly(s, p)
t = gf_to_int_poly(t, p)
result = [s, t]
else:
G = [F[-1]]
for f in reversed(F[1:-1]):
G.insert(0, dup_mul(f, G[0], K))
S, T = [], [[1]]
for f, g in zip(F, G):
t, s = dmp_zz_diophantine([g, f], T[-1], [], 0, p, 1, K)
T.append(t)
S.append(s)
result, S = [], S + [T[-1]]
for s, f in zip(S, F):
s = gf_from_int_poly(s, p)
f = gf_from_int_poly(f, p)
r = gf_rem(gf_lshift(s, m, K), f, p, K)
s = gf_to_int_poly(r, p)
result.append(s)
return result
def dmp_zz_diophantine(F, c, A, d, p, u, K):
"""Wang/EEZ: Solve multivariate Diophantine equations. """
if not A:
S = [ [] for _ in F ]
n = dup_degree(c)
for i, coeff in enumerate(c):
if not coeff:
continue
T = dup_zz_diophantine(F, n - i, p, K)
for j, (s, t) in enumerate(zip(S, T)):
t = dup_mul_ground(t, coeff, K)
S[j] = dup_trunc(dup_add(s, t, K), p, K)
else:
n = len(A)
e = dmp_expand(F, u, K)
a, A = A[-1], A[:-1]
B, G = [], []
for f in F:
B.append(dmp_quo(e, f, u, K))
G.append(dmp_eval_in(f, a, n, u, K))
C = dmp_eval_in(c, a, n, u, K)
v = u - 1
S = dmp_zz_diophantine(G, C, A, d, p, v, K)
S = [ dmp_raise(s, 1, v, K) for s in S ]
for s, b in zip(S, B):
c = dmp_sub_mul(c, s, b, u, K)
c = dmp_ground_trunc(c, p, u, K)
m = dmp_nest([K.one, -a], n, K)
M = dmp_one(n, K)
for k in K.map(range(0, d)):
if dmp_zero_p(c, u):
break
M = dmp_mul(M, m, u, K)
C = dmp_diff_eval_in(c, k + 1, a, n, u, K)
if not dmp_zero_p(C, v):
C = dmp_quo_ground(C, K.factorial(k + 1), v, K)
T = dmp_zz_diophantine(G, C, A, d, p, v, K)
for i, t in enumerate(T):
T[i] = dmp_mul(dmp_raise(t, 1, v, K), M, u, K)
for i, (s, t) in enumerate(zip(S, T)):
S[i] = dmp_add(s, t, u, K)
for t, b in zip(T, B):
c = dmp_sub_mul(c, t, b, u, K)
c = dmp_ground_trunc(c, p, u, K)
S = [ dmp_ground_trunc(s, p, u, K) for s in S ]
return S
def dmp_zz_wang_hensel_lifting(f, H, LC, A, p, u, K):
"""Wang/EEZ: Parallel Hensel lifting algorithm. """
S, n, v = [f], len(A), u - 1
H = list(H)
for i, a in enumerate(reversed(A[1:])):
s = dmp_eval_in(S[0], a, n - i, u - i, K)
S.insert(0, dmp_ground_trunc(s, p, v - i, K))
d = max(dmp_degree_list(f, u)[1:])
for j, s, a in zip(range(2, n + 2), S, A):
G, w = list(H), j - 1
I, J = A[:j - 2], A[j - 1:]
for i, (h, lc) in enumerate(zip(H, LC)):
lc = dmp_ground_trunc(dmp_eval_tail(lc, J, v, K), p, w - 1, K)
H[i] = [lc] + dmp_raise(h[1:], 1, w - 1, K)
m = dmp_nest([K.one, -a], w, K)
M = dmp_one(w, K)
c = dmp_sub(s, dmp_expand(H, w, K), w, K)
dj = dmp_degree_in(s, w, w)
for k in K.map(range(0, dj)):
if dmp_zero_p(c, w):
break
M = dmp_mul(M, m, w, K)
C = dmp_diff_eval_in(c, k + 1, a, w, w, K)
if not dmp_zero_p(C, w - 1):
C = dmp_quo_ground(C, K.factorial(k + 1), w - 1, K)
T = dmp_zz_diophantine(G, C, I, d, p, w - 1, K)
for i, (h, t) in enumerate(zip(H, T)):
h = dmp_add_mul(h, dmp_raise(t, 1, w - 1, K), M, w, K)
H[i] = dmp_ground_trunc(h, p, w, K)
h = dmp_sub(s, dmp_expand(H, w, K), w, K)
c = dmp_ground_trunc(h, p, w, K)
if dmp_expand(H, u, K) != f:
raise ExtraneousFactors # pragma: no cover
else:
return H
def dmp_zz_wang(f, u, K, mod=None, seed=None):
r"""
Factor primitive square-free polynomials in `Z[X]`.
Given a multivariate polynomial `f` in `Z[x_1,...,x_n]`, which is
primitive and square-free in `x_1`, computes factorization of `f` into
irreducibles over integers.
The procedure is based on Wang's Enhanced Extended Zassenhaus
algorithm. The algorithm works by viewing `f` as a univariate polynomial
in `Z[x_2,...,x_n][x_1]`, for which an evaluation mapping is computed::
x_2 -> a_2, ..., x_n -> a_n
where `a_i`, for `i = 2, \dots, n`, are carefully chosen integers. The
mapping is used to transform `f` into a univariate polynomial in `Z[x_1]`,
which can be factored efficiently using Zassenhaus algorithm. The last
step is to lift univariate factors to obtain true multivariate
factors. For this purpose a parallel Hensel lifting procedure is used.
The parameter ``seed`` is passed to _randint and can be used to seed randint
(when an integer) or (for testing purposes) can be a sequence of numbers.
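    Examples
    ========
    A minimal sketch (the evaluation points are chosen randomly, so only the
    set of returned factors is meaningful; the ``R.dmp_*`` wrapper interface
    is assumed and the output shown is only indicative):
    >>> from sympy.polys import ring, ZZ
    >>> R, x, y = ring("x,y", ZZ)
    >>> R.dmp_zz_wang(x**2 - y**2)  # doctest: +SKIP
    [x - y, x + y]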
References
==========
.. [1] [Wang78]_
.. [2] [Geddes92]_
"""
from sympy.testing.randtest import _randint
randint = _randint(seed)
ct, T = dmp_zz_factor(dmp_LC(f, K), u - 1, K)
b = dmp_zz_mignotte_bound(f, u, K)
p = K(nextprime(b))
if mod is None:
if u == 1:
mod = 2
else:
mod = 1
history, configs, A, r = set(), [], [K.zero]*u, None
try:
cs, s, E = dmp_zz_wang_test_points(f, T, ct, A, u, K)
_, H = dup_zz_factor_sqf(s, K)
r = len(H)
if r == 1:
return [f]
configs = [(s, cs, E, H, A)]
except EvaluationFailed:
pass
eez_num_configs = query('EEZ_NUMBER_OF_CONFIGS')
eez_num_tries = query('EEZ_NUMBER_OF_TRIES')
eez_mod_step = query('EEZ_MODULUS_STEP')
while len(configs) < eez_num_configs:
for _ in range(eez_num_tries):
A = [ K(randint(-mod, mod)) for _ in range(u) ]
if tuple(A) not in history:
history.add(tuple(A))
else:
continue
try:
cs, s, E = dmp_zz_wang_test_points(f, T, ct, A, u, K)
except EvaluationFailed:
continue
_, H = dup_zz_factor_sqf(s, K)
rr = len(H)
if r is not None:
if rr != r: # pragma: no cover
if rr < r:
configs, r = [], rr
else:
continue
else:
r = rr
if r == 1:
return [f]
configs.append((s, cs, E, H, A))
if len(configs) == eez_num_configs:
break
else:
mod += eez_mod_step
s_norm, s_arg, i = None, 0, 0
for s, _, _, _, _ in configs:
_s_norm = dup_max_norm(s, K)
if s_norm is not None:
if _s_norm < s_norm:
s_norm = _s_norm
s_arg = i
else:
s_norm = _s_norm
i += 1
_, cs, E, H, A = configs[s_arg]
orig_f = f
try:
f, H, LC = dmp_zz_wang_lead_coeffs(f, T, cs, E, H, A, u, K)
factors = dmp_zz_wang_hensel_lifting(f, H, LC, A, p, u, K)
except ExtraneousFactors: # pragma: no cover
if query('EEZ_RESTART_IF_NEEDED'):
return dmp_zz_wang(orig_f, u, K, mod + 1)
else:
raise ExtraneousFactors(
"we need to restart algorithm with better parameters")
result = []
for f in factors:
_, f = dmp_ground_primitive(f, u, K)
if K.is_negative(dmp_ground_LC(f, u, K)):
f = dmp_neg(f, u, K)
result.append(f)
return result
def dmp_zz_factor(f, u, K):
r"""
Factor (non square-free) polynomials in `Z[X]`.
    Given a multivariate polynomial `f` in `Z[X]` computes its complete
factorization `f_1, \dots, f_n` into irreducibles over integers::
f = content(f) f_1**k_1 ... f_n**k_n
The factorization is computed by reducing the input polynomial
into a primitive square-free polynomial and factoring it using
Enhanced Extended Zassenhaus (EEZ) algorithm. Trial division
is used to recover the multiplicities of factors.
The result is returned as a tuple consisting of::
        (content(f), [(f_1, k_1), ..., (f_n, k_n)])
Consider polynomial `f = 2*(x**2 - y**2)`::
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_zz_factor(2*x**2 - 2*y**2)
(2, [(x - y, 1), (x + y, 1)])
    As a result we obtain the following factorization::
f = 2 (x - y) (x + y)
References
==========
.. [1] [Gathen99]_
"""
if not u:
return dup_zz_factor(f, K)
if dmp_zero_p(f, u):
return K.zero, []
cont, g = dmp_ground_primitive(f, u, K)
if dmp_ground_LC(g, u, K) < 0:
cont, g = -cont, dmp_neg(g, u, K)
if all(d <= 0 for d in dmp_degree_list(g, u)):
return cont, []
G, g = dmp_primitive(g, u, K)
factors = []
if dmp_degree(g, u) > 0:
g = dmp_sqf_part(g, u, K)
H = dmp_zz_wang(g, u, K)
factors = dmp_trial_division(f, H, u, K)
for g, k in dmp_zz_factor(G, u - 1, K)[1]:
factors.insert(0, ([g], k))
return cont, _sort_factors(factors)
def dup_qq_i_factor(f, K0):
"""Factor univariate polynomials into irreducibles in `QQ_I[x]`. """
# Factor in QQ<I>
K1 = K0.as_AlgebraicField()
f = dup_convert(f, K0, K1)
coeff, factors = dup_factor_list(f, K1)
factors = [(dup_convert(fac, K1, K0), i) for fac, i in factors]
coeff = K0.convert(coeff, K1)
return coeff, factors
def dup_zz_i_factor(f, K0):
"""Factor univariate polynomials into irreducibles in `ZZ_I[x]`. """
# First factor in QQ_I
K1 = K0.get_field()
f = dup_convert(f, K0, K1)
coeff, factors = dup_qq_i_factor(f, K1)
new_factors = []
for fac, i in factors:
# Extract content
fac_denom, fac_num = dup_clear_denoms(fac, K1)
fac_num_ZZ_I = dup_convert(fac_num, K1, K0)
content, fac_prim = dmp_ground_primitive(fac_num_ZZ_I, 0, K1)
coeff = (coeff * content ** i) // fac_denom ** i
new_factors.append((fac_prim, i))
factors = new_factors
coeff = K0.convert(coeff, K1)
return coeff, factors
def dmp_qq_i_factor(f, u, K0):
"""Factor multivariate polynomials into irreducibles in `QQ_I[X]`. """
# Factor in QQ<I>
K1 = K0.as_AlgebraicField()
f = dmp_convert(f, u, K0, K1)
coeff, factors = dmp_factor_list(f, u, K1)
factors = [(dmp_convert(fac, u, K1, K0), i) for fac, i in factors]
coeff = K0.convert(coeff, K1)
return coeff, factors
def dmp_zz_i_factor(f, u, K0):
"""Factor multivariate polynomials into irreducibles in `ZZ_I[X]`. """
# First factor in QQ_I
K1 = K0.get_field()
f = dmp_convert(f, u, K0, K1)
coeff, factors = dmp_qq_i_factor(f, u, K1)
new_factors = []
for fac, i in factors:
# Extract content
fac_denom, fac_num = dmp_clear_denoms(fac, u, K1)
fac_num_ZZ_I = dmp_convert(fac_num, u, K1, K0)
content, fac_prim = dmp_ground_primitive(fac_num_ZZ_I, u, K1)
coeff = (coeff * content ** i) // fac_denom ** i
new_factors.append((fac_prim, i))
factors = new_factors
coeff = K0.convert(coeff, K1)
return coeff, factors
def dup_ext_factor(f, K):
"""Factor univariate polynomials over algebraic number fields. """
n, lc = dup_degree(f), dup_LC(f, K)
f = dup_monic(f, K)
if n <= 0:
return lc, []
if n == 1:
return lc, [(f, 1)]
f, F = dup_sqf_part(f, K), f
s, g, r = dup_sqf_norm(f, K)
factors = dup_factor_list_include(r, K.dom)
if len(factors) == 1:
return lc, [(f, n//dup_degree(f))]
H = s*K.unit
for i, (factor, _) in enumerate(factors):
h = dup_convert(factor, K.dom, K)
h, _, g = dup_inner_gcd(h, g, K)
h = dup_shift(h, H, K)
factors[i] = h
factors = dup_trial_division(F, factors, K)
return lc, factors
def dmp_ext_factor(f, u, K):
"""Factor multivariate polynomials over algebraic number fields. """
if not u:
return dup_ext_factor(f, K)
lc = dmp_ground_LC(f, u, K)
f = dmp_ground_monic(f, u, K)
if all(d <= 0 for d in dmp_degree_list(f, u)):
return lc, []
f, F = dmp_sqf_part(f, u, K), f
s, g, r = dmp_sqf_norm(f, u, K)
factors = dmp_factor_list_include(r, u, K.dom)
if len(factors) == 1:
factors = [f]
else:
H = dmp_raise([K.one, s*K.unit], u, 0, K)
for i, (factor, _) in enumerate(factors):
h = dmp_convert(factor, u, K.dom, K)
h, _, g = dmp_inner_gcd(h, g, u, K)
h = dmp_compose(h, H, u, K)
factors[i] = h
return lc, dmp_trial_division(F, factors, u, K)
def dup_gf_factor(f, K):
"""Factor univariate polynomials over finite fields. """
f = dup_convert(f, K, K.dom)
coeff, factors = gf_factor(f, K.mod, K.dom)
for i, (f, k) in enumerate(factors):
factors[i] = (dup_convert(f, K.dom, K), k)
return K.convert(coeff, K.dom), factors
def dmp_gf_factor(f, u, K):
"""Factor multivariate polynomials over finite fields. """
raise NotImplementedError('multivariate polynomials over finite fields')
def dup_factor_list(f, K0):
"""Factor univariate polynomials into irreducibles in `K[x]`. """
j, f = dup_terms_gcd(f, K0)
cont, f = dup_primitive(f, K0)
if K0.is_FiniteField:
coeff, factors = dup_gf_factor(f, K0)
elif K0.is_Algebraic:
coeff, factors = dup_ext_factor(f, K0)
elif K0.is_GaussianRing:
coeff, factors = dup_zz_i_factor(f, K0)
elif K0.is_GaussianField:
coeff, factors = dup_qq_i_factor(f, K0)
else:
if not K0.is_Exact:
K0_inexact, K0 = K0, K0.get_exact()
f = dup_convert(f, K0_inexact, K0)
else:
K0_inexact = None
if K0.is_Field:
K = K0.get_ring()
denom, f = dup_clear_denoms(f, K0, K)
f = dup_convert(f, K0, K)
else:
K = K0
if K.is_ZZ:
coeff, factors = dup_zz_factor(f, K)
elif K.is_Poly:
f, u = dmp_inject(f, 0, K)
coeff, factors = dmp_factor_list(f, u, K.dom)
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_eject(f, u, K), k)
coeff = K.convert(coeff, K.dom)
else: # pragma: no cover
raise DomainError('factorization not supported over %s' % K0)
if K0.is_Field:
for i, (f, k) in enumerate(factors):
factors[i] = (dup_convert(f, K, K0), k)
coeff = K0.convert(coeff, K)
coeff = K0.quo(coeff, denom)
if K0_inexact:
for i, (f, k) in enumerate(factors):
max_norm = dup_max_norm(f, K0)
f = dup_quo_ground(f, max_norm, K0)
f = dup_convert(f, K0, K0_inexact)
factors[i] = (f, k)
coeff = K0.mul(coeff, K0.pow(max_norm, k))
coeff = K0_inexact.convert(coeff, K0)
K0 = K0_inexact
if j:
factors.insert(0, ([K0.one, K0.zero], j))
return coeff*cont, _sort_factors(factors)
def dup_factor_list_include(f, K):
"""Factor univariate polynomials into irreducibles in `K[x]`. """
coeff, factors = dup_factor_list(f, K)
if not factors:
return [(dup_strip([coeff]), 1)]
else:
g = dup_mul_ground(factors[0][0], coeff, K)
return [(g, factors[0][1])] + factors[1:]
def dmp_factor_list(f, u, K0):
"""Factor multivariate polynomials into irreducibles in `K[X]`. """
if not u:
return dup_factor_list(f, K0)
J, f = dmp_terms_gcd(f, u, K0)
cont, f = dmp_ground_primitive(f, u, K0)
if K0.is_FiniteField: # pragma: no cover
coeff, factors = dmp_gf_factor(f, u, K0)
elif K0.is_Algebraic:
coeff, factors = dmp_ext_factor(f, u, K0)
elif K0.is_GaussianRing:
coeff, factors = dmp_zz_i_factor(f, u, K0)
elif K0.is_GaussianField:
coeff, factors = dmp_qq_i_factor(f, u, K0)
else:
if not K0.is_Exact:
K0_inexact, K0 = K0, K0.get_exact()
f = dmp_convert(f, u, K0_inexact, K0)
else:
K0_inexact = None
if K0.is_Field:
K = K0.get_ring()
denom, f = dmp_clear_denoms(f, u, K0, K)
f = dmp_convert(f, u, K0, K)
else:
K = K0
if K.is_ZZ:
levels, f, v = dmp_exclude(f, u, K)
coeff, factors = dmp_zz_factor(f, v, K)
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_include(f, levels, v, K), k)
elif K.is_Poly:
f, v = dmp_inject(f, u, K)
coeff, factors = dmp_factor_list(f, v, K.dom)
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_eject(f, v, K), k)
coeff = K.convert(coeff, K.dom)
else: # pragma: no cover
raise DomainError('factorization not supported over %s' % K0)
if K0.is_Field:
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_convert(f, u, K, K0), k)
coeff = K0.convert(coeff, K)
coeff = K0.quo(coeff, denom)
if K0_inexact:
for i, (f, k) in enumerate(factors):
max_norm = dmp_max_norm(f, u, K0)
f = dmp_quo_ground(f, max_norm, u, K0)
f = dmp_convert(f, u, K0, K0_inexact)
factors[i] = (f, k)
coeff = K0.mul(coeff, K0.pow(max_norm, k))
coeff = K0_inexact.convert(coeff, K0)
K0 = K0_inexact
for i, j in enumerate(reversed(J)):
if not j:
continue
term = {(0,)*(u - i) + (1,) + (0,)*i: K0.one}
factors.insert(0, (dmp_from_dict(term, u, K0), j))
return coeff*cont, _sort_factors(factors)
def dmp_factor_list_include(f, u, K):
"""Factor multivariate polynomials into irreducibles in `K[X]`. """
if not u:
return dup_factor_list_include(f, K)
coeff, factors = dmp_factor_list(f, u, K)
if not factors:
return [(dmp_ground(coeff, u), 1)]
else:
g = dmp_mul_ground(factors[0][0], coeff, u, K)
return [(g, factors[0][1])] + factors[1:]
def dup_irreducible_p(f, K):
"""
Returns ``True`` if a univariate polynomial ``f`` has no factors
over its domain.
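    Examples
    ========
    For illustration (assuming the ``R.dup_*`` wrapper interface used by the
    other doctests in this module):
    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_irreducible_p(x**2 + x + 1)  # doctest: +SKIP
    True
    >>> R.dup_irreducible_p(x**2 - 1)  # doctest: +SKIP
    False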
"""
return dmp_irreducible_p(f, 0, K)
def dmp_irreducible_p(f, u, K):
"""
Returns ``True`` if a multivariate polynomial ``f`` has no factors
over its domain.
"""
_, factors = dmp_factor_list(f, u, K)
if not factors:
return True
elif len(factors) > 1:
return False
else:
_, k = factors[0]
return k == 1
|
0786358d3f138ffa35c3aa199dc46563316663efb31932ed2265cab556203f2a | r"""
Sparse distributed elements of free modules over multivariate (generalized)
polynomial rings.
This code and its data structures are very much like the distributed
polynomials, except that the first "exponent" of the monomial is
a module generator index. That is, the multi-exponent ``(i, e_1, ..., e_n)``
represents the "monomial" `x_1^{e_1} \cdots x_n^{e_n} f_i` of the free module
`F` generated by `f_1, \ldots, f_r` over (a localization of) the ring
`K[x_1, \ldots, x_n]`. A module element is simply stored as a list of terms
ordered by the monomial order. Here a term is a pair of a multi-exponent and a
coefficient. In general, this coefficient should never be zero (since it can
then be omitted). The zero module element is stored as an empty list.
The main routines are ``sdm_nf_mora`` and ``sdm_groebner`` which can be used
to compute, respectively, weak normal forms and standard bases. They work with
arbitrary (not necessarily global) monomial orders.
In general, product orders have to be used to construct valid monomial orders
for modules. However, ``lex`` can be used as-is.
Note that the "level" (number of variables, i.e. parameter u+1 in
distributedpolys.py) is never needed in this code.
The main reference for this file is [SCA],
"A Singular Introduction to Commutative Algebra".
"""
from itertools import permutations
from sympy.polys.monomials import (
monomial_mul, monomial_lcm, monomial_div, monomial_deg
)
from sympy.polys.polytools import Poly
from sympy.polys.polyutils import parallel_dict_from_expr
from sympy.core.singleton import S
from sympy.core.sympify import sympify
# Additional monomial tools.
def sdm_monomial_mul(M, X):
"""
Multiply tuple ``X`` representing a monomial of `K[X]` into the tuple
``M`` representing a monomial of `F`.
Examples
========
Multiplying `xy^3` into `x f_1` yields `x^2 y^3 f_1`:
>>> from sympy.polys.distributedmodules import sdm_monomial_mul
>>> sdm_monomial_mul((1, 1, 0), (1, 3))
(1, 2, 3)
"""
return (M[0],) + monomial_mul(X, M[1:])
def sdm_monomial_deg(M):
"""
Return the total degree of ``M``.
Examples
========
For example, the total degree of `x^2 y f_5` is 3:
>>> from sympy.polys.distributedmodules import sdm_monomial_deg
>>> sdm_monomial_deg((5, 2, 1))
3
"""
return monomial_deg(M[1:])
def sdm_monomial_lcm(A, B):
r"""
Return the "least common multiple" of ``A`` and ``B``.
    If `A = M e_j` and `B = N e_j`, where `M` and `N` are polynomial monomials,
    this returns `\lcm(M, N) e_j`. Note that ``A`` and ``B`` must involve the
    same module generator; otherwise the result is undefined.
Examples
========
>>> from sympy.polys.distributedmodules import sdm_monomial_lcm
>>> sdm_monomial_lcm((1, 2, 3), (1, 0, 5))
(1, 2, 5)
"""
return (A[0],) + monomial_lcm(A[1:], B[1:])
def sdm_monomial_divides(A, B):
"""
Does there exist a (polynomial) monomial X such that XA = B?
Examples
========
Positive examples:
In the following examples, the monomial is given in terms of x, y and the
generator(s), f_1, f_2 etc. The tuple form of that monomial is used in
the call to sdm_monomial_divides.
Note: the generator appears last in the expression but first in the tuple
and other factors appear in the same order that they appear in the monomial
expression.
`A = f_1` divides `B = f_1`
>>> from sympy.polys.distributedmodules import sdm_monomial_divides
>>> sdm_monomial_divides((1, 0, 0), (1, 0, 0))
True
`A = f_1` divides `B = x^2 y f_1`
>>> sdm_monomial_divides((1, 0, 0), (1, 2, 1))
True
`A = xy f_5` divides `B = x^2 y f_5`
>>> sdm_monomial_divides((5, 1, 1), (5, 2, 1))
True
Negative examples:
`A = f_1` does not divide `B = f_2`
>>> sdm_monomial_divides((1, 0, 0), (2, 0, 0))
False
`A = x f_1` does not divide `B = f_1`
>>> sdm_monomial_divides((1, 1, 0), (1, 0, 0))
False
`A = xy^2 f_5` does not divide `B = y f_5`
>>> sdm_monomial_divides((5, 1, 2), (5, 0, 1))
False
"""
return A[0] == B[0] and all(a <= b for a, b in zip(A[1:], B[1:]))
# The actual distributed modules code.
def sdm_LC(f, K):
"""Returns the leading coeffcient of ``f``. """
if not f:
return K.zero
else:
return f[0][1]
def sdm_to_dict(f):
"""Make a dictionary from a distributed polynomial. """
return dict(f)
def sdm_from_dict(d, O):
"""
Create an sdm from a dictionary.
Here ``O`` is the monomial order to use.
Examples
========
>>> from sympy.polys.distributedmodules import sdm_from_dict
>>> from sympy.polys import QQ, lex
>>> dic = {(1, 1, 0): QQ(1), (1, 0, 0): QQ(2), (0, 1, 0): QQ(0)}
>>> sdm_from_dict(dic, lex)
[((1, 1, 0), 1), ((1, 0, 0), 2)]
"""
return sdm_strip(sdm_sort(list(d.items()), O))
def sdm_sort(f, O):
"""Sort terms in ``f`` using the given monomial order ``O``. """
return sorted(f, key=lambda term: O(term[0]), reverse=True)
def sdm_strip(f):
"""Remove terms with zero coefficients from ``f`` in ``K[X]``. """
return [ (monom, coeff) for monom, coeff in f if coeff ]
def sdm_add(f, g, O, K):
"""
Add two module elements ``f``, ``g``.
Addition is done over the ground field ``K``, monomials are ordered
according to ``O``.
Examples
========
All examples use lexicographic order.
`(xy f_1) + (f_2) = f_2 + xy f_1`
>>> from sympy.polys.distributedmodules import sdm_add
>>> from sympy.polys import lex, QQ
>>> sdm_add([((1, 1, 1), QQ(1))], [((2, 0, 0), QQ(1))], lex, QQ)
[((2, 0, 0), 1), ((1, 1, 1), 1)]
    `(xy f_1) + (-xy f_1) = 0`
>>> sdm_add([((1, 1, 1), QQ(1))], [((1, 1, 1), QQ(-1))], lex, QQ)
[]
`(f_1) + (2f_1) = 3f_1`
>>> sdm_add([((1, 0, 0), QQ(1))], [((1, 0, 0), QQ(2))], lex, QQ)
[((1, 0, 0), 3)]
`(yf_1) + (xf_1) = xf_1 + yf_1`
>>> sdm_add([((1, 0, 1), QQ(1))], [((1, 1, 0), QQ(1))], lex, QQ)
[((1, 1, 0), 1), ((1, 0, 1), 1)]
"""
h = dict(f)
for monom, c in g:
if monom in h:
coeff = h[monom] + c
if not coeff:
del h[monom]
else:
h[monom] = coeff
else:
h[monom] = c
return sdm_from_dict(h, O)
def sdm_LM(f):
r"""
Returns the leading monomial of ``f``.
Only valid if `f \ne 0`.
Examples
========
>>> from sympy.polys.distributedmodules import sdm_LM, sdm_from_dict
>>> from sympy.polys import QQ, lex
>>> dic = {(1, 2, 3): QQ(1), (4, 0, 0): QQ(1), (4, 0, 1): QQ(1)}
>>> sdm_LM(sdm_from_dict(dic, lex))
(4, 0, 1)
"""
return f[0][0]
def sdm_LT(f):
r"""
Returns the leading term of ``f``.
Only valid if `f \ne 0`.
Examples
========
>>> from sympy.polys.distributedmodules import sdm_LT, sdm_from_dict
>>> from sympy.polys import QQ, lex
>>> dic = {(1, 2, 3): QQ(1), (4, 0, 0): QQ(2), (4, 0, 1): QQ(3)}
>>> sdm_LT(sdm_from_dict(dic, lex))
((4, 0, 1), 3)
"""
return f[0]
def sdm_mul_term(f, term, O, K):
"""
Multiply a distributed module element ``f`` by a (polynomial) term ``term``.
Multiplication of coefficients is done over the ground field ``K``, and
monomials are ordered according to ``O``.
Examples
========
`0 f_1 = 0`
>>> from sympy.polys.distributedmodules import sdm_mul_term
>>> from sympy.polys import lex, QQ
>>> sdm_mul_term([((1, 0, 0), QQ(1))], ((0, 0), QQ(0)), lex, QQ)
[]
`x 0 = 0`
>>> sdm_mul_term([], ((1, 0), QQ(1)), lex, QQ)
[]
`(x) (f_1) = xf_1`
>>> sdm_mul_term([((1, 0, 0), QQ(1))], ((1, 0), QQ(1)), lex, QQ)
[((1, 1, 0), 1)]
`(2xy) (3x f_1 + 4y f_2) = 8xy^2 f_2 + 6x^2y f_1`
>>> f = [((2, 0, 1), QQ(4)), ((1, 1, 0), QQ(3))]
>>> sdm_mul_term(f, ((1, 1), QQ(2)), lex, QQ)
[((2, 1, 2), 8), ((1, 2, 1), 6)]
"""
X, c = term
if not f or not c:
return []
else:
if K.is_one(c):
return [ (sdm_monomial_mul(f_M, X), f_c) for f_M, f_c in f ]
else:
return [ (sdm_monomial_mul(f_M, X), f_c * c) for f_M, f_c in f ]
def sdm_zero():
"""Return the zero module element."""
return []
def sdm_deg(f):
"""
Degree of ``f``.
This is the maximum of the degrees of all its monomials.
Invalid if ``f`` is zero.
Examples
========
>>> from sympy.polys.distributedmodules import sdm_deg
>>> sdm_deg([((1, 2, 3), 1), ((10, 0, 1), 1), ((2, 3, 4), 4)])
7
"""
return max(sdm_monomial_deg(M[0]) for M in f)
# Conversion
def sdm_from_vector(vec, O, K, **opts):
"""
Create an sdm from an iterable of expressions.
Coefficients are created in the ground field ``K``, and terms are ordered
according to monomial order ``O``. Named arguments are passed on to the
polys conversion code and can be used to specify for example generators.
Examples
========
>>> from sympy.polys.distributedmodules import sdm_from_vector
>>> from sympy.abc import x, y, z
>>> from sympy.polys import QQ, lex
>>> sdm_from_vector([x**2+y**2, 2*z], lex, QQ)
[((1, 0, 0, 1), 2), ((0, 2, 0, 0), 1), ((0, 0, 2, 0), 1)]
"""
dics, gens = parallel_dict_from_expr(sympify(vec), **opts)
dic = {}
for i, d in enumerate(dics):
for k, v in d.items():
dic[(i,) + k] = K.convert(v)
return sdm_from_dict(dic, O)
def sdm_to_vector(f, gens, K, n=None):
"""
Convert sdm ``f`` into a list of polynomial expressions.
The generators for the polynomial ring are specified via ``gens``. The rank
of the module is guessed, or passed via ``n``. The ground field is assumed
to be ``K``.
Examples
========
>>> from sympy.polys.distributedmodules import sdm_to_vector
>>> from sympy.abc import x, y, z
>>> from sympy.polys import QQ
>>> f = [((1, 0, 0, 1), QQ(2)), ((0, 2, 0, 0), QQ(1)), ((0, 0, 2, 0), QQ(1))]
>>> sdm_to_vector(f, [x, y, z], QQ)
[x**2 + y**2, 2*z]
"""
dic = sdm_to_dict(f)
dics = {}
for k, v in dic.items():
dics.setdefault(k[0], []).append((k[1:], v))
n = n or len(dics)
res = []
for k in range(n):
if k in dics:
res.append(Poly(dict(dics[k]), gens=gens, domain=K).as_expr())
else:
res.append(S.Zero)
return res
# Algorithms.
def sdm_spoly(f, g, O, K, phantom=None):
"""
Compute the generalized s-polynomial of ``f`` and ``g``.
The ground field is assumed to be ``K``, and monomials ordered according to
``O``.
This is invalid if either of ``f`` or ``g`` is zero.
If the leading terms of `f` and `g` involve different basis elements of
`F`, their s-poly is defined to be zero. Otherwise it is a certain linear
combination of `f` and `g` in which the leading terms cancel.
See [SCA, defn 2.3.6] for details.
If ``phantom`` is not ``None``, it should be a pair of module elements on
    which to perform the same operation(s) as on ``f`` and ``g``. In this
case both results are returned.
Examples
========
>>> from sympy.polys.distributedmodules import sdm_spoly
>>> from sympy.polys import QQ, lex
>>> f = [((2, 1, 1), QQ(1)), ((1, 0, 1), QQ(1))]
>>> g = [((2, 3, 0), QQ(1))]
>>> h = [((1, 2, 3), QQ(1))]
>>> sdm_spoly(f, h, lex, QQ)
[]
>>> sdm_spoly(f, g, lex, QQ)
[((1, 2, 1), 1)]
"""
if not f or not g:
return sdm_zero()
LM1 = sdm_LM(f)
LM2 = sdm_LM(g)
if LM1[0] != LM2[0]:
return sdm_zero()
LM1 = LM1[1:]
LM2 = LM2[1:]
lcm = monomial_lcm(LM1, LM2)
m1 = monomial_div(lcm, LM1)
m2 = monomial_div(lcm, LM2)
c = K.quo(-sdm_LC(f, K), sdm_LC(g, K))
r1 = sdm_add(sdm_mul_term(f, (m1, K.one), O, K),
sdm_mul_term(g, (m2, c), O, K), O, K)
if phantom is None:
return r1
r2 = sdm_add(sdm_mul_term(phantom[0], (m1, K.one), O, K),
sdm_mul_term(phantom[1], (m2, c), O, K), O, K)
return r1, r2
def sdm_ecart(f):
"""
Compute the ecart of ``f``.
This is defined to be the difference of the total degree of `f` and the
total degree of the leading monomial of `f` [SCA, defn 2.3.7].
Invalid if f is zero.
Examples
========
>>> from sympy.polys.distributedmodules import sdm_ecart
>>> sdm_ecart([((1, 2, 3), 1), ((1, 0, 1), 1)])
0
>>> sdm_ecart([((2, 2, 1), 1), ((1, 5, 1), 1)])
3
"""
return sdm_deg(f) - sdm_monomial_deg(sdm_LM(f))
def sdm_nf_mora(f, G, O, K, phantom=None):
r"""
Compute a weak normal form of ``f`` with respect to ``G`` and order ``O``.
The ground field is assumed to be ``K``, and monomials ordered according to
``O``.
Weak normal forms are defined in [SCA, defn 2.3.3]. They are not unique.
This function deterministically computes a weak normal form, depending on
the order of `G`.
The most important property of a weak normal form is the following: if
`R` is the ring associated with the monomial ordering (if the ordering is
global, we just have `R = K[x_1, \ldots, x_n]`, otherwise it is a certain
localization thereof), `I` any ideal of `R` and `G` a standard basis for
`I`, then for any `f \in R`, we have `f \in I` if and only if
`NF(f | G) = 0`.
This is the generalized Mora algorithm for computing weak normal forms with
respect to arbitrary monomial orders [SCA, algorithm 2.3.9].
If ``phantom`` is not ``None``, it should be a pair of "phantom" arguments
on which to perform the same computations as on ``f``, ``G``, both results
are then returned.
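    Examples
    ========
    A trivial sketch: the weak normal form of an element with respect to a
    set containing that element is zero.
    >>> from sympy.polys.distributedmodules import sdm_nf_mora
    >>> from sympy.polys import QQ, lex
    >>> f = [((1, 1, 0), QQ(1)), ((1, 0, 0), QQ(2))]
    >>> sdm_nf_mora(f, [f], lex, QQ)
    []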
"""
from itertools import repeat
h = f
T = list(G)
if phantom is not None:
# "phantom" variables with suffix p
hp = phantom[0]
Tp = list(phantom[1])
phantom = True
else:
Tp = repeat([])
phantom = False
while h:
# TODO better data structure!!!
Th = [(g, sdm_ecart(g), gp) for g, gp in zip(T, Tp)
if sdm_monomial_divides(sdm_LM(g), sdm_LM(h))]
if not Th:
break
g, _, gp = min(Th, key=lambda x: x[1])
if sdm_ecart(g) > sdm_ecart(h):
T.append(h)
if phantom:
Tp.append(hp)
if phantom:
h, hp = sdm_spoly(h, g, O, K, phantom=(hp, gp))
else:
h = sdm_spoly(h, g, O, K)
if phantom:
return h, hp
return h
def sdm_nf_buchberger(f, G, O, K, phantom=None):
r"""
Compute a weak normal form of ``f`` with respect to ``G`` and order ``O``.
The ground field is assumed to be ``K``, and monomials ordered according to
``O``.
This is the standard Buchberger algorithm for computing weak normal forms with
respect to *global* monomial orders [SCA, algorithm 1.6.10].
If ``phantom`` is not ``None``, it should be a pair of "phantom" arguments
on which to perform the same computations as on ``f``, ``G``, both results
are then returned.
"""
from itertools import repeat
h = f
T = list(G)
if phantom is not None:
# "phantom" variables with suffix p
hp = phantom[0]
Tp = list(phantom[1])
phantom = True
else:
Tp = repeat([])
phantom = False
while h:
try:
g, gp = next((g, gp) for g, gp in zip(T, Tp)
if sdm_monomial_divides(sdm_LM(g), sdm_LM(h)))
except StopIteration:
break
if phantom:
h, hp = sdm_spoly(h, g, O, K, phantom=(hp, gp))
else:
h = sdm_spoly(h, g, O, K)
if phantom:
return h, hp
return h
def sdm_nf_buchberger_reduced(f, G, O, K):
r"""
Compute a reduced normal form of ``f`` with respect to ``G`` and order ``O``.
The ground field is assumed to be ``K``, and monomials ordered according to
``O``.
In contrast to weak normal forms, reduced normal forms *are* unique, but
their computation is more expensive.
This is the standard Buchberger algorithm for computing reduced normal forms
with respect to *global* monomial orders [SCA, algorithm 1.6.11].
    The ``phantom`` option is not supported, so this normal form cannot be used
as a normal form for the "extended" groebner algorithm.
"""
h = sdm_zero()
g = f
while g:
g = sdm_nf_buchberger(g, G, O, K)
if g:
h = sdm_add(h, [sdm_LT(g)], O, K)
g = g[1:]
return h
def sdm_groebner(G, NF, O, K, extended=False):
"""
Compute a minimal standard basis of ``G`` with respect to order ``O``.
The algorithm uses a normal form ``NF``, for example ``sdm_nf_mora``.
The ground field is assumed to be ``K``, and monomials ordered according
to ``O``.
Let `N` denote the submodule generated by elements of `G`. A standard
basis for `N` is a subset `S` of `N`, such that `in(S) = in(N)`, where for
any subset `X` of `F`, `in(X)` denotes the submodule generated by the
initial forms of elements of `X`. [SCA, defn 2.3.2]
A standard basis is called minimal if no subset of it is a standard basis.
One may show that standard bases are always generating sets.
Minimal standard bases are not unique. This algorithm computes a
deterministic result, depending on the particular order of `G`.
If ``extended=True``, also compute the transition matrix from the initial
generators to the groebner basis. That is, return a list of coefficient
vectors, expressing the elements of the groebner basis in terms of the
elements of ``G``.
    This function implements the "sugar" strategy, see
Giovini et al: "One sugar cube, please" OR Selection strategies in
Buchberger algorithm.
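    Examples
    ========
    A minimal sketch: a single generator is already a (minimal) standard
    basis of the submodule it generates.
    >>> from sympy.polys.distributedmodules import sdm_groebner, sdm_nf_mora
    >>> from sympy.polys import QQ, lex
    >>> f = [((1, 1, 0), QQ(1)), ((1, 0, 0), QQ(2))]
    >>> sdm_groebner([f], sdm_nf_mora, lex, QQ)
    [[((1, 1, 0), 1), ((1, 0, 0), 2)]]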
"""
# The critical pair set.
# A critical pair is stored as (i, j, s, t) where (i, j) defines the pair
# (by indexing S), s is the sugar of the pair, and t is the lcm of their
# leading monomials.
P = []
# The eventual standard basis.
S = []
Sugars = []
def Ssugar(i, j):
"""Compute the sugar of the S-poly corresponding to (i, j)."""
LMi = sdm_LM(S[i])
LMj = sdm_LM(S[j])
return max(Sugars[i] - sdm_monomial_deg(LMi),
Sugars[j] - sdm_monomial_deg(LMj)) \
+ sdm_monomial_deg(sdm_monomial_lcm(LMi, LMj))
ourkey = lambda p: (p[2], O(p[3]), p[1])
def update(f, sugar, P):
"""Add f with sugar ``sugar`` to S, update P."""
if not f:
return P
k = len(S)
S.append(f)
Sugars.append(sugar)
LMf = sdm_LM(f)
def removethis(pair):
i, j, s, t = pair
if LMf[0] != t[0]:
return False
tik = sdm_monomial_lcm(LMf, sdm_LM(S[i]))
tjk = sdm_monomial_lcm(LMf, sdm_LM(S[j]))
return tik != t and tjk != t and sdm_monomial_divides(tik, t) and \
sdm_monomial_divides(tjk, t)
# apply the chain criterion
P = [p for p in P if not removethis(p)]
# new-pair set
N = [(i, k, Ssugar(i, k), sdm_monomial_lcm(LMf, sdm_LM(S[i])))
for i in range(k) if LMf[0] == sdm_LM(S[i])[0]]
# TODO apply the product criterion?
N.sort(key=ourkey)
remove = set()
for i, p in enumerate(N):
for j in range(i + 1, len(N)):
if sdm_monomial_divides(p[3], N[j][3]):
remove.add(j)
# TODO mergesort?
        P.extend(reversed([p for i, p in enumerate(N) if i not in remove]))
P.sort(key=ourkey, reverse=True)
# NOTE reverse-sort, because we want to pop from the end
return P
# Figure out the number of generators in the ground ring.
try:
# NOTE: we look for the first non-zero vector, take its first monomial
# the number of generators in the ring is one less than the length
# (since the zeroth entry is for the module generators)
numgens = len(next(x[0] for x in G if x)[0]) - 1
except StopIteration:
# No non-zero elements in G ...
if extended:
return [], []
return []
# This list will store expressions of the elements of S in terms of the
# initial generators
coefficients = []
# First add all the elements of G to S
for i, f in enumerate(G):
P = update(f, sdm_deg(f), P)
if extended and f:
coefficients.append(sdm_from_dict({(i,) + (0,)*numgens: K(1)}, O))
# Now carry out the buchberger algorithm.
while P:
i, j, s, t = P.pop()
f, g = S[i], S[j]
if extended:
sp, coeff = sdm_spoly(f, g, O, K,
phantom=(coefficients[i], coefficients[j]))
h, hcoeff = NF(sp, S, O, K, phantom=(coeff, coefficients))
if h:
coefficients.append(hcoeff)
else:
h = NF(sdm_spoly(f, g, O, K), S, O, K)
P = update(h, Ssugar(i, j), P)
# Finally interreduce the standard basis.
# (TODO again, better data structures)
S = {(tuple(f), i) for i, f in enumerate(S)}
for (a, ai), (b, bi) in permutations(S, 2):
A = sdm_LM(a)
B = sdm_LM(b)
if sdm_monomial_divides(A, B) and (b, bi) in S and (a, ai) in S:
S.remove((b, bi))
L = sorted(((list(f), i) for f, i in S), key=lambda p: O(sdm_LM(p[0])),
reverse=True)
res = [x[0] for x in L]
if extended:
return res, [coefficients[i] for _, i in L]
return res
|
126afc528ba6de0c4aa4c0ea1472488f1df10a102b50837a5c8cba34f7c24bc2 | """ Functions to support rewriting of SymPy expressions """
from sympy.core.expr import Expr
from sympy.assumptions import ask
from sympy.strategies.tools import subs
from sympy.unify.usympy import rebuild, unify
def rewriterule(source, target, variables=(), condition=None, assume=None):
""" Rewrite rule.
Transform expressions that match source into expressions that match target
treating all ``variables`` as wilds.
Examples
========
>>> from sympy.abc import w, x, y, z
>>> from sympy.unify.rewrite import rewriterule
>>> from sympy import default_sort_key
>>> rl = rewriterule(x + y, x**y, [x, y])
>>> sorted(rl(z + 3), key=default_sort_key)
[3**z, z**3]
Use ``condition`` to specify additional requirements. Inputs are taken in
    the same order as they are found in ``variables``.
>>> rl = rewriterule(x + y, x**y, [x, y], lambda x, y: x.is_integer)
>>> list(rl(z + 3))
[3**z]
Use ``assume`` to specify additional requirements using new assumptions.
>>> from sympy.assumptions import Q
>>> rl = rewriterule(x + y, x**y, [x, y], assume=Q.integer(x))
>>> list(rl(z + 3))
[3**z]
Assumptions for the local context are provided at rule runtime
>>> list(rl(w + z, Q.integer(z)))
[z**w]
"""
def rewrite_rl(expr, assumptions=True):
for match in unify(source, expr, {}, variables=variables):
if (condition and
not condition(*[match.get(var, var) for var in variables])):
continue
if (assume and not ask(assume.xreplace(match), assumptions)):
continue
expr2 = subs(match)(target)
if isinstance(expr2, Expr):
expr2 = rebuild(expr2)
yield expr2
return rewrite_rl
|
e5db65460f35138b663a094ab05a9a7eea2d8415153c4902ec21fc41b13acc01 | """py.test hacks to support XFAIL/XPASS"""
import sys
import functools
import os
import contextlib
import warnings
from typing import Any, Callable
from sympy.utilities.exceptions import SymPyDeprecationWarning
ON_TRAVIS = os.getenv('TRAVIS_BUILD_NUMBER', None)
try:
import pytest
USE_PYTEST = getattr(sys, '_running_pytest', False)
except ImportError:
USE_PYTEST = False
raises: Callable[[Any, Any], Any]
XFAIL: Callable[[Any], Any]
skip: Callable[[Any], Any]
SKIP: Callable[[Any], Any]
slow: Callable[[Any], Any]
nocache_fail: Callable[[Any], Any]
if USE_PYTEST:
raises = pytest.raises
warns = pytest.warns
skip = pytest.skip
XFAIL = pytest.mark.xfail
SKIP = pytest.mark.skip
slow = pytest.mark.slow
nocache_fail = pytest.mark.nocache_fail
from _pytest.outcomes import Failed
else:
# Not using pytest so define the things that would have been imported from
# there.
# _pytest._code.code.ExceptionInfo
class ExceptionInfo:
def __init__(self, value):
self.value = value
def __repr__(self):
return "<ExceptionInfo {!r}>".format(self.value)
def raises(expectedException, code=None):
"""
Tests that ``code`` raises the exception ``expectedException``.
``code`` may be a callable, such as a lambda expression or function
name.
If ``code`` is not given or None, ``raises`` will return a context
manager for use in ``with`` statements; the code to execute then
comes from the scope of the ``with``.
``raises()`` does nothing if the callable raises the expected exception,
        otherwise it fails with ``Failed: DID NOT RAISE``.
Examples
========
>>> from sympy.testing.pytest import raises
>>> raises(ZeroDivisionError, lambda: 1/0)
<ExceptionInfo ZeroDivisionError(...)>
>>> raises(ZeroDivisionError, lambda: 1/2)
Traceback (most recent call last):
...
Failed: DID NOT RAISE
>>> with raises(ZeroDivisionError):
... n = 1/0
>>> with raises(ZeroDivisionError):
... n = 1/2
Traceback (most recent call last):
...
Failed: DID NOT RAISE
Note that you cannot test multiple statements via
``with raises``:
>>> with raises(ZeroDivisionError):
... n = 1/0 # will execute and raise, aborting the ``with``
... n = 9999/0 # never executed
This is just what ``with`` is supposed to do: abort the
contained statement sequence at the first exception and let
the context manager deal with the exception.
To test multiple statements, you'll need a separate ``with``
for each:
>>> with raises(ZeroDivisionError):
... n = 1/0 # will execute and raise
>>> with raises(ZeroDivisionError):
... n = 9999/0 # will also execute and raise
"""
if code is None:
return RaisesContext(expectedException)
elif callable(code):
try:
code()
except expectedException as e:
return ExceptionInfo(e)
raise Failed("DID NOT RAISE")
elif isinstance(code, str):
raise TypeError(
'\'raises(xxx, "code")\' has been phased out; '
'change \'raises(xxx, "expression")\' '
'to \'raises(xxx, lambda: expression)\', '
'\'raises(xxx, "statement")\' '
'to \'with raises(xxx): statement\'')
else:
raise TypeError(
'raises() expects a callable for the 2nd argument.')
class RaisesContext:
def __init__(self, expectedException):
self.expectedException = expectedException
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
raise Failed("DID NOT RAISE")
return issubclass(exc_type, self.expectedException)
class XFail(Exception):
pass
class XPass(Exception):
pass
class Skipped(Exception):
pass
class Failed(Exception): # type: ignore
pass
def XFAIL(func):
def wrapper():
try:
func()
except Exception as e:
message = str(e)
if message != "Timeout":
raise XFail(func.__name__)
else:
raise Skipped("Timeout")
raise XPass(func.__name__)
wrapper = functools.update_wrapper(wrapper, func)
return wrapper
def skip(str):
raise Skipped(str)
def SKIP(reason):
"""Similar to ``skip()``, but this is a decorator. """
def wrapper(func):
def func_wrapper():
raise Skipped(reason)
func_wrapper = functools.update_wrapper(func_wrapper, func)
return func_wrapper
return wrapper
def slow(func):
func._slow = True
def func_wrapper():
func()
func_wrapper = functools.update_wrapper(func_wrapper, func)
func_wrapper.__wrapped__ = func
return func_wrapper
def nocache_fail(func):
"Dummy decorator for marking tests that fail when cache is disabled"
return func
@contextlib.contextmanager
def warns(warningcls, *, match=''):
'''Like raises but tests that warnings are emitted.
>>> from sympy.testing.pytest import warns
>>> import warnings
>>> with warns(UserWarning):
... warnings.warn('deprecated', UserWarning)
>>> with warns(UserWarning):
... pass
Traceback (most recent call last):
...
Failed: DID NOT WARN. No warnings of type UserWarning\
was emitted. The list of emitted warnings is: [].
'''
# Absorbs all warnings in warnrec
with warnings.catch_warnings(record=True) as warnrec:
# Hide all warnings but make sure that our warning is emitted
warnings.simplefilter("ignore")
warnings.filterwarnings("always", match, warningcls)
# Now run the test
yield
# Raise if expected warning not found
if not any(issubclass(w.category, warningcls) for w in warnrec):
msg = ('Failed: DID NOT WARN.'
' No warnings of type %s was emitted.'
' The list of emitted warnings is: %s.'
) % (warningcls, [w.message for w in warnrec])
raise Failed(msg)
def _both_exp_pow(func):
"""
Decorator used to run the test twice: the first time `e^x` is represented
as ``Pow(E, x)``, the second time as ``exp(x)`` (exponential object is not
a power).
This is a temporary trick helping to manage the elimination of the class
``exp`` in favor of a replacement by ``Pow(E, ...)``.
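    A hypothetical usage sketch (the decorated test is only defined here, not
    executed):
    >>> from sympy.testing.pytest import _both_exp_pow
    >>> @_both_exp_pow
    ... def test_exp_of_symbol():
    ...     from sympy import exp, Symbol
    ...     assert exp(Symbol('x')) is not None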
"""
from sympy.core.parameters import _exp_is_pow
def func_wrap():
with _exp_is_pow(True):
func()
with _exp_is_pow(False):
func()
wrapper = functools.update_wrapper(func_wrap, func)
return wrapper
@contextlib.contextmanager
def warns_deprecated_sympy():
'''Shorthand for ``warns(SymPyDeprecationWarning)``
This is the recommended way to test that ``SymPyDeprecationWarning`` is
emitted for deprecated features in SymPy. To test for other warnings use
``warns``. To suppress warnings without asserting that they are emitted
use ``ignore_warnings``.
>>> from sympy.testing.pytest import warns_deprecated_sympy
>>> from sympy.utilities.exceptions import SymPyDeprecationWarning
>>> with warns_deprecated_sympy():
... SymPyDeprecationWarning("Don't use", feature="old thing",
... deprecated_since_version="1.0", issue=123).warn()
>>> with warns_deprecated_sympy():
... pass
Traceback (most recent call last):
...
Failed: DID NOT WARN. No warnings of type \
SymPyDeprecationWarning was emitted. The list of emitted warnings is: [].
'''
with warns(SymPyDeprecationWarning):
yield
@contextlib.contextmanager
def ignore_warnings(warningcls):
'''Context manager to suppress warnings during tests.
This function is useful for suppressing warnings during tests. The warns
function should be used to assert that a warning is raised. The
    ignore_warnings function is useful in situations when the warning is not
guaranteed to be raised (e.g. on importing a module) or if the warning
comes from third-party code.
When the warning is coming (reliably) from SymPy the warns function should
be preferred to ignore_warnings.
>>> from sympy.testing.pytest import ignore_warnings
>>> import warnings
Here's a warning:
>>> with warnings.catch_warnings(): # reset warnings in doctest
... warnings.simplefilter('error')
... warnings.warn('deprecated', UserWarning)
Traceback (most recent call last):
...
UserWarning: deprecated
Let's suppress it with ignore_warnings:
>>> with warnings.catch_warnings(): # reset warnings in doctest
... warnings.simplefilter('error')
... with ignore_warnings(UserWarning):
... warnings.warn('deprecated', UserWarning)
(No warning emitted)
'''
# Absorbs all warnings in warnrec
with warnings.catch_warnings(record=True) as warnrec:
# Make sure our warning doesn't get filtered
warnings.simplefilter("always", warningcls)
# Now run the test
yield
# Reissue any warnings that we aren't testing for
for w in warnrec:
if not issubclass(w.category, warningcls):
warnings.warn_explicit(w.message, w.category, w.filename, w.lineno)
import re
import fnmatch
# XXX Python 2 unicode import test.
# May remove after deprecating Python 2.7.
message_unicode_A = \
"File contains a unicode character : %s, line %s. " \
"But with no encoding header. " \
"See https://www.python.org/dev/peps/pep-0263/ " \
"and add '# coding=utf-8'"
message_unicode_B = \
"File contains a unicode character : %s, line %s. " \
"But not in the whitelist. " \
"Add the file to the whitelist in " + __file__
message_unicode_C = \
"File contains a unicode character : %s, line %s. " \
"And is in the whitelist, but without the encoding header. " \
"See https://www.python.org/dev/peps/pep-0263/ " \
"and add '# coding=utf-8'."
message_unicode_D = \
    "File does not contain a unicode character : %s. " \
    "But is in the whitelist. " \
    "Remove the file from the whitelist in " + __file__
message_unicode_E = \
    "File does not contain a unicode character : %s. " \
    "But contains the header '# coding=utf-8' or equivalent. " \
    "Remove the header."
encoding_header_re = re.compile(
r'^[ \t\f]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)')
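# For illustration, the pattern above matches the common encoding headers on
# the first or second line of a file and captures the declared encoding:
#
#     assert encoding_header_re.match('# coding=utf-8').group(1) == 'utf-8'
#     assert encoding_header_re.match('# -*- coding: utf-8 -*-').group(1) == 'utf-8'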
# Whitelist pattern for files which can have unicode.
unicode_whitelist = [
# Author names can include non-ASCII characters
r'*/bin/authors_update.py',
# These files have functions and test functions for unicode input and
# output.
r'*/sympy/testing/tests/test_code_quality.py',
r'*/sympy/physics/vector/tests/test_printing.py',
r'*/physics/quantum/tests/test_printing.py',
r'*/sympy/vector/tests/test_printing.py',
r'*/sympy/parsing/tests/test_sympy_parser.py',
r'*/sympy/printing/pretty/tests/test_pretty.py',
r'*/sympy/printing/tests/test_conventions.py',
r'*/sympy/printing/tests/test_preview.py',
r'*/liealgebras/type_g.py',
r'*/liealgebras/weyl_group.py',
r'*/liealgebras/tests/test_type_G.py',
# wigner.py and polarization.py have unicode doctests. These probably
# don't need to be there but some of the examples that are there are
# pretty ugly without use_unicode (matrices need to be wrapped across
# multiple lines etc)
r'*/sympy/physics/wigner.py',
r'*/sympy/physics/optics/polarization.py',
# joint.py uses some unicode for variable names in the docstrings
r'*/sympy/physics/mechanics/joint.py',
]
unicode_strict_whitelist = [
r'*/sympy/parsing/latex/_antlr/__init__.py',
]
def _test_this_file_encoding(
fname, test_file,
unicode_whitelist=unicode_whitelist,
unicode_strict_whitelist=unicode_strict_whitelist):
"""Test helper function for Python 2 importability test
This test checks whether the file has
# coding=utf-8
or
# -*- coding: utf-8 -*-
line if there is a unicode character in the code
    The test may have to operate in a filewise manner, so it has been moved
    to a separate process.
May remove after deprecating Python 2.7.
"""
has_coding_utf8 = False
has_unicode = False
is_in_whitelist = False
is_in_strict_whitelist = False
for patt in unicode_whitelist:
if fnmatch.fnmatch(fname, patt):
is_in_whitelist = True
break
for patt in unicode_strict_whitelist:
if fnmatch.fnmatch(fname, patt):
is_in_strict_whitelist = True
is_in_whitelist = True
break
if is_in_whitelist:
for idx, line in enumerate(test_file):
if idx in (0, 1):
match = encoding_header_re.match(line)
if match and match.group(1).lower() == 'utf-8':
has_coding_utf8 = True
try:
line.encode(encoding='ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
has_unicode = True
if has_coding_utf8 is False:
assert False, \
message_unicode_C % (fname, idx + 1)
if not has_unicode and not is_in_strict_whitelist:
assert False, message_unicode_D % fname
else:
for idx, line in enumerate(test_file):
if idx in (0, 1):
match = encoding_header_re.match(line)
if match and match.group(1).lower() == 'utf-8':
has_coding_utf8 = True
try:
line.encode(encoding='ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
has_unicode = True
if has_coding_utf8:
assert False, \
message_unicode_B % (fname, idx + 1)
else:
assert False, \
message_unicode_A % (fname, idx + 1)
if not has_unicode and has_coding_utf8:
assert False, \
message_unicode_E % fname
"""
This is our testing framework.
Goals:
* it should be compatible with py.test and operate very similarly
(or identically)
* doesn't require any external dependencies
* preferably all the functionality should be in this file only
* no magic, just import the test file and execute the test functions, that's it
* portable
"""
from __future__ import print_function, division
import os
import sys
import platform
import inspect
import traceback
import pdb
import re
import linecache
import time
from fnmatch import fnmatch
from timeit import default_timer as clock
import doctest as pdoctest # avoid clashing with our doctest() function
from doctest import DocTestFinder, DocTestRunner
import random
import subprocess
import shutil
import signal
import stat
import tempfile
import warnings
from contextlib import contextmanager
from inspect import unwrap
from sympy.core.cache import clear_cache
from sympy.external import import_module
from sympy.external.gmpy import GROUND_TYPES, HAS_GMPY
IS_WINDOWS = (os.name == 'nt')
ON_TRAVIS = os.getenv('TRAVIS_BUILD_NUMBER', None)
# empirically generated list of the proportion of time spent running
# an even split of tests. This should periodically be regenerated.
# A list of [.6, .1, .3] would mean that if the tests are evenly split
# into '1/3', '2/3', '3/3', the first split would take 60% of the time,
# the second 10% and the third 30%. These lists are normalized to sum
# to 1, so [60, 10, 30] has the same behavior as [6, 1, 3] or [.6, .1, .3].
#
# This list can be generated with the code:
# from time import time
# import sympy
# import os
# os.environ["TRAVIS_BUILD_NUMBER"] = '2' # Mock travis to get more correct densities
# delays, num_splits = [], 30
# for i in range(1, num_splits + 1):
# tic = time()
# sympy.test(split='{}/{}'.format(i, num_splits), time_balance=False) # Add slow=True for slow tests
# delays.append(time() - tic)
# tot = sum(delays)
# print([round(x / tot, 4) for x in delays])
SPLIT_DENSITY = [
0.0059, 0.0027, 0.0068, 0.0011, 0.0006,
0.0058, 0.0047, 0.0046, 0.004, 0.0257,
0.0017, 0.0026, 0.004, 0.0032, 0.0016,
0.0015, 0.0004, 0.0011, 0.0016, 0.0014,
0.0077, 0.0137, 0.0217, 0.0074, 0.0043,
0.0067, 0.0236, 0.0004, 0.1189, 0.0142,
0.0234, 0.0003, 0.0003, 0.0047, 0.0006,
0.0013, 0.0004, 0.0008, 0.0007, 0.0006,
0.0139, 0.0013, 0.0007, 0.0051, 0.002,
0.0004, 0.0005, 0.0213, 0.0048, 0.0016,
0.0012, 0.0014, 0.0024, 0.0015, 0.0004,
0.0005, 0.0007, 0.011, 0.0062, 0.0015,
0.0021, 0.0049, 0.0006, 0.0006, 0.0011,
0.0006, 0.0019, 0.003, 0.0044, 0.0054,
0.0057, 0.0049, 0.0016, 0.0006, 0.0009,
0.0006, 0.0012, 0.0006, 0.0149, 0.0532,
0.0076, 0.0041, 0.0024, 0.0135, 0.0081,
0.2209, 0.0459, 0.0438, 0.0488, 0.0137,
0.002, 0.0003, 0.0008, 0.0039, 0.0024,
0.0005, 0.0004, 0.003, 0.056, 0.0026]
SPLIT_DENSITY_SLOW = [0.0086, 0.0004, 0.0568, 0.0003, 0.0032, 0.0005, 0.0004, 0.0013, 0.0016, 0.0648, 0.0198, 0.1285, 0.098, 0.0005, 0.0064, 0.0003, 0.0004, 0.0026, 0.0007, 0.0051, 0.0089, 0.0024, 0.0033, 0.0057, 0.0005, 0.0003, 0.001, 0.0045, 0.0091, 0.0006, 0.0005, 0.0321, 0.0059, 0.1105, 0.216, 0.1489, 0.0004, 0.0003, 0.0006, 0.0483]
class Skipped(Exception):
pass
class TimeOutError(Exception):
pass
class DependencyError(Exception):
pass
# add more flags ??
future_flags = division.compiler_flag
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning of
every non-blank line in ``s``, and return the result.
If the string ``s`` is Unicode, it is encoded using the stdout
encoding and the ``backslashreplace`` error handler.
"""
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
pdoctest._indent = _indent # type: ignore
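# For illustration, _indent prefixes every non-blank line with the requested
# number of spaces while leaving blank lines untouched:
#
#     assert _indent("a\n\nb\n") == "    a\n\n    b\n"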
# override reporter to maintain windows and python3
def _report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
s = self._checker.output_difference(example, got, self.optionflags)
s = s.encode('raw_unicode_escape').decode('utf8', 'ignore')
out(self._failure_header(test, example) + s)
if IS_WINDOWS:
DocTestRunner.report_failure = _report_failure # type: ignore
def convert_to_native_paths(lst):
"""
Converts a list of '/' separated paths into a list of
native (os.sep separated) paths and converts to lowercase
if the system is case insensitive.
"""
newlst = []
for i, rv in enumerate(lst):
rv = os.path.join(*rv.split("/"))
# on windows the slash after the colon is dropped
if sys.platform == "win32":
pos = rv.find(':')
if pos != -1:
if rv[pos + 1] != '\\':
rv = rv[:pos + 1] + '\\' + rv[pos + 1:]
newlst.append(os.path.normcase(rv))
return newlst
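# For illustration, on a POSIX system the conversion is essentially a no-op:
#
#     convert_to_native_paths(['sympy/core/tests'])  # -> ['sympy/core/tests']
#
# while on Windows the same call would yield ['sympy\\core\\tests'] (lower-cased
# and with the drive-letter colon handling shown above).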
def get_sympy_dir():
"""
    Returns the root SymPy directory and sets the global value
indicating whether the system is case sensitive or not.
"""
this_file = os.path.abspath(__file__)
sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..")
sympy_dir = os.path.normpath(sympy_dir)
return os.path.normcase(sympy_dir)
def setup_pprint():
from sympy.interactive.printing import init_printing
from sympy.printing.pretty.pretty import pprint_use_unicode
import sympy.interactive.printing as interactive_printing
# force pprint to be in ascii mode in doctests
use_unicode_prev = pprint_use_unicode(False)
# hook our nice, hash-stable strprinter
init_printing(pretty_print=False)
# Prevent init_printing() in doctests from affecting other doctests
interactive_printing.NO_GLOBAL = True
return use_unicode_prev
@contextmanager
def raise_on_deprecated():
"""Context manager to make DeprecationWarning raise an error
This is to catch SymPyDeprecationWarning from library code while running
tests and doctests. It is important to use this context manager around
each individual test/doctest in case some tests modify the warning
filters.
"""
with warnings.catch_warnings():
warnings.filterwarnings('error', '.*', DeprecationWarning, module='sympy.*')
yield
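# A minimal usage sketch (the test call below is hypothetical): any
# DeprecationWarning emitted from a sympy module inside the block is turned
# into an error, so tests cannot silently rely on deprecated behaviour.
#
#     with raise_on_deprecated():
#         run_some_test()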
def run_in_subprocess_with_hash_randomization(
function, function_args=(),
function_kwargs=None, command=sys.executable,
module='sympy.testing.runtests', force=False):
"""
Run a function in a Python subprocess with hash randomization enabled.
If hash randomization is not supported by the version of Python given, it
returns False. Otherwise, it returns the exit value of the command. The
function is passed to sys.exit(), so the return value of the function will
be the return value.
The environment variable PYTHONHASHSEED is used to seed Python's hash
randomization. If it is set, this function will return False, because
starting a new subprocess is unnecessary in that case. If it is not set,
one is set at random, and the tests are run. Note that if this
environment variable is set when Python starts, hash randomization is
automatically enabled. To force a subprocess to be created even if
PYTHONHASHSEED is set, pass ``force=True``. This flag will not force a
subprocess in Python versions that do not support hash randomization (see
below), because those versions of Python do not support the ``-R`` flag.
``function`` should be a string name of a function that is importable from
the module ``module``, like "_test". The default for ``module`` is
"sympy.testing.runtests". ``function_args`` and ``function_kwargs``
should be a repr-able tuple and dict, respectively. The default Python
command is sys.executable, which is the currently running Python command.
This function is necessary because the seed for hash randomization must be
set by the environment variable before Python starts. Hence, in order to
use a predetermined seed for tests, we must start Python in a separate
subprocess.
Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
3.1.5, and 3.2.3, and is enabled by default in all Python versions after
and including 3.3.0.
Examples
========
>>> from sympy.testing.runtests import (
... run_in_subprocess_with_hash_randomization)
>>> # run the core tests in verbose mode
>>> run_in_subprocess_with_hash_randomization("_test",
... function_args=("core",),
... function_kwargs={'verbose': True}) # doctest: +SKIP
# Will return 0 if sys.executable supports hash randomization and tests
# pass, 1 if they fail, and False if it does not support hash
# randomization.
"""
cwd = get_sympy_dir()
# Note, we must return False everywhere, not None, as subprocess.call will
# sometimes return None.
# First check if the Python version supports hash randomization
# If it doesn't have this support, it won't recognize the -R flag
p = subprocess.Popen([command, "-RV"], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, cwd=cwd)
p.communicate()
if p.returncode != 0:
return False
hash_seed = os.getenv("PYTHONHASHSEED")
if not hash_seed:
os.environ["PYTHONHASHSEED"] = str(random.randrange(2**32))
else:
if not force:
return False
function_kwargs = function_kwargs or {}
# Now run the command
commandstring = ("import sys; from %s import %s;sys.exit(%s(*%s, **%s))" %
(module, function, function, repr(function_args),
repr(function_kwargs)))
try:
p = subprocess.Popen([command, "-R", "-c", commandstring], cwd=cwd)
p.communicate()
except KeyboardInterrupt:
p.wait()
finally:
# Put the environment variable back, so that it reads correctly for
# the current Python process.
if hash_seed is None:
del os.environ["PYTHONHASHSEED"]
else:
os.environ["PYTHONHASHSEED"] = hash_seed
return p.returncode
def run_all_tests(test_args=(), test_kwargs=None,
doctest_args=(), doctest_kwargs=None,
examples_args=(), examples_kwargs=None):
"""
Run all tests.
Right now, this runs the regular tests (bin/test), the doctests
(bin/doctest), and the examples (examples/all.py).
This is what ``setup.py test`` uses.
You can pass arguments and keyword arguments to the test functions that
support them (for now, test, doctest, and the examples). See the
docstrings of those functions for a description of the available options.
For example, to run the solvers tests with colors turned off:
>>> from sympy.testing.runtests import run_all_tests
>>> run_all_tests(test_args=("solvers",),
... test_kwargs={"colors:False"}) # doctest: +SKIP
"""
tests_successful = True
test_kwargs = test_kwargs or {}
doctest_kwargs = doctest_kwargs or {}
examples_kwargs = examples_kwargs or {'quiet': True}
try:
# Regular tests
if not test(*test_args, **test_kwargs):
# some regular test fails, so set the tests_successful
# flag to false and continue running the doctests
tests_successful = False
# Doctests
print()
if not doctest(*doctest_args, **doctest_kwargs):
tests_successful = False
# Examples
print()
sys.path.append("examples") # examples/all.py
from all import run_examples # type: ignore
if not run_examples(*examples_args, **examples_kwargs):
tests_successful = False
if tests_successful:
return
else:
# Return nonzero exit code
sys.exit(1)
except KeyboardInterrupt:
print()
print("DO *NOT* COMMIT!")
sys.exit(1)
def test(*paths, subprocess=True, rerun=0, **kwargs):
"""
Run tests in the specified test_*.py files.
Tests in a particular test_*.py file are run if any of the given strings
in ``paths`` matches a part of the test file's path. If ``paths=[]``,
tests in all test_*.py files are run.
Notes:
- If sort=False, tests are run in random order (not default).
- Paths can be entered in native system format or in unix,
forward-slash format.
- Files that are on the blacklist can be tested by providing
their path; they are only excluded if no paths are given.
**Explanation of test results**
====== ===============================================================
Output Meaning
====== ===============================================================
. passed
F failed
X XPassed (expected to fail but passed)
f XFAILed (expected to fail and indeed failed)
s skipped
w slow
T timeout (e.g., when ``--timeout`` is used)
K KeyboardInterrupt (when running the slow tests with ``--slow``,
you can interrupt one of them without killing the test runner)
====== ===============================================================
Colors have no additional meaning and are used just to facilitate
interpreting the output.
Examples
========
>>> import sympy
Run all tests:
>>> sympy.test() # doctest: +SKIP
Run one file:
>>> sympy.test("sympy/core/tests/test_basic.py") # doctest: +SKIP
>>> sympy.test("_basic") # doctest: +SKIP
Run all tests in sympy/functions/ and some particular file:
>>> sympy.test("sympy/core/tests/test_basic.py",
... "sympy/functions") # doctest: +SKIP
Run all tests in sympy/core and sympy/utilities:
>>> sympy.test("/core", "/util") # doctest: +SKIP
Run specific test from a file:
>>> sympy.test("sympy/core/tests/test_basic.py",
... kw="test_equality") # doctest: +SKIP
Run specific test from any file:
>>> sympy.test(kw="subs") # doctest: +SKIP
Run the tests with verbose mode on:
>>> sympy.test(verbose=True) # doctest: +SKIP
Don't sort the test output:
>>> sympy.test(sort=False) # doctest: +SKIP
Turn on post-mortem pdb:
>>> sympy.test(pdb=True) # doctest: +SKIP
Turn off colors:
>>> sympy.test(colors=False) # doctest: +SKIP
Force colors, even when the output is not to a terminal (this is useful,
e.g., if you are piping to ``less -r`` and you still want colors)
    >>> sympy.test(force_colors=True) # doctest: +SKIP
The traceback verboseness can be set to "short" or "no" (default is
"short")
>>> sympy.test(tb='no') # doctest: +SKIP
The ``split`` option can be passed to split the test run into parts. The
split currently only splits the test files, though this may change in the
future. ``split`` should be a string of the form 'a/b', which will run
part ``a`` of ``b``. For instance, to run the first half of the test suite:
>>> sympy.test(split='1/2') # doctest: +SKIP
The ``time_balance`` option can be passed in conjunction with ``split``.
If ``time_balance=True`` (the default for ``sympy.test``), SymPy will attempt
to split the tests such that each split takes equal time. This heuristic
for balancing is based on pre-recorded test data.
>>> sympy.test(split='1/2', time_balance=True) # doctest: +SKIP
You can disable running the tests in a separate subprocess using
``subprocess=False``. This is done to support seeding hash randomization,
which is enabled by default in the Python versions where it is supported.
If subprocess=False, hash randomization is enabled/disabled according to
whether it has been enabled or not in the calling Python process.
However, even if it is enabled, the seed cannot be printed unless it is
called from a new Python process.
Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
3.1.5, and 3.2.3, and is enabled by default in all Python versions after
and including 3.3.0.
If hash randomization is not supported ``subprocess=False`` is used
automatically.
>>> sympy.test(subprocess=False) # doctest: +SKIP
To set the hash randomization seed, set the environment variable
``PYTHONHASHSEED`` before running the tests. This can be done from within
Python using
>>> import os
>>> os.environ['PYTHONHASHSEED'] = '42' # doctest: +SKIP
Or from the command line using
$ PYTHONHASHSEED=42 ./bin/test
If the seed is not set, a random seed will be chosen.
Note that to reproduce the same hash values, you must use both the same seed
as well as the same architecture (32-bit vs. 64-bit).
"""
# count up from 0, do not print 0
print_counter = lambda i : (print("rerun %d" % (rerun-i))
if rerun-i else None)
if subprocess:
# loop backwards so last i is 0
for i in range(rerun, -1, -1):
print_counter(i)
ret = run_in_subprocess_with_hash_randomization("_test",
function_args=paths, function_kwargs=kwargs)
if ret is False:
break
val = not bool(ret)
# exit on the first failure or if done
if not val or i == 0:
return val
# rerun even if hash randomization is not supported
for i in range(rerun, -1, -1):
print_counter(i)
val = not bool(_test(*paths, **kwargs))
if not val or i == 0:
return val
def _test(*paths,
verbose=False, tb="short", kw=None, pdb=False, colors=True,
force_colors=False, sort=True, seed=None, timeout=False,
fail_on_timeout=False, slow=False, enhance_asserts=False, split=None,
time_balance=True, blacklist=('sympy/integrals/rubi/rubi_tests/tests',),
fast_threshold=None, slow_threshold=None):
"""
Internal function that actually runs the tests.
All keyword arguments from ``test()`` are passed to this function except for
``subprocess``.
Returns 0 if tests passed and 1 if they failed. See the docstring of
``test()`` for more information.
"""
kw = kw or ()
# ensure that kw is a tuple
if isinstance(kw, str):
kw = (kw,)
post_mortem = pdb
if seed is None:
seed = random.randrange(100000000)
if ON_TRAVIS and timeout is False:
# Travis times out if no activity is seen for 10 minutes.
timeout = 595
fail_on_timeout = True
if ON_TRAVIS:
# pyglet does not work on Travis
blacklist = list(blacklist) + ['sympy/plotting/pygletplot/tests']
blacklist = convert_to_native_paths(blacklist)
r = PyTestReporter(verbose=verbose, tb=tb, colors=colors,
force_colors=force_colors, split=split)
t = SymPyTests(r, kw, post_mortem, seed,
fast_threshold=fast_threshold,
slow_threshold=slow_threshold)
test_files = t.get_test_files('sympy')
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
paths = convert_to_native_paths(paths)
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
density = None
if time_balance:
if slow:
density = SPLIT_DENSITY_SLOW
else:
density = SPLIT_DENSITY
if split:
matched = split_list(matched, split, density=density)
t._testfiles.extend(matched)
return int(not t.test(sort=sort, timeout=timeout, slow=slow,
enhance_asserts=enhance_asserts, fail_on_timeout=fail_on_timeout))
def doctest(*paths, subprocess=True, rerun=0, **kwargs):
r"""
Runs doctests in all \*.py files in the SymPy directory which match
any of the given strings in ``paths`` or all tests if paths=[].
Notes:
- Paths can be entered in native system format or in unix,
forward-slash format.
- Files that are on the blacklist can be tested by providing
their path; they are only excluded if no paths are given.
Examples
========
>>> import sympy
Run all tests:
>>> sympy.doctest() # doctest: +SKIP
Run one file:
>>> sympy.doctest("sympy/core/basic.py") # doctest: +SKIP
>>> sympy.doctest("polynomial.rst") # doctest: +SKIP
Run all tests in sympy/functions/ and some particular file:
>>> sympy.doctest("/functions", "basic.py") # doctest: +SKIP
Run any file having polynomial in its name, doc/src/modules/polynomial.rst,
sympy/functions/special/polynomials.py, and sympy/polys/polynomial.py:
>>> sympy.doctest("polynomial") # doctest: +SKIP
The ``split`` option can be passed to split the test run into parts. The
split currently only splits the test files, though this may change in the
future. ``split`` should be a string of the form 'a/b', which will run
part ``a`` of ``b``. Note that the regular doctests and the Sphinx
doctests are split independently. For instance, to run the first half of
the test suite:
>>> sympy.doctest(split='1/2') # doctest: +SKIP
The ``subprocess`` and ``verbose`` options are the same as with the function
``test()``. See the docstring of that function for more information.
"""
# count up from 0, do not print 0
print_counter = lambda i : (print("rerun %d" % (rerun-i))
if rerun-i else None)
if subprocess:
# loop backwards so last i is 0
for i in range(rerun, -1, -1):
print_counter(i)
ret = run_in_subprocess_with_hash_randomization("_doctest",
function_args=paths, function_kwargs=kwargs)
if ret is False:
break
val = not bool(ret)
# exit on the first failure or if done
if not val or i == 0:
return val
# rerun even if hash randomization is not supported
for i in range(rerun, -1, -1):
print_counter(i)
val = not bool(_doctest(*paths, **kwargs))
if not val or i == 0:
return val
def _get_doctest_blacklist():
'''Get the default blacklist for the doctests'''
blacklist = []
blacklist.extend([
"doc/src/modules/plotting.rst", # generates live plots
"doc/src/modules/physics/mechanics/autolev_parser.rst",
"sympy/galgebra.py", # no longer part of SymPy
"sympy/this.py", # prints text
"sympy/matrices/densearith.py", # raises deprecation warning
"sympy/matrices/densesolve.py", # raises deprecation warning
"sympy/matrices/densetools.py", # raises deprecation warning
"sympy/printing/ccode.py", # backwards compatibility shim, importing it breaks the codegen doctests
"sympy/printing/fcode.py", # backwards compatibility shim, importing it breaks the codegen doctests
"sympy/printing/cxxcode.py", # backwards compatibility shim, importing it breaks the codegen doctests
"sympy/parsing/autolev/_antlr/autolevlexer.py", # generated code
"sympy/parsing/autolev/_antlr/autolevparser.py", # generated code
"sympy/parsing/autolev/_antlr/autolevlistener.py", # generated code
"sympy/parsing/latex/_antlr/latexlexer.py", # generated code
"sympy/parsing/latex/_antlr/latexparser.py", # generated code
"sympy/integrals/rubi/rubi.py",
"sympy/plotting/pygletplot/__init__.py", # crashes on some systems
"sympy/plotting/pygletplot/plot.py", # crashes on some systems
"sympy/codegen/array_utils.py", # raises deprecation warning
])
# autolev parser tests
num = 12
for i in range (1, num+1):
blacklist.append("sympy/parsing/autolev/test-examples/ruletest" + str(i) + ".py")
blacklist.extend(["sympy/parsing/autolev/test-examples/pydy-example-repo/mass_spring_damper.py",
"sympy/parsing/autolev/test-examples/pydy-example-repo/chaos_pendulum.py",
"sympy/parsing/autolev/test-examples/pydy-example-repo/double_pendulum.py",
"sympy/parsing/autolev/test-examples/pydy-example-repo/non_min_pendulum.py"])
if import_module('numpy') is None:
blacklist.extend([
"sympy/plotting/experimental_lambdify.py",
"sympy/plotting/plot_implicit.py",
"examples/advanced/autowrap_integrators.py",
"examples/advanced/autowrap_ufuncify.py",
"examples/intermediate/sample.py",
"examples/intermediate/mplot2d.py",
"examples/intermediate/mplot3d.py",
"doc/src/modules/numeric-computation.rst"
])
else:
if import_module('matplotlib') is None:
blacklist.extend([
"examples/intermediate/mplot2d.py",
"examples/intermediate/mplot3d.py"
])
else:
# Use a non-windowed backend, so that the tests work on Travis
import matplotlib
matplotlib.use('Agg')
if ON_TRAVIS or import_module('pyglet') is None:
blacklist.extend(["sympy/plotting/pygletplot"])
if import_module('aesara') is None:
blacklist.extend([
"sympy/printing/aesaracode.py",
"doc/src/modules/numeric-computation.rst",
])
if import_module('cupy') is None:
blacklist.extend([
"doc/src/modules/numeric-computation.rst",
])
if import_module('antlr4') is None:
blacklist.extend([
"sympy/parsing/autolev/__init__.py",
"sympy/parsing/latex/_parse_latex_antlr.py",
])
if import_module('lfortran') is None:
#throws ImportError when lfortran not installed
blacklist.extend([
"sympy/parsing/sym_expr.py",
])
# disabled because of doctest failures in asmeurer's bot
blacklist.extend([
"sympy/utilities/autowrap.py",
"examples/advanced/autowrap_integrators.py",
"examples/advanced/autowrap_ufuncify.py"
])
# blacklist these modules until issue 4840 is resolved
blacklist.extend([
"sympy/conftest.py", # Python 2.7 issues
"sympy/testing/benchmarking.py",
])
# These are deprecated stubs to be removed:
blacklist.extend([
"sympy/utilities/benchmarking.py",
"sympy/utilities/tmpfiles.py",
"sympy/utilities/pytest.py",
"sympy/utilities/runtests.py",
"sympy/utilities/quality_unicode.py",
"sympy/utilities/randtest.py",
])
blacklist = convert_to_native_paths(blacklist)
return blacklist
def _doctest(*paths, **kwargs):
"""
Internal function that actually runs the doctests.
All keyword arguments from ``doctest()`` are passed to this function
except for ``subprocess``.
Returns 0 if tests passed and 1 if they failed. See the docstrings of
``doctest()`` and ``test()`` for more information.
"""
from sympy.printing.pretty.pretty import pprint_use_unicode
normal = kwargs.get("normal", False)
verbose = kwargs.get("verbose", False)
colors = kwargs.get("colors", True)
force_colors = kwargs.get("force_colors", False)
blacklist = kwargs.get("blacklist", [])
split = kwargs.get('split', None)
blacklist.extend(_get_doctest_blacklist())
# Use a non-windowed backend, so that the tests work on Travis
if import_module('matplotlib') is not None:
import matplotlib
matplotlib.use('Agg')
# Disable warnings for external modules
import sympy.external
sympy.external.importtools.WARN_OLD_VERSION = False
sympy.external.importtools.WARN_NOT_INSTALLED = False
# Disable showing up of plots
from sympy.plotting.plot import unset_show
unset_show()
r = PyTestReporter(verbose, split=split, colors=colors,\
force_colors=force_colors)
t = SymPyDocTests(r, normal)
test_files = t.get_test_files('sympy')
test_files.extend(t.get_test_files('examples', init_only=False))
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
# take only what was requested...but not blacklisted items
# and allow for partial match anywhere or fnmatch of name
paths = convert_to_native_paths(paths)
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if split:
matched = split_list(matched, split)
t._testfiles.extend(matched)
# run the tests and record the result for this *py portion of the tests
if t._testfiles:
failed = not t.test()
else:
failed = False
# N.B.
# --------------------------------------------------------------------
# Here we test *.rst files at or below doc/src. Code from these must
# be self supporting in terms of imports since there is no importing
# of necessary modules by doctest.testfile. If you try to pass *.py
# files through this they might fail because they will lack the needed
# imports and smarter parsing that can be done with source code.
#
test_files = t.get_test_files('doc/src', '*.rst', init_only=False)
test_files.sort()
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
# Take only what was requested as long as it's not on the blacklist.
# Paths were already made native in *py tests so don't repeat here.
# There's no chance of having a *py file slip through since we
# only have *rst files in test_files.
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if split:
matched = split_list(matched, split)
first_report = True
for rst_file in matched:
if not os.path.isfile(rst_file):
continue
old_displayhook = sys.displayhook
try:
use_unicode_prev = setup_pprint()
out = sympytestfile(
rst_file, module_relative=False, encoding='utf-8',
optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE |
pdoctest.IGNORE_EXCEPTION_DETAIL)
finally:
# make sure we return to the original displayhook in case some
# doctest has changed that
sys.displayhook = old_displayhook
# The NO_GLOBAL flag overrides the no_global flag to init_printing
# if True
import sympy.interactive.printing as interactive_printing
interactive_printing.NO_GLOBAL = False
pprint_use_unicode(use_unicode_prev)
rstfailed, tested = out
if tested:
failed = rstfailed or failed
if first_report:
first_report = False
msg = 'rst doctests start'
if not t._testfiles:
r.start(msg=msg)
else:
r.write_center(msg)
print()
# use as the id, everything past the first 'sympy'
file_id = rst_file[rst_file.find('sympy') + len('sympy') + 1:]
print(file_id, end=" ")
            # get at least the name out so it is known who is being tested
wid = r.terminal_width - len(file_id) - 1 # update width
test_file = '[%s]' % (tested)
report = '[%s]' % (rstfailed or 'OK')
print(''.join(
[test_file, ' '*(wid - len(test_file) - len(report)), report])
)
# the doctests for *py will have printed this message already if there was
# a failure, so now only print it if there was intervening reporting by
# testing the *rst as evidenced by first_report no longer being True.
if not first_report and failed:
print()
print("DO *NOT* COMMIT!")
return int(failed)
sp = re.compile(r'([0-9]+)/([1-9][0-9]*)')
def split_list(l, split, density=None):
"""
Splits a list into part a of b
split should be a string of the form 'a/b'. For instance, '1/3' would give
the split one of three.
If the length of the list is not divisible by the number of splits, the
last split will have more items.
`density` may be specified as a list. If specified,
    tests will be balanced so that each split has an as-equal-as-possible
    amount of mass according to `density`.
>>> from sympy.testing.runtests import split_list
>>> a = list(range(10))
>>> split_list(a, '1/3')
[0, 1, 2]
>>> split_list(a, '2/3')
[3, 4, 5]
>>> split_list(a, '3/3')
[6, 7, 8, 9]
"""
m = sp.match(split)
if not m:
raise ValueError("split must be a string of the form a/b where a and b are ints")
i, t = map(int, m.groups())
if not density:
return l[(i - 1)*len(l)//t : i*len(l)//t]
# normalize density
tot = sum(density)
density = [x / tot for x in density]
def density_inv(x):
"""Interpolate the inverse to the cumulative
distribution function given by density"""
if x <= 0:
return 0
if x >= sum(density):
return 1
# find the first time the cumulative sum surpasses x
# and linearly interpolate
cumm = 0
for i, d in enumerate(density):
cumm += d
if cumm >= x:
break
frac = (d - (cumm - x)) / d
return (i + frac) / len(density)
lower_frac = density_inv((i - 1) / t)
higher_frac = density_inv(i / t)
return l[int(lower_frac*len(l)) : int(higher_frac*len(l))]
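# For illustration, a density list skews the split boundaries towards equal
# estimated time rather than an equal number of items.  With ten items where
# the last five are assumed to be nine times as expensive as the first five:
#
#     split_list(list(range(10)), '1/2', density=[1]*5 + [9]*5)  # [0, 1, 2, 3, 4, 5, 6]
#     split_list(list(range(10)), '2/2', density=[1]*5 + [9]*5)  # [7, 8, 9]
#
# so the first split receives seven cheap items and the second three costly ones.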
from collections import namedtuple
SymPyTestResults = namedtuple('SymPyTestResults', 'failed attempted')
def sympytestfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False,
parser=pdoctest.DocTestParser(), encoding=None):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg ``module_relative`` specifies how filenames
should be interpreted:
- If ``module_relative`` is True (the default), then ``filename``
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
``package`` argument is specified, then it is relative to that
package. To ensure os-independence, ``filename`` should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If ``module_relative`` is False, then ``filename`` specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg ``name`` gives the name of the test; by default
use the file's basename.
Optional keyword argument ``package`` is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify ``package`` if ``module_relative`` is False.
Optional keyword arg ``globs`` gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg ``extraglobs`` gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg ``verbose`` prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg ``report`` prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg ``optionflags`` or's together module constants,
and defaults to 0. Possible values (see the docs for details):
- DONT_ACCEPT_TRUE_FOR_1
- DONT_ACCEPT_BLANKLINE
- NORMALIZE_WHITESPACE
- ELLIPSIS
- SKIP
- IGNORE_EXCEPTION_DETAIL
- REPORT_UDIFF
- REPORT_CDIFF
- REPORT_NDIFF
- REPORT_ONLY_FIRST_FAILURE
Optional keyword arg ``raise_on_error`` raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg ``parser`` specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Optional keyword arg ``encoding`` specifies an encoding that should
be used to convert the file to unicode.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
text, filename = pdoctest._load_testfile(
filename, package, module_relative, encoding)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if '__name__' not in globs:
globs['__name__'] = '__main__'
if raise_on_error:
runner = pdoctest.DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = SymPyDocTestRunner(verbose=verbose, optionflags=optionflags)
runner._checker = SymPyOutputChecker()
# Read the file, convert it to a test, and run it.
test = parser.get_doctest(text, globs, name, filename, 0)
runner.run(test, compileflags=future_flags)
if report:
runner.summarize()
if pdoctest.master is None:
pdoctest.master = runner
else:
pdoctest.master.merge(runner)
return SymPyTestResults(runner.failures, runner.tries)
class SymPyTests:
def __init__(self, reporter, kw="", post_mortem=False,
seed=None, fast_threshold=None, slow_threshold=None):
self._post_mortem = post_mortem
self._kw = kw
self._count = 0
self._root_dir = get_sympy_dir()
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._testfiles = []
self._seed = seed if seed is not None else random.random()
# Defaults in seconds, from human / UX design limits
# http://www.nngroup.com/articles/response-times-3-important-limits/
#
# These defaults are *NOT* set in stone as we are measuring different
# things, so others feel free to come up with a better yardstick :)
if fast_threshold:
self._fast_threshold = float(fast_threshold)
else:
self._fast_threshold = 8
if slow_threshold:
self._slow_threshold = float(slow_threshold)
else:
self._slow_threshold = 10
def test(self, sort=False, timeout=False, slow=False,
enhance_asserts=False, fail_on_timeout=False):
"""
Runs the tests returning True if all tests pass, otherwise False.
If sort=False run tests in random order.
"""
if sort:
self._testfiles.sort()
elif slow:
pass
else:
random.seed(self._seed)
random.shuffle(self._testfiles)
self._reporter.start(self._seed)
for f in self._testfiles:
try:
self.test_file(f, sort, timeout, slow,
enhance_asserts, fail_on_timeout)
except KeyboardInterrupt:
print(" interrupted by user")
self._reporter.finish()
raise
return self._reporter.finish()
def _enhance_asserts(self, source):
from ast import (NodeTransformer, Compare, Name, Store, Load, Tuple,
Assign, BinOp, Str, Mod, Assert, parse, fix_missing_locations)
ops = {"Eq": '==', "NotEq": '!=', "Lt": '<', "LtE": '<=',
"Gt": '>', "GtE": '>=', "Is": 'is', "IsNot": 'is not',
"In": 'in', "NotIn": 'not in'}
class Transform(NodeTransformer):
def visit_Assert(self, stmt):
if isinstance(stmt.test, Compare):
compare = stmt.test
values = [compare.left] + compare.comparators
names = [ "_%s" % i for i, _ in enumerate(values) ]
names_store = [ Name(n, Store()) for n in names ]
names_load = [ Name(n, Load()) for n in names ]
target = Tuple(names_store, Store())
value = Tuple(values, Load())
assign = Assign([target], value)
new_compare = Compare(names_load[0], compare.ops, names_load[1:])
msg_format = "\n%s " + "\n%s ".join([ ops[op.__class__.__name__] for op in compare.ops ]) + "\n%s"
msg = BinOp(Str(msg_format), Mod(), Tuple(names_load, Load()))
test = Assert(new_compare, msg, lineno=stmt.lineno, col_offset=stmt.col_offset)
return [assign, test]
else:
return stmt
tree = parse(source)
new_tree = Transform().visit(tree)
return fix_missing_locations(new_tree)
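    # For illustration, the transformation above rewrites a plain comparison
    # assert so that the compared values appear in the failure message;
    # conceptually
    #
    #     assert x == y
    #
    # becomes, roughly,
    #
    #     (_0, _1) = (x, y)
    #     assert _0 == _1, "\n%s ==\n%s" % (_0, _1)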
def test_file(self, filename, sort=True, timeout=False, slow=False,
enhance_asserts=False, fail_on_timeout=False):
reporter = self._reporter
funcs = []
try:
gl = {'__file__': filename}
try:
open_file = lambda: open(filename, encoding="utf8")
with open_file() as f:
source = f.read()
if self._kw:
for l in source.splitlines():
if l.lstrip().startswith('def '):
if any(l.find(k) != -1 for k in self._kw):
break
else:
return
if enhance_asserts:
try:
source = self._enhance_asserts(source)
except ImportError:
pass
code = compile(source, filename, "exec", flags=0, dont_inherit=True)
exec(code, gl)
except (SystemExit, KeyboardInterrupt):
raise
except ImportError:
reporter.import_error(filename, sys.exc_info())
return
except Exception:
reporter.test_exception(sys.exc_info())
clear_cache()
self._count += 1
random.seed(self._seed)
disabled = gl.get("disabled", False)
if not disabled:
# we need to filter only those functions that begin with 'test_'
# We have to be careful about decorated functions. As long as
# the decorator uses functools.wraps, we can detect it.
funcs = []
for f in gl:
if (f.startswith("test_") and (inspect.isfunction(gl[f])
or inspect.ismethod(gl[f]))):
func = gl[f]
# Handle multiple decorators
while hasattr(func, '__wrapped__'):
func = func.__wrapped__
if inspect.getsourcefile(func) == filename:
funcs.append(gl[f])
if slow:
funcs = [f for f in funcs if getattr(f, '_slow', False)]
# Sorting of XFAILed functions isn't fixed yet :-(
funcs.sort(key=lambda x: inspect.getsourcelines(x)[1])
i = 0
while i < len(funcs):
if inspect.isgeneratorfunction(funcs[i]):
# some tests can be generators, that return the actual
# test functions. We unpack it below:
f = funcs.pop(i)
for fg in f():
func = fg[0]
args = fg[1:]
fgw = lambda: func(*args)
funcs.insert(i, fgw)
i += 1
else:
i += 1
# drop functions that are not selected with the keyword expression:
funcs = [x for x in funcs if self.matches(x)]
if not funcs:
return
except Exception:
reporter.entering_filename(filename, len(funcs))
raise
reporter.entering_filename(filename, len(funcs))
if not sort:
random.shuffle(funcs)
for f in funcs:
start = time.time()
reporter.entering_test(f)
try:
if getattr(f, '_slow', False) and not slow:
raise Skipped("Slow")
with raise_on_deprecated():
if timeout:
self._timeout(f, timeout, fail_on_timeout)
else:
random.seed(self._seed)
f()
except KeyboardInterrupt:
if getattr(f, '_slow', False):
reporter.test_skip("KeyboardInterrupt")
else:
raise
except Exception:
if timeout:
signal.alarm(0) # Disable the alarm. It could not be handled before.
t, v, tr = sys.exc_info()
if t is AssertionError:
reporter.test_fail((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
elif t.__name__ == "Skipped":
reporter.test_skip(v)
elif t.__name__ == "XFail":
reporter.test_xfail()
elif t.__name__ == "XPass":
reporter.test_xpass(v)
else:
reporter.test_exception((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
else:
reporter.test_pass()
taken = time.time() - start
if taken > self._slow_threshold:
filename = os.path.relpath(filename, reporter._root_dir)
reporter.slow_test_functions.append(
(filename + "::" + f.__name__, taken))
if getattr(f, '_slow', False) and slow:
if taken < self._fast_threshold:
filename = os.path.relpath(filename, reporter._root_dir)
reporter.fast_test_functions.append(
(filename + "::" + f.__name__, taken))
reporter.leaving_filename()
def _timeout(self, function, timeout, fail_on_timeout):
def callback(x, y):
signal.alarm(0)
if fail_on_timeout:
raise TimeOutError("Timed out after %d seconds" % timeout)
else:
raise Skipped("Timeout")
signal.signal(signal.SIGALRM, callback)
signal.alarm(timeout) # Set an alarm with a given timeout
function()
signal.alarm(0) # Disable the alarm
def matches(self, x):
"""
Does the keyword expression self._kw match "x"? Returns True/False.
Always returns True if self._kw is "".
"""
if not self._kw:
return True
for kw in self._kw:
if x.__name__.find(kw) != -1:
return True
return False
def get_test_files(self, dir, pat='test_*.py'):
"""
Returns the list of test_*.py (default) files at or below directory
``dir`` relative to the SymPy home directory.
"""
dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
g = []
for path, folders, files in os.walk(dir):
g.extend([os.path.join(path, f) for f in files if fnmatch(f, pat)])
return sorted([os.path.normcase(gi) for gi in g])
class SymPyDocTests:
def __init__(self, reporter, normal):
self._count = 0
self._root_dir = get_sympy_dir()
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._normal = normal
self._testfiles = []
def test(self):
"""
Runs the tests and returns True if all tests pass, otherwise False.
"""
self._reporter.start()
for f in self._testfiles:
try:
self.test_file(f)
except KeyboardInterrupt:
print(" interrupted by user")
self._reporter.finish()
raise
return self._reporter.finish()
def test_file(self, filename):
clear_cache()
from io import StringIO
import sympy.interactive.printing as interactive_printing
from sympy.printing.pretty.pretty import pprint_use_unicode
rel_name = filename[len(self._root_dir) + 1:]
dirname, file = os.path.split(filename)
module = rel_name.replace(os.sep, '.')[:-3]
if rel_name.startswith("examples"):
# Examples files do not have __init__.py files,
# So we have to temporarily extend sys.path to import them
sys.path.insert(0, dirname)
module = file[:-3] # remove ".py"
try:
module = pdoctest._normalize_module(module)
tests = SymPyDocTestFinder().find(module)
except (SystemExit, KeyboardInterrupt):
raise
except ImportError:
self._reporter.import_error(filename, sys.exc_info())
return
finally:
if rel_name.startswith("examples"):
del sys.path[0]
tests = [test for test in tests if len(test.examples) > 0]
# By default tests are sorted by alphabetical order by function name.
# We sort by line number so one can edit the file sequentially from
# bottom to top. However, if there are decorated functions, their line
# numbers will be too large and for now one must just search for these
# by text and function name.
tests.sort(key=lambda x: -x.lineno)
if not tests:
return
self._reporter.entering_filename(filename, len(tests))
for test in tests:
assert len(test.examples) != 0
if self._reporter._verbose:
self._reporter.write("\n{} ".format(test.name))
# check if there are external dependencies which need to be met
if '_doctest_depends_on' in test.globs:
try:
self._check_dependencies(**test.globs['_doctest_depends_on'])
except DependencyError as e:
self._reporter.test_skip(v=str(e))
continue
runner = SymPyDocTestRunner(optionflags=pdoctest.ELLIPSIS |
pdoctest.NORMALIZE_WHITESPACE |
pdoctest.IGNORE_EXCEPTION_DETAIL)
runner._checker = SymPyOutputChecker()
old = sys.stdout
new = StringIO()
sys.stdout = new
# If the testing is normal, the doctests get importing magic to
# provide the global namespace. If not normal (the default) then
# then must run on their own; all imports must be explicit within
# a function's docstring. Once imported that import will be
# available to the rest of the tests in a given function's
# docstring (unless clear_globs=True below).
if not self._normal:
test.globs = {}
# if this is uncommented then all the test would get is what
# comes by default with a "from sympy import *"
#exec('from sympy import *') in test.globs
test.globs['print_function'] = print_function
old_displayhook = sys.displayhook
use_unicode_prev = setup_pprint()
try:
f, t = runner.run(test, compileflags=future_flags,
out=new.write, clear_globs=False)
except KeyboardInterrupt:
raise
finally:
sys.stdout = old
if f > 0:
self._reporter.doctest_fail(test.name, new.getvalue())
else:
self._reporter.test_pass()
sys.displayhook = old_displayhook
interactive_printing.NO_GLOBAL = False
pprint_use_unicode(use_unicode_prev)
self._reporter.leaving_filename()
def get_test_files(self, dir, pat='*.py', init_only=True):
r"""
Returns the list of \*.py files (default) from which docstrings
will be tested which are at or below directory ``dir``. By default,
only those that have an __init__.py in their parent directory
and do not start with ``test_`` will be included.
"""
def importable(x):
"""
Checks if given pathname x is an importable module by checking for
__init__.py file.
Returns True/False.
Currently we only test if the __init__.py file exists in the
directory with the file "x" (in theory we should also test all the
parent dirs).
"""
init_py = os.path.join(os.path.dirname(x), "__init__.py")
return os.path.exists(init_py)
dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
g = []
for path, folders, files in os.walk(dir):
g.extend([os.path.join(path, f) for f in files
if not f.startswith('test_') and fnmatch(f, pat)])
if init_only:
# skip files that are not importable (i.e. missing __init__.py)
g = [x for x in g if importable(x)]
return [os.path.normcase(gi) for gi in g]
def _check_dependencies(self,
executables=(),
modules=(),
disable_viewers=(),
python_version=(3, 5)):
"""
Checks if the dependencies for the test are installed.
        Raises ``DependencyError`` if at least one dependency is not installed.
"""
for executable in executables:
if not shutil.which(executable):
raise DependencyError("Could not find %s" % executable)
for module in modules:
if module == 'matplotlib':
matplotlib = import_module(
'matplotlib',
import_kwargs={'fromlist':
['pyplot', 'cm', 'collections']},
min_module_version='1.0.0', catch=(RuntimeError,))
if matplotlib is None:
raise DependencyError("Could not import matplotlib")
else:
if not import_module(module):
raise DependencyError("Could not import %s" % module)
if disable_viewers:
tempdir = tempfile.mkdtemp()
os.environ['PATH'] = '%s:%s' % (tempdir, os.environ['PATH'])
vw = ('#!/usr/bin/env python3\n'
'import sys\n'
'if len(sys.argv) <= 1:\n'
' exit("wrong number of args")\n')
for viewer in disable_viewers:
with open(os.path.join(tempdir, viewer), 'w') as fh:
fh.write(vw)
# make the file executable
os.chmod(os.path.join(tempdir, viewer),
stat.S_IREAD | stat.S_IWRITE | stat.S_IXUSR)
if python_version:
if sys.version_info < python_version:
raise DependencyError("Requires Python >= " + '.'.join(map(str, python_version)))
if 'pyglet' in modules:
# monkey-patch pyglet s.t. it does not open a window during
# doctesting
import pyglet
class DummyWindow:
def __init__(self, *args, **kwargs):
self.has_exit = True
self.width = 600
self.height = 400
def set_vsync(self, x):
pass
def switch_to(self):
pass
def push_handlers(self, x):
pass
def close(self):
pass
pyglet.window.Window = DummyWindow
class SymPyDocTestFinder(DocTestFinder):
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
Modified from doctest's version to look harder for code that
    appears to come from a different module. For example, the @vectorize
decorator makes it look like functions come from multidimensional.py
even though their code exists elsewhere.
"""
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to ``tests``.
"""
if self._verbose:
print('Finding tests in %s' % name)
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Make sure we don't run doctests for classes outside of sympy, such
# as in numpy or scipy.
if inspect.isclass(obj):
if obj.__module__.split('.')[0] != 'sympy':
return
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
if not self._recurse:
return
# Look for tests in a module's contained objects.
if inspect.ismodule(obj):
for rawname, val in obj.__dict__.items():
# Recurse to functions & classes.
if inspect.isfunction(val) or inspect.isclass(val):
# Make sure we don't run doctests functions or classes
# from different modules
if val.__module__ != module.__name__:
continue
assert self._from_module(module, val), \
"%s is not in module %s (rawname %s)" % (val, module, rawname)
try:
valname = '%s.%s' % (name, rawname)
self._find(tests, val, valname, module,
source_lines, globs, seen)
except KeyboardInterrupt:
raise
# Look for tests in a module's __test__ dictionary.
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, str):
raise ValueError("SymPyDocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, str)):
raise ValueError("SymPyDocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj):
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).__func__
# Recurse to methods, properties, and nested classes.
if ((inspect.isfunction(unwrap(val)) or
inspect.isclass(val) or
isinstance(val, property)) and
self._from_module(module, val)):
# Make sure we don't run doctests functions or classes
# from different modules
if isinstance(val, property):
if hasattr(val.fget, '__module__'):
if val.fget.__module__ != module.__name__:
continue
else:
if val.__module__ != module.__name__:
continue
assert self._from_module(module, val), \
"%s is not in module %s (valname %s)" % (
val, module, valname)
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
lineno = None
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, str):
# obj is a string in the case for objects in the polys package.
# Note that source_lines is a binary string (compiled polys
# modules), which can't be handled by _find_lineno so determine
# the line number here.
docstring = obj
matches = re.findall(r"line \d+", name)
assert len(matches) == 1, \
"string '%s' does not contain lineno " % name
            # NOTE: this is not the exact line number but it's better than no
# lineno ;)
lineno = int(matches[0][5:])
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, str):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# check that properties have a docstring because _find_lineno
# assumes it
if isinstance(obj, property):
if obj.fget.__doc__ is None:
return None
# Find the docstring's location in the file.
if lineno is None:
obj = unwrap(obj)
# handling of properties is not implemented in _find_lineno so do
# it here
if hasattr(obj, 'func_closure') and obj.func_closure is not None:
tobj = obj.func_closure[0].cell_contents
elif isinstance(obj, property):
tobj = obj.fget
else:
tobj = obj
lineno = self._find_lineno(tobj, source_lines)
if lineno is None:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
globs['_doctest_depends_on'] = getattr(obj, '_doctest_depends_on', {})
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
class SymPyDocTestRunner(DocTestRunner):
"""
A class used to run DocTest test cases, and accumulate statistics.
The ``run`` method is used to process a single DocTest case. It
returns a tuple ``(f, t)``, where ``t`` is the number of test cases
tried, and ``f`` is the number of test cases that failed.
Modified from the doctest version to not reset the sys.displayhook (see
issue 5140).
See the docstring of the original DocTestRunner for more information.
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in ``test``, and display the results using the
writer function ``out``.
The examples are run in the namespace ``test.globs``. If
``clear_globs`` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use ``clear_globs=False``.
``compileflags`` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to ``globs``.
The output of each example is checked using
``SymPyDocTestRunner.check_output``, and the results are
formatted by the ``SymPyDocTestRunner.report_*`` methods.
"""
self.test = test
if compileflags is None:
compileflags = pdoctest._extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = pdoctest._OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = pdoctest.linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
# Fail for deprecation warnings
with raise_on_deprecated():
try:
test.globs['print_function'] = print_function
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
# We have to override the name mangled methods.
monkeypatched_methods = [
'patched_linecache_getlines',
'run',
'record_outcome'
]
for method in monkeypatched_methods:
oldname = '_DocTestRunner__' + method
newname = '_SymPyDocTestRunner__' + method
setattr(SymPyDocTestRunner, newname, getattr(DocTestRunner, oldname))
class SymPyOutputChecker(pdoctest.OutputChecker):
"""
    Compared to the OutputChecker from the stdlib, our OutputChecker class
    supports numerical comparison of floats occurring in the output of
    doctest examples.
"""
def __init__(self):
# NOTE OutputChecker is an old-style class with no __init__ method,
# so we can't call the base class version of __init__ here
got_floats = r'(\d+\.\d*|\.\d+)'
# floats in the 'want' string may contain ellipses
want_floats = got_floats + r'(\.{3})?'
front_sep = r'\s|\+|\-|\*|,'
back_sep = front_sep + r'|j|e'
fbeg = r'^%s(?=%s|$)' % (got_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, got_floats, back_sep)
self.num_got_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
fbeg = r'^%s(?=%s|$)' % (want_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, want_floats, back_sep)
self.num_want_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `TestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# TODO parse integers as well ?
# Parse floats and compare them. If some of the parsed floats contain
# ellipses, skip the comparison.
matches = self.num_got_rgx.finditer(got)
numbers_got = [match.group(1) for match in matches] # list of strs
matches = self.num_want_rgx.finditer(want)
numbers_want = [match.group(1) for match in matches] # list of strs
if len(numbers_got) != len(numbers_want):
return False
if len(numbers_got) > 0:
nw_ = []
for ng, nw in zip(numbers_got, numbers_want):
if '...' in nw:
nw_.append(ng)
continue
else:
nw_.append(nw)
if abs(float(ng)-float(nw)) > 1e-5:
return False
got = self.num_got_rgx.sub(r'%s', got)
got = got % tuple(nw_)
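            # At this point, every float in ``got`` that was numerically
            # close enough to its counterpart in ``want`` has been replaced
            # by the corresponding ``want`` text (floats matched by an
            # ellipsis keep the ``got`` text), so the checks below are
            # purely textual.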
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub(r'(?m)^%s\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub(r'(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & pdoctest.NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & pdoctest.ELLIPSIS:
if pdoctest._ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
class Reporter:
"""
Parent class for all reporters.
"""
pass
class PyTestReporter(Reporter):
"""
Py.test like reporter. Should produce output identical to py.test.
"""
def __init__(self, verbose=False, tb="short", colors=True,
force_colors=False, split=None):
self._verbose = verbose
self._tb_style = tb
self._colors = colors
self._force_colors = force_colors
self._xfailed = 0
self._xpassed = []
self._failed = []
self._failed_doctest = []
self._passed = 0
self._skipped = 0
self._exceptions = []
self._terminal_width = None
self._default_width = 80
self._split = split
self._active_file = ''
self._active_f = None
# TODO: Should these be protected?
self.slow_test_functions = []
self.fast_test_functions = []
# this tracks the x-position of the cursor (useful for positioning
# things on the screen), without the need for any readline library:
self._write_pos = 0
self._line_wrap = False
def root_dir(self, dir):
self._root_dir = dir
@property
def terminal_width(self):
if self._terminal_width is not None:
return self._terminal_width
def findout_terminal_width():
if sys.platform == "win32":
# Windows support is based on:
#
# http://code.activestate.com/recipes/
# 440694-determine-size-of-console-window-on-windows/
from ctypes import windll, create_string_buffer
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
import struct
(_, _, _, _, _, left, _, right, _, _, _) = \
struct.unpack("hhhhHhhhhhh", csbi.raw)
return right - left
else:
return self._default_width
if hasattr(sys.stdout, 'isatty') and not sys.stdout.isatty():
return self._default_width # leave PIPEs alone
try:
process = subprocess.Popen(['stty', '-a'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout = process.stdout.read()
stdout = stdout.decode("utf-8")
except OSError:
pass
else:
# We support the following output formats from stty:
#
# 1) Linux -> columns 80
# 2) OS X -> 80 columns
# 3) Solaris -> columns = 80
re_linux = r"columns\s+(?P<columns>\d+);"
re_osx = r"(?P<columns>\d+)\s*columns;"
re_solaris = r"columns\s+=\s+(?P<columns>\d+);"
for regex in (re_linux, re_osx, re_solaris):
match = re.search(regex, stdout)
if match is not None:
columns = match.group('columns')
try:
width = int(columns)
except ValueError:
                            continue
if width != 0:
return width
return self._default_width
width = findout_terminal_width()
self._terminal_width = width
return width
def write(self, text, color="", align="left", width=None,
force_colors=False):
"""
Prints a text on the screen.
It uses sys.stdout.write(), so no readline library is necessary.
Parameters
==========
color : choose from the colors below, "" means default color
align : "left"/"right", "left" is a normal print, "right" is aligned on
the right-hand side of the screen, filled with spaces if
necessary
width : the screen width
"""
color_templates = (
("Black", "0;30"),
("Red", "0;31"),
("Green", "0;32"),
("Brown", "0;33"),
("Blue", "0;34"),
("Purple", "0;35"),
("Cyan", "0;36"),
("LightGray", "0;37"),
("DarkGray", "1;30"),
("LightRed", "1;31"),
("LightGreen", "1;32"),
("Yellow", "1;33"),
("LightBlue", "1;34"),
("LightPurple", "1;35"),
("LightCyan", "1;36"),
("White", "1;37"),
)
colors = {}
for name, value in color_templates:
colors[name] = value
c_normal = '\033[0m'
c_color = '\033[%sm'
if width is None:
width = self.terminal_width
if align == "right":
if self._write_pos + len(text) > width:
# we don't fit on the current line, create a new line
self.write("\n")
self.write(" "*(width - self._write_pos - len(text)))
if not self._force_colors and hasattr(sys.stdout, 'isatty') and not \
sys.stdout.isatty():
            # stdout is not a terminal; this happens, for example, if the
            # output is piped to less, e.g. "bin/test | less". In this case,
            # the terminal control sequences would be printed verbatim, so
            # don't use any colors.
color = ""
elif sys.platform == "win32":
# Windows consoles don't support ANSI escape sequences
color = ""
elif not self._colors:
color = ""
if self._line_wrap:
if text[0] != "\n":
sys.stdout.write("\n")
# Avoid UnicodeEncodeError when printing out test failures
if IS_WINDOWS:
text = text.encode('raw_unicode_escape').decode('utf8', 'ignore')
elif not sys.stdout.encoding.lower().startswith('utf'):
text = text.encode(sys.stdout.encoding, 'backslashreplace'
).decode(sys.stdout.encoding)
if color == "":
sys.stdout.write(text)
else:
sys.stdout.write("%s%s%s" %
(c_color % colors[color], text, c_normal))
sys.stdout.flush()
l = text.rfind("\n")
if l == -1:
self._write_pos += len(text)
else:
self._write_pos = len(text) - l - 1
self._line_wrap = self._write_pos >= width
self._write_pos %= width
def write_center(self, text, delim="="):
width = self.terminal_width
if text != "":
text = " %s " % text
idx = (width - len(text)) // 2
t = delim*idx + text + delim*(width - idx - len(text))
self.write(t + "\n")
def write_exception(self, e, val, tb):
# remove the first item, as that is always runtests.py
tb = tb.tb_next
t = traceback.format_exception(e, val, tb)
self.write("".join(t))
def start(self, seed=None, msg="test process starts"):
self.write_center(msg)
executable = sys.executable
v = tuple(sys.version_info)
python_version = "%s.%s.%s-%s-%s" % v
implementation = platform.python_implementation()
if implementation == 'PyPy':
implementation += " %s.%s.%s-%s-%s" % sys.pypy_version_info
self.write("executable: %s (%s) [%s]\n" %
(executable, python_version, implementation))
from sympy.utilities.misc import ARCH
self.write("architecture: %s\n" % ARCH)
from sympy.core.cache import USE_CACHE
self.write("cache: %s\n" % USE_CACHE)
version = ''
if GROUND_TYPES =='gmpy':
if HAS_GMPY == 1:
import gmpy
elif HAS_GMPY == 2:
import gmpy2 as gmpy
version = gmpy.version()
self.write("ground types: %s %s\n" % (GROUND_TYPES, version))
numpy = import_module('numpy')
self.write("numpy: %s\n" % (None if not numpy else numpy.__version__))
if seed is not None:
self.write("random seed: %d\n" % seed)
from sympy.utilities.misc import HASH_RANDOMIZATION
self.write("hash randomization: ")
hash_seed = os.getenv("PYTHONHASHSEED") or '0'
if HASH_RANDOMIZATION and (hash_seed == "random" or int(hash_seed)):
self.write("on (PYTHONHASHSEED=%s)\n" % hash_seed)
else:
self.write("off\n")
if self._split:
self.write("split: %s\n" % self._split)
self.write('\n')
self._t_start = clock()
def finish(self):
self._t_end = clock()
self.write("\n")
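        # ``text`` and ``linelen`` are module-level globals so that the
        # nested ``add_text`` helper defined below can update them in place.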
global text, linelen
text = "tests finished: %d passed, " % self._passed
linelen = len(text)
def add_text(mytext):
            """Break new text if too long."""
            global text, linelen
if linelen + len(mytext) > self.terminal_width:
text += '\n'
linelen = 0
text += mytext
linelen += len(mytext)
if len(self._failed) > 0:
add_text("%d failed, " % len(self._failed))
if len(self._failed_doctest) > 0:
add_text("%d failed, " % len(self._failed_doctest))
if self._skipped > 0:
add_text("%d skipped, " % self._skipped)
if self._xfailed > 0:
add_text("%d expected to fail, " % self._xfailed)
if len(self._xpassed) > 0:
add_text("%d expected to fail but passed, " % len(self._xpassed))
if len(self._exceptions) > 0:
add_text("%d exceptions, " % len(self._exceptions))
add_text("in %.2f seconds" % (self._t_end - self._t_start))
if self.slow_test_functions:
self.write_center('slowest tests', '_')
sorted_slow = sorted(self.slow_test_functions, key=lambda r: r[1])
for slow_func_name, taken in sorted_slow:
print('%s - Took %.3f seconds' % (slow_func_name, taken))
if self.fast_test_functions:
self.write_center('unexpectedly fast tests', '_')
sorted_fast = sorted(self.fast_test_functions,
key=lambda r: r[1])
for fast_func_name, taken in sorted_fast:
print('%s - Took %.3f seconds' % (fast_func_name, taken))
if len(self._xpassed) > 0:
self.write_center("xpassed tests", "_")
for e in self._xpassed:
self.write("%s: %s\n" % (e[0], e[1]))
self.write("\n")
if self._tb_style != "no" and len(self._exceptions) > 0:
for e in self._exceptions:
filename, f, (t, val, tb) = e
self.write_center("", "_")
if f is None:
s = "%s" % filename
else:
s = "%s:%s" % (filename, f.__name__)
self.write_center(s, "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed) > 0:
for e in self._failed:
filename, f, (t, val, tb) = e
self.write_center("", "_")
self.write_center("%s:%s" % (filename, f.__name__), "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed_doctest) > 0:
for e in self._failed_doctest:
filename, msg = e
self.write_center("", "_")
self.write_center("%s" % filename, "_")
self.write(msg)
self.write("\n")
self.write_center(text)
ok = len(self._failed) == 0 and len(self._exceptions) == 0 and \
len(self._failed_doctest) == 0
if not ok:
self.write("DO *NOT* COMMIT!\n")
return ok
def entering_filename(self, filename, n):
rel_name = filename[len(self._root_dir) + 1:]
self._active_file = rel_name
self._active_file_error = False
self.write(rel_name)
self.write("[%d] " % n)
def leaving_filename(self):
self.write(" ")
if self._active_file_error:
self.write("[FAIL]", "Red", align="right")
else:
self.write("[OK]", "Green", align="right")
self.write("\n")
if self._verbose:
self.write("\n")
def entering_test(self, f):
self._active_f = f
if self._verbose:
self.write("\n" + f.__name__ + " ")
def test_xfail(self):
self._xfailed += 1
self.write("f", "Green")
def test_xpass(self, v):
message = str(v)
self._xpassed.append((self._active_file, message))
self.write("X", "Green")
def test_fail(self, exc_info):
self._failed.append((self._active_file, self._active_f, exc_info))
self.write("F", "Red")
self._active_file_error = True
def doctest_fail(self, name, error_msg):
# the first line contains "******", remove it:
error_msg = "\n".join(error_msg.split("\n")[1:])
self._failed_doctest.append((name, error_msg))
self.write("F", "Red")
self._active_file_error = True
def test_pass(self, char="."):
self._passed += 1
if self._verbose:
self.write("ok", "Green")
else:
self.write(char, "Green")
def test_skip(self, v=None):
char = "s"
self._skipped += 1
if v is not None:
message = str(v)
if message == "KeyboardInterrupt":
char = "K"
elif message == "Timeout":
char = "T"
elif message == "Slow":
char = "w"
if self._verbose:
if v is not None:
self.write(message + ' ', "Blue")
else:
self.write(" - ", "Blue")
self.write(char, "Blue")
def test_exception(self, exc_info):
self._exceptions.append((self._active_file, self._active_f, exc_info))
if exc_info[0] is TimeOutError:
self.write("T", "Red")
else:
self.write("E", "Red")
self._active_file_error = True
def import_error(self, filename, exc_info):
self._exceptions.append((filename, None, exc_info))
rel_name = filename[len(self._root_dir) + 1:]
self.write(rel_name)
self.write("[?] Failed to import", "Red")
self.write(" ")
self.write("[FAIL]", "Red", align="right")
self.write("\n")
a9813212a67f3b8a596f5fdcf55d91b24b523a69b76b1e42d1e66e1026668056
""" Helpers for randomized testing """
from random import uniform, Random, randrange, randint
from sympy.core.containers import Tuple
from sympy.core.function import Derivative
from sympy.core.numbers import comp, I
from sympy.core.symbol import Symbol
from sympy.simplify.simplify import nsimplify
from sympy.utilities.iterables import is_sequence
from sympy.utilities.misc import as_int
def random_complex_number(a=2, b=-1, c=3, d=1, rational=False, tolerance=None):
"""
Return a random complex number.
    To reduce the chance of hitting branch cuts or anything, we guarantee
    b <= Im z <= d, a <= Re z <= c
When rational is True, a rational approximation to a random number
is obtained within specified tolerance, if any.
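    Examples
    ========
    A minimal illustration; the value is random, so the output shown is
    only indicative and the example is skipped by the doctester.
    >>> from sympy.testing.randtest import random_complex_number
    >>> random_complex_number()  # doctest: +SKIP
    2.32676594864353 + 0.0989508714889241*I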
"""
A, B = uniform(a, c), uniform(b, d)
if not rational:
return A + I*B
return (nsimplify(A, rational=True, tolerance=tolerance) +
I*nsimplify(B, rational=True, tolerance=tolerance))
def verify_numerically(f, g, z=None, tol=1.0e-6, a=2, b=-1, c=3, d=1):
"""
Test numerically that f and g agree when evaluated in the argument z.
If z is None, all symbols will be tested. This routine does not test
whether there are Floats present with precision higher than 15 digits
so if there are, your results may not be what you expect due to round-
off errors.
Examples
========
>>> from sympy import sin, cos
>>> from sympy.abc import x
>>> from sympy.testing.randtest import verify_numerically as tn
>>> tn(sin(x)**2 + cos(x)**2, 1, x)
True
"""
f, g, z = Tuple(f, g, z)
z = [z] if isinstance(z, Symbol) else (f.free_symbols | g.free_symbols)
reps = list(zip(z, [random_complex_number(a, b, c, d) for _ in z]))
z1 = f.subs(reps).n()
z2 = g.subs(reps).n()
return comp(z1, z2, tol)
def test_derivative_numerically(f, z, tol=1.0e-6, a=2, b=-1, c=3, d=1):
"""
Test numerically that the symbolically computed derivative of f
with respect to z is correct.
This routine does not test whether there are Floats present with
precision higher than 15 digits so if there are, your results may
not be what you expect due to round-off errors.
Examples
========
>>> from sympy import sin
>>> from sympy.abc import x
>>> from sympy.testing.randtest import test_derivative_numerically as td
>>> td(sin(x), x)
True
"""
z0 = random_complex_number(a, b, c, d)
f1 = f.diff(z).subs(z, z0)
f2 = Derivative(f, z).doit_numerically(z0)
return comp(f1.n(), f2.n(), tol)
def _randrange(seed=None):
"""Return a randrange generator. ``seed`` can be
o None - return randomly seeded generator
o int - return a generator seeded with the int
o list - the values to be returned will be taken from the list
in the order given; the provided list is not modified.
Examples
========
>>> from sympy.testing.randtest import _randrange
>>> rr = _randrange()
>>> rr(1000) # doctest: +SKIP
999
>>> rr = _randrange(3)
>>> rr(1000) # doctest: +SKIP
238
>>> rr = _randrange([0, 5, 1, 3, 4])
>>> rr(3), rr(3)
(0, 1)
"""
if seed is None:
return randrange
elif isinstance(seed, int):
return Random(seed).randrange
elif is_sequence(seed):
seed = list(seed) # make a copy
seed.reverse()
def give(a, b=None, seq=seed):
if b is None:
a, b = 0, a
a, b = as_int(a), as_int(b)
w = b - a
if w < 1:
raise ValueError('_randrange got empty range')
try:
x = seq.pop()
except IndexError:
raise ValueError('_randrange sequence was too short')
if a <= x < b:
return x
else:
return give(a, b, seq)
return give
else:
raise ValueError('_randrange got an unexpected seed')
def _randint(seed=None):
"""Return a randint generator. ``seed`` can be
o None - return randomly seeded generator
o int - return a generator seeded with the int
o list - the values to be returned will be taken from the list
in the order given; the provided list is not modified.
Examples
========
>>> from sympy.testing.randtest import _randint
>>> ri = _randint()
>>> ri(1, 1000) # doctest: +SKIP
999
>>> ri = _randint(3)
>>> ri(1, 1000) # doctest: +SKIP
238
>>> ri = _randint([0, 5, 1, 2, 4])
>>> ri(1, 3), ri(1, 3)
(1, 2)
"""
if seed is None:
return randint
elif isinstance(seed, int):
return Random(seed).randint
elif is_sequence(seed):
seed = list(seed) # make a copy
seed.reverse()
def give(a, b, seq=seed):
a, b = as_int(a), as_int(b)
w = b - a
if w < 0:
raise ValueError('_randint got empty range')
try:
x = seq.pop()
except IndexError:
raise ValueError('_randint sequence was too short')
if a <= x <= b:
return x
else:
return give(a, b, seq)
return give
else:
raise ValueError('_randint got an unexpected seed')
12d58d59f3af02ecbfc22a5a9bd480eff9ae292aec3fee3750f4420b3e7ab1bd
from sympy.core.basic import Basic
from sympy.core.sympify import sympify
from sympy.functions.elementary.trigonometric import (cos, sin)
from sympy.matrices.dense import (eye, rot_axis1, rot_axis2, rot_axis3)
from sympy.matrices.immutable import ImmutableDenseMatrix as Matrix
from sympy.core.cache import cacheit
from sympy.core.symbol import Str
import sympy.vector
class Orienter(Basic):
"""
Super-class for all orienter classes.
"""
def rotation_matrix(self):
"""
The rotation matrix corresponding to this orienter
instance.
"""
return self._parent_orient
class AxisOrienter(Orienter):
"""
Class to denote an axis orienter.
"""
def __new__(cls, angle, axis):
if not isinstance(axis, sympy.vector.Vector):
raise TypeError("axis should be a Vector")
angle = sympify(angle)
obj = super().__new__(cls, angle, axis)
obj._angle = angle
obj._axis = axis
return obj
def __init__(self, angle, axis):
"""
Axis rotation is a rotation about an arbitrary axis by
some angle. The angle is supplied as a SymPy expr scalar, and
the axis is supplied as a Vector.
Parameters
==========
angle : Expr
The angle by which the new system is to be rotated
axis : Vector
The axis around which the rotation has to be performed
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import symbols
>>> q1 = symbols('q1')
>>> N = CoordSys3D('N')
>>> from sympy.vector import AxisOrienter
>>> orienter = AxisOrienter(q1, N.i + 2 * N.j)
>>> B = N.orient_new('B', (orienter, ))
"""
# Dummy initializer for docstrings
pass
@cacheit
def rotation_matrix(self, system):
"""
The rotation matrix corresponding to this orienter
instance.
Parameters
==========
system : CoordSys3D
The coordinate system wrt which the rotation matrix
is to be computed
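        Examples
        ========
        A small usage sketch; the result (not shown) is the 3x3 rotation
        matrix of this orienter expressed in the given system.
        >>> from sympy.vector import CoordSys3D, AxisOrienter
        >>> from sympy import symbols
        >>> q1 = symbols('q1')
        >>> N = CoordSys3D('N')
        >>> orienter = AxisOrienter(q1, N.i)
        >>> R = orienter.rotation_matrix(N)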
"""
axis = sympy.vector.express(self.axis, system).normalize()
axis = axis.to_matrix(system)
theta = self.angle
parent_orient = ((eye(3) - axis * axis.T) * cos(theta) +
Matrix([[0, -axis[2], axis[1]],
[axis[2], 0, -axis[0]],
[-axis[1], axis[0], 0]]) * sin(theta) +
axis * axis.T)
parent_orient = parent_orient.T
return parent_orient
@property
def angle(self):
return self._angle
@property
def axis(self):
return self._axis
class ThreeAngleOrienter(Orienter):
"""
Super-class for Body and Space orienters.
"""
def __new__(cls, angle1, angle2, angle3, rot_order):
if isinstance(rot_order, Str):
rot_order = rot_order.name
approved_orders = ('123', '231', '312', '132', '213',
'321', '121', '131', '212', '232',
'313', '323', '')
original_rot_order = rot_order
rot_order = str(rot_order).upper()
if not (len(rot_order) == 3):
raise TypeError('rot_order should be a str of length 3')
rot_order = [i.replace('X', '1') for i in rot_order]
rot_order = [i.replace('Y', '2') for i in rot_order]
rot_order = [i.replace('Z', '3') for i in rot_order]
rot_order = ''.join(rot_order)
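        # After the replacements above, e.g. 'xyz', 'XYZ' and '123' have all
        # been normalized to the string '123'.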
if rot_order not in approved_orders:
            raise TypeError('Invalid rot_order parameter')
a1 = int(rot_order[0])
a2 = int(rot_order[1])
a3 = int(rot_order[2])
angle1 = sympify(angle1)
angle2 = sympify(angle2)
angle3 = sympify(angle3)
if cls._in_order:
parent_orient = (_rot(a1, angle1) *
_rot(a2, angle2) *
_rot(a3, angle3))
else:
parent_orient = (_rot(a3, angle3) *
_rot(a2, angle2) *
_rot(a1, angle1))
parent_orient = parent_orient.T
obj = super().__new__(
cls, angle1, angle2, angle3, Str(rot_order))
obj._angle1 = angle1
obj._angle2 = angle2
obj._angle3 = angle3
obj._rot_order = original_rot_order
obj._parent_orient = parent_orient
return obj
@property
def angle1(self):
return self._angle1
@property
def angle2(self):
return self._angle2
@property
def angle3(self):
return self._angle3
@property
def rot_order(self):
return self._rot_order
class BodyOrienter(ThreeAngleOrienter):
"""
Class to denote a body-orienter.
"""
_in_order = True
def __new__(cls, angle1, angle2, angle3, rot_order):
obj = ThreeAngleOrienter.__new__(cls, angle1, angle2, angle3,
rot_order)
return obj
def __init__(self, angle1, angle2, angle3, rot_order):
"""
Body orientation takes this coordinate system through three
successive simple rotations.
Body fixed rotations include both Euler Angles and
Tait-Bryan Angles, see https://en.wikipedia.org/wiki/Euler_angles.
Parameters
==========
angle1, angle2, angle3 : Expr
Three successive angles to rotate the coordinate system by
        rot_order : string
String defining the order of axes for rotation
Examples
========
>>> from sympy.vector import CoordSys3D, BodyOrienter
>>> from sympy import symbols
>>> q1, q2, q3 = symbols('q1 q2 q3')
>>> N = CoordSys3D('N')
A 'Body' fixed rotation is described by three angles and
three body-fixed rotation axes. To orient a coordinate system D
with respect to N, each sequential rotation is always about
the orthogonal unit vectors fixed to D. For example, a '123'
rotation will specify rotations about N.i, then D.j, then
        D.k. (Initially, D.i is the same as N.i)
Therefore,
>>> body_orienter = BodyOrienter(q1, q2, q3, '123')
>>> D = N.orient_new('D', (body_orienter, ))
        is the same as
>>> from sympy.vector import AxisOrienter
>>> axis_orienter1 = AxisOrienter(q1, N.i)
>>> D = N.orient_new('D', (axis_orienter1, ))
>>> axis_orienter2 = AxisOrienter(q2, D.j)
>>> D = D.orient_new('D', (axis_orienter2, ))
>>> axis_orienter3 = AxisOrienter(q3, D.k)
>>> D = D.orient_new('D', (axis_orienter3, ))
Acceptable rotation orders are of length 3, expressed in XYZ or
        123, and cannot have a rotation about an axis twice in a row.
>>> body_orienter1 = BodyOrienter(q1, q2, q3, '123')
>>> body_orienter2 = BodyOrienter(q1, q2, 0, 'ZXZ')
>>> body_orienter3 = BodyOrienter(0, 0, 0, 'XYX')
"""
# Dummy initializer for docstrings
pass
class SpaceOrienter(ThreeAngleOrienter):
"""
Class to denote a space-orienter.
"""
_in_order = False
def __new__(cls, angle1, angle2, angle3, rot_order):
obj = ThreeAngleOrienter.__new__(cls, angle1, angle2, angle3,
rot_order)
return obj
def __init__(self, angle1, angle2, angle3, rot_order):
"""
Space rotation is similar to Body rotation, but the rotations
are applied in the opposite order.
Parameters
==========
angle1, angle2, angle3 : Expr
Three successive angles to rotate the coordinate system by
        rot_order : string
String defining the order of axes for rotation
See Also
========
BodyOrienter : Orienter to orient systems wrt Euler angles.
Examples
========
>>> from sympy.vector import CoordSys3D, SpaceOrienter
>>> from sympy import symbols
>>> q1, q2, q3 = symbols('q1 q2 q3')
>>> N = CoordSys3D('N')
To orient a coordinate system D with respect to N, each
sequential rotation is always about N's orthogonal unit vectors.
For example, a '123' rotation will specify rotations about
N.i, then N.j, then N.k.
Therefore,
>>> space_orienter = SpaceOrienter(q1, q2, q3, '312')
>>> D = N.orient_new('D', (space_orienter, ))
        is the same as
>>> from sympy.vector import AxisOrienter
>>> axis_orienter1 = AxisOrienter(q1, N.i)
>>> B = N.orient_new('B', (axis_orienter1, ))
>>> axis_orienter2 = AxisOrienter(q2, N.j)
>>> C = B.orient_new('C', (axis_orienter2, ))
>>> axis_orienter3 = AxisOrienter(q3, N.k)
>>> D = C.orient_new('C', (axis_orienter3, ))
"""
# Dummy initializer for docstrings
pass
class QuaternionOrienter(Orienter):
"""
Class to denote a quaternion-orienter.
"""
def __new__(cls, q0, q1, q2, q3):
q0 = sympify(q0)
q1 = sympify(q1)
q2 = sympify(q2)
q3 = sympify(q3)
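        # Direction cosine matrix assembled from the quaternion components
        # (see the class docstring for the q0..q3 parametrization); it is
        # transposed below, as in the other orienter classes.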
parent_orient = (Matrix([[q0 ** 2 + q1 ** 2 - q2 ** 2 -
q3 ** 2,
2 * (q1 * q2 - q0 * q3),
2 * (q0 * q2 + q1 * q3)],
[2 * (q1 * q2 + q0 * q3),
q0 ** 2 - q1 ** 2 +
q2 ** 2 - q3 ** 2,
2 * (q2 * q3 - q0 * q1)],
[2 * (q1 * q3 - q0 * q2),
2 * (q0 * q1 + q2 * q3),
q0 ** 2 - q1 ** 2 -
q2 ** 2 + q3 ** 2]]))
parent_orient = parent_orient.T
obj = super().__new__(cls, q0, q1, q2, q3)
obj._q0 = q0
obj._q1 = q1
obj._q2 = q2
obj._q3 = q3
obj._parent_orient = parent_orient
return obj
    def __init__(self, q0, q1, q2, q3):
"""
Quaternion orientation orients the new CoordSys3D with
Quaternions, defined as a finite rotation about lambda, a unit
vector, by some amount theta.
This orientation is described by four parameters:
q0 = cos(theta/2)
q1 = lambda_x sin(theta/2)
q2 = lambda_y sin(theta/2)
q3 = lambda_z sin(theta/2)
Quaternion does not take in a rotation order.
Parameters
==========
q0, q1, q2, q3 : Expr
The quaternions to rotate the coordinate system by
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import symbols
>>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
>>> N = CoordSys3D('N')
>>> from sympy.vector import QuaternionOrienter
>>> q_orienter = QuaternionOrienter(q0, q1, q2, q3)
>>> B = N.orient_new('B', (q_orienter, ))
"""
# Dummy initializer for docstrings
pass
@property
def q0(self):
return self._q0
@property
def q1(self):
return self._q1
@property
def q2(self):
return self._q2
@property
def q3(self):
return self._q3
def _rot(axis, angle):
"""DCM for simple axis 1, 2 or 3 rotations. """
if axis == 1:
return Matrix(rot_axis1(angle).T)
elif axis == 2:
return Matrix(rot_axis2(angle).T)
elif axis == 3:
return Matrix(rot_axis3(angle).T)
9e4cac34d4bfd555c3331742a78c3b920a24a54805a9f7641fda90caafe02a54
from collections.abc import Callable
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.core.basic import Basic
from sympy.core.cache import cacheit
from sympy.core import S, Dummy, Lambda
from sympy.core.symbol import Str
from sympy.core.symbol import symbols
from sympy.matrices.immutable import ImmutableDenseMatrix as Matrix
from sympy.matrices.matrices import MatrixBase
from sympy.solvers import solve
from sympy.vector.scalar import BaseScalar
from sympy.core.containers import Tuple
from sympy.core.function import diff
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import (acos, atan2, cos, sin)
from sympy.matrices.dense import eye
from sympy.matrices.immutable import ImmutableDenseMatrix
from sympy.simplify.simplify import simplify
from sympy.simplify.trigsimp import trigsimp
import sympy.vector
from sympy.vector.orienters import (Orienter, AxisOrienter, BodyOrienter,
SpaceOrienter, QuaternionOrienter)
def CoordSysCartesian(*args, **kwargs):
SymPyDeprecationWarning(
feature="CoordSysCartesian",
useinstead="CoordSys3D",
issue=12865,
deprecated_since_version="1.1"
).warn()
return CoordSys3D(*args, **kwargs)
class CoordSys3D(Basic):
"""
Represents a coordinate system in 3-D space.
"""
def __new__(cls, name, transformation=None, parent=None, location=None,
rotation_matrix=None, vector_names=None, variable_names=None):
"""
The orientation/location parameters are necessary if this system
is being defined at a certain orientation or location wrt another.
Parameters
==========
name : str
The name of the new CoordSys3D instance.
transformation : Lambda, Tuple, str
Transformation defined by transformation equations or chosen
from predefined ones.
location : Vector
The position vector of the new system's origin wrt the parent
instance.
rotation_matrix : SymPy ImmutableMatrix
The rotation matrix of the new coordinate system with respect
to the parent. In other words, the output of
new_system.rotation_matrix(parent).
parent : CoordSys3D
The coordinate system wrt which the orientation/location
(or both) is being defined.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
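        Examples
        ========
        A minimal illustration with a plain Cartesian system (none of the
        optional arguments are used):
        >>> from sympy.vector import CoordSys3D
        >>> N = CoordSys3D('N')
        >>> N.base_vectors()
        (N.i, N.j, N.k)
        >>> N.base_scalars()
        (N.x, N.y, N.z)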
"""
name = str(name)
Vector = sympy.vector.Vector
Point = sympy.vector.Point
if not isinstance(name, str):
raise TypeError("name should be a string")
if transformation is not None:
if (location is not None) or (rotation_matrix is not None):
raise ValueError("specify either `transformation` or "
"`location`/`rotation_matrix`")
if isinstance(transformation, (Tuple, tuple, list)):
if isinstance(transformation[0], MatrixBase):
rotation_matrix = transformation[0]
location = transformation[1]
else:
transformation = Lambda(transformation[0],
transformation[1])
elif isinstance(transformation, Callable):
x1, x2, x3 = symbols('x1 x2 x3', cls=Dummy)
transformation = Lambda((x1, x2, x3),
transformation(x1, x2, x3))
elif isinstance(transformation, str):
transformation = Str(transformation)
elif isinstance(transformation, (Str, Lambda)):
pass
else:
raise TypeError("transformation: "
"wrong type {}".format(type(transformation)))
# If orientation information has been provided, store
# the rotation matrix accordingly
if rotation_matrix is None:
rotation_matrix = ImmutableDenseMatrix(eye(3))
else:
if not isinstance(rotation_matrix, MatrixBase):
raise TypeError("rotation_matrix should be an Immutable" +
"Matrix instance")
rotation_matrix = rotation_matrix.as_immutable()
# If location information is not given, adjust the default
# location as Vector.zero
if parent is not None:
if not isinstance(parent, CoordSys3D):
raise TypeError("parent should be a " +
"CoordSys3D/None")
if location is None:
location = Vector.zero
else:
if not isinstance(location, Vector):
raise TypeError("location should be a Vector")
# Check that location does not contain base
# scalars
for x in location.free_symbols:
if isinstance(x, BaseScalar):
raise ValueError("location should not contain" +
" BaseScalars")
origin = parent.origin.locate_new(name + '.origin',
location)
else:
location = Vector.zero
origin = Point(name + '.origin')
if transformation is None:
transformation = Tuple(rotation_matrix, location)
if isinstance(transformation, Tuple):
lambda_transformation = CoordSys3D._compose_rotation_and_translation(
transformation[0],
transformation[1],
parent
)
r, l = transformation
l = l._projections
lambda_lame = CoordSys3D._get_lame_coeff('cartesian')
lambda_inverse = lambda x, y, z: r.inv()*Matrix(
[x-l[0], y-l[1], z-l[2]])
elif isinstance(transformation, Str):
trname = transformation.name
lambda_transformation = CoordSys3D._get_transformation_lambdas(trname)
if parent is not None:
if parent.lame_coefficients() != (S.One, S.One, S.One):
raise ValueError('Parent for pre-defined coordinate '
'system should be Cartesian.')
lambda_lame = CoordSys3D._get_lame_coeff(trname)
lambda_inverse = CoordSys3D._set_inv_trans_equations(trname)
elif isinstance(transformation, Lambda):
if not CoordSys3D._check_orthogonality(transformation):
raise ValueError("The transformation equation does not "
"create orthogonal coordinate system")
lambda_transformation = transformation
lambda_lame = CoordSys3D._calculate_lame_coeff(lambda_transformation)
lambda_inverse = None
else:
lambda_transformation = lambda x, y, z: transformation(x, y, z)
lambda_lame = CoordSys3D._get_lame_coeff(transformation)
lambda_inverse = None
if variable_names is None:
if isinstance(transformation, Lambda):
variable_names = ["x1", "x2", "x3"]
elif isinstance(transformation, Str):
if transformation.name == 'spherical':
variable_names = ["r", "theta", "phi"]
elif transformation.name == 'cylindrical':
variable_names = ["r", "theta", "z"]
else:
variable_names = ["x", "y", "z"]
else:
variable_names = ["x", "y", "z"]
if vector_names is None:
vector_names = ["i", "j", "k"]
# All systems that are defined as 'roots' are unequal, unless
# they have the same name.
# Systems defined at same orientation/position wrt the same
# 'parent' are equal, irrespective of the name.
# This is true even if the same orientation is provided via
# different methods like Axis/Body/Space/Quaternion.
# However, coincident systems may be seen as unequal if
# positioned/oriented wrt different parents, even though
# they may actually be 'coincident' wrt the root system.
if parent is not None:
obj = super().__new__(
cls, Str(name), transformation, parent)
else:
obj = super().__new__(
cls, Str(name), transformation)
obj._name = name
# Initialize the base vectors
_check_strings('vector_names', vector_names)
vector_names = list(vector_names)
latex_vects = [(r'\mathbf{\hat{%s}_{%s}}' % (x, name)) for
x in vector_names]
pretty_vects = ['%s_%s' % (x, name) for x in vector_names]
obj._vector_names = vector_names
v1 = BaseVector(0, obj, pretty_vects[0], latex_vects[0])
v2 = BaseVector(1, obj, pretty_vects[1], latex_vects[1])
v3 = BaseVector(2, obj, pretty_vects[2], latex_vects[2])
obj._base_vectors = (v1, v2, v3)
# Initialize the base scalars
        _check_strings('variable_names', variable_names)
variable_names = list(variable_names)
latex_scalars = [(r"\mathbf{{%s}_{%s}}" % (x, name)) for
x in variable_names]
pretty_scalars = ['%s_%s' % (x, name) for x in variable_names]
obj._variable_names = variable_names
obj._vector_names = vector_names
x1 = BaseScalar(0, obj, pretty_scalars[0], latex_scalars[0])
x2 = BaseScalar(1, obj, pretty_scalars[1], latex_scalars[1])
x3 = BaseScalar(2, obj, pretty_scalars[2], latex_scalars[2])
obj._base_scalars = (x1, x2, x3)
obj._transformation = transformation
obj._transformation_lambda = lambda_transformation
obj._lame_coefficients = lambda_lame(x1, x2, x3)
obj._transformation_from_parent_lambda = lambda_inverse
setattr(obj, variable_names[0], x1)
setattr(obj, variable_names[1], x2)
setattr(obj, variable_names[2], x3)
setattr(obj, vector_names[0], v1)
setattr(obj, vector_names[1], v2)
setattr(obj, vector_names[2], v3)
# Assign params
obj._parent = parent
if obj._parent is not None:
obj._root = obj._parent._root
else:
obj._root = obj
obj._parent_rotation_matrix = rotation_matrix
obj._origin = origin
# Return the instance
return obj
def _sympystr(self, printer):
return self._name
def __iter__(self):
return iter(self.base_vectors())
@staticmethod
def _check_orthogonality(equations):
"""
        Helper method for _connect_to_cartesian. It checks whether a
        set of transformation equations creates an orthogonal curvilinear
        coordinate system.
Parameters
==========
equations : Lambda
Lambda of transformation equations
"""
x1, x2, x3 = symbols("x1, x2, x3", cls=Dummy)
equations = equations(x1, x2, x3)
v1 = Matrix([diff(equations[0], x1),
diff(equations[1], x1), diff(equations[2], x1)])
v2 = Matrix([diff(equations[0], x2),
diff(equations[1], x2), diff(equations[2], x2)])
v3 = Matrix([diff(equations[0], x3),
diff(equations[1], x3), diff(equations[2], x3)])
if any(simplify(i[0] + i[1] + i[2]) == 0 for i in (v1, v2, v3)):
return False
else:
if simplify(v1.dot(v2)) == 0 and simplify(v2.dot(v3)) == 0 \
and simplify(v3.dot(v1)) == 0:
return True
else:
return False
@staticmethod
def _set_inv_trans_equations(curv_coord_name):
"""
Store information about inverse transformation equations for
pre-defined coordinate systems.
Parameters
==========
curv_coord_name : str
Name of coordinate system
"""
if curv_coord_name == 'cartesian':
return lambda x, y, z: (x, y, z)
if curv_coord_name == 'spherical':
return lambda x, y, z: (
sqrt(x**2 + y**2 + z**2),
acos(z/sqrt(x**2 + y**2 + z**2)),
atan2(y, x)
)
if curv_coord_name == 'cylindrical':
return lambda x, y, z: (
sqrt(x**2 + y**2),
atan2(y, x),
z
)
        raise ValueError('Wrong set of parameters.'
                         ' Type of coordinate system is not defined')
def _calculate_inv_trans_equations(self):
"""
        Helper method for set_coordinate_type. It calculates the inverse
        transformation equations for the given transformation equations.
"""
        x1, x2, x3 = symbols("x1, x2, x3", cls=Dummy, real=True)
x, y, z = symbols("x, y, z", cls=Dummy)
equations = self._transformation(x1, x2, x3)
solved = solve([equations[0] - x,
equations[1] - y,
equations[2] - z], (x1, x2, x3), dict=True)[0]
solved = solved[x1], solved[x2], solved[x3]
self._transformation_from_parent_lambda = \
lambda x1, x2, x3: tuple(i.subs(list(zip((x, y, z), (x1, x2, x3)))) for i in solved)
@staticmethod
def _get_lame_coeff(curv_coord_name):
"""
Store information about Lame coefficients for pre-defined
coordinate systems.
Parameters
==========
curv_coord_name : str
Name of coordinate system
"""
if isinstance(curv_coord_name, str):
if curv_coord_name == 'cartesian':
return lambda x, y, z: (S.One, S.One, S.One)
if curv_coord_name == 'spherical':
return lambda r, theta, phi: (S.One, r, r*sin(theta))
if curv_coord_name == 'cylindrical':
return lambda r, theta, h: (S.One, r, S.One)
raise ValueError('Wrong set of parameters.'
' Type of coordinate system is not defined')
return CoordSys3D._calculate_lame_coefficients(curv_coord_name)
@staticmethod
def _calculate_lame_coeff(equations):
"""
        It calculates the Lame coefficients
        for the given transformation equations.
Parameters
==========
equations : Lambda
Lambda of transformation equations.
"""
return lambda x1, x2, x3: (
sqrt(diff(equations(x1, x2, x3)[0], x1)**2 +
diff(equations(x1, x2, x3)[1], x1)**2 +
diff(equations(x1, x2, x3)[2], x1)**2),
sqrt(diff(equations(x1, x2, x3)[0], x2)**2 +
diff(equations(x1, x2, x3)[1], x2)**2 +
diff(equations(x1, x2, x3)[2], x2)**2),
sqrt(diff(equations(x1, x2, x3)[0], x3)**2 +
diff(equations(x1, x2, x3)[1], x3)**2 +
diff(equations(x1, x2, x3)[2], x3)**2)
)
def _inverse_rotation_matrix(self):
"""
Returns inverse rotation matrix.
"""
return simplify(self._parent_rotation_matrix**-1)
@staticmethod
def _get_transformation_lambdas(curv_coord_name):
"""
Store information about transformation equations for pre-defined
coordinate systems.
Parameters
==========
curv_coord_name : str
Name of coordinate system
"""
if isinstance(curv_coord_name, str):
if curv_coord_name == 'cartesian':
return lambda x, y, z: (x, y, z)
if curv_coord_name == 'spherical':
return lambda r, theta, phi: (
r*sin(theta)*cos(phi),
r*sin(theta)*sin(phi),
r*cos(theta)
)
if curv_coord_name == 'cylindrical':
return lambda r, theta, h: (
r*cos(theta),
r*sin(theta),
h
)
            raise ValueError('Wrong set of parameters.'
                             ' Type of coordinate system is not defined')
@classmethod
def _rotation_trans_equations(cls, matrix, equations):
"""
Returns the transformation equations obtained from rotation matrix.
Parameters
==========
matrix : Matrix
Rotation matrix
equations : tuple
Transformation equations
"""
return tuple(matrix * Matrix(equations))
@property
def origin(self):
return self._origin
@property
def delop(self):
SymPyDeprecationWarning(
feature="coord_system.delop has been replaced.",
useinstead="Use the Del() class",
deprecated_since_version="1.1",
issue=12866,
).warn()
from sympy.vector.deloperator import Del
return Del()
def base_vectors(self):
return self._base_vectors
def base_scalars(self):
return self._base_scalars
def lame_coefficients(self):
return self._lame_coefficients
def transformation_to_parent(self):
return self._transformation_lambda(*self.base_scalars())
def transformation_from_parent(self):
if self._parent is None:
raise ValueError("no parent coordinate system, use "
"`transformation_from_parent_function()`")
return self._transformation_from_parent_lambda(
*self._parent.base_scalars())
def transformation_from_parent_function(self):
return self._transformation_from_parent_lambda
def rotation_matrix(self, other):
"""
Returns the direction cosine matrix(DCM), also known as the
'rotation matrix' of this coordinate system with respect to
another system.
If v_a is a vector defined in system 'A' (in matrix format)
and v_b is the same vector defined in system 'B', then
v_a = A.rotation_matrix(B) * v_b.
A SymPy Matrix is returned.
Parameters
==========
other : CoordSys3D
The system which the DCM is generated to.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import symbols
>>> q1 = symbols('q1')
>>> N = CoordSys3D('N')
>>> A = N.orient_new_axis('A', q1, N.i)
>>> N.rotation_matrix(A)
Matrix([
[1, 0, 0],
[0, cos(q1), -sin(q1)],
[0, sin(q1), cos(q1)]])
"""
from sympy.vector.functions import _path
if not isinstance(other, CoordSys3D):
raise TypeError(str(other) +
" is not a CoordSys3D")
# Handle special cases
if other == self:
return eye(3)
elif other == self._parent:
return self._parent_rotation_matrix
elif other._parent == self:
return other._parent_rotation_matrix.T
# Else, use tree to calculate position
rootindex, path = _path(self, other)
result = eye(3)
i = -1
for i in range(rootindex):
result *= path[i]._parent_rotation_matrix
i += 2
while i < len(path):
result *= path[i]._parent_rotation_matrix.T
i += 1
return result
@cacheit
def position_wrt(self, other):
"""
Returns the position vector of the origin of this coordinate
system with respect to another Point/CoordSys3D.
Parameters
==========
other : Point/CoordSys3D
If other is a Point, the position of this system's origin
            wrt it is returned. If it is an instance of CoordSys3D,
the position wrt its origin is returned.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> N = CoordSys3D('N')
>>> N1 = N.locate_new('N1', 10 * N.i)
>>> N.position_wrt(N1)
(-10)*N.i
"""
return self.origin.position_wrt(other)
def scalar_map(self, other):
"""
Returns a dictionary which expresses the coordinate variables
        (base scalars) of this frame in terms of the variables of
        ``other``.
Parameters
==========
        other : CoordSys3D
The other system to map the variables to.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import Symbol
>>> A = CoordSys3D('A')
>>> q = Symbol('q')
>>> B = A.orient_new_axis('B', q, A.k)
>>> A.scalar_map(B)
{A.x: B.x*cos(q) - B.y*sin(q), A.y: B.x*sin(q) + B.y*cos(q), A.z: B.z}
"""
relocated_scalars = []
origin_coords = tuple(self.position_wrt(other).to_matrix(other))
for i, x in enumerate(other.base_scalars()):
relocated_scalars.append(x - origin_coords[i])
vars_matrix = (self.rotation_matrix(other) *
Matrix(relocated_scalars))
mapping = {}
for i, x in enumerate(self.base_scalars()):
mapping[x] = trigsimp(vars_matrix[i])
return mapping
def locate_new(self, name, position, vector_names=None,
variable_names=None):
"""
Returns a CoordSys3D with its origin located at the given
position wrt this coordinate system's origin.
Parameters
==========
name : str
The name of the new CoordSys3D instance.
position : Vector
The position vector of the new system's origin wrt this
one.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> A = CoordSys3D('A')
>>> B = A.locate_new('B', 10 * A.i)
>>> B.origin.position_wrt(A.origin)
10*A.i
"""
if variable_names is None:
variable_names = self._variable_names
if vector_names is None:
vector_names = self._vector_names
return CoordSys3D(name, location=position,
vector_names=vector_names,
variable_names=variable_names,
parent=self)
def orient_new(self, name, orienters, location=None,
vector_names=None, variable_names=None):
"""
Creates a new CoordSys3D oriented in the user-specified way
with respect to this system.
Please refer to the documentation of the orienter classes
for more information about the orientation procedure.
Parameters
==========
name : str
The name of the new CoordSys3D instance.
orienters : iterable/Orienter
An Orienter or an iterable of Orienters for orienting the
new coordinate system.
If an Orienter is provided, it is applied to get the new
system.
If an iterable is provided, the orienters will be applied
in the order in which they appear in the iterable.
location : Vector(optional)
The location of the new coordinate system's origin wrt this
system's origin. If not specified, the origins are taken to
be coincident.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import symbols
>>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
>>> N = CoordSys3D('N')
Using an AxisOrienter
>>> from sympy.vector import AxisOrienter
>>> axis_orienter = AxisOrienter(q1, N.i + 2 * N.j)
>>> A = N.orient_new('A', (axis_orienter, ))
Using a BodyOrienter
>>> from sympy.vector import BodyOrienter
>>> body_orienter = BodyOrienter(q1, q2, q3, '123')
>>> B = N.orient_new('B', (body_orienter, ))
Using a SpaceOrienter
>>> from sympy.vector import SpaceOrienter
>>> space_orienter = SpaceOrienter(q1, q2, q3, '312')
>>> C = N.orient_new('C', (space_orienter, ))
Using a QuaternionOrienter
>>> from sympy.vector import QuaternionOrienter
>>> q_orienter = QuaternionOrienter(q0, q1, q2, q3)
>>> D = N.orient_new('D', (q_orienter, ))
"""
if variable_names is None:
variable_names = self._variable_names
if vector_names is None:
vector_names = self._vector_names
if isinstance(orienters, Orienter):
if isinstance(orienters, AxisOrienter):
final_matrix = orienters.rotation_matrix(self)
else:
final_matrix = orienters.rotation_matrix()
# TODO: trigsimp is needed here so that the matrix becomes
# canonical (scalar_map also calls trigsimp; without this, you can
# end up with the same CoordinateSystem that compares differently
# due to a differently formatted matrix). However, this is
# probably not so good for performance.
final_matrix = trigsimp(final_matrix)
else:
final_matrix = Matrix(eye(3))
for orienter in orienters:
if isinstance(orienter, AxisOrienter):
final_matrix *= orienter.rotation_matrix(self)
else:
final_matrix *= orienter.rotation_matrix()
return CoordSys3D(name, rotation_matrix=final_matrix,
vector_names=vector_names,
variable_names=variable_names,
location=location,
parent=self)
def orient_new_axis(self, name, angle, axis, location=None,
vector_names=None, variable_names=None):
"""
Axis rotation is a rotation about an arbitrary axis by
some angle. The angle is supplied as a SymPy expr scalar, and
the axis is supplied as a Vector.
Parameters
==========
name : string
The name of the new coordinate system
angle : Expr
The angle by which the new system is to be rotated
axis : Vector
The axis around which the rotation has to be performed
location : Vector(optional)
The location of the new coordinate system's origin wrt this
system's origin. If not specified, the origins are taken to
be coincident.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import symbols
>>> q1 = symbols('q1')
>>> N = CoordSys3D('N')
>>> B = N.orient_new_axis('B', q1, N.i + 2 * N.j)
"""
if variable_names is None:
variable_names = self._variable_names
if vector_names is None:
vector_names = self._vector_names
orienter = AxisOrienter(angle, axis)
return self.orient_new(name, orienter,
location=location,
vector_names=vector_names,
variable_names=variable_names)
def orient_new_body(self, name, angle1, angle2, angle3,
rotation_order, location=None,
vector_names=None, variable_names=None):
"""
Body orientation takes this coordinate system through three
successive simple rotations.
Body fixed rotations include both Euler Angles and
Tait-Bryan Angles, see https://en.wikipedia.org/wiki/Euler_angles.
Parameters
==========
name : string
The name of the new coordinate system
angle1, angle2, angle3 : Expr
Three successive angles to rotate the coordinate system by
rotation_order : string
String defining the order of axes for rotation
location : Vector(optional)
The location of the new coordinate system's origin wrt this
system's origin. If not specified, the origins are taken to
be coincident.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import symbols
>>> q1, q2, q3 = symbols('q1 q2 q3')
>>> N = CoordSys3D('N')
A 'Body' fixed rotation is described by three angles and
three body-fixed rotation axes. To orient a coordinate system D
with respect to N, each sequential rotation is always about
the orthogonal unit vectors fixed to D. For example, a '123'
rotation will specify rotations about N.i, then D.j, then
        D.k. (Initially, D.i is the same as N.i)
Therefore,
>>> D = N.orient_new_body('D', q1, q2, q3, '123')
        is the same as
>>> D = N.orient_new_axis('D', q1, N.i)
>>> D = D.orient_new_axis('D', q2, D.j)
>>> D = D.orient_new_axis('D', q3, D.k)
Acceptable rotation orders are of length 3, expressed in XYZ or
        123, and cannot have a rotation about an axis twice in a row.
>>> B = N.orient_new_body('B', q1, q2, q3, '123')
>>> B = N.orient_new_body('B', q1, q2, 0, 'ZXZ')
>>> B = N.orient_new_body('B', 0, 0, 0, 'XYX')
"""
orienter = BodyOrienter(angle1, angle2, angle3, rotation_order)
return self.orient_new(name, orienter,
location=location,
vector_names=vector_names,
variable_names=variable_names)
def orient_new_space(self, name, angle1, angle2, angle3,
rotation_order, location=None,
vector_names=None, variable_names=None):
"""
Space rotation is similar to Body rotation, but the rotations
are applied in the opposite order.
Parameters
==========
name : string
The name of the new coordinate system
angle1, angle2, angle3 : Expr
Three successive angles to rotate the coordinate system by
rotation_order : string
String defining the order of axes for rotation
location : Vector(optional)
The location of the new coordinate system's origin wrt this
system's origin. If not specified, the origins are taken to
be coincident.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
See Also
========
CoordSys3D.orient_new_body : method to orient via Euler
angles
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import symbols
>>> q1, q2, q3 = symbols('q1 q2 q3')
>>> N = CoordSys3D('N')
To orient a coordinate system D with respect to N, each
sequential rotation is always about N's orthogonal unit vectors.
For example, a '123' rotation will specify rotations about
N.i, then N.j, then N.k.
Therefore,
>>> D = N.orient_new_space('D', q1, q2, q3, '312')
        is the same as
>>> B = N.orient_new_axis('B', q1, N.i)
>>> C = B.orient_new_axis('C', q2, N.j)
>>> D = C.orient_new_axis('D', q3, N.k)
"""
orienter = SpaceOrienter(angle1, angle2, angle3, rotation_order)
return self.orient_new(name, orienter,
location=location,
vector_names=vector_names,
variable_names=variable_names)
def orient_new_quaternion(self, name, q0, q1, q2, q3, location=None,
vector_names=None, variable_names=None):
"""
Quaternion orientation orients the new CoordSys3D with
Quaternions, defined as a finite rotation about lambda, a unit
vector, by some amount theta.
This orientation is described by four parameters:
q0 = cos(theta/2)
q1 = lambda_x sin(theta/2)
q2 = lambda_y sin(theta/2)
q3 = lambda_z sin(theta/2)
Quaternion does not take in a rotation order.
Parameters
==========
name : string
The name of the new coordinate system
q0, q1, q2, q3 : Expr
The quaternions to rotate the coordinate system by
location : Vector(optional)
The location of the new coordinate system's origin wrt this
system's origin. If not specified, the origins are taken to
be coincident.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import symbols
>>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
>>> N = CoordSys3D('N')
>>> B = N.orient_new_quaternion('B', q0, q1, q2, q3)
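For instance (an illustrative sketch), a rotation by an angle q about
the Z-axis can be specified as
>>> from sympy import cos, sin, Symbol
>>> q = Symbol('q')
>>> D = N.orient_new_quaternion('D', cos(q/2), 0, 0, sin(q/2))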
"""
orienter = QuaternionOrienter(q0, q1, q2, q3)
return self.orient_new(name, orienter,
location=location,
vector_names=vector_names,
variable_names=variable_names)
def create_new(self, name, transformation, variable_names=None, vector_names=None):
"""
Returns a CoordSys3D which is connected to self by transformation.
Parameters
==========
name : str
The name of the new CoordSys3D instance.
transformation : Lambda, Tuple, str
Transformation defined by transformation equations or chosen
from predefined ones.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> a = CoordSys3D('a')
>>> b = a.create_new('b', transformation='spherical')
>>> b.transformation_to_parent()
(b.r*sin(b.theta)*cos(b.phi), b.r*sin(b.phi)*sin(b.theta), b.r*cos(b.theta))
>>> b.transformation_from_parent()
(sqrt(a.x**2 + a.y**2 + a.z**2), acos(a.z/sqrt(a.x**2 + a.y**2 + a.z**2)), atan2(a.y, a.x))
"""
return CoordSys3D(name, parent=self, transformation=transformation,
variable_names=variable_names, vector_names=vector_names)
def __init__(self, name, location=None, rotation_matrix=None,
parent=None, vector_names=None, variable_names=None,
latex_vects=None, pretty_vects=None, latex_scalars=None,
pretty_scalars=None, transformation=None):
# Dummy initializer for setting docstring
pass
__init__.__doc__ = __new__.__doc__
@staticmethod
def _compose_rotation_and_translation(rot, translation, parent):
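# Returns a callable (x, y, z) -> rotated (and, when a parent is given,
# translated) coordinates: the rotation ``rot`` is applied first, then a
# shift by the components of ``translation`` along the parent's base
# vectors.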
r = lambda x, y, z: CoordSys3D._rotation_trans_equations(rot, (x, y, z))
if parent is None:
return r
dx, dy, dz = [translation.dot(i) for i in parent.base_vectors()]
t = lambda x, y, z: (
x + dx,
y + dy,
z + dz,
)
return lambda x, y, z: t(*r(x, y, z))
def _check_strings(arg_name, arg):
errorstr = arg_name + " must be an iterable of 3 string-types"
if len(arg) != 3:
raise ValueError(errorstr)
for s in arg:
if not isinstance(s, str):
raise TypeError(errorstr)
# Delayed import to avoid cyclic import problems:
from sympy.vector.vector import BaseVector
|
3a9b30de978e06261de9127a2020ecf95ec41a2b2a392324576bf5d210d15bab | from typing import Any, Dict as tDict
from sympy.simplify import simplify as simp, trigsimp as tsimp
from sympy.core.decorators import call_highest_priority, _sympifyit
from sympy.core.assumptions import StdFactKB
from sympy.core.function import diff as df
from sympy.integrals.integrals import Integral
from sympy.polys.polytools import factor as fctr
from sympy.core import S, Add, Mul
from sympy.core.expr import Expr
class BasisDependent(Expr):
"""
Super class containing functionality common to vectors and
dyadics.
Named so because the representation of these quantities in
sympy.vector is dependent on the basis they are expressed in.
"""
@call_highest_priority('__radd__')
def __add__(self, other):
return self._add_func(self, other)
@call_highest_priority('__add__')
def __radd__(self, other):
return self._add_func(other, self)
@call_highest_priority('__rsub__')
def __sub__(self, other):
return self._add_func(self, -other)
@call_highest_priority('__sub__')
def __rsub__(self, other):
return self._add_func(other, -self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rmul__')
def __mul__(self, other):
return self._mul_func(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__mul__')
def __rmul__(self, other):
return self._mul_func(other, self)
def __neg__(self):
return self._mul_func(S.NegativeOne, self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rtruediv__')
def __truediv__(self, other):
return self._div_helper(other)
@call_highest_priority('__truediv__')
def __rtruediv__(self, other):
raise TypeError("Invalid divisor for division")
def evalf(self, n=15, subs=None, maxn=100, chop=False, strict=False, quad=None, verbose=False):
"""
Implements the SymPy evalf routine for this quantity.
evalf's documentation
=====================
"""
options = {'subs':subs, 'maxn':maxn, 'chop':chop, 'strict':strict,
'quad':quad, 'verbose':verbose}
vec = self.zero
for k, v in self.components.items():
vec += v.evalf(n, **options) * k
return vec
evalf.__doc__ += Expr.evalf.__doc__ # type: ignore
n = evalf
def simplify(self, **kwargs):
"""
Implements the SymPy simplify routine for this quantity.
simplify's documentation
========================
"""
simp_components = [simp(v, **kwargs) * k for
k, v in self.components.items()]
return self._add_func(*simp_components)
simplify.__doc__ += simp.__doc__ # type: ignore
def trigsimp(self, **opts):
"""
Implements the SymPy trigsimp routine, for this quantity.
trigsimp's documentation
========================
"""
trig_components = [tsimp(v, **opts) * k for
k, v in self.components.items()]
return self._add_func(*trig_components)
trigsimp.__doc__ += tsimp.__doc__ # type: ignore
def _eval_simplify(self, **kwargs):
return self.simplify(**kwargs)
def _eval_trigsimp(self, **opts):
return self.trigsimp(**opts)
def _eval_derivative(self, wrt):
return self.diff(wrt)
def _eval_Integral(self, *symbols, **assumptions):
integral_components = [Integral(v, *symbols, **assumptions) * k
for k, v in self.components.items()]
return self._add_func(*integral_components)
def as_numer_denom(self):
"""
Returns the expression as a tuple wrt the following
transformation -
expression -> a/b -> a, b
"""
return self, S.One
def factor(self, *args, **kwargs):
"""
Implements the SymPy factor routine, on the scalar parts
of a basis-dependent expression.
factor's documentation
========================
"""
fctr_components = [fctr(v, *args, **kwargs) * k for
k, v in self.components.items()]
return self._add_func(*fctr_components)
factor.__doc__ += fctr.__doc__ # type: ignore
def as_coeff_Mul(self, rational=False):
"""Efficiently extract the coefficient of a product. """
return (S.One, self)
def as_coeff_add(self, *deps):
"""Efficiently extract the coefficient of a summation. """
l = [x * self.components[x] for x in self.components]
return 0, tuple(l)
def diff(self, *args, **kwargs):
"""
Implements the SymPy diff routine, for vectors.
diff's documentation
========================
"""
for x in args:
if isinstance(x, BasisDependent):
raise TypeError("Invalid arg for differentiation")
diff_components = [df(v, *args, **kwargs) * k for
k, v in self.components.items()]
return self._add_func(*diff_components)
diff.__doc__ += df.__doc__ # type: ignore
def doit(self, **hints):
"""Calls .doit() on each term in the Dyadic"""
doit_components = [self.components[x].doit(**hints) * x
for x in self.components]
return self._add_func(*doit_components)
class BasisDependentAdd(BasisDependent, Add):
"""
Denotes sum of basis dependent quantities such that they cannot
be expressed as base or Mul instances.
"""
def __new__(cls, *args, **options):
components = {}
# Check each arg and simultaneously learn the components
for i, arg in enumerate(args):
if not isinstance(arg, cls._expr_type):
if isinstance(arg, Mul):
arg = cls._mul_func(*(arg.args))
elif isinstance(arg, Add):
arg = cls._add_func(*(arg.args))
else:
raise TypeError(str(arg) +
" cannot be interpreted correctly")
# If argument is zero, ignore
if arg == cls.zero:
continue
# Else, update components accordingly
if hasattr(arg, "components"):
for x in arg.components:
components[x] = components.get(x, 0) + arg.components[x]
temp = list(components.keys())
for x in temp:
if components[x] == 0:
del components[x]
# Handle case of zero vector
if len(components) == 0:
return cls.zero
# Build object
newargs = [x * components[x] for x in components]
obj = super().__new__(cls, *newargs, **options)
if isinstance(obj, Mul):
return cls._mul_func(*obj.args)
assumptions = {'commutative': True}
obj._assumptions = StdFactKB(assumptions)
obj._components = components
obj._sys = (list(components.keys()))[0]._sys
return obj
class BasisDependentMul(BasisDependent, Mul):
"""
Denotes the product of a base basis-dependent quantity with a scalar.
"""
def __new__(cls, *args, **options):
from sympy.vector import Cross, Dot, Curl, Gradient
count = 0
measure_number = S.One
zeroflag = False
extra_args = []
# Determine the component and check arguments
# Also keep a count to ensure two vectors aren't
# being multiplied
for arg in args:
if isinstance(arg, cls._zero_func):
count += 1
zeroflag = True
elif arg == S.Zero:
zeroflag = True
elif isinstance(arg, (cls._base_func, cls._mul_func)):
count += 1
expr = arg._base_instance
measure_number *= arg._measure_number
elif isinstance(arg, cls._add_func):
count += 1
expr = arg
elif isinstance(arg, (Cross, Dot, Curl, Gradient)):
extra_args.append(arg)
else:
measure_number *= arg
# Make sure incompatible types weren't multiplied
if count > 1:
raise ValueError("Invalid multiplication")
elif count == 0:
return Mul(*args, **options)
# Handle zero vector case
if zeroflag:
return cls.zero
# If one of the args was a VectorAdd, return an
# appropriate VectorAdd instance
if isinstance(expr, cls._add_func):
newargs = [cls._mul_func(measure_number, x) for
x in expr.args]
return cls._add_func(*newargs)
obj = super().__new__(cls, measure_number,
expr._base_instance,
*extra_args,
**options)
if isinstance(obj, Add):
return cls._add_func(*obj.args)
obj._base_instance = expr._base_instance
obj._measure_number = measure_number
assumptions = {'commutative': True}
obj._assumptions = StdFactKB(assumptions)
obj._components = {expr._base_instance: measure_number}
obj._sys = expr._base_instance._sys
return obj
def _sympystr(self, printer):
measure_str = printer._print(self._measure_number)
if ('(' in measure_str or '-' in measure_str or
'+' in measure_str):
measure_str = '(' + measure_str + ')'
return measure_str + '*' + printer._print(self._base_instance)
class BasisDependentZero(BasisDependent):
"""
Class to denote a zero basis dependent instance.
"""
# XXX: Can't type the keys as BaseVector because of cyclic import
# problems.
components = {} # type: tDict[Any, Expr]
def __new__(cls):
obj = super().__new__(cls)
# Pre-compute a specific hash value for the zero vector
# Use the same one always
obj._hash = tuple([S.Zero, cls]).__hash__()
return obj
def __hash__(self):
return self._hash
@call_highest_priority('__req__')
def __eq__(self, other):
return isinstance(other, self._zero_func)
__req__ = __eq__
@call_highest_priority('__radd__')
def __add__(self, other):
if isinstance(other, self._expr_type):
return other
else:
raise TypeError("Invalid argument types for addition")
@call_highest_priority('__add__')
def __radd__(self, other):
if isinstance(other, self._expr_type):
return other
else:
raise TypeError("Invalid argument types for addition")
@call_highest_priority('__rsub__')
def __sub__(self, other):
if isinstance(other, self._expr_type):
return -other
else:
raise TypeError("Invalid argument types for subtraction")
@call_highest_priority('__sub__')
def __rsub__(self, other):
if isinstance(other, self._expr_type):
return other
else:
raise TypeError("Invalid argument types for subtraction")
def __neg__(self):
return self
def normalize(self):
"""
Returns the normalized version of this vector.
"""
return self
def _sympystr(self, printer):
return '0'
|
e748206134f28c95bc40a58f0f29bef15507253fe52a87aef3bd3479a31eaf00 | from typing import Type
from sympy.core.add import Add
from sympy.core.assumptions import StdFactKB
from sympy.core.expr import AtomicExpr, Expr
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.core.sorting import default_sort_key
from sympy.core.sympify import sympify
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.matrices.immutable import ImmutableDenseMatrix as Matrix
from sympy.vector.basisdependent import (BasisDependentZero,
BasisDependent, BasisDependentMul, BasisDependentAdd)
from sympy.vector.coordsysrect import CoordSys3D
from sympy.vector.dyadic import Dyadic, BaseDyadic, DyadicAdd
class Vector(BasisDependent):
"""
Super class for all Vector classes.
Ideally, neither this class nor any of its subclasses should be
instantiated by the user.
"""
is_Vector = True
_op_priority = 12.0
_expr_type = None # type: Type[Vector]
_mul_func = None # type: Type[Vector]
_add_func = None # type: Type[Vector]
_zero_func = None # type: Type[Vector]
_base_func = None # type: Type[Vector]
zero = None # type: VectorZero
@property
def components(self):
"""
Returns the components of this vector in the form of a
Python dictionary mapping BaseVector instances to the
corresponding measure numbers.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> C = CoordSys3D('C')
>>> v = 3*C.i + 4*C.j + 5*C.k
>>> v.components
{C.i: 3, C.j: 4, C.k: 5}
"""
# The '_components' attribute is defined according to the
# subclass of Vector the instance belongs to.
return self._components
def magnitude(self):
"""
Returns the magnitude of this vector.
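Examples
========
A minimal sketch (assuming a Cartesian system ``C``):
>>> from sympy.vector import CoordSys3D
>>> C = CoordSys3D('C')
>>> (3*C.i + 4*C.j).magnitude()
5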
"""
return sqrt(self & self)
def normalize(self):
"""
Returns the normalized version of this vector.
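Examples
========
A minimal sketch (assuming a Cartesian system ``C``):
>>> from sympy.vector import CoordSys3D
>>> C = CoordSys3D('C')
>>> (3*C.i + 4*C.j).normalize()
3/5*C.i + 4/5*C.j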
"""
return self / self.magnitude()
def dot(self, other):
"""
Returns the dot product of this Vector, either with another
Vector, or a Dyadic, or a Del operator.
If 'other' is a Vector, returns the dot product scalar (SymPy
expression).
If 'other' is a Dyadic, the dot product is returned as a Vector.
If 'other' is an instance of Del, returns the directional
derivative operator as a Python function. If this function is
applied to a scalar expression, it returns the directional
derivative of the scalar field wrt this Vector.
Parameters
==========
other: Vector/Dyadic/Del
The Vector or Dyadic we are dotting with, or a Del operator.
Examples
========
>>> from sympy.vector import CoordSys3D, Del
>>> C = CoordSys3D('C')
>>> delop = Del()
>>> C.i.dot(C.j)
0
>>> C.i & C.i
1
>>> v = 3*C.i + 4*C.j + 5*C.k
>>> v.dot(C.k)
5
>>> (C.i & delop)(C.x*C.y*C.z)
C.y*C.z
>>> d = C.i.outer(C.i)
>>> C.i.dot(d)
C.i
"""
# Check special cases
if isinstance(other, Dyadic):
if isinstance(self, VectorZero):
return Vector.zero
outvec = Vector.zero
for k, v in other.components.items():
vect_dot = k.args[0].dot(self)
outvec += vect_dot * v * k.args[1]
return outvec
from sympy.vector.deloperator import Del
if not isinstance(other, Vector) and not isinstance(other, Del):
raise TypeError(str(other) + " is not a vector, dyadic or " +
"del operator")
# Check if the other is a del operator
if isinstance(other, Del):
def directional_derivative(field):
from sympy.vector.functions import directional_derivative
return directional_derivative(field, self)
return directional_derivative
return dot(self, other)
def __and__(self, other):
return self.dot(other)
__and__.__doc__ = dot.__doc__
def cross(self, other):
"""
Returns the cross product of this Vector with another Vector or
Dyadic instance.
The cross product is a Vector, if 'other' is a Vector. If 'other'
is a Dyadic, this returns a Dyadic instance.
Parameters
==========
other: Vector/Dyadic
The Vector or Dyadic we are crossing with.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> C = CoordSys3D('C')
>>> C.i.cross(C.j)
C.k
>>> C.i ^ C.i
0
>>> v = 3*C.i + 4*C.j + 5*C.k
>>> v ^ C.i
5*C.j + (-4)*C.k
>>> d = C.i.outer(C.i)
>>> C.j.cross(d)
(-1)*(C.k|C.i)
"""
# Check special cases
if isinstance(other, Dyadic):
if isinstance(self, VectorZero):
return Dyadic.zero
outdyad = Dyadic.zero
for k, v in other.components.items():
cross_product = self.cross(k.args[0])
outer = cross_product.outer(k.args[1])
outdyad += v * outer
return outdyad
return cross(self, other)
def __xor__(self, other):
return self.cross(other)
__xor__.__doc__ = cross.__doc__
def outer(self, other):
"""
Returns the outer product of this vector with another, in the
form of a Dyadic instance.
Parameters
==========
other : Vector
The Vector with respect to which the outer product is to
be computed.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> N = CoordSys3D('N')
>>> N.i.outer(N.j)
(N.i|N.j)
"""
# Handle the special cases
if not isinstance(other, Vector):
raise TypeError("Invalid operand for outer product")
elif (isinstance(self, VectorZero) or
isinstance(other, VectorZero)):
return Dyadic.zero
# Iterate over components of both the vectors to generate
# the required Dyadic instance
args = []
for k1, v1 in self.components.items():
for k2, v2 in other.components.items():
args.append((v1 * v2) * BaseDyadic(k1, k2))
return DyadicAdd(*args)
def projection(self, other, scalar=False):
"""
Returns the vector or scalar projection of the 'other' on 'self'.
Examples
========
>>> from sympy.vector.coordsysrect import CoordSys3D
>>> C = CoordSys3D('C')
>>> i, j, k = C.base_vectors()
>>> v1 = i + j + k
>>> v2 = 3*i + 4*j
>>> v1.projection(v2)
7/3*C.i + 7/3*C.j + 7/3*C.k
>>> v1.projection(v2, scalar=True)
7/3
"""
if self.equals(Vector.zero):
return S.Zero if scalar else Vector.zero
if scalar:
return self.dot(other) / self.dot(self)
else:
return self.dot(other) / self.dot(self) * self
@property
def _projections(self):
"""
Returns the components of this vector; unlike ``components``, the
output also includes components whose measure numbers are zero.
Examples
========
>>> from sympy.vector import CoordSys3D, Vector
>>> C = CoordSys3D('C')
>>> v1 = 3*C.i + 4*C.j + 5*C.k
>>> v1._projections
(3, 4, 5)
>>> v2 = C.x*C.y*C.z*C.i
>>> v2._projections
(C.x*C.y*C.z, 0, 0)
>>> v3 = Vector.zero
>>> v3._projections
(0, 0, 0)
"""
from sympy.vector.operators import _get_coord_sys_from_expr
if isinstance(self, VectorZero):
return (S.Zero, S.Zero, S.Zero)
base_vec = next(iter(_get_coord_sys_from_expr(self))).base_vectors()
return tuple([self.dot(i) for i in base_vec])
def __or__(self, other):
return self.outer(other)
__or__.__doc__ = outer.__doc__
def to_matrix(self, system):
"""
Returns the matrix form of this vector with respect to the
specified coordinate system.
Parameters
==========
system : CoordSys3D
The system wrt which the matrix form is to be computed
Examples
========
>>> from sympy.vector import CoordSys3D
>>> C = CoordSys3D('C')
>>> from sympy.abc import a, b, c
>>> v = a*C.i + b*C.j + c*C.k
>>> v.to_matrix(C)
Matrix([
[a],
[b],
[c]])
"""
return Matrix([self.dot(unit_vec) for unit_vec in
system.base_vectors()])
def separate(self):
"""
The constituents of this vector in different coordinate systems,
as per its definition.
Returns a dict mapping each CoordSys3D to the corresponding
constituent Vector.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> R1 = CoordSys3D('R1')
>>> R2 = CoordSys3D('R2')
>>> v = R1.i + R2.i
>>> v.separate() == {R1: R1.i, R2: R2.i}
True
"""
parts = {}
for vect, measure in self.components.items():
parts[vect.system] = (parts.get(vect.system, Vector.zero) +
vect * measure)
return parts
def _div_helper(one, other):
""" Helper for division involving vectors. """
if isinstance(one, Vector) and isinstance(other, Vector):
raise TypeError("Cannot divide two vectors")
elif isinstance(one, Vector):
if other == S.Zero:
raise ValueError("Cannot divide a vector by zero")
return VectorMul(one, Pow(other, S.NegativeOne))
else:
raise TypeError("Invalid division involving a vector")
class BaseVector(Vector, AtomicExpr):
"""
Class to denote a base vector.
Unicode pretty forms in Python 2 should use the prefix ``u``.
"""
def __new__(cls, index, system, pretty_str=None, latex_str=None):
if pretty_str is None:
pretty_str = "x{}".format(index)
if latex_str is None:
latex_str = "x_{}".format(index)
pretty_str = str(pretty_str)
latex_str = str(latex_str)
# Verify arguments
if index not in range(0, 3):
raise ValueError("index must be 0, 1 or 2")
if not isinstance(system, CoordSys3D):
raise TypeError("system should be a CoordSys3D")
name = system._vector_names[index]
# Initialize an object
obj = super().__new__(cls, S(index), system)
# Assign important attributes
obj._base_instance = obj
obj._components = {obj: S.One}
obj._measure_number = S.One
obj._name = system._name + '.' + name
obj._pretty_form = '' + pretty_str
obj._latex_form = latex_str
obj._system = system
# The _id is used for printing purposes
obj._id = (index, system)
assumptions = {'commutative': True}
obj._assumptions = StdFactKB(assumptions)
# This attr is used for re-expression to one of the systems
# involved in the definition of the Vector. Applies to
# VectorMul and VectorAdd too.
obj._sys = system
return obj
@property
def system(self):
return self._system
def _sympystr(self, printer):
return self._name
@property
def free_symbols(self):
return {self}
class VectorAdd(BasisDependentAdd, Vector):
"""
Class to denote sum of Vector instances.
"""
def __new__(cls, *args, **options):
obj = BasisDependentAdd.__new__(cls, *args, **options)
return obj
def _sympystr(self, printer):
ret_str = ''
items = list(self.separate().items())
items.sort(key=lambda x: x[0].__str__())
for system, vect in items:
base_vects = system.base_vectors()
for x in base_vects:
if x in vect.components:
temp_vect = self.components[x] * x
ret_str += printer._print(temp_vect) + " + "
return ret_str[:-3]
class VectorMul(BasisDependentMul, Vector):
"""
Class to denote products of scalars and BaseVectors.
"""
def __new__(cls, *args, **options):
obj = BasisDependentMul.__new__(cls, *args, **options)
return obj
@property
def base_vector(self):
""" The BaseVector involved in the product. """
return self._base_instance
@property
def measure_number(self):
""" The scalar expression involved in the definition of
this VectorMul.
"""
return self._measure_number
class VectorZero(BasisDependentZero, Vector):
"""
Class to denote a zero vector
"""
_op_priority = 12.1
_pretty_form = '0'
_latex_form = r'\mathbf{\hat{0}}'
def __new__(cls):
obj = BasisDependentZero.__new__(cls)
return obj
class Cross(Vector):
"""
Represents unevaluated Cross product.
Examples
========
>>> from sympy.vector import CoordSys3D, Cross
>>> R = CoordSys3D('R')
>>> v1 = R.i + R.j + R.k
>>> v2 = R.x * R.i + R.y * R.j + R.z * R.k
>>> Cross(v1, v2)
Cross(R.i + R.j + R.k, R.x*R.i + R.y*R.j + R.z*R.k)
>>> Cross(v1, v2).doit()
(-R.y + R.z)*R.i + (R.x - R.z)*R.j + (-R.x + R.y)*R.k
"""
def __new__(cls, expr1, expr2):
expr1 = sympify(expr1)
expr2 = sympify(expr2)
if default_sort_key(expr1) > default_sort_key(expr2):
return -Cross(expr2, expr1)
obj = Expr.__new__(cls, expr1, expr2)
obj._expr1 = expr1
obj._expr2 = expr2
return obj
def doit(self, **kwargs):
return cross(self._expr1, self._expr2)
class Dot(Expr):
"""
Represents unevaluated Dot product.
Examples
========
>>> from sympy.vector import CoordSys3D, Dot
>>> from sympy import symbols
>>> R = CoordSys3D('R')
>>> a, b, c = symbols('a b c')
>>> v1 = R.i + R.j + R.k
>>> v2 = a * R.i + b * R.j + c * R.k
>>> Dot(v1, v2)
Dot(R.i + R.j + R.k, a*R.i + b*R.j + c*R.k)
>>> Dot(v1, v2).doit()
a + b + c
"""
def __new__(cls, expr1, expr2):
expr1 = sympify(expr1)
expr2 = sympify(expr2)
expr1, expr2 = sorted([expr1, expr2], key=default_sort_key)
obj = Expr.__new__(cls, expr1, expr2)
obj._expr1 = expr1
obj._expr2 = expr2
return obj
def doit(self, **kwargs):
return dot(self._expr1, self._expr2)
def cross(vect1, vect2):
"""
Returns cross product of two vectors.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy.vector.vector import cross
>>> R = CoordSys3D('R')
>>> v1 = R.i + R.j + R.k
>>> v2 = R.x * R.i + R.y * R.j + R.z * R.k
>>> cross(v1, v2)
(-R.y + R.z)*R.i + (R.x - R.z)*R.j + (-R.x + R.y)*R.k
"""
if isinstance(vect1, Add):
return VectorAdd.fromiter(cross(i, vect2) for i in vect1.args)
if isinstance(vect2, Add):
return VectorAdd.fromiter(cross(vect1, i) for i in vect2.args)
if isinstance(vect1, BaseVector) and isinstance(vect2, BaseVector):
if vect1._sys == vect2._sys:
n1 = vect1.args[0]
n2 = vect2.args[0]
if n1 == n2:
return Vector.zero
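# The remaining base vector gives the direction; the sign is +1 for a
# cyclic pair (i x j = k, j x k = i, k x i = j) and -1 otherwise.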
n3 = ({0,1,2}.difference({n1, n2})).pop()
sign = 1 if ((n1 + 1) % 3 == n2) else -1
return sign*vect1._sys.base_vectors()[n3]
from .functions import express
try:
v = express(vect1, vect2._sys)
except ValueError:
return Cross(vect1, vect2)
else:
return cross(v, vect2)
if isinstance(vect1, VectorZero) or isinstance(vect2, VectorZero):
return Vector.zero
if isinstance(vect1, VectorMul):
v1, m1 = next(iter(vect1.components.items()))
return m1*cross(v1, vect2)
if isinstance(vect2, VectorMul):
v2, m2 = next(iter(vect2.components.items()))
return m2*cross(vect1, v2)
return Cross(vect1, vect2)
def dot(vect1, vect2):
"""
Returns dot product of two vectors.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy.vector.vector import dot
>>> R = CoordSys3D('R')
>>> v1 = R.i + R.j + R.k
>>> v2 = R.x * R.i + R.y * R.j + R.z * R.k
>>> dot(v1, v2)
R.x + R.y + R.z
"""
if isinstance(vect1, Add):
return Add.fromiter(dot(i, vect2) for i in vect1.args)
if isinstance(vect2, Add):
return Add.fromiter(dot(vect1, i) for i in vect2.args)
if isinstance(vect1, BaseVector) and isinstance(vect2, BaseVector):
if vect1._sys == vect2._sys:
return S.One if vect1 == vect2 else S.Zero
from .functions import express
try:
v = express(vect2, vect1._sys)
except ValueError:
return Dot(vect1, vect2)
else:
return dot(vect1, v)
if isinstance(vect1, VectorZero) or isinstance(vect2, VectorZero):
return S.Zero
if isinstance(vect1, VectorMul):
v1, m1 = next(iter(vect1.components.items()))
return m1*dot(v1, vect2)
if isinstance(vect2, VectorMul):
v2, m2 = next(iter(vect2.components.items()))
return m2*dot(vect1, v2)
return Dot(vect1, vect2)
Vector._expr_type = Vector
Vector._mul_func = VectorMul
Vector._add_func = VectorAdd
Vector._zero_func = VectorZero
Vector._base_func = BaseVector
Vector.zero = VectorZero()
|
3476dad3f42d47004094936130b4bf2033568bde406f446bbb8a3d23a089021b | import collections
from sympy.core.expr import Expr
from sympy.core import sympify, S, preorder_traversal
from sympy.vector.coordsysrect import CoordSys3D
from sympy.vector.vector import Vector, VectorMul, VectorAdd, Cross, Dot
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.core.function import Derivative
from sympy.core.add import Add
from sympy.core.mul import Mul
def _get_coord_systems(expr):
g = preorder_traversal(expr)
ret = set()
for i in g:
if isinstance(i, CoordSys3D):
ret.add(i)
g.skip()
return frozenset(ret)
def _get_coord_sys_from_expr(expr, coord_sys=None):
"""
expr : expression
The coordinate system is extracted from this parameter.
"""
# TODO: Remove this line when the warning from issue #12884 is removed
if coord_sys is not None:
SymPyDeprecationWarning(
feature="coord_sys parameter",
useinstead="do not use it",
deprecated_since_version="1.1",
issue=12884,
).warn()
return _get_coord_systems(expr)
def _split_mul_args_wrt_coordsys(expr):
d = collections.defaultdict(lambda: S.One)
for i in expr.args:
d[_get_coord_systems(i)] *= i
return list(d.values())
class Gradient(Expr):
"""
Represents unevaluated Gradient.
Examples
========
>>> from sympy.vector import CoordSys3D, Gradient
>>> R = CoordSys3D('R')
>>> s = R.x*R.y*R.z
>>> Gradient(s)
Gradient(R.x*R.y*R.z)
"""
def __new__(cls, expr):
expr = sympify(expr)
obj = Expr.__new__(cls, expr)
obj._expr = expr
return obj
def doit(self, **kwargs):
return gradient(self._expr, doit=True)
class Divergence(Expr):
"""
Represents unevaluated Divergence.
Examples
========
>>> from sympy.vector import CoordSys3D, Divergence
>>> R = CoordSys3D('R')
>>> v = R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k
>>> Divergence(v)
Divergence(R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k)
"""
def __new__(cls, expr):
expr = sympify(expr)
obj = Expr.__new__(cls, expr)
obj._expr = expr
return obj
def doit(self, **kwargs):
return divergence(self._expr, doit=True)
class Curl(Expr):
"""
Represents unevaluated Curl.
Examples
========
>>> from sympy.vector import CoordSys3D, Curl
>>> R = CoordSys3D('R')
>>> v = R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k
>>> Curl(v)
Curl(R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k)
"""
def __new__(cls, expr):
expr = sympify(expr)
obj = Expr.__new__(cls, expr)
obj._expr = expr
return obj
def doit(self, **kwargs):
return curl(self._expr, doit=True)
def curl(vect, coord_sys=None, doit=True):
"""
Returns the curl of a vector field computed wrt the base scalars
of the given coordinate system.
Parameters
==========
vect : Vector
The vector operand
coord_sys : CoordSys3D
The coordinate system to calculate the curl in.
Deprecated since version 1.1
doit : bool
If True, the result is returned after calling .doit() on
each component. Else, the returned expression contains
Derivative instances
Examples
========
>>> from sympy.vector import CoordSys3D, curl
>>> R = CoordSys3D('R')
>>> v1 = R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k
>>> curl(v1)
0
>>> v2 = R.x*R.y*R.z*R.i
>>> curl(v2)
R.x*R.y*R.j + (-R.x*R.z)*R.k
"""
coord_sys = _get_coord_sys_from_expr(vect, coord_sys)
if len(coord_sys) == 0:
return Vector.zero
elif len(coord_sys) == 1:
coord_sys = next(iter(coord_sys))
i, j, k = coord_sys.base_vectors()
x, y, z = coord_sys.base_scalars()
h1, h2, h3 = coord_sys.lame_coefficients()
vectx = vect.dot(i)
vecty = vect.dot(j)
vectz = vect.dot(k)
outvec = Vector.zero
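# Curl in orthogonal curvilinear coordinates with Lame coefficients
# h1, h2, h3; e.g. the i-component is
# (d(h3*F3)/dy - d(h2*F2)/dz) / (h2*h3),
# which reduces to the familiar Cartesian formula when h1 = h2 = h3 = 1.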
outvec += (Derivative(vectz * h3, y) -
Derivative(vecty * h2, z)) * i / (h2 * h3)
outvec += (Derivative(vectx * h1, z) -
Derivative(vectz * h3, x)) * j / (h1 * h3)
outvec += (Derivative(vecty * h2, x) -
Derivative(vectx * h1, y)) * k / (h2 * h1)
if doit:
return outvec.doit()
return outvec
else:
if isinstance(vect, (Add, VectorAdd)):
from sympy.vector import express
try:
cs = next(iter(coord_sys))
args = [express(i, cs, variables=True) for i in vect.args]
except ValueError:
args = vect.args
return VectorAdd.fromiter(curl(i, doit=doit) for i in args)
elif isinstance(vect, (Mul, VectorMul)):
vector = [i for i in vect.args if isinstance(i, (Vector, Cross, Gradient))][0]
scalar = Mul.fromiter(i for i in vect.args if not isinstance(i, (Vector, Cross, Gradient)))
res = Cross(gradient(scalar), vector).doit() + scalar*curl(vector, doit=doit)
if doit:
return res.doit()
return res
elif isinstance(vect, (Cross, Curl, Gradient)):
return Curl(vect)
else:
return Curl(vect)
def divergence(vect, coord_sys=None, doit=True):
"""
Returns the divergence of a vector field computed wrt the base
scalars of the given coordinate system.
Parameters
==========
vect : Vector
The vector operand
coord_sys : CoordSys3D
The coordinate system to calculate the divergence in
Deprecated since version 1.1
doit : bool
If True, the result is returned after calling .doit() on
each component. Else, the returned expression contains
Derivative instances
Examples
========
>>> from sympy.vector import CoordSys3D, divergence
>>> R = CoordSys3D('R')
>>> v1 = R.x*R.y*R.z * (R.i+R.j+R.k)
>>> divergence(v1)
R.x*R.y + R.x*R.z + R.y*R.z
>>> v2 = 2*R.y*R.z*R.j
>>> divergence(v2)
2*R.z
"""
coord_sys = _get_coord_sys_from_expr(vect, coord_sys)
if len(coord_sys) == 0:
return S.Zero
elif len(coord_sys) == 1:
if isinstance(vect, (Cross, Curl, Gradient)):
return Divergence(vect)
# TODO: in case of many coord systems, this gets a random one:
coord_sys = next(iter(coord_sys))
i, j, k = coord_sys.base_vectors()
x, y, z = coord_sys.base_scalars()
h1, h2, h3 = coord_sys.lame_coefficients()
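# Divergence in orthogonal curvilinear coordinates:
# div F = (d(h2*h3*F1)/dx + d(h3*h1*F2)/dy + d(h1*h2*F3)/dz) / (h1*h2*h3)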
vx = _diff_conditional(vect.dot(i), x, h2, h3) \
/ (h1 * h2 * h3)
vy = _diff_conditional(vect.dot(j), y, h3, h1) \
/ (h1 * h2 * h3)
vz = _diff_conditional(vect.dot(k), z, h1, h2) \
/ (h1 * h2 * h3)
res = vx + vy + vz
if doit:
return res.doit()
return res
else:
if isinstance(vect, (Add, VectorAdd)):
return Add.fromiter(divergence(i, doit=doit) for i in vect.args)
elif isinstance(vect, (Mul, VectorMul)):
vector = [i for i in vect.args if isinstance(i, (Vector, Cross, Gradient))][0]
scalar = Mul.fromiter(i for i in vect.args if not isinstance(i, (Vector, Cross, Gradient)))
res = Dot(vector, gradient(scalar)) + scalar*divergence(vector, doit=doit)
if doit:
return res.doit()
return res
elif isinstance(vect, (Cross, Curl, Gradient)):
return Divergence(vect)
else:
return Divergence(vect)
def gradient(scalar_field, coord_sys=None, doit=True):
"""
Returns the vector gradient of a scalar field computed wrt the
base scalars of the given coordinate system.
Parameters
==========
scalar_field : SymPy Expr
The scalar field to compute the gradient of
coord_sys : CoordSys3D
The coordinate system to calculate the gradient in
Deprecated since version 1.1
doit : bool
If True, the result is returned after calling .doit() on
each component. Else, the returned expression contains
Derivative instances
Examples
========
>>> from sympy.vector import CoordSys3D, gradient
>>> R = CoordSys3D('R')
>>> s1 = R.x*R.y*R.z
>>> gradient(s1)
R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k
>>> s2 = 5*R.x**2*R.z
>>> gradient(s2)
10*R.x*R.z*R.i + 5*R.x**2*R.k
"""
coord_sys = _get_coord_sys_from_expr(scalar_field, coord_sys)
if len(coord_sys) == 0:
return Vector.zero
elif len(coord_sys) == 1:
coord_sys = next(iter(coord_sys))
h1, h2, h3 = coord_sys.lame_coefficients()
i, j, k = coord_sys.base_vectors()
x, y, z = coord_sys.base_scalars()
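# Gradient in orthogonal curvilinear coordinates:
# grad f = (1/h1)*df/dx * i + (1/h2)*df/dy * j + (1/h3)*df/dz * k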
vx = Derivative(scalar_field, x) / h1
vy = Derivative(scalar_field, y) / h2
vz = Derivative(scalar_field, z) / h3
if doit:
return (vx * i + vy * j + vz * k).doit()
return vx * i + vy * j + vz * k
else:
if isinstance(scalar_field, (Add, VectorAdd)):
return VectorAdd.fromiter(gradient(i) for i in scalar_field.args)
if isinstance(scalar_field, (Mul, VectorMul)):
s = _split_mul_args_wrt_coordsys(scalar_field)
return VectorAdd.fromiter(scalar_field / i * gradient(i) for i in s)
return Gradient(scalar_field)
class Laplacian(Expr):
"""
Represents unevaluated Laplacian.
Examples
========
>>> from sympy.vector import CoordSys3D, Laplacian
>>> R = CoordSys3D('R')
>>> v = 3*R.x**3*R.y**2*R.z**3
>>> Laplacian(v)
Laplacian(3*R.x**3*R.y**2*R.z**3)
"""
def __new__(cls, expr):
expr = sympify(expr)
obj = Expr.__new__(cls, expr)
obj._expr = expr
return obj
def doit(self, **kwargs):
from sympy.vector.functions import laplacian
return laplacian(self._expr)
def _diff_conditional(expr, base_scalar, coeff_1, coeff_2):
"""
First re-expresses expr in the system that base_scalar belongs to,
then differentiates the product coeff_1 * coeff_2 * expr wrt
base_scalar. Returns 0 if that product vanishes.
"""
from sympy.vector.functions import express
new_expr = express(expr, base_scalar.system, variables=True)
arg = coeff_1 * coeff_2 * new_expr
return Derivative(arg, base_scalar) if arg else S.Zero
|
35fd12904d7c8574bb802d5ba5079031a0c213ef3fba7398bf89edf9bb494e9f | from sympy.vector.coordsysrect import CoordSys3D
from sympy.vector.deloperator import Del
from sympy.vector.scalar import BaseScalar
from sympy.vector.vector import Vector, BaseVector
from sympy.vector.operators import gradient, curl, divergence
from sympy.core.function import diff
from sympy.core.singleton import S
from sympy.integrals.integrals import integrate
from sympy.simplify.simplify import simplify
from sympy.core import sympify
from sympy.vector.dyadic import Dyadic
def express(expr, system, system2=None, variables=False):
"""
Global function for 'express' functionality.
Re-expresses a Vector, Dyadic or scalar(sympyfiable) in the given
coordinate system.
If 'variables' is True, then the coordinate variables (base scalars)
of other coordinate systems present in the vector/scalar field or
dyadic are also substituted in terms of the base scalars of the
given system.
Parameters
==========
expr : Vector/Dyadic/scalar(sympyfiable)
The expression to re-express in CoordSys3D 'system'
system: CoordSys3D
The coordinate system the expr is to be expressed in
system2: CoordSys3D
The other coordinate system required for re-expression
(only for a Dyadic Expr)
variables : boolean
Specifies whether to substitute the coordinate variables present
in expr, in terms of those of parameter system
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import Symbol, cos, sin
>>> N = CoordSys3D('N')
>>> q = Symbol('q')
>>> B = N.orient_new_axis('B', q, N.k)
>>> from sympy.vector import express
>>> express(B.i, N)
(cos(q))*N.i + (sin(q))*N.j
>>> express(N.x, B, variables=True)
B.x*cos(q) - B.y*sin(q)
>>> d = N.i.outer(N.i)
>>> express(d, B, N) == (cos(q))*(B.i|N.i) + (-sin(q))*(B.j|N.i)
True
"""
if expr in (0, Vector.zero):
return expr
if not isinstance(system, CoordSys3D):
raise TypeError("system should be a CoordSys3D \
instance")
if isinstance(expr, Vector):
if system2 is not None:
raise ValueError("system2 should not be provided for \
Vectors")
# Given expr is a Vector
if variables:
# If variables attribute is True, substitute
# the coordinate variables in the Vector
system_list = []
for x in expr.atoms(BaseScalar, BaseVector):
if x.system != system:
system_list.append(x.system)
system_list = set(system_list)
subs_dict = {}
for f in system_list:
subs_dict.update(f.scalar_map(system))
expr = expr.subs(subs_dict)
# Re-express in this coordinate system
outvec = Vector.zero
parts = expr.separate()
for x in parts:
if x != system:
temp = system.rotation_matrix(x) * parts[x].to_matrix(x)
outvec += matrix_to_vector(temp, system)
else:
outvec += parts[x]
return outvec
elif isinstance(expr, Dyadic):
if system2 is None:
system2 = system
if not isinstance(system2, CoordSys3D):
raise TypeError("system2 should be a CoordSys3D \
instance")
outdyad = Dyadic.zero
var = variables
for k, v in expr.components.items():
outdyad += (express(v, system, variables=var) *
(express(k.args[0], system, variables=var) |
express(k.args[1], system2, variables=var)))
return outdyad
else:
if system2 is not None:
raise ValueError("system2 should not be provided for \
Vectors")
if variables:
# Given expr is a scalar field
system_set = set()
expr = sympify(expr)
# Substitute all the coordinate variables
for x in expr.atoms(BaseScalar):
if x.system != system:
system_set.add(x.system)
subs_dict = {}
for f in system_set:
subs_dict.update(f.scalar_map(system))
return expr.subs(subs_dict)
return expr
def directional_derivative(field, direction_vector):
"""
Returns the directional derivative of a scalar or vector field computed
along a given vector, in the coordinate system in which the field is expressed.
Parameters
==========
field : Vector or Scalar
The scalar or vector field to compute the directional derivative of
direction_vector : Vector
The vector along which the directional derivative is to be computed.
Examples
========
>>> from sympy.vector import CoordSys3D, directional_derivative
>>> R = CoordSys3D('R')
>>> f1 = R.x*R.y*R.z
>>> v1 = 3*R.i + 4*R.j + R.k
>>> directional_derivative(f1, v1)
R.x*R.y + 4*R.x*R.z + 3*R.y*R.z
>>> f2 = 5*R.x**2*R.z
>>> directional_derivative(f2, v1)
5*R.x**2 + 30*R.x*R.z
"""
from sympy.vector.operators import _get_coord_sys_from_expr
coord_sys = _get_coord_sys_from_expr(field)
if len(coord_sys) > 0:
# TODO: This gets a random coordinate system in case of multiple ones:
coord_sys = next(iter(coord_sys))
field = express(field, coord_sys, variables=True)
i, j, k = coord_sys.base_vectors()
x, y, z = coord_sys.base_scalars()
out = Vector.dot(direction_vector, i) * diff(field, x)
out += Vector.dot(direction_vector, j) * diff(field, y)
out += Vector.dot(direction_vector, k) * diff(field, z)
if out == 0 and isinstance(field, Vector):
out = Vector.zero
return out
elif isinstance(field, Vector):
return Vector.zero
else:
return S.Zero
def laplacian(expr):
"""
Return the laplacian of the given field computed in terms of
the base scalars of the given coordinate system.
Parameters
==========
expr : SymPy Expr or Vector
expr denotes a scalar or vector field.
Examples
========
>>> from sympy.vector import CoordSys3D, laplacian
>>> R = CoordSys3D('R')
>>> f = R.x**2*R.y**5*R.z
>>> laplacian(f)
20*R.x**2*R.y**3*R.z + 2*R.y**5*R.z
>>> f = R.x**2*R.i + R.y**3*R.j + R.z**4*R.k
>>> laplacian(f)
2*R.i + 6*R.y*R.j + 12*R.z**2*R.k
"""
delop = Del()
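# For a vector field the identity laplacian(F) = grad(div(F)) - curl(curl(F))
# is used; for a scalar field the Laplacian is computed as del.(del(f)).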
if expr.is_Vector:
return (gradient(divergence(expr)) - curl(curl(expr))).doit()
return delop.dot(delop(expr)).doit()
def is_conservative(field):
"""
Checks if a field is conservative.
Parameters
==========
field : Vector
The field to check for conservative property
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy.vector import is_conservative
>>> R = CoordSys3D('R')
>>> is_conservative(R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k)
True
>>> is_conservative(R.z*R.j)
False
"""
# Field is conservative irrespective of system
# Take the first coordinate system in the result of the
# separate method of Vector
if not isinstance(field, Vector):
raise TypeError("field should be a Vector")
if field == Vector.zero:
return True
return curl(field).simplify() == Vector.zero
def is_solenoidal(field):
"""
Checks if a field is solenoidal.
Parameters
==========
field : Vector
The field to check for solenoidal property
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy.vector import is_solenoidal
>>> R = CoordSys3D('R')
>>> is_solenoidal(R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k)
True
>>> is_solenoidal(R.y * R.j)
False
"""
# Field is solenoidal irrespective of system
# Take the first coordinate system in the result of the
# separate method in Vector
if not isinstance(field, Vector):
raise TypeError("field should be a Vector")
if field == Vector.zero:
return True
return divergence(field).simplify() is S.Zero
def scalar_potential(field, coord_sys):
"""
Returns the scalar potential function of a field in a given
coordinate system (without the added integration constant).
Parameters
==========
field : Vector
The vector field whose scalar potential function is to be
calculated
coord_sys : CoordSys3D
The coordinate system to do the calculation in
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy.vector import scalar_potential, gradient
>>> R = CoordSys3D('R')
>>> scalar_potential(R.k, R) == R.z
True
>>> scalar_field = 2*R.x**2*R.y*R.z
>>> grad_field = gradient(scalar_field)
>>> scalar_potential(grad_field, R)
2*R.x**2*R.y*R.z
"""
# Check whether field is conservative
if not is_conservative(field):
raise ValueError("Field is not conservative")
if field == Vector.zero:
return S.Zero
# Express the field entirely in coord_sys
# Substitute coordinate variables also
if not isinstance(coord_sys, CoordSys3D):
raise TypeError("coord_sys must be a CoordSys3D")
field = express(field, coord_sys, variables=True)
dimensions = coord_sys.base_vectors()
scalars = coord_sys.base_scalars()
# Calculate scalar potential function
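# Integrate the first component wrt the first base scalar, then, for each
# further direction, add the integral of the part of that component not
# already produced by differentiating the accumulated potential.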
temp_function = integrate(field.dot(dimensions[0]), scalars[0])
for i, dim in enumerate(dimensions[1:]):
partial_diff = diff(temp_function, scalars[i + 1])
partial_diff = field.dot(dim) - partial_diff
temp_function += integrate(partial_diff, scalars[i + 1])
return temp_function
def scalar_potential_difference(field, coord_sys, point1, point2):
"""
Returns the scalar potential difference between two points in a
certain coordinate system, wrt a given field.
If a scalar field is provided, its values at the two points are
considered. If a conservative vector field is provided, the values
of its scalar potential function at the two points are used.
Returns (potential at point2) - (potential at point1)
The position vectors of the two Points are calculated wrt the
origin of the coordinate system provided.
Parameters
==========
field : Vector/Expr
The field to calculate wrt
coord_sys : CoordSys3D
The coordinate system to do the calculations in
point1 : Point
The initial Point in given coordinate system
point2 : Point
The second Point in the given coordinate system
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy.vector import scalar_potential_difference
>>> R = CoordSys3D('R')
>>> P = R.origin.locate_new('P', R.x*R.i + R.y*R.j + R.z*R.k)
>>> vectfield = 4*R.x*R.y*R.i + 2*R.x**2*R.j
>>> scalar_potential_difference(vectfield, R, R.origin, P)
2*R.x**2*R.y
>>> Q = R.origin.locate_new('O', 3*R.i + R.j + 2*R.k)
>>> scalar_potential_difference(vectfield, R, P, Q)
-2*R.x**2*R.y + 18
"""
if not isinstance(coord_sys, CoordSys3D):
raise TypeError("coord_sys must be a CoordSys3D")
if isinstance(field, Vector):
# Get the scalar potential function
scalar_fn = scalar_potential(field, coord_sys)
else:
# Field is a scalar
scalar_fn = field
# Express positions in required coordinate system
origin = coord_sys.origin
position1 = express(point1.position_wrt(origin), coord_sys,
variables=True)
position2 = express(point2.position_wrt(origin), coord_sys,
variables=True)
# Get the two positions as substitution dicts for coordinate variables
subs_dict1 = {}
subs_dict2 = {}
scalars = coord_sys.base_scalars()
for i, x in enumerate(coord_sys.base_vectors()):
subs_dict1[scalars[i]] = x.dot(position1)
subs_dict2[scalars[i]] = x.dot(position2)
return scalar_fn.subs(subs_dict2) - scalar_fn.subs(subs_dict1)
def matrix_to_vector(matrix, system):
"""
Converts a vector in matrix form to a Vector instance.
It is assumed that the elements of the Matrix represent the
measure numbers of the components of the vector along basis
vectors of 'system'.
Parameters
==========
matrix : SymPy Matrix, Dimensions: (3, 1)
The matrix to be converted to a vector
system : CoordSys3D
The coordinate system the vector is to be defined in
Examples
========
>>> from sympy import ImmutableMatrix as Matrix
>>> m = Matrix([1, 2, 3])
>>> from sympy.vector import CoordSys3D, matrix_to_vector
>>> C = CoordSys3D('C')
>>> v = matrix_to_vector(m, C)
>>> v
C.i + 2*C.j + 3*C.k
>>> v.to_matrix(C) == m
True
"""
outvec = Vector.zero
vects = system.base_vectors()
for i, x in enumerate(matrix):
outvec += x * vects[i]
return outvec
def _path(from_object, to_object):
"""
Calculates the 'path' of objects starting from 'from_object'
to 'to_object', along with the index of the first common
ancestor in the tree.
Returns (index, list) tuple.
"""
if from_object._root != to_object._root:
raise ValueError("No connecting path found between " +
str(from_object) + " and " + str(to_object))
other_path = []
obj = to_object
while obj._parent is not None:
other_path.append(obj)
obj = obj._parent
other_path.append(obj)
object_set = set(other_path)
from_path = []
obj = from_object
while obj not in object_set:
from_path.append(obj)
obj = obj._parent
index = len(from_path)
i = other_path.index(obj)
while i >= 0:
from_path.append(other_path[i])
i -= 1
return index, from_path
def orthogonalize(*vlist, orthonormal=False):
"""
Takes a sequence of independent vectors and orthogonalizes them
using the Gram-Schmidt process. Returns a list of
orthogonal or orthonormal vectors.
Parameters
==========
vlist : sequence of independent vectors to be made orthogonal.
orthonormal : Optional parameter
Set to True if the vectors returned should be
orthonormal.
Default: False
Examples
========
>>> from sympy.vector.coordsysrect import CoordSys3D
>>> from sympy.vector.functions import orthogonalize
>>> C = CoordSys3D('C')
>>> i, j, k = C.base_vectors()
>>> v1 = i + 2*j
>>> v2 = 2*i + 3*j
>>> orthogonalize(v1, v2)
[C.i + 2*C.j, 2/5*C.i + (-1/5)*C.j]
References
==========
.. [1] https://en.wikipedia.org/wiki/Gram-Schmidt_process
"""
if not all(isinstance(vec, Vector) for vec in vlist):
raise TypeError('Each element must be of Type Vector')
ortho_vlist = []
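# Classical Gram-Schmidt: subtract from each vector its projections onto
# all previously orthogonalized vectors, then (optionally) normalize.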
for i, term in enumerate(vlist):
for j in range(i):
term -= ortho_vlist[j].projection(vlist[i])
# TODO : The following line introduces a performance issue
# and needs to be changed once a good solution for issue #10279 is
# found.
if simplify(term).equals(Vector.zero):
raise ValueError("Vector set not linearly independent")
ortho_vlist.append(term)
if orthonormal:
ortho_vlist = [vec.normalize() for vec in ortho_vlist]
return ortho_vlist
|
d2bd24024d571f6e837cf936da11a96b8ff40f4ea4f614b4cd6f8792e2f1769a | from sympy.core.singleton import S
from sympy.simplify.simplify import simplify
from sympy.core import Basic, diff
from sympy.core.sorting import default_sort_key
from sympy.matrices import Matrix
from sympy.vector import (CoordSys3D, Vector, ParametricRegion,
parametric_region_list, ImplicitRegion)
from sympy.vector.operators import _get_coord_sys_from_expr
from sympy.integrals import Integral, integrate
from sympy.utilities.iterables import topological_sort
from sympy.geometry.entity import GeometryEntity
class ParametricIntegral(Basic):
"""
Represents integral of a scalar or vector field
over a Parametric Region
Examples
========
>>> from sympy import cos, sin, pi
>>> from sympy.vector import CoordSys3D, ParametricRegion, ParametricIntegral
>>> from sympy.abc import r, t, theta, phi
>>> C = CoordSys3D('C')
>>> curve = ParametricRegion((3*t - 2, t + 1), (t, 1, 2))
>>> ParametricIntegral(C.x, curve)
5*sqrt(10)/2
>>> length = ParametricIntegral(1, curve)
>>> length
sqrt(10)
>>> semisphere = ParametricRegion((2*sin(phi)*cos(theta), 2*sin(phi)*sin(theta), 2*cos(phi)),\
(theta, 0, 2*pi), (phi, 0, pi/2))
>>> ParametricIntegral(C.z, semisphere)
8*pi
>>> ParametricIntegral(C.j + C.k, ParametricRegion((r*cos(theta), r*sin(theta)), r, theta))
0
"""
def __new__(cls, field, parametricregion):
coord_set = _get_coord_sys_from_expr(field)
if len(coord_set) == 0:
coord_sys = CoordSys3D('C')
elif len(coord_set) > 1:
raise ValueError
else:
coord_sys = next(iter(coord_set))
if parametricregion.dimensions == 0:
return S.Zero
base_vectors = coord_sys.base_vectors()
base_scalars = coord_sys.base_scalars()
parametricfield = field
r = Vector.zero
for i in range(len(parametricregion.definition)):
r += base_vectors[i]*parametricregion.definition[i]
if len(coord_set) != 0:
for i in range(len(parametricregion.definition)):
parametricfield = parametricfield.subs(base_scalars[i], parametricregion.definition[i])
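# Depending on the dimension of the region this becomes a line integral
# (ds = |dr/dt| dt for scalar fields, dr = r'(t) dt for vector fields), a
# surface integral (using |r_u x r_v| or the normal r_u x r_v), or a
# volume integral scaled by the Jacobian determinant of the definition.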
if parametricregion.dimensions == 1:
parameter = parametricregion.parameters[0]
r_diff = diff(r, parameter)
lower, upper = parametricregion.limits[parameter][0], parametricregion.limits[parameter][1]
if isinstance(parametricfield, Vector):
integrand = simplify(r_diff.dot(parametricfield))
else:
integrand = simplify(r_diff.magnitude()*parametricfield)
result = integrate(integrand, (parameter, lower, upper))
elif parametricregion.dimensions == 2:
u, v = cls._bounds_case(parametricregion.parameters, parametricregion.limits)
r_u = diff(r, u)
r_v = diff(r, v)
normal_vector = simplify(r_u.cross(r_v))
if isinstance(parametricfield, Vector):
integrand = parametricfield.dot(normal_vector)
else:
integrand = parametricfield*normal_vector.magnitude()
integrand = simplify(integrand)
lower_u, upper_u = parametricregion.limits[u][0], parametricregion.limits[u][1]
lower_v, upper_v = parametricregion.limits[v][0], parametricregion.limits[v][1]
result = integrate(integrand, (u, lower_u, upper_u), (v, lower_v, upper_v))
else:
variables = cls._bounds_case(parametricregion.parameters, parametricregion.limits)
coeff = Matrix(parametricregion.definition).jacobian(variables).det()
integrand = simplify(parametricfield*coeff)
l = [(var, parametricregion.limits[var][0], parametricregion.limits[var][1]) for var in variables]
result = integrate(integrand, *l)
if not isinstance(result, Integral):
return result
else:
return super().__new__(cls, field, parametricregion)
@classmethod
def _bounds_case(cls, parameters, limits):
V = list(limits.keys())
E = list()
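# Build a dependency graph: an edge (p, q) means the limits of p involve
# the parameter q, so p must come earlier in the integration order
# (i.e. be the inner integral) than q.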
for p in V:
lower_p = limits[p][0]
upper_p = limits[p][1]
lower_p = lower_p.atoms()
upper_p = upper_p.atoms()
for q in V:
if p == q:
continue
if lower_p.issuperset({q}) or upper_p.issuperset({q}):
E.append((p, q))
if not E:
return parameters
else:
return topological_sort((V, E), key=default_sort_key)
@property
def field(self):
return self.args[0]
@property
def parametricregion(self):
return self.args[1]
def vector_integrate(field, *region):
"""
Compute the integral of a vector/scalar field
over a region or a set of parameters.
Examples
========
>>> from sympy.vector import CoordSys3D, ParametricRegion, vector_integrate
>>> from sympy.abc import x, y, t
>>> C = CoordSys3D('C')
>>> region = ParametricRegion((t, t**2), (t, 1, 5))
>>> vector_integrate(C.x*C.i, region)
12
Integrals over some objects of geometry module can also be calculated.
>>> from sympy.geometry import Point, Circle, Triangle
>>> c = Circle(Point(0, 2), 5)
>>> vector_integrate(C.x**2 + C.y**2, c)
290*pi
>>> triangle = Triangle(Point(-2, 3), Point(2, 3), Point(0, 5))
>>> vector_integrate(3*C.x**2*C.y*C.i + C.j, triangle)
-8
Integrals over some simple implicit regions can be computed. In most cases,
however, the computation takes too long because the expressions of the
parametric representation become large.
>>> from sympy.vector import ImplicitRegion
>>> c2 = ImplicitRegion((x, y), (x - 2)**2 + (y - 1)**2 - 9)
>>> vector_integrate(1, c2)
6*pi
Integral of fields with respect to base scalars:
>>> vector_integrate(12*C.y**3, (C.y, 1, 3))
240
>>> vector_integrate(C.x**2*C.z, C.x)
C.x**3*C.z/3
>>> vector_integrate(C.x*C.i - C.y*C.k, C.x)
(Integral(C.x, C.x))*C.i + (Integral(-C.y, C.x))*C.k
>>> _.doit()
C.x**2/2*C.i + (-C.x*C.y)*C.k
"""
if len(region) == 1:
if isinstance(region[0], ParametricRegion):
return ParametricIntegral(field, region[0])
if isinstance(region[0], ImplicitRegion):
region = parametric_region_list(region[0])[0]
return vector_integrate(field, region)
if isinstance(region[0], GeometryEntity):
regions_list = parametric_region_list(region[0])
result = 0
for reg in regions_list:
result += vector_integrate(field, reg)
return result
return integrate(field, *region)
|
b4f67203f69ab82513f9149e4ce9501cb9dcd92d93bcf7333dea98b5af3c0b5b | from sympy.core.numbers import Rational
from sympy.core.singleton import S
from sympy.core.symbol import symbols
from sympy.functions.elementary.complexes import sign
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.polys.polytools import gcd
from sympy.sets.sets import Complement
from sympy.core import Basic, Tuple, diff, expand, Eq, Integer
from sympy.core.sorting import ordered
from sympy.core.symbol import _symbol
from sympy.solvers import solveset, nonlinsolve, diophantine
from sympy.polys import total_degree
from sympy.geometry import Point
from sympy.ntheory.factor_ import core
class ImplicitRegion(Basic):
"""
Represents an implicit region in space.
Examples
========
>>> from sympy import Eq
>>> from sympy.abc import x, y, z, t
>>> from sympy.vector import ImplicitRegion
>>> ImplicitRegion((x, y), x**2 + y**2 - 4)
ImplicitRegion((x, y), x**2 + y**2 - 4)
>>> ImplicitRegion((x, y), Eq(y*x, 1))
ImplicitRegion((x, y), x*y - 1)
>>> parabola = ImplicitRegion((x, y), y**2 - 4*x)
>>> parabola.degree
2
>>> parabola.equation
-4*x + y**2
>>> parabola.rational_parametrization(t)
(4/t**2, 4/t)
>>> r = ImplicitRegion((x, y, z), Eq(z, x**2 + y**2))
>>> r.variables
(x, y, z)
>>> r.singular_points()
{(0, 0, 0)}
>>> r.regular_point()
(-10, -10, 200)
Parameters
==========
variables : tuple to map variables in implicit equation to base scalars.
equation : An expression or Eq denoting the implicit equation of the region.
"""
def __new__(cls, variables, equation):
if not isinstance(variables, Tuple):
variables = Tuple(*variables)
if isinstance(equation, Eq):
equation = equation.lhs - equation.rhs
return super().__new__(cls, variables, equation)
@property
def variables(self):
return self.args[0]
@property
def equation(self):
return self.args[1]
@property
def degree(self):
return total_degree(self.equation)
def regular_point(self):
"""
Returns a point on the implicit region.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.vector import ImplicitRegion
>>> circle = ImplicitRegion((x, y), (x + 2)**2 + (y - 3)**2 - 16)
>>> circle.regular_point()
(-2, -1)
>>> parabola = ImplicitRegion((x, y), x**2 - 4*y)
>>> parabola.regular_point()
(0, 0)
>>> r = ImplicitRegion((x, y, z), (x + y + z)**4)
>>> r.regular_point()
(-10, -10, 20)
References
==========
- Erik Hillgarter, "Rational Points on Conics", Diploma Thesis, RISC-Linz,
J. Kepler Universitat Linz, 1996. Available:
https://www3.risc.jku.at/publications/download/risc_1355/Rational%20Points%20on%20Conics.pdf
"""
equation = self.equation
if len(self.variables) == 1:
return (list(solveset(equation, self.variables[0], domain=S.Reals))[0],)
elif len(self.variables) == 2:
if self.degree == 2:
coeffs = a, b, c, d, e, f = conic_coeff(self.variables, equation)
if b**2 == 4*a*c:
x_reg, y_reg = self._regular_point_parabola(*coeffs)
else:
x_reg, y_reg = self._regular_point_ellipse(*coeffs)
return x_reg, y_reg
if len(self.variables) == 3:
x, y, z = self.variables
for x_reg in range(-10, 10):
for y_reg in range(-10, 10):
if not solveset(equation.subs({x: x_reg, y: y_reg}), self.variables[2], domain=S.Reals).is_empty:
return (x_reg, y_reg, list(solveset(equation.subs({x: x_reg, y: y_reg})))[0])
if len(self.singular_points()) != 0:
return list(self.singular_points())[0]
raise NotImplementedError()
def _regular_point_parabola(self, a, b, c, d, e, f):
ok = (a, d) != (0, 0) and (c, e) != (0, 0) and b**2 == 4*a*c and (a, c) != (0, 0)
if not ok:
raise ValueError("Rational Point on the conic does not exist")
if a != 0:
d_dash, f_dash = (4*a*e - 2*b*d, 4*a*f - d**2)
if d_dash != 0:
y_reg = -f_dash/d_dash
x_reg = -(d + b*y_reg)/(2*a)
else:
ok = False
elif c != 0:
d_dash, f_dash = (4*c*d - 2*b*e, 4*c*f - e**2)
if d_dash != 0:
x_reg = -f_dash/d_dash
y_reg = -(e + b*x_reg)/(2*c)
else:
ok = False
if ok:
return x_reg, y_reg
else:
raise ValueError("Rational Point on the conic does not exist")
def _regular_point_ellipse(self, a, b, c, d, e, f):
D = 4*a*c - b**2
ok = D
if not ok:
raise ValueError("Rational Point on the conic does not exist")
if a == 0 and c == 0:
K = -1
L = 4*(d*e - b*f)
elif c != 0:
K = D
L = 4*c**2*d**2 - 4*b*c*d*e + 4*a*c*e**2 + 4*b**2*c*f - 16*a*c**2*f
else:
K = D
L = 4*a**2*e**2 - 4*b*a*d*e + 4*b**2*a*f
ok = L != 0 and not(K > 0 and L < 0)
if not ok:
raise ValueError("Rational Point on the conic does not exist")
K = Rational(K).limit_denominator(10**12)
L = Rational(L).limit_denominator(10**12)
k1, k2 = K.p, K.q
l1, l2 = L.p, L.q
g = gcd(k2, l2)
a1 = (l2*k2)/g
b1 = (k1*l2)/g
c1 = -(l1*k2)/g
a2 = sign(a1)*core(abs(a1), 2)
r1 = sqrt(a1/a2)
b2 = sign(b1)*core(abs(b1), 2)
r2 = sqrt(b1/b2)
c2 = sign(c1)*core(abs(c1), 2)
r3 = sqrt(c1/c2)
g = gcd(gcd(a2, b2), c2)
a2 = a2/g
b2 = b2/g
c2 = c2/g
g1 = gcd(a2, b2)
a2 = a2/g1
b2 = b2/g1
c2 = c2*g1
g2 = gcd(a2,c2)
a2 = a2/g2
b2 = b2*g2
c2 = c2/g2
g3 = gcd(b2, c2)
a2 = a2*g3
b2 = b2/g3
c2 = c2/g3
x, y, z = symbols("x y z")
eq = a2*x**2 + b2*y**2 + c2*z**2
solutions = diophantine(eq)
if len(solutions) == 0:
raise ValueError("Rational Point on the conic does not exist")
flag = False
for sol in solutions:
syms = Tuple(*sol).free_symbols
rep = {s: 3 for s in syms}
sol_z = sol[2]
if sol_z == 0:
flag = True
continue
if not isinstance(sol_z, (int, Integer)):
syms_z = sol_z.free_symbols
if len(syms_z) == 1:
p = next(iter(syms_z))
p_values = Complement(S.Integers, solveset(Eq(sol_z, 0), p, S.Integers))
rep[p] = next(iter(p_values))
if len(syms_z) == 2:
p, q = list(ordered(syms_z))
for i in S.Integers:
subs_sol_z = sol_z.subs(p, i)
q_values = Complement(S.Integers, solveset(Eq(subs_sol_z, 0), q, S.Integers))
if not q_values.is_empty:
rep[p] = i
rep[q] = next(iter(q_values))
break
if len(syms) != 0:
x, y, z = tuple(s.subs(rep) for s in sol)
else:
x, y, z = sol
flag = False
break
if flag:
raise ValueError("Rational Point on the conic does not exist")
x = (x*g3)/r1
y = (y*g2)/r2
z = (z*g1)/r3
x = x/z
y = y/z
if a == 0 and c == 0:
x_reg = (x + y - 2*e)/(2*b)
y_reg = (x - y - 2*d)/(2*b)
elif c != 0:
x_reg = (x - 2*d*c + b*e)/K
y_reg = (y - b*x_reg - e)/(2*c)
else:
y_reg = (x - 2*e*a + b*d)/K
x_reg = (y - b*y_reg - d)/(2*a)
return x_reg, y_reg
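# A rough sanity check for the ellipse-type branch above (illustrative sketch
# only): ``regular_point`` reduces the conic to a ternary quadratic form solved
# with ``diophantine``, and the returned point is expected to satisfy the
# original equation exactly.
# >>> from sympy.abc import x, y
# >>> circ = ImplicitRegion((x, y), x**2 + y**2 - 16)
# >>> px, py = circ.regular_point()
# >>> circ.equation.subs({x: px, y: py}) == 0
# True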
def singular_points(self):
"""
Returns a set of singular points of the region.
The singular points are those points on the region
where all partial derivatives vanish.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.vector import ImplicitRegion
>>> I = ImplicitRegion((x, y), (y-1)**2 -x**3 + 2*x**2 -x)
>>> I.singular_points()
{(1, 1)}
"""
eq_list = [self.equation]
for var in self.variables:
eq_list += [diff(self.equation, var)]
return nonlinsolve(eq_list, list(self.variables))
def multiplicity(self, point):
"""
Returns the multiplicity of a singular point on the region.
A singular point (x, y) of the region is said to be of multiplicity m
if all the partial derivatives up to order m - 1 vanish there.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.vector import ImplicitRegion
>>> I = ImplicitRegion((x, y, z), x**2 + y**3 - z**4)
>>> I.singular_points()
{(0, 0, 0)}
>>> I.multiplicity((0, 0, 0))
2
"""
if isinstance(point, Point):
point = point.args
modified_eq = self.equation
for i, var in enumerate(self.variables):
modified_eq = modified_eq.subs(var, var + point[i])
modified_eq = expand(modified_eq)
if len(modified_eq.args) != 0:
terms = modified_eq.args
m = min([total_degree(term) for term in terms])
else:
terms = modified_eq
m = total_degree(terms)
return m
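# Illustrative sketch of the computation above: after shifting the point to
# the origin, the multiplicity is the least total degree among the terms of
# the expanded equation.  For the nodal cubic y**2 - x**2*(x + 1) the origin
# is a double point:
# >>> from sympy.abc import x, y
# >>> nodal = ImplicitRegion((x, y), y**2 - x**2*(x + 1))
# >>> nodal.multiplicity((0, 0))
# 2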
def rational_parametrization(self, parameters=('t', 's'), reg_point=None):
"""
Returns a rational parametrization of the implicit region.
Examples
========
>>> from sympy import Eq
>>> from sympy.abc import x, y, z, s, t
>>> from sympy.vector import ImplicitRegion
>>> parabola = ImplicitRegion((x, y), y**2 - 4*x)
>>> parabola.rational_parametrization()
(4/t**2, 4/t)
>>> circle = ImplicitRegion((x, y), Eq(x**2 + y**2, 4))
>>> circle.rational_parametrization()
(4*t/(t**2 + 1), 4*t**2/(t**2 + 1) - 2)
>>> I = ImplicitRegion((x, y), x**3 + x**2 - y**2)
>>> I.rational_parametrization()
(t**2 - 1, t*(t**2 - 1))
>>> cubic_curve = ImplicitRegion((x, y), x**3 + x**2 - y**2)
>>> cubic_curve.rational_parametrization(parameters=(t))
(t**2 - 1, t*(t**2 - 1))
>>> sphere = ImplicitRegion((x, y, z), x**2 + y**2 + z**2 - 4)
>>> sphere.rational_parametrization(parameters=(t, s))
(-2 + 4/(s**2 + t**2 + 1), 4*s/(s**2 + t**2 + 1), 4*t/(s**2 + t**2 + 1))
For some conics, regular_point() is unable to find a point on the curve.
To calculate the parametric representation in such cases, the user needs
to determine a point on the region and pass it using reg_point.
>>> c = ImplicitRegion((x, y), (x - 1/2)**2 + (y)**2 - (1/4)**2)
>>> c.rational_parametrization(reg_point=(3/4, 0))
(0.75 - 0.5/(t**2 + 1), -0.5*t/(t**2 + 1))
References
==========
- Christoph M. Hoffmann, "Conversion Methods between Parametric and
Implicit Curves and Surfaces", Purdue e-Pubs, 1990. Available:
https://docs.lib.purdue.edu/cgi/viewcontent.cgi?article=1827&context=cstech
"""
equation = self.equation
degree = self.degree
if degree == 1:
if len(self.variables) == 1:
return (equation,)
elif len(self.variables) == 2:
x, y = self.variables
y_par = list(solveset(equation, y))[0]
return x, y_par
else:
raise NotImplementedError()
point = ()
# Finding the (n - 1) fold point of the monoid of degree n
if degree == 2:
# For degree 2 curves, either a regular point or a singular point can be used.
if reg_point is not None:
# Using point provided by the user as regular point
point = reg_point
else:
if len(self.singular_points()) != 0:
point = list(self.singular_points())[0]
else:
point = self.regular_point()
if len(self.singular_points()) != 0:
singular_points = self.singular_points()
for spoint in singular_points:
syms = Tuple(*spoint).free_symbols
rep = {s: 2 for s in syms}
if len(syms) != 0:
spoint = tuple(s.subs(rep) for s in spoint)
if self.multiplicity(spoint) == degree - 1:
point = spoint
break
if len(point) == 0:
# The region is not a monoid
raise NotImplementedError()
modified_eq = equation
# Shifting the region such that fold point moves to origin
for i, var in enumerate(self.variables):
modified_eq = modified_eq.subs(var, var + point[i])
modified_eq = expand(modified_eq)
hn = hn_1 = 0
for term in modified_eq.args:
if total_degree(term) == degree:
hn += term
else:
hn_1 += term
hn_1 = -1*hn_1
if not isinstance(parameters, tuple):
parameters = (parameters,)
if len(self.variables) == 2:
parameter1 = parameters[0]
if parameter1 == 's':
# To avoid name conflict between parameters
s = _symbol('s_', real=True)
else:
s = _symbol('s', real=True)
t = _symbol(parameter1, real=True)
hn = hn.subs({self.variables[0]: s, self.variables[1]: t})
hn_1 = hn_1.subs({self.variables[0]: s, self.variables[1]: t})
x_par = (s*(hn_1/hn)).subs(s, 1) + point[0]
y_par = (t*(hn_1/hn)).subs(s, 1) + point[1]
return x_par, y_par
elif len(self.variables) == 3:
parameter1, parameter2 = parameters
if 'r' in parameters:
# To avoid name conflict between parameters
r = _symbol('r_', real=True)
else:
r = _symbol('r', real=True)
s = _symbol(parameter2, real=True)
t = _symbol(parameter1, real=True)
hn = hn.subs({self.variables[0]: r, self.variables[1]: s, self.variables[2]: t})
hn_1 = hn_1.subs({self.variables[0]: r, self.variables[1]: s, self.variables[2]: t})
x_par = (r*(hn_1/hn)).subs(r, 1) + point[0]
y_par = (s*(hn_1/hn)).subs(r, 1) + point[1]
z_par = (t*(hn_1/hn)).subs(r, 1) + point[2]
return x_par, y_par, z_par
raise NotImplementedError()
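# A hedged sanity check of the parametrization computed above: substituting
# the parametric expressions back into the implicit equation should simplify
# to zero (shown here for the nodal cubic used in the docstring).
# >>> from sympy import simplify
# >>> from sympy.abc import x, y, t
# >>> crv = ImplicitRegion((x, y), x**3 + x**2 - y**2)
# >>> xp, yp = crv.rational_parametrization(t)
# >>> simplify(crv.equation.subs({x: xp, y: yp}))
# 0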
def conic_coeff(variables, equation):
if total_degree(equation) != 2:
raise ValueError()
x = variables[0]
y = variables[1]
equation = expand(equation)
a = equation.coeff(x**2)
b = equation.coeff(x*y)
c = equation.coeff(y**2)
d = equation.coeff(x, 1).coeff(y, 0)
e = equation.coeff(y, 1).coeff(x, 0)
f = equation.coeff(x, 0).coeff(y, 0)
return a, b, c, d, e, f
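# Illustrative example of the helper above, which reads off (a, b, c, d, e, f)
# from a*x**2 + b*x*y + c*y**2 + d*x + e*y + f:
# >>> from sympy.abc import x, y
# >>> conic_coeff((x, y), x**2 + 2*x*y + 3*y**2 + 4*x + 5*y + 6)
# (1, 2, 3, 4, 5, 6)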
|
dd8c98d1f90535e951ea5762f176115a990512fa98e7eac583fbd3dcd16eb5ed | from functools import singledispatch
from sympy.core.numbers import pi
from sympy.functions.elementary.trigonometric import tan
from sympy.simplify import trigsimp
from sympy.core import Basic, Tuple
from sympy.core.symbol import _symbol
from sympy.solvers import solve
from sympy.geometry import Point, Segment, Curve, Ellipse, Polygon
from sympy.vector import ImplicitRegion
class ParametricRegion(Basic):
"""
Represents a parametric region in space.
Examples
========
>>> from sympy import cos, sin, pi
>>> from sympy.abc import r, theta, t, a, b, x, y
>>> from sympy.vector import ParametricRegion
>>> ParametricRegion((t, t**2), (t, -1, 2))
ParametricRegion((t, t**2), (t, -1, 2))
>>> ParametricRegion((x, y), (x, 3, 4), (y, 5, 6))
ParametricRegion((x, y), (x, 3, 4), (y, 5, 6))
>>> ParametricRegion((r*cos(theta), r*sin(theta)), (r, -2, 2), (theta, 0, pi))
ParametricRegion((r*cos(theta), r*sin(theta)), (r, -2, 2), (theta, 0, pi))
>>> ParametricRegion((a*cos(t), b*sin(t)), t)
ParametricRegion((a*cos(t), b*sin(t)), t)
>>> circle = ParametricRegion((r*cos(theta), r*sin(theta)), r, (theta, 0, pi))
>>> circle.parameters
(r, theta)
>>> circle.definition
(r*cos(theta), r*sin(theta))
>>> circle.limits
{theta: (0, pi)}
The dimension of a parametric region determines whether a region is a curve, a surface,
or a volume region. It does not represent its dimension in space.
>>> circle.dimensions
1
Parameters
==========
definition : tuple to define base scalars in terms of parameters.
bounds : Parameter or a tuple of length 3 to define parameter and corresponding lower and upper bound.
"""
def __new__(cls, definition, *bounds):
parameters = ()
limits = {}
if not isinstance(bounds, Tuple):
bounds = Tuple(*bounds)
for bound in bounds:
if isinstance(bound, (tuple, Tuple)):
if len(bound) != 3:
raise ValueError("Tuple should be in the form (parameter, lowerbound, upperbound)")
parameters += (bound[0],)
limits[bound[0]] = (bound[1], bound[2])
else:
parameters += (bound,)
if not isinstance(definition, (tuple, Tuple)):
definition = (definition,)
obj = super().__new__(cls, Tuple(*definition), *bounds)
obj._parameters = parameters
obj._limits = limits
return obj
@property
def definition(self):
return self.args[0]
@property
def limits(self):
return self._limits
@property
def parameters(self):
return self._parameters
@property
def dimensions(self):
return len(self.limits)
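# Illustrative sketch of how bounds are parsed by ``__new__`` above: a bare
# symbol becomes a parameter with no recorded limits, while a 3-tuple also
# records its lower and upper bounds (only bounded parameters count towards
# ``dimensions``).
# >>> from sympy.abc import u, v
# >>> reg = ParametricRegion((u*v, u + v), u, (v, 0, 1))
# >>> reg.parameters
# (u, v)
# >>> reg.limits
# {v: (0, 1)}
# >>> reg.dimensions
# 1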
@singledispatch
def parametric_region_list(reg):
"""
Returns a list of ParametricRegion objects representing the geometric region.
Examples
========
>>> from sympy.abc import t
>>> from sympy.vector import parametric_region_list
>>> from sympy.geometry import Point, Curve, Ellipse, Segment, Polygon
>>> p = Point(2, 5)
>>> parametric_region_list(p)
[ParametricRegion((2, 5))]
>>> c = Curve((t**3, 4*t), (t, -3, 4))
>>> parametric_region_list(c)
[ParametricRegion((t**3, 4*t), (t, -3, 4))]
>>> e = Ellipse(Point(1, 3), 2, 3)
>>> parametric_region_list(e)
[ParametricRegion((2*cos(t) + 1, 3*sin(t) + 3), (t, 0, 2*pi))]
>>> s = Segment(Point(1, 3), Point(2, 6))
>>> parametric_region_list(s)
[ParametricRegion((t + 1, 3*t + 3), (t, 0, 1))]
>>> p1, p2, p3, p4 = [(0, 1), (2, -3), (5, 3), (-2, 3)]
>>> poly = Polygon(p1, p2, p3, p4)
>>> parametric_region_list(poly)
[ParametricRegion((2*t, 1 - 4*t), (t, 0, 1)), ParametricRegion((3*t + 2, 6*t - 3), (t, 0, 1)),\
ParametricRegion((5 - 7*t, 3), (t, 0, 1)), ParametricRegion((2*t - 2, 3 - 2*t), (t, 0, 1))]
"""
raise ValueError("SymPy cannot determine parametric representation of the region.")
@parametric_region_list.register(Point)
def _(obj):
return [ParametricRegion(obj.args)]
@parametric_region_list.register(Curve) # type: ignore
def _(obj):
definition = obj.arbitrary_point(obj.parameter).args
bounds = obj.limits
return [ParametricRegion(definition, bounds)]
@parametric_region_list.register(Ellipse) # type: ignore
def _(obj, parameter='t'):
definition = obj.arbitrary_point(parameter).args
t = _symbol(parameter, real=True)
bounds = (t, 0, 2*pi)
return [ParametricRegion(definition, bounds)]
@parametric_region_list.register(Segment) # type: ignore
def _(obj, parameter='t'):
t = _symbol(parameter, real=True)
definition = obj.arbitrary_point(t).args
for i in range(0, 3):
lower_bound = solve(definition[i] - obj.points[0].args[i], t)
upper_bound = solve(definition[i] - obj.points[1].args[i], t)
if len(lower_bound) == 1 and len(upper_bound) == 1:
bounds = t, lower_bound[0], upper_bound[0]
break
definition_tuple = obj.arbitrary_point(parameter).args
return [ParametricRegion(definition_tuple, bounds)]
@parametric_region_list.register(Polygon) # type: ignore
def _(obj, parameter='t'):
l = [parametric_region_list(side, parameter)[0] for side in obj.sides]
return l
@parametric_region_list.register(ImplicitRegion) # type: ignore
def _(obj, parameters=('t', 's')):
definition = obj.rational_parametrization(parameters)
bounds = []
for i in range(len(obj.variables) - 1):
# Each parameter is replaced by tan(parameter/2) (half-angle substitution) to simplify integration
parameter = _symbol(parameters[i], real=True)
definition = [trigsimp(elem.subs(parameter, tan(parameter/2))) for elem in definition]
bounds.append((parameter, 0, 2*pi),)
definition = Tuple(*definition)
return [ParametricRegion(definition, *bounds)]
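# Hedged illustration of the registration above: the rational parametrization
# of an implicit circle is rewritten with the tan(t/2) substitution, so each
# remaining parameter runs over (0, 2*pi).  The exact printed form of the
# definition depends on the simplifier, but the limits are fixed:
# >>> from sympy.abc import x, y
# >>> circle = ImplicitRegion((x, y), x**2 + y**2 - 4)
# >>> parametric_region_list(circle)[0].limits
# {t: (0, 2*pi)}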
|
709c41c2c3486faef9b0250388667b8268309d4b484146ca195ed8c1073dc387 | from typing import Type
from sympy.vector.basisdependent import (BasisDependent, BasisDependentAdd,
BasisDependentMul, BasisDependentZero)
from sympy.core import S, Pow
from sympy.core.expr import AtomicExpr
from sympy.matrices.immutable import ImmutableDenseMatrix as Matrix
import sympy.vector
class Dyadic(BasisDependent):
"""
Super class for all Dyadic-classes.
References
==========
.. [1] https://en.wikipedia.org/wiki/Dyadic_tensor
.. [2] Kane, T., Levinson, D. Dynamics Theory and Applications. 1985
McGraw-Hill
"""
_op_priority = 13.0
_expr_type = None # type: Type[Dyadic]
_mul_func = None # type: Type[Dyadic]
_add_func = None # type: Type[Dyadic]
_zero_func = None # type: Type[Dyadic]
_base_func = None # type: Type[Dyadic]
zero = None # type: DyadicZero
@property
def components(self):
"""
Returns the components of this dyadic in the form of a
Python dictionary mapping BaseDyadic instances to the
corresponding measure numbers.
"""
# The '_components' attribute is defined according to the
# subclass of Dyadic the instance belongs to.
return self._components
def dot(self, other):
"""
Returns the dot product (also called inner product) of this
Dyadic, with another Dyadic or Vector.
If 'other' is a Dyadic, this returns a Dyadic. Else, it returns
a Vector (unless an error is encountered).
Parameters
==========
other : Dyadic/Vector
The other Dyadic or Vector to take the inner product with
Examples
========
>>> from sympy.vector import CoordSys3D
>>> N = CoordSys3D('N')
>>> D1 = N.i.outer(N.j)
>>> D2 = N.j.outer(N.j)
>>> D1.dot(D2)
(N.i|N.j)
>>> D1.dot(N.j)
N.i
"""
Vector = sympy.vector.Vector
if isinstance(other, BasisDependentZero):
return Vector.zero
elif isinstance(other, Vector):
outvec = Vector.zero
for k, v in self.components.items():
vect_dot = k.args[1].dot(other)
outvec += vect_dot * v * k.args[0]
return outvec
elif isinstance(other, Dyadic):
outdyad = Dyadic.zero
for k1, v1 in self.components.items():
for k2, v2 in other.components.items():
vect_dot = k1.args[1].dot(k2.args[0])
outer_product = k1.args[0].outer(k2.args[1])
outdyad += vect_dot * v1 * v2 * outer_product
return outdyad
else:
raise TypeError("Inner product is not defined for " +
str(type(other)) + " and Dyadics.")
def __and__(self, other):
return self.dot(other)
__and__.__doc__ = dot.__doc__
def cross(self, other):
"""
Returns the cross product between this Dyadic, and a Vector, as a
Vector instance.
Parameters
==========
other : Vector
The Vector that we are crossing this Dyadic with
Examples
========
>>> from sympy.vector import CoordSys3D
>>> N = CoordSys3D('N')
>>> d = N.i.outer(N.i)
>>> d.cross(N.j)
(N.i|N.k)
"""
Vector = sympy.vector.Vector
if other == Vector.zero:
return Dyadic.zero
elif isinstance(other, Vector):
outdyad = Dyadic.zero
for k, v in self.components.items():
cross_product = k.args[1].cross(other)
outer = k.args[0].outer(cross_product)
outdyad += v * outer
return outdyad
else:
raise TypeError(str(type(other)) + " not supported for " +
"cross with dyadics")
def __xor__(self, other):
return self.cross(other)
__xor__.__doc__ = cross.__doc__
def to_matrix(self, system, second_system=None):
"""
Returns the matrix form of the dyadic with respect to one or two
coordinate systems.
Parameters
==========
system : CoordSys3D
The coordinate system that the rows and columns of the matrix
correspond to. If a second system is provided, this
only corresponds to the rows of the matrix.
second_system : CoordSys3D, optional, default=None
The coordinate system that the columns of the matrix correspond
to.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> N = CoordSys3D('N')
>>> v = N.i + 2*N.j
>>> d = v.outer(N.i)
>>> d.to_matrix(N)
Matrix([
[1, 0, 0],
[2, 0, 0],
[0, 0, 0]])
>>> from sympy import Symbol
>>> q = Symbol('q')
>>> P = N.orient_new_axis('P', q, N.k)
>>> d.to_matrix(N, P)
Matrix([
[ cos(q), -sin(q), 0],
[2*cos(q), -2*sin(q), 0],
[ 0, 0, 0]])
"""
if second_system is None:
second_system = system
return Matrix([i.dot(self).dot(j) for i in system for j in
second_system]).reshape(3, 3)
def _div_helper(one, other):
""" Helper for division involving dyadics """
if isinstance(one, Dyadic) and isinstance(other, Dyadic):
raise TypeError("Cannot divide two dyadics")
elif isinstance(one, Dyadic):
return DyadicMul(one, Pow(other, S.NegativeOne))
else:
raise TypeError("Cannot divide by a dyadic")
class BaseDyadic(Dyadic, AtomicExpr):
"""
Class to denote a base dyadic tensor component.
"""
def __new__(cls, vector1, vector2):
Vector = sympy.vector.Vector
BaseVector = sympy.vector.BaseVector
VectorZero = sympy.vector.VectorZero
# Verify arguments
if not isinstance(vector1, (BaseVector, VectorZero)) or \
not isinstance(vector2, (BaseVector, VectorZero)):
raise TypeError("BaseDyadic cannot be composed of non-base " +
"vectors")
# Handle special case of zero vector
elif vector1 == Vector.zero or vector2 == Vector.zero:
return Dyadic.zero
# Initialize instance
obj = super().__new__(cls, vector1, vector2)
obj._base_instance = obj
obj._measure_number = 1
obj._components = {obj: S.One}
obj._sys = vector1._sys
obj._pretty_form = ('(' + vector1._pretty_form + '|' +
vector2._pretty_form + ')')
obj._latex_form = ('(' + vector1._latex_form + "{|}" +
vector2._latex_form + ')')
return obj
def _sympystr(self, printer):
return "({}|{})".format(
printer._print(self.args[0]), printer._print(self.args[1]))
class DyadicMul(BasisDependentMul, Dyadic):
""" Products of scalars and BaseDyadics """
def __new__(cls, *args, **options):
obj = BasisDependentMul.__new__(cls, *args, **options)
return obj
@property
def base_dyadic(self):
""" The BaseDyadic involved in the product. """
return self._base_instance
@property
def measure_number(self):
""" The scalar expression involved in the definition of
this DyadicMul.
"""
return self._measure_number
class DyadicAdd(BasisDependentAdd, Dyadic):
""" Class to hold dyadic sums """
def __new__(cls, *args, **options):
obj = BasisDependentAdd.__new__(cls, *args, **options)
return obj
def _sympystr(self, printer):
items = list(self.components.items())
items.sort(key=lambda x: x[0].__str__())
return " + ".join(printer._print(k * v) for k, v in items)
class DyadicZero(BasisDependentZero, Dyadic):
"""
Class to denote a zero dyadic
"""
_op_priority = 13.1
_pretty_form = '(0|0)'
_latex_form = r'(\mathbf{\hat{0}}|\mathbf{\hat{0}})'
def __new__(cls):
obj = BasisDependentZero.__new__(cls)
return obj
Dyadic._expr_type = Dyadic
Dyadic._mul_func = DyadicMul
Dyadic._add_func = DyadicAdd
Dyadic._zero_func = DyadicZero
Dyadic._base_func = BaseDyadic
Dyadic.zero = DyadicZero()
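# A short, hedged usage sketch for the classes defined above: outer products
# of base vectors build BaseDyadic instances, scalar multiples and sums give
# DyadicMul/DyadicAdd, and ``to_matrix`` recovers the familiar 3x3 picture.
# >>> from sympy.vector import CoordSys3D
# >>> N = CoordSys3D('N')
# >>> d = 2*N.i.outer(N.j) + 3*N.j.outer(N.k)
# >>> d.dot(N.j)
# 2*N.i
# >>> d.to_matrix(N)
# Matrix([
# [0, 2, 0],
# [0, 0, 3],
# [0, 0, 0]])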
|
e7b10e8c711ffb0f6d2145b527c8f9a342f277c968bfd3b679bb11230debd1bc | """Parabolic geometrical entity.
Contains
* Parabola
"""
from sympy.core import S
from sympy.core.sorting import ordered
from sympy.core.symbol import _symbol, symbols
from sympy.geometry.entity import GeometryEntity, GeometrySet
from sympy.geometry.point import Point, Point2D
from sympy.geometry.line import Line, Line2D, Ray2D, Segment2D, LinearEntity3D
from sympy.geometry.ellipse import Ellipse
from sympy.functions import sign
from sympy.simplify import simplify
from sympy.solvers.solvers import solve
class Parabola(GeometrySet):
"""A parabolic GeometryEntity.
A parabola is declared with a point, called the 'focus', and
a line, called the 'directrix'.
Only vertical or horizontal parabolas are currently supported.
Parameters
==========
focus : Point
Default value is Point(0, 0)
directrix : Line
Attributes
==========
focus
directrix
axis of symmetry
focal length
p parameter
vertex
eccentricity
Raises
======
ValueError
When `focus` is not a two dimensional point.
When `focus` is a point of directrix.
NotImplementedError
When `directrix` is neither horizontal nor vertical.
Examples
========
>>> from sympy import Parabola, Point, Line
>>> p1 = Parabola(Point(0, 0), Line(Point(5, 8), Point(7,8)))
>>> p1.focus
Point2D(0, 0)
>>> p1.directrix
Line2D(Point2D(5, 8), Point2D(7, 8))
"""
def __new__(cls, focus=None, directrix=None, **kwargs):
if focus:
focus = Point(focus, dim=2)
else:
focus = Point(0, 0)
directrix = Line(directrix)
if (directrix.slope != 0 and directrix.slope != S.Infinity):
raise NotImplementedError('The directrix must be a horizontal'
' or vertical line')
if directrix.contains(focus):
raise ValueError('The focus must not be a point of directrix')
return GeometryEntity.__new__(cls, focus, directrix, **kwargs)
@property
def ambient_dimension(self):
"""Returns the ambient dimension of parabola.
Returns
=======
ambient_dimension : integer
Examples
========
>>> from sympy import Parabola, Point, Line
>>> f1 = Point(0, 0)
>>> p1 = Parabola(f1, Line(Point(5, 8), Point(7, 8)))
>>> p1.ambient_dimension
2
"""
return 2
@property
def axis_of_symmetry(self):
"""The axis of symmetry of the parabola.
Returns
=======
axis_of_symmetry : Line
See Also
========
sympy.geometry.line.Line
Examples
========
>>> from sympy import Parabola, Point, Line
>>> p1 = Parabola(Point(0, 0), Line(Point(5, 8), Point(7, 8)))
>>> p1.axis_of_symmetry
Line2D(Point2D(0, 0), Point2D(0, 1))
"""
return self.directrix.perpendicular_line(self.focus)
@property
def directrix(self):
"""The directrix of the parabola.
Returns
=======
directrix : Line
See Also
========
sympy.geometry.line.Line
Examples
========
>>> from sympy import Parabola, Point, Line
>>> l1 = Line(Point(5, 8), Point(7, 8))
>>> p1 = Parabola(Point(0, 0), l1)
>>> p1.directrix
Line2D(Point2D(5, 8), Point2D(7, 8))
"""
return self.args[1]
@property
def eccentricity(self):
"""The eccentricity of the parabola.
Returns
=======
eccentricity : number
A parabola may also be characterized as a conic section with an
eccentricity of 1. As a consequence of this, all parabolas are
similar, meaning that while they can be different sizes,
they are all the same shape.
See Also
========
https://en.wikipedia.org/wiki/Parabola
Examples
========
>>> from sympy import Parabola, Point, Line
>>> p1 = Parabola(Point(0, 0), Line(Point(5, 8), Point(7, 8)))
>>> p1.eccentricity
1
Notes
=====
The eccentricity for every Parabola is 1 by definition.
"""
return S.One
def equation(self, x='x', y='y'):
"""The equation of the parabola.
Parameters
==========
x : str, optional
Label for the x-axis. Default value is 'x'.
y : str, optional
Label for the y-axis. Default value is 'y'.
Returns
=======
equation : SymPy expression
Examples
========
>>> from sympy import Parabola, Point, Line
>>> p1 = Parabola(Point(0, 0), Line(Point(5, 8), Point(7, 8)))
>>> p1.equation()
-x**2 - 16*y + 64
>>> p1.equation('f')
-f**2 - 16*y + 64
>>> p1.equation(y='z')
-x**2 - 16*z + 64
"""
x = _symbol(x, real=True)
y = _symbol(y, real=True)
if (self.axis_of_symmetry.slope == 0):
t1 = 4 * (self.p_parameter) * (x - self.vertex.x)
t2 = (y - self.vertex.y)**2
else:
t1 = 4 * (self.p_parameter) * (y - self.vertex.y)
t2 = (x - self.vertex.x)**2
return t1 - t2
@property
def focal_length(self):
"""The focal length of the parabola.
Returns
=======
focal_length : number or symbolic expression
Notes
=====
The distance between the vertex and the focus
(or the vertex and directrix), measured along the axis
of symmetry, is the "focal length".
See Also
========
https://en.wikipedia.org/wiki/Parabola
Examples
========
>>> from sympy import Parabola, Point, Line
>>> p1 = Parabola(Point(0, 0), Line(Point(5, 8), Point(7, 8)))
>>> p1.focal_length
4
"""
distance = self.directrix.distance(self.focus)
focal_length = distance/2
return focal_length
@property
def focus(self):
"""The focus of the parabola.
Returns
=======
focus : Point
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Parabola, Point, Line
>>> f1 = Point(0, 0)
>>> p1 = Parabola(f1, Line(Point(5, 8), Point(7, 8)))
>>> p1.focus
Point2D(0, 0)
"""
return self.args[0]
def intersection(self, o):
"""The intersection of the parabola and another geometrical entity `o`.
Parameters
==========
o : GeometryEntity, LinearEntity
Returns
=======
intersection : list of GeometryEntity objects
Examples
========
>>> from sympy import Parabola, Point, Ellipse, Line, Segment
>>> p1 = Point(0,0)
>>> l1 = Line(Point(1, -2), Point(-1,-2))
>>> parabola1 = Parabola(p1, l1)
>>> parabola1.intersection(Ellipse(Point(0, 0), 2, 5))
[Point2D(-2, 0), Point2D(2, 0)]
>>> parabola1.intersection(Line(Point(-7, 3), Point(12, 3)))
[Point2D(-4, 3), Point2D(4, 3)]
>>> parabola1.intersection(Segment((-12, -65), (14, -68)))
[]
"""
x, y = symbols('x y', real=True)
parabola_eq = self.equation()
if isinstance(o, Parabola):
if o in self:
return [o]
else:
return list(ordered([Point(i) for i in solve([parabola_eq, o.equation()], [x, y])]))
elif isinstance(o, Point2D):
if simplify(parabola_eq.subs([(x, o._args[0]), (y, o._args[1])])) == 0:
return [o]
else:
return []
elif isinstance(o, (Segment2D, Ray2D)):
result = solve([parabola_eq, Line2D(o.points[0], o.points[1]).equation()], [x, y])
return list(ordered([Point2D(i) for i in result if i in o]))
elif isinstance(o, (Line2D, Ellipse)):
return list(ordered([Point2D(i) for i in solve([parabola_eq, o.equation()], [x, y])]))
elif isinstance(o, LinearEntity3D):
raise TypeError('Entity must be two dimensional, not three dimensional')
else:
raise TypeError('Unsupported type of argument')
@property
def p_parameter(self):
"""P is a parameter of parabola.
Returns
=======
p : number or symbolic expression
Notes
=====
The absolute value of p is the focal length. The sign on p tells
which way the parabola faces. Vertical parabolas that open up
and horizontal parabolas that open right give a positive value for p.
Vertical parabolas that open down and horizontal parabolas that open left
give a negative value for p.
See Also
========
http://www.sparknotes.com/math/precalc/conicsections/section2.rhtml
Examples
========
>>> from sympy import Parabola, Point, Line
>>> p1 = Parabola(Point(0, 0), Line(Point(5, 8), Point(7, 8)))
>>> p1.p_parameter
-4
"""
if self.axis_of_symmetry.slope == 0:
x = self.directrix.coefficients[2]
p = sign(self.focus.args[0] + x)
else:
y = self.directrix.coefficients[2]
p = sign(self.focus.args[1] + y)
return p * self.focal_length
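# Hedged illustration of the sign convention described above: moving the
# directrix below the focus gives an upward-opening parabola and a positive p
# (compare with the docstring example, where the directrix lies above the
# focus and p is -4).
# >>> from sympy import Parabola, Point, Line
# >>> up = Parabola(Point(0, 0), Line(Point(5, -8), Point(7, -8)))
# >>> up.p_parameter
# 4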
@property
def vertex(self):
"""The vertex of the parabola.
Returns
=======
vertex : Point
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Parabola, Point, Line
>>> p1 = Parabola(Point(0, 0), Line(Point(5, 8), Point(7, 8)))
>>> p1.vertex
Point2D(0, 4)
"""
focus = self.focus
if (self.axis_of_symmetry.slope == 0):
vertex = Point(focus.args[0] - self.p_parameter, focus.args[1])
else:
vertex = Point(focus.args[0], focus.args[1] - self.p_parameter)
return vertex
|
08daafa3b014295407303b61b1774dc831445e4b36bb5d85b1395b0a5a019039 | """Curves in 2-dimensional Euclidean space.
Contains
========
Curve
"""
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.core import sympify, diff
from sympy.core.containers import Tuple
from sympy.core.symbol import _symbol
from sympy.geometry.entity import GeometryEntity, GeometrySet
from sympy.geometry.point import Point
from sympy.integrals import integrate
from sympy.utilities.iterables import is_sequence
from mpmath.libmp.libmpf import prec_to_dps
class Curve(GeometrySet):
"""A curve in space.
A curve is defined by parametric functions for the coordinates, a
parameter and the lower and upper bounds for the parameter value.
Parameters
==========
function : list of functions
limits : 3-tuple
Function parameter and lower and upper bounds.
Attributes
==========
functions
parameter
limits
Raises
======
ValueError
When `functions` are specified incorrectly.
When `limits` are specified incorrectly.
Examples
========
>>> from sympy import sin, cos, interpolate
>>> from sympy.abc import t, a
>>> from sympy.geometry import Curve
>>> C = Curve((sin(t), cos(t)), (t, 0, 2))
>>> C.functions
(sin(t), cos(t))
>>> C.limits
(t, 0, 2)
>>> C.parameter
t
>>> C = Curve((t, interpolate([1, 4, 9, 16], t)), (t, 0, 1)); C
Curve((t, t**2), (t, 0, 1))
>>> C.subs(t, 4)
Point2D(4, 16)
>>> C.arbitrary_point(a)
Point2D(a, a**2)
See Also
========
sympy.core.function.Function
sympy.polys.polyfuncs.interpolate
"""
def __new__(cls, function, limits):
fun = sympify(function)
if not is_sequence(fun) or len(fun) != 2:
raise ValueError("Function argument should be (x(t), y(t)) "
"but got %s" % str(function))
if not is_sequence(limits) or len(limits) != 3:
raise ValueError("Limit argument should be (t, tmin, tmax) "
"but got %s" % str(limits))
return GeometryEntity.__new__(cls, Tuple(*fun), Tuple(*limits))
def __call__(self, f):
return self.subs(self.parameter, f)
def _eval_subs(self, old, new):
if old == self.parameter:
return Point(*[f.subs(old, new) for f in self.functions])
def _eval_evalf(self, prec=15, **options):
f, (t, a, b) = self.args
dps = prec_to_dps(prec)
f = tuple([i.evalf(n=dps, **options) for i in f])
a, b = [i.evalf(n=dps, **options) for i in (a, b)]
return self.func(f, (t, a, b))
def arbitrary_point(self, parameter='t'):
"""A parameterized point on the curve.
Parameters
==========
parameter : str or Symbol, optional
Default value is 't'.
If None or self.parameter is given, the Curve's own parameter is used;
otherwise the provided symbol is used.
Returns
=======
Point :
Returns a point in parametric form.
Raises
======
ValueError
When `parameter` already appears in the functions.
Examples
========
>>> from sympy import Symbol
>>> from sympy.abc import s
>>> from sympy.geometry import Curve
>>> C = Curve([2*s, s**2], (s, 0, 2))
>>> C.arbitrary_point()
Point2D(2*t, t**2)
>>> C.arbitrary_point(C.parameter)
Point2D(2*s, s**2)
>>> C.arbitrary_point(None)
Point2D(2*s, s**2)
>>> C.arbitrary_point(Symbol('a'))
Point2D(2*a, a**2)
See Also
========
sympy.geometry.point.Point
"""
if parameter is None:
return Point(*self.functions)
tnew = _symbol(parameter, self.parameter, real=True)
t = self.parameter
if (tnew.name != t.name and
tnew.name in (f.name for f in self.free_symbols)):
raise ValueError('Symbol %s already appears in object '
'and cannot be used as a parameter.' % tnew.name)
return Point(*[w.subs(t, tnew) for w in self.functions])
@property
def free_symbols(self):
"""Return a set of symbols other than the bound symbols used to
parametrically define the Curve.
Returns
=======
set :
Set of all non-parameterized symbols.
Examples
========
>>> from sympy.abc import t, a
>>> from sympy.geometry import Curve
>>> Curve((t, t**2), (t, 0, 2)).free_symbols
set()
>>> Curve((t, t**2), (t, a, 2)).free_symbols
{a}
"""
free = set()
for a in self.functions + self.limits[1:]:
free |= a.free_symbols
free = free.difference({self.parameter})
return free
@property
def ambient_dimension(self):
"""The dimension of the curve.
Returns
=======
int :
the dimension of the curve.
Examples
========
>>> from sympy.abc import t
>>> from sympy.geometry import Curve
>>> C = Curve((t, t**2), (t, 0, 2))
>>> C.ambient_dimension
2
"""
return len(self.args[0])
@property
def functions(self):
"""The functions specifying the curve.
Returns
=======
functions :
list of parameterized coordinate functions.
Examples
========
>>> from sympy.abc import t
>>> from sympy.geometry import Curve
>>> C = Curve((t, t**2), (t, 0, 2))
>>> C.functions
(t, t**2)
See Also
========
parameter
"""
return self.args[0]
@property
def limits(self):
"""The limits for the curve.
Returns
=======
limits : tuple
Contains parameter and lower and upper limits.
Examples
========
>>> from sympy.abc import t
>>> from sympy.geometry import Curve
>>> C = Curve([t, t**3], (t, -2, 2))
>>> C.limits
(t, -2, 2)
See Also
========
plot_interval
"""
return self.args[1]
@property
def parameter(self):
"""The curve function variable.
Returns
=======
Symbol :
returns a bound symbol.
Examples
========
>>> from sympy.abc import t
>>> from sympy.geometry import Curve
>>> C = Curve([t, t**2], (t, 0, 2))
>>> C.parameter
t
See Also
========
functions
"""
return self.args[1][0]
@property
def length(self):
"""The curve length.
Examples
========
>>> from sympy.geometry.curve import Curve
>>> from sympy.abc import t
>>> Curve((t, t), (t, 0, 1)).length
sqrt(2)
"""
integrand = sqrt(sum(diff(func, self.limits[0])**2 for func in self.functions))
return integrate(integrand, self.limits)
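# Illustrative check of the arc-length integral above for a straight segment
# from (0, 0) to (3, 4), whose speed is the constant sqrt(3**2 + 4**2) = 5:
# >>> from sympy.abc import t
# >>> Curve((3*t, 4*t), (t, 0, 1)).length
# 5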
def plot_interval(self, parameter='t'):
"""The plot interval for the default geometric plot of the curve.
Parameters
==========
parameter : str or Symbol, optional
Default value is 't';
otherwise the provided symbol is used.
Returns
=======
List :
the plot interval as below:
[parameter, lower_bound, upper_bound]
Examples
========
>>> from sympy import Curve, sin
>>> from sympy.abc import x, s
>>> Curve((x, sin(x)), (x, 1, 2)).plot_interval()
[t, 1, 2]
>>> Curve((x, sin(x)), (x, 1, 2)).plot_interval(s)
[s, 1, 2]
See Also
========
limits : Returns limits of the parameter interval
"""
t = _symbol(parameter, self.parameter, real=True)
return [t] + list(self.limits[1:])
def rotate(self, angle=0, pt=None):
"""This function is used to rotate a curve along given point ``pt`` at given angle(in radian).
Parameters
==========
angle :
the angle (in radians) by which the curve will be rotated counterclockwise.
Default value of angle is 0.
pt : Point
the point about which the curve will be rotated.
If no point is given, the curve will be rotated about the origin.
Returns
=======
Curve :
returns the curve rotated by the given angle about the given point.
Examples
========
>>> from sympy.geometry.curve import Curve
>>> from sympy.abc import x
>>> from sympy import pi
>>> Curve((x, x), (x, 0, 1)).rotate(pi/2)
Curve((-x, x), (x, 0, 1))
"""
from sympy.matrices import Matrix, rot_axis3
if pt:
pt = -Point(pt, dim=2)
else:
pt = Point(0,0)
rv = self.translate(*pt.args)
f = list(rv.functions)
f.append(0)
f = Matrix(1, 3, f)
f *= rot_axis3(angle)
rv = self.func(f[0, :2].tolist()[0], self.limits)
pt = -pt
return rv.translate(*pt.args)
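# Hedged sketch of rotation about a non-origin point: the curve is translated
# so that ``pt`` sits at the origin, rotated, and translated back, so a point
# of the curve that coincides with ``pt`` stays fixed.
# >>> from sympy import pi, Point
# >>> from sympy.abc import x
# >>> C = Curve((x, x**2), (x, 0, 2))
# >>> C.rotate(pi/2, Point(1, 1)).subs(x, 1)
# Point2D(1, 1)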
def scale(self, x=1, y=1, pt=None):
"""Override GeometryEntity.scale since Curve is not made up of Points.
Returns
=======
Curve :
returns scaled curve.
Examples
========
>>> from sympy.geometry.curve import Curve
>>> from sympy.abc import x
>>> Curve((x, x), (x, 0, 1)).scale(2)
Curve((2*x, x), (x, 0, 1))
"""
if pt:
pt = Point(pt, dim=2)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
fx, fy = self.functions
return self.func((fx*x, fy*y), self.limits)
def translate(self, x=0, y=0):
"""Translate the Curve by (x, y).
Returns
=======
Curve :
returns a translated curve.
Examples
========
>>> from sympy.geometry.curve import Curve
>>> from sympy.abc import x
>>> Curve((x, x), (x, 0, 1)).translate(1, 2)
Curve((x + 1, x + 2), (x, 0, 1))
"""
fx, fy = self.functions
return self.func((fx + x, fy + y), self.limits)
|
54b6446efd9876eee6430a67f75728d0e2957a5cbff5901bf29449a111d5a52e | """Geometrical Points.
Contains
========
Point
Point2D
Point3D
When methods of Point require 1 or more points as arguments, they
can be passed as a sequence of coordinates or Points:
>>> from sympy.geometry.point import Point
>>> Point(1, 1).is_collinear((2, 2), (3, 4))
False
>>> Point(1, 1).is_collinear(Point(2, 2), Point(3, 4))
False
"""
import warnings
from sympy.core import S, sympify, Expr
from sympy.core.add import Add
from sympy.core.containers import Tuple
from sympy.core.numbers import Float
from sympy.core.parameters import global_parameters
from sympy.simplify import nsimplify, simplify
from sympy.geometry.exceptions import GeometryError
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.complexes import im
from sympy.functions.elementary.trigonometric import cos, sin
from sympy.matrices import Matrix
from sympy.matrices.expressions import Transpose
from sympy.utilities.iterables import uniq, is_sequence
from sympy.utilities.misc import filldedent, func_name, Undecidable
from .entity import GeometryEntity
from mpmath.libmp.libmpf import prec_to_dps
class Point(GeometryEntity):
"""A point in a n-dimensional Euclidean space.
Parameters
==========
coords : sequence of n-coordinate values. In the special
case where n=2 or 3, a Point2D or Point3D will be created
as appropriate.
evaluate : if `True` (default), all floats are turned into
exact types.
dim : number of coordinates the point should have. If coordinates
are unspecified, they are padded with zeros.
on_morph : indicates what should happen when the number of
coordinates of a point needs to be changed by adding or
removing zeros. Possible values are `'warn'`, `'error'`, or
`'ignore'` (default). No warning or error is given when `*args`
is empty and `dim` is given. An error is always raised when
trying to remove nonzero coordinates.
Attributes
==========
length
origin: A `Point` representing the origin of the
appropriately-dimensioned space.
Raises
======
TypeError : When instantiating with anything but a Point or sequence
ValueError : when instantiating with a sequence with length < 2 or
when trying to reduce dimensions if keyword `on_morph='error'` is
set.
See Also
========
sympy.geometry.line.Segment : Connects two Points
Examples
========
>>> from sympy.geometry import Point
>>> from sympy.abc import x
>>> Point(1, 2, 3)
Point3D(1, 2, 3)
>>> Point([1, 2])
Point2D(1, 2)
>>> Point(0, x)
Point2D(0, x)
>>> Point(dim=4)
Point(0, 0, 0, 0)
Floats are automatically converted to Rational unless the
evaluate flag is False:
>>> Point(0.5, 0.25)
Point2D(1/2, 1/4)
>>> Point(0.5, 0.25, evaluate=False)
Point2D(0.5, 0.25)
"""
is_Point = True
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', global_parameters.evaluate)
on_morph = kwargs.get('on_morph', 'ignore')
# unpack into coords
coords = args[0] if len(args) == 1 else args
# check args and quickly handle Point instances
if isinstance(coords, Point):
# even if we're mutating the dimension of a point, we
# don't reevaluate its coordinates
evaluate = False
if len(coords) == kwargs.get('dim', len(coords)):
return coords
if not is_sequence(coords):
raise TypeError(filldedent('''
Expecting sequence of coordinates, not `{}`'''
.format(func_name(coords))))
# A point where only `dim` is specified is initialized
# to zeros.
if len(coords) == 0 and kwargs.get('dim', None):
coords = (S.Zero,)*kwargs.get('dim')
coords = Tuple(*coords)
dim = kwargs.get('dim', len(coords))
if len(coords) < 2:
raise ValueError(filldedent('''
Point requires 2 or more coordinates or
keyword `dim` > 1.'''))
if len(coords) != dim:
message = ("Dimension of {} needs to be changed "
"from {} to {}.").format(coords, len(coords), dim)
if on_morph == 'ignore':
pass
elif on_morph == "error":
raise ValueError(message)
elif on_morph == 'warn':
warnings.warn(message)
else:
raise ValueError(filldedent('''
on_morph value should be 'error',
'warn' or 'ignore'.'''))
if any(coords[dim:]):
raise ValueError('Nonzero coordinates cannot be removed.')
if any(a.is_number and im(a) for a in coords):
raise ValueError('Imaginary coordinates are not permitted.')
if not all(isinstance(a, Expr) for a in coords):
raise TypeError('Coordinates must be valid SymPy expressions.')
# pad with zeros appropriately
coords = coords[:dim] + (S.Zero,)*(dim - len(coords))
# Turn any Floats into rationals and simplify
# any expressions before we instantiate
if evaluate:
coords = coords.xreplace({
f: simplify(nsimplify(f, rational=True))
for f in coords.atoms(Float)})
# return 2D or 3D instances
if len(coords) == 2:
kwargs['_nocheck'] = True
return Point2D(*coords, **kwargs)
elif len(coords) == 3:
kwargs['_nocheck'] = True
return Point3D(*coords, **kwargs)
# the general Point
return GeometryEntity.__new__(cls, *coords)
def __abs__(self):
"""Returns the distance between this point and the origin."""
origin = Point([0]*len(self))
return Point.distance(origin, self)
def __add__(self, other):
"""Add other to self by incrementing self's coordinates by
those of other.
Notes
=====
>>> from sympy.geometry.point import Point
When sequences of coordinates are passed to Point methods, they
are converted to a Point internally. This __add__ method does
not do that so if floating point values are used, a floating
point result (in terms of SymPy Floats) will be returned.
>>> Point(1, 2) + (.1, .2)
Point2D(1.1, 2.2)
If this is not desired, the `translate` method can be used or
another Point can be added:
>>> Point(1, 2).translate(.1, .2)
Point2D(11/10, 11/5)
>>> Point(1, 2) + Point(.1, .2)
Point2D(11/10, 11/5)
See Also
========
sympy.geometry.point.Point.translate
"""
try:
s, o = Point._normalize_dimension(self, Point(other, evaluate=False))
except TypeError:
raise GeometryError("Don't know how to add {} and a Point object".format(other))
coords = [simplify(a + b) for a, b in zip(s, o)]
return Point(coords, evaluate=False)
def __contains__(self, item):
return item in self.args
def __truediv__(self, divisor):
"""Divide point's coordinates by a factor."""
divisor = sympify(divisor)
coords = [simplify(x/divisor) for x in self.args]
return Point(coords, evaluate=False)
def __eq__(self, other):
if not isinstance(other, Point) or len(self.args) != len(other.args):
return False
return self.args == other.args
def __getitem__(self, key):
return self.args[key]
def __hash__(self):
return hash(self.args)
def __iter__(self):
return self.args.__iter__()
def __len__(self):
return len(self.args)
def __mul__(self, factor):
"""Multiply point's coordinates by a factor.
Notes
=====
>>> from sympy.geometry.point import Point
When multiplying a Point by a floating point number,
the coordinates of the Point will be changed to Floats:
>>> Point(1, 2)*0.1
Point2D(0.1, 0.2)
If this is not desired, the `scale` method can be used or
else only multiply or divide by integers:
>>> Point(1, 2).scale(1.1, 1.1)
Point2D(11/10, 11/5)
>>> Point(1, 2)*11/10
Point2D(11/10, 11/5)
See Also
========
sympy.geometry.point.Point.scale
"""
factor = sympify(factor)
coords = [simplify(x*factor) for x in self.args]
return Point(coords, evaluate=False)
def __rmul__(self, factor):
"""Multiply a factor by point's coordinates."""
return self.__mul__(factor)
def __neg__(self):
"""Negate the point."""
coords = [-x for x in self.args]
return Point(coords, evaluate=False)
def __sub__(self, other):
"""Subtract two points, or subtract a factor from this point's
coordinates."""
return self + [-x for x in other]
@classmethod
def _normalize_dimension(cls, *points, **kwargs):
"""Ensure that points have the same dimension.
By default `on_morph='warn'` is passed to the
`Point` constructor."""
# if we have a built-in ambient dimension, use it
dim = getattr(cls, '_ambient_dimension', None)
# override if we specified it
dim = kwargs.get('dim', dim)
# if no dim was given, use the highest dimensional point
if dim is None:
dim = max(i.ambient_dimension for i in points)
if all(i.ambient_dimension == dim for i in points):
return list(points)
kwargs['dim'] = dim
kwargs['on_morph'] = kwargs.get('on_morph', 'warn')
return [Point(i, **kwargs) for i in points]
@staticmethod
def affine_rank(*args):
"""The affine rank of a set of points is the dimension
of the smallest affine space containing all the points.
For example, if the points lie on a line (and are not all
the same) their affine rank is 1. If the points lie on a plane
but not a line, their affine rank is 2. By convention, the empty
set has affine rank -1."""
if len(args) == 0:
return -1
# make sure we're genuinely points
# and translate every point to the origin
points = Point._normalize_dimension(*[Point(i) for i in args])
origin = points[0]
points = [i - origin for i in points[1:]]
m = Matrix([i.args for i in points])
# XXX fragile -- what is a better way?
return m.rank(iszerofunc = lambda x:
abs(x.n(2)) < 1e-12 if x.is_number else x.is_zero)
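# Illustrative example of the rank computation above: three collinear points
# span a line (affine rank 1), while a point off that line raises the rank to 2.
# >>> from sympy import Point
# >>> Point.affine_rank(Point(0, 0), Point(1, 1), Point(2, 2))
# 1
# >>> Point.affine_rank(Point(0, 0), Point(1, 1), Point(0, 1))
# 2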
@property
def ambient_dimension(self):
"""Number of components this point has."""
return getattr(self, '_ambient_dimension', len(self))
@classmethod
def are_coplanar(cls, *points):
"""Return True if there exists a plane in which all the points
lie. A trivial True value is returned if `len(points) < 3` or
all Points are 2-dimensional.
Parameters
==========
A set of points
Raises
======
ValueError : if less than 3 unique points are given
Returns
=======
boolean
Examples
========
>>> from sympy import Point3D
>>> p1 = Point3D(1, 2, 2)
>>> p2 = Point3D(2, 7, 2)
>>> p3 = Point3D(0, 0, 2)
>>> p4 = Point3D(1, 1, 2)
>>> Point3D.are_coplanar(p1, p2, p3, p4)
True
>>> p5 = Point3D(0, 1, 3)
>>> Point3D.are_coplanar(p1, p2, p3, p5)
False
"""
if len(points) <= 1:
return True
points = cls._normalize_dimension(*[Point(i) for i in points])
# quick exit if we are in 2D
if points[0].ambient_dimension == 2:
return True
points = list(uniq(points))
return Point.affine_rank(*points) <= 2
def distance(self, other):
"""The Euclidean distance between self and another GeometricEntity.
Returns
=======
distance : number or symbolic expression.
Raises
======
TypeError : if other is not recognized as a GeometricEntity or is a
GeometricEntity for which distance is not defined.
See Also
========
sympy.geometry.line.Segment.length
sympy.geometry.point.Point.taxicab_distance
Examples
========
>>> from sympy.geometry import Point, Line
>>> p1, p2 = Point(1, 1), Point(4, 5)
>>> l = Line((3, 1), (2, 2))
>>> p1.distance(p2)
5
>>> p1.distance(l)
sqrt(2)
The computed distance may be symbolic, too:
>>> from sympy.abc import x, y
>>> p3 = Point(x, y)
>>> p3.distance((0, 0))
sqrt(x**2 + y**2)
"""
if not isinstance(other, GeometryEntity):
try:
other = Point(other, dim=self.ambient_dimension)
except TypeError:
raise TypeError("not recognized as a GeometricEntity: %s" % type(other))
if isinstance(other, Point):
s, p = Point._normalize_dimension(self, Point(other))
return sqrt(Add(*((a - b)**2 for a, b in zip(s, p))))
distance = getattr(other, 'distance', None)
if distance is None:
raise TypeError("distance between Point and %s is not defined" % type(other))
return distance(self)
def dot(self, p):
"""Return dot product of self with another Point."""
if not is_sequence(p):
p = Point(p) # raise the error via Point
return Add(*(a*b for a, b in zip(self, p)))
def equals(self, other):
"""Returns whether the coordinates of self and other agree."""
# a point is equal to another point if all its components are equal
if not isinstance(other, Point) or len(self) != len(other):
return False
return all(a.equals(b) for a, b in zip(self, other))
def _eval_evalf(self, prec=15, **options):
"""Evaluate the coordinates of the point.
This method will, where possible, create and return a new Point
where the coordinates are evaluated as floating point numbers to
the precision indicated (default=15).
Parameters
==========
prec : int
Returns
=======
point : Point
Examples
========
>>> from sympy import Point, Rational
>>> p1 = Point(Rational(1, 2), Rational(3, 2))
>>> p1
Point2D(1/2, 3/2)
>>> p1.evalf()
Point2D(0.5, 1.5)
"""
dps = prec_to_dps(prec)
coords = [x.evalf(n=dps, **options) for x in self.args]
return Point(*coords, evaluate=False)
def intersection(self, other):
"""The intersection between this point and another GeometryEntity.
Parameters
==========
other : GeometryEntity or sequence of coordinates
Returns
=======
intersection : list of Points
Notes
=====
The return value will either be an empty list if there is no
intersection, otherwise it will contain this point.
Examples
========
>>> from sympy import Point
>>> p1, p2, p3 = Point(0, 0), Point(1, 1), Point(0, 0)
>>> p1.intersection(p2)
[]
>>> p1.intersection(p3)
[Point2D(0, 0)]
"""
if not isinstance(other, GeometryEntity):
other = Point(other)
if isinstance(other, Point):
if self == other:
return [self]
p1, p2 = Point._normalize_dimension(self, other)
if p1 == self and p1 == p2:
return [self]
return []
return other.intersection(self)
def is_collinear(self, *args):
"""Returns `True` if there exists a line
that contains `self` and `points`. Returns `False` otherwise.
A trivially True value is returned if no points are given.
Parameters
==========
args : sequence of Points
Returns
=======
is_collinear : boolean
See Also
========
sympy.geometry.line.Line
Examples
========
>>> from sympy import Point
>>> from sympy.abc import x
>>> p1, p2 = Point(0, 0), Point(1, 1)
>>> p3, p4, p5 = Point(2, 2), Point(x, x), Point(1, 2)
>>> Point.is_collinear(p1, p2, p3, p4)
True
>>> Point.is_collinear(p1, p2, p3, p5)
False
"""
points = (self,) + args
points = Point._normalize_dimension(*[Point(i) for i in points])
points = list(uniq(points))
return Point.affine_rank(*points) <= 1
def is_concyclic(self, *args):
"""Do `self` and the given sequence of points lie in a circle?
Returns True if the set of points are concyclic and
False otherwise. A trivial value of True is returned
if there are fewer than 2 other points.
Parameters
==========
args : sequence of Points
Returns
=======
is_concyclic : boolean
Examples
========
>>> from sympy import Point
Define 4 points that are on the unit circle:
>>> p1, p2, p3, p4 = Point(1, 0), (0, 1), (-1, 0), (0, -1)
>>> p1.is_concyclic() == p1.is_concyclic(p2, p3, p4) == True
True
Define a point not on that circle:
>>> p = Point(1, 1)
>>> p.is_concyclic(p1, p2, p3)
False
"""
points = (self,) + args
points = Point._normalize_dimension(*[Point(i) for i in points])
points = list(uniq(points))
if not Point.affine_rank(*points) <= 2:
return False
origin = points[0]
points = [p - origin for p in points]
# points are concyclic if they are coplanar and
# there is a point c so that ||p_i-c|| == ||p_j-c|| for all
# i and j. Rearranging this equation gives us the following
# condition: the matrix `mat` must not have a pivot in the last
# column.
mat = Matrix([list(i) + [i.dot(i)] for i in points])
rref, pivots = mat.rref()
if len(origin) not in pivots:
return True
return False
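# Hedged note on the pivot test above: any three non-collinear points lie on
# a unique circle, so a three-point query is always concyclic; a fourth point
# is what actually constrains the answer.
# >>> from sympy import Point
# >>> Point(0, 0).is_concyclic(Point(2, 0), Point(0, 2))
# True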
@property
def is_nonzero(self):
"""True if any coordinate is nonzero, False if every coordinate is zero,
and None if it cannot be determined."""
is_zero = self.is_zero
if is_zero is None:
return None
return not is_zero
def is_scalar_multiple(self, p):
"""Returns whether each coordinate of `self` is a scalar
multiple of the corresponding coordinate in point p.
"""
s, o = Point._normalize_dimension(self, Point(p))
# 2d points happen a lot, so optimize this function call
if s.ambient_dimension == 2:
(x1, y1), (x2, y2) = s.args, o.args
rv = (x1*y2 - x2*y1).equals(0)
if rv is None:
raise Undecidable(filldedent(
'''Cannot determine if %s is a scalar multiple of
%s''' % (s, o)))
# if the vectors p1 and p2 are linearly dependent, then they must
# be scalar multiples of each other
m = Matrix([s.args, o.args])
return m.rank() < 2
@property
def is_zero(self):
"""True if every coordinate is zero, False if any coordinate is not zero,
and None if it cannot be determined."""
nonzero = [x.is_nonzero for x in self.args]
if any(nonzero):
return False
if any(x is None for x in nonzero):
return None
return True
@property
def length(self):
"""
Treating a Point as a Line, this returns 0 for the length of a Point.
Examples
========
>>> from sympy import Point
>>> p = Point(0, 1)
>>> p.length
0
"""
return S.Zero
def midpoint(self, p):
"""The midpoint between self and point p.
Parameters
==========
p : Point
Returns
=======
midpoint : Point
See Also
========
sympy.geometry.line.Segment.midpoint
Examples
========
>>> from sympy.geometry import Point
>>> p1, p2 = Point(1, 1), Point(13, 5)
>>> p1.midpoint(p2)
Point2D(7, 3)
"""
s, p = Point._normalize_dimension(self, Point(p))
return Point([simplify((a + b)*S.Half) for a, b in zip(s, p)])
@property
def origin(self):
"""A point of all zeros of the same ambient dimension
as the current point"""
return Point([0]*len(self), evaluate=False)
@property
def orthogonal_direction(self):
"""Returns a non-zero point that is orthogonal to the
line containing `self` and the origin.
Examples
========
>>> from sympy.geometry import Line, Point
>>> a = Point(1, 2, 3)
>>> a.orthogonal_direction
Point3D(-2, 1, 0)
>>> b = _
>>> Line(b, b.origin).is_perpendicular(Line(a, a.origin))
True
"""
dim = self.ambient_dimension
# if a coordinate is zero, we can put a 1 there and zeros elsewhere
if self[0].is_zero:
return Point([1] + (dim - 1)*[0])
if self[1].is_zero:
return Point([0,1] + (dim - 2)*[0])
# if the first two coordinates aren't zero, we can create a non-zero
# orthogonal vector by swapping them, negating one, and padding with zeros
return Point([-self[1], self[0]] + (dim - 2)*[0])
@staticmethod
def project(a, b):
"""Project the point `a` onto the line between the origin
and point `b` along the normal direction.
Parameters
==========
a : Point
b : Point
Returns
=======
p : Point
See Also
========
sympy.geometry.line.LinearEntity.projection
Examples
========
>>> from sympy.geometry import Line, Point
>>> a = Point(1, 2)
>>> b = Point(2, 5)
>>> z = a.origin
>>> p = Point.project(a, b)
>>> Line(p, a).is_perpendicular(Line(p, b))
True
>>> Point.is_collinear(z, p, b)
True
"""
a, b = Point._normalize_dimension(Point(a), Point(b))
if b.is_zero:
raise ValueError("Cannot project to the zero vector.")
return b*(a.dot(b) / b.dot(b))
def taxicab_distance(self, p):
"""The Taxicab Distance from self to point p.
Returns the sum of the horizontal and vertical distances to point p.
Parameters
==========
p : Point
Returns
=======
taxicab_distance : The sum of the horizontal
and vertical distances to point p.
See Also
========
sympy.geometry.point.Point.distance
Examples
========
>>> from sympy.geometry import Point
>>> p1, p2 = Point(1, 1), Point(4, 5)
>>> p1.taxicab_distance(p2)
7
"""
s, p = Point._normalize_dimension(self, Point(p))
return Add(*(abs(a - b) for a, b in zip(s, p)))
def canberra_distance(self, p):
"""The Canberra Distance from self to point p.
Returns the weighted sum of horizontal and vertical distances to
point p.
Parameters
==========
p : Point
Returns
=======
canberra_distance : The weighted sum of horizontal and vertical
distances to point p. The weight used is the sum of absolute values
of the coordinates.
Examples
========
>>> from sympy.geometry import Point
>>> p1, p2 = Point(1, 1), Point(3, 3)
>>> p1.canberra_distance(p2)
1
>>> p1, p2 = Point(0, 0), Point(3, 3)
>>> p1.canberra_distance(p2)
2
Raises
======
ValueError when both vectors are zero.
See Also
========
sympy.geometry.point.Point.distance
"""
s, p = Point._normalize_dimension(self, Point(p))
if self.is_zero and p.is_zero:
raise ValueError("Cannot project to the zero vector.")
return Add(*((abs(a - b)/(abs(a) + abs(b))) for a, b in zip(s, p)))
@property
def unit(self):
"""Return the Point that is in the same direction as `self`
and a distance of 1 from the origin"""
return self / abs(self)
class Point2D(Point):
"""A point in a 2-dimensional Euclidean space.
Parameters
==========
coords : sequence of 2 coordinate values.
Attributes
==========
x
y
length
Raises
======
TypeError
When trying to add or subtract points with different dimensions.
When trying to create a point with more than two dimensions.
When `intersection` is called with object other than a Point.
See Also
========
sympy.geometry.line.Segment : Connects two Points
Examples
========
>>> from sympy.geometry import Point2D
>>> from sympy.abc import x
>>> Point2D(1, 2)
Point2D(1, 2)
>>> Point2D([1, 2])
Point2D(1, 2)
>>> Point2D(0, x)
Point2D(0, x)
Floats are automatically converted to Rational unless the
evaluate flag is False:
>>> Point2D(0.5, 0.25)
Point2D(1/2, 1/4)
>>> Point2D(0.5, 0.25, evaluate=False)
Point2D(0.5, 0.25)
"""
_ambient_dimension = 2
def __new__(cls, *args, _nocheck=False, **kwargs):
if not _nocheck:
kwargs['dim'] = 2
args = Point(*args, **kwargs)
return GeometryEntity.__new__(cls, *args)
def __contains__(self, item):
return item == self
@property
def bounds(self):
"""Return a tuple (xmin, ymin, xmax, ymax) representing the bounding
rectangle for the geometric figure.
"""
return (self.x, self.y, self.x, self.y)
def rotate(self, angle, pt=None):
"""Rotate ``angle`` radians counterclockwise about Point ``pt``.
See Also
========
translate, scale
Examples
========
>>> from sympy import Point2D, pi
>>> t = Point2D(1, 0)
>>> t.rotate(pi/2)
Point2D(0, 1)
>>> t.rotate(pi/2, (2, 0))
Point2D(2, -1)
"""
c = cos(angle)
s = sin(angle)
rv = self
if pt is not None:
pt = Point(pt, dim=2)
rv -= pt
x, y = rv.args
rv = Point(c*x - s*y, s*x + c*y)
if pt is not None:
rv += pt
return rv
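        # The update above applies the 2D rotation matrix
        # [[cos(a), -sin(a)], [sin(a), cos(a)]] to the point, after first
        # shifting so that ``pt`` (when given) acts as the center of rotation.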
def scale(self, x=1, y=1, pt=None):
"""Scale the coordinates of the Point by multiplying by
``x`` and ``y`` after subtracting ``pt`` -- default is (0, 0) --
and then adding ``pt`` back again (i.e. ``pt`` is the point of
reference for the scaling).
See Also
========
rotate, translate
Examples
========
>>> from sympy import Point2D
>>> t = Point2D(1, 1)
>>> t.scale(2)
Point2D(2, 1)
>>> t.scale(2, 2)
Point2D(2, 2)
"""
if pt:
pt = Point(pt, dim=2)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
return Point(self.x*x, self.y*y)
def transform(self, matrix):
"""Return the point after applying the transformation described
by the 3x3 Matrix, ``matrix``.
See Also
========
sympy.geometry.point.Point2D.rotate
sympy.geometry.point.Point2D.scale
sympy.geometry.point.Point2D.translate
"""
if not (matrix.is_Matrix and matrix.shape == (3, 3)):
raise ValueError("matrix must be a 3x3 matrix")
x, y = self.args
return Point(*(Matrix(1, 3, [x, y, 1])*matrix).tolist()[0][:2])
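        # The point is treated as the homogeneous row vector [x, y, 1] and
        # multiplied on the right by the 3x3 matrix; only the first two
        # entries of the resulting row are kept.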
def translate(self, x=0, y=0):
"""Shift the Point by adding x and y to the coordinates of the Point.
See Also
========
sympy.geometry.point.Point2D.rotate, scale
Examples
========
>>> from sympy import Point2D
>>> t = Point2D(0, 1)
>>> t.translate(2)
Point2D(2, 1)
>>> t.translate(2, 2)
Point2D(2, 3)
>>> t + Point2D(2, 2)
Point2D(2, 3)
"""
return Point(self.x + x, self.y + y)
@property
def coordinates(self):
"""
Returns the two coordinates of the Point.
Examples
========
>>> from sympy import Point2D
>>> p = Point2D(0, 1)
>>> p.coordinates
(0, 1)
"""
return self.args
@property
def x(self):
"""
Returns the X coordinate of the Point.
Examples
========
>>> from sympy import Point2D
>>> p = Point2D(0, 1)
>>> p.x
0
"""
return self.args[0]
@property
def y(self):
"""
Returns the Y coordinate of the Point.
Examples
========
>>> from sympy import Point2D
>>> p = Point2D(0, 1)
>>> p.y
1
"""
return self.args[1]
class Point3D(Point):
"""A point in a 3-dimensional Euclidean space.
Parameters
==========
coords : sequence of 3 coordinate values.
Attributes
==========
x
y
z
length
Raises
======
TypeError
When trying to add or subtract points with different dimensions.
When `intersection` is called with object other than a Point.
Examples
========
>>> from sympy import Point3D
>>> from sympy.abc import x
>>> Point3D(1, 2, 3)
Point3D(1, 2, 3)
>>> Point3D([1, 2, 3])
Point3D(1, 2, 3)
>>> Point3D(0, x, 3)
Point3D(0, x, 3)
Floats are automatically converted to Rational unless the
evaluate flag is False:
>>> Point3D(0.5, 0.25, 2)
Point3D(1/2, 1/4, 2)
>>> Point3D(0.5, 0.25, 3, evaluate=False)
Point3D(0.5, 0.25, 3)
"""
_ambient_dimension = 3
def __new__(cls, *args, _nocheck=False, **kwargs):
if not _nocheck:
kwargs['dim'] = 3
args = Point(*args, **kwargs)
return GeometryEntity.__new__(cls, *args)
def __contains__(self, item):
return item == self
@staticmethod
def are_collinear(*points):
"""Is a sequence of points collinear?
        Test whether or not a set of points is collinear. Returns True if
        the set of points is collinear, or False otherwise.
Parameters
==========
points : sequence of Point
Returns
=======
are_collinear : boolean
See Also
========
sympy.geometry.line.Line3D
Examples
========
>>> from sympy import Point3D
>>> from sympy.abc import x
>>> p1, p2 = Point3D(0, 0, 0), Point3D(1, 1, 1)
>>> p3, p4, p5 = Point3D(2, 2, 2), Point3D(x, x, x), Point3D(1, 2, 6)
>>> Point3D.are_collinear(p1, p2, p3, p4)
True
>>> Point3D.are_collinear(p1, p2, p3, p5)
False
"""
return Point.is_collinear(*points)
def direction_cosine(self, point):
"""
Gives the direction cosine between 2 points
Parameters
==========
p : Point3D
Returns
=======
list
Examples
========
>>> from sympy import Point3D
>>> p1 = Point3D(1, 2, 3)
>>> p1.direction_cosine(Point3D(2, 3, 5))
[sqrt(6)/6, sqrt(6)/6, sqrt(6)/3]
"""
a = self.direction_ratio(point)
b = sqrt(Add(*(i**2 for i in a)))
return [(point.x - self.x) / b,(point.y - self.y) / b,
(point.z - self.z) / b]
def direction_ratio(self, point):
"""
Gives the direction ratio between 2 points
Parameters
==========
p : Point3D
Returns
=======
list
Examples
========
>>> from sympy import Point3D
>>> p1 = Point3D(1, 2, 3)
>>> p1.direction_ratio(Point3D(2, 3, 5))
[1, 1, 2]
"""
return [(point.x - self.x),(point.y - self.y),(point.z - self.z)]
def intersection(self, other):
"""The intersection between this point and another GeometryEntity.
Parameters
==========
other : GeometryEntity or sequence of coordinates
Returns
=======
intersection : list of Points
Notes
=====
The return value will either be an empty list if there is no
intersection, otherwise it will contain this point.
Examples
========
>>> from sympy import Point3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(1, 1, 1), Point3D(0, 0, 0)
>>> p1.intersection(p2)
[]
>>> p1.intersection(p3)
[Point3D(0, 0, 0)]
"""
if not isinstance(other, GeometryEntity):
other = Point(other, dim=3)
if isinstance(other, Point3D):
if self == other:
return [self]
return []
return other.intersection(self)
def scale(self, x=1, y=1, z=1, pt=None):
"""Scale the coordinates of the Point by multiplying by
        ``x``, ``y`` and ``z`` after subtracting ``pt`` -- default is (0, 0, 0) --
and then adding ``pt`` back again (i.e. ``pt`` is the point of
reference for the scaling).
See Also
========
translate
Examples
========
>>> from sympy import Point3D
>>> t = Point3D(1, 1, 1)
>>> t.scale(2)
Point3D(2, 1, 1)
>>> t.scale(2, 2)
Point3D(2, 2, 1)
"""
if pt:
pt = Point3D(pt)
return self.translate(*(-pt).args).scale(x, y, z).translate(*pt.args)
return Point3D(self.x*x, self.y*y, self.z*z)
def transform(self, matrix):
"""Return the point after applying the transformation described
by the 4x4 Matrix, ``matrix``.
See Also
========
sympy.geometry.point.Point3D.scale
sympy.geometry.point.Point3D.translate
"""
if not (matrix.is_Matrix and matrix.shape == (4, 4)):
raise ValueError("matrix must be a 4x4 matrix")
x, y, z = self.args
m = Transpose(matrix)
return Point3D(*(Matrix(1, 4, [x, y, z, 1])*m).tolist()[0][:3])
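        # Unlike Point2D.transform, the 4x4 matrix is transposed before
        # multiplying, i.e. the result is the first three entries of
        # [x, y, z, 1]*matrix.T.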
def translate(self, x=0, y=0, z=0):
"""Shift the Point by adding x and y to the coordinates of the Point.
See Also
========
scale
Examples
========
>>> from sympy import Point3D
>>> t = Point3D(0, 1, 1)
>>> t.translate(2)
Point3D(2, 1, 1)
>>> t.translate(2, 2)
Point3D(2, 3, 1)
>>> t + Point3D(2, 2, 2)
Point3D(2, 3, 3)
"""
return Point3D(self.x + x, self.y + y, self.z + z)
@property
def coordinates(self):
"""
Returns the three coordinates of the Point.
Examples
========
>>> from sympy import Point3D
>>> p = Point3D(0, 1, 2)
>>> p.coordinates
(0, 1, 2)
"""
return self.args
@property
def x(self):
"""
Returns the X coordinate of the Point.
Examples
========
>>> from sympy import Point3D
>>> p = Point3D(0, 1, 3)
>>> p.x
0
"""
return self.args[0]
@property
def y(self):
"""
Returns the Y coordinate of the Point.
Examples
========
>>> from sympy import Point3D
>>> p = Point3D(0, 1, 2)
>>> p.y
1
"""
return self.args[1]
@property
def z(self):
"""
Returns the Z coordinate of the Point.
Examples
========
>>> from sympy import Point3D
>>> p = Point3D(0, 1, 1)
>>> p.z
1
"""
return self.args[2]
|
18ed5f1d94e142a4540be37eb3e8ac338387de1de930f30faa21cfcc84f3a99f | """Geometrical Planes.
Contains
========
Plane
"""
from sympy.core import Dummy, Rational, S, Symbol
from sympy.core.symbol import _symbol
from sympy.functions.elementary.trigonometric import cos, sin, acos, asin, sqrt
from .entity import GeometryEntity
from .line import (Line, Ray, Segment, Line3D, LinearEntity, LinearEntity3D,
Ray3D, Segment3D)
from .point import Point, Point3D
from sympy.matrices import Matrix
from sympy.polys.polytools import cancel
from sympy.simplify.simplify import simplify
from sympy.solvers import solve, linsolve
from sympy.utilities.iterables import uniq, is_sequence
from sympy.utilities.misc import filldedent, func_name, Undecidable
from mpmath.libmp.libmpf import prec_to_dps
import random
class Plane(GeometryEntity):
"""
A plane is a flat, two-dimensional surface. A plane is the two-dimensional
analogue of a point (zero-dimensions), a line (one-dimension) and a solid
    (three-dimensions). A plane can generally be constructed from two types of
    input: either three non-collinear points, or a point together with the
    plane's normal vector.
Attributes
==========
p1
normal_vector
Examples
========
>>> from sympy import Plane, Point3D
>>> Plane(Point3D(1, 1, 1), Point3D(2, 3, 4), Point3D(2, 2, 2))
Plane(Point3D(1, 1, 1), (-1, 2, -1))
>>> Plane((1, 1, 1), (2, 3, 4), (2, 2, 2))
Plane(Point3D(1, 1, 1), (-1, 2, -1))
>>> Plane(Point3D(1, 1, 1), normal_vector=(1,4,7))
Plane(Point3D(1, 1, 1), (1, 4, 7))
"""
def __new__(cls, p1, a=None, b=None, **kwargs):
p1 = Point3D(p1, dim=3)
if a and b:
p2 = Point(a, dim=3)
p3 = Point(b, dim=3)
if Point3D.are_collinear(p1, p2, p3):
raise ValueError('Enter three non-collinear points')
a = p1.direction_ratio(p2)
b = p1.direction_ratio(p3)
normal_vector = tuple(Matrix(a).cross(Matrix(b)))
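            # the normal of the plane is the cross product of the direction
            # ratios p1->p2 and p1->p3 computed above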
else:
a = kwargs.pop('normal_vector', a)
evaluate = kwargs.get('evaluate', True)
if is_sequence(a) and len(a) == 3:
normal_vector = Point3D(a).args if evaluate else a
else:
raise ValueError(filldedent('''
Either provide 3 3D points or a point with a
normal vector expressed as a sequence of length 3'''))
if all(coord.is_zero for coord in normal_vector):
raise ValueError('Normal vector cannot be zero vector')
return GeometryEntity.__new__(cls, p1, normal_vector, **kwargs)
def __contains__(self, o):
x, y, z = map(Dummy, 'xyz')
k = self.equation(x, y, z)
if isinstance(o, (LinearEntity, LinearEntity3D)):
t = Dummy()
d = Point3D(o.arbitrary_point(t))
e = k.subs([(x, d.x), (y, d.y), (z, d.z)])
return e.equals(0)
try:
o = Point(o, dim=3, strict=True)
d = k.xreplace(dict(zip((x, y, z), o.args)))
return d.equals(0)
except TypeError:
return False
def _eval_evalf(self, prec=15, **options):
pt, tup = self.args
dps = prec_to_dps(prec)
pt = pt.evalf(n=dps, **options)
tup = tuple([i.evalf(n=dps, **options) for i in tup])
return self.func(pt, normal_vector=tup, evaluate=False)
def angle_between(self, o):
"""Angle between the plane and other geometric entity.
Parameters
==========
LinearEntity3D, Plane.
Returns
=======
angle : angle in radians
Notes
=====
        This method accepts only 3D entities as its parameter, but if you want
to calculate the angle between a 2D entity and a plane you should
first convert to a 3D entity by projecting onto a desired plane and
then proceed to calculate the angle.
Examples
========
>>> from sympy import Point3D, Line3D, Plane
>>> a = Plane(Point3D(1, 2, 2), normal_vector=(1, 2, 3))
>>> b = Line3D(Point3D(1, 3, 4), Point3D(2, 2, 2))
>>> a.angle_between(b)
-asin(sqrt(21)/6)
"""
if isinstance(o, LinearEntity3D):
a = Matrix(self.normal_vector)
b = Matrix(o.direction_ratio)
c = a.dot(b)
d = sqrt(sum([i**2 for i in self.normal_vector]))
e = sqrt(sum([i**2 for i in o.direction_ratio]))
return asin(c/(d*e))
if isinstance(o, Plane):
a = Matrix(self.normal_vector)
b = Matrix(o.normal_vector)
c = a.dot(b)
d = sqrt(sum([i**2 for i in self.normal_vector]))
e = sqrt(sum([i**2 for i in o.normal_vector]))
return acos(c/(d*e))
def arbitrary_point(self, u=None, v=None):
""" Returns an arbitrary point on the Plane. If given two
parameters, the point ranges over the entire plane. If given 1
or no parameters, returns a point with one parameter which,
when varying from 0 to 2*pi, moves the point in a circle of
radius 1 about p1 of the Plane.
Examples
========
>>> from sympy.geometry import Plane, Ray
>>> from sympy.abc import u, v, t, r
>>> p = Plane((1, 1, 1), normal_vector=(1, 0, 0))
>>> p.arbitrary_point(u, v)
Point3D(1, u + 1, v + 1)
>>> p.arbitrary_point(t)
Point3D(1, cos(t) + 1, sin(t) + 1)
While arbitrary values of u and v can move the point anywhere in
the plane, the single-parameter point can be used to construct a
ray whose arbitrary point can be located at angle t and radius
r from p.p1:
>>> Ray(p.p1, _).arbitrary_point(r)
Point3D(1, r*cos(t) + 1, r*sin(t) + 1)
Returns
=======
Point3D
"""
circle = v is None
if circle:
u = _symbol(u or 't', real=True)
else:
u = _symbol(u or 'u', real=True)
v = _symbol(v or 'v', real=True)
x, y, z = self.normal_vector
a, b, c = self.p1.args
# x1, y1, z1 is a nonzero vector parallel to the plane
if x.is_zero and y.is_zero:
x1, y1, z1 = S.One, S.Zero, S.Zero
else:
x1, y1, z1 = -y, x, S.Zero
# x2, y2, z2 is also parallel to the plane, and orthogonal to x1, y1, z1
x2, y2, z2 = tuple(Matrix((x, y, z)).cross(Matrix((x1, y1, z1))))
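        # (x1, y1, z1) is chosen orthogonal to the normal -- (1, 0, 0) when
        # the normal points along z, (-y, x, 0) otherwise -- and
        # (x2, y2, z2) = n x (x1, y1, z1), so both vectors are parallel to
        # the plane; in the single-parameter case they are normalized below
        # so the point traces the unit circle about p1.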
if circle:
x1, y1, z1 = (w/sqrt(x1**2 + y1**2 + z1**2) for w in (x1, y1, z1))
x2, y2, z2 = (w/sqrt(x2**2 + y2**2 + z2**2) for w in (x2, y2, z2))
p = Point3D(a + x1*cos(u) + x2*sin(u), \
b + y1*cos(u) + y2*sin(u), \
c + z1*cos(u) + z2*sin(u))
else:
p = Point3D(a + x1*u + x2*v, b + y1*u + y2*v, c + z1*u + z2*v)
return p
@staticmethod
def are_concurrent(*planes):
"""Is a sequence of Planes concurrent?
        Two or more Planes are concurrent if they share a common line of
        intersection.
Parameters
==========
planes: list
Returns
=======
Boolean
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(5, 0, 0), normal_vector=(1, -1, 1))
>>> b = Plane(Point3D(0, -2, 0), normal_vector=(3, 1, 1))
>>> c = Plane(Point3D(0, -1, 0), normal_vector=(5, -1, 9))
>>> Plane.are_concurrent(a, b)
True
>>> Plane.are_concurrent(a, b, c)
False
"""
planes = list(uniq(planes))
for i in planes:
if not isinstance(i, Plane):
raise ValueError('All objects should be Planes but got %s' % i.func)
if len(planes) < 2:
return False
planes = list(planes)
first = planes.pop(0)
sol = first.intersection(planes[0])
if sol == []:
return False
else:
line = sol[0]
for i in planes[1:]:
l = first.intersection(i)
if not l or not l[0] in line:
return False
return True
def distance(self, o):
"""Distance between the plane and another geometric entity.
Parameters
==========
Point3D, LinearEntity3D, Plane.
Returns
=======
distance
Notes
=====
        This method accepts only 3D entities as its parameter, but if you want
to calculate the distance between a 2D entity and a plane you should
first convert to a 3D entity by projecting onto a desired plane and
then proceed to calculate the distance.
Examples
========
>>> from sympy import Point3D, Line3D, Plane
>>> a = Plane(Point3D(1, 1, 1), normal_vector=(1, 1, 1))
>>> b = Point3D(1, 2, 3)
>>> a.distance(b)
sqrt(3)
>>> c = Line3D(Point3D(2, 3, 1), Point3D(1, 2, 2))
>>> a.distance(c)
0
"""
if self.intersection(o) != []:
return S.Zero
if isinstance(o, (Segment3D, Ray3D)):
a, b = o.p1, o.p2
pi, = self.intersection(Line3D(a, b))
if pi in o:
return self.distance(pi)
elif a in Segment3D(pi, b):
return self.distance(a)
else:
assert isinstance(o, Segment3D) is True
return self.distance(b)
# following code handles `Point3D`, `LinearEntity3D`, `Plane`
a = o if isinstance(o, Point3D) else o.p1
n = Point3D(self.normal_vector).unit
d = (a - self.p1).dot(n)
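        # the projection of (a - p1) onto the unit normal is the signed
        # distance from a to the plane; its absolute value is returned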
return abs(d)
def equals(self, o):
"""
Returns True if self and o are the same mathematical entities.
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(1, 2, 3), normal_vector=(1, 1, 1))
>>> b = Plane(Point3D(1, 2, 3), normal_vector=(2, 2, 2))
>>> c = Plane(Point3D(1, 2, 3), normal_vector=(-1, 4, 6))
>>> a.equals(a)
True
>>> a.equals(b)
True
>>> a.equals(c)
False
"""
if isinstance(o, Plane):
a = self.equation()
b = o.equation()
return simplify(a / b).is_constant()
else:
return False
def equation(self, x=None, y=None, z=None):
"""The equation of the Plane.
Examples
========
>>> from sympy import Point3D, Plane
>>> a = Plane(Point3D(1, 1, 2), Point3D(2, 4, 7), Point3D(3, 5, 1))
>>> a.equation()
-23*x + 11*y - 2*z + 16
>>> a = Plane(Point3D(1, 4, 2), normal_vector=(6, 6, 6))
>>> a.equation()
6*x + 6*y + 6*z - 42
"""
x, y, z = [i if i else Symbol(j, real=True) for i, j in zip((x, y, z), 'xyz')]
a = Point3D(x, y, z)
b = self.p1.direction_ratio(a)
c = self.normal_vector
return (sum(i*j for i, j in zip(b, c)))
def intersection(self, o):
""" The intersection with other geometrical entity.
Parameters
==========
Point, Point3D, LinearEntity, LinearEntity3D, Plane
Returns
=======
List
Examples
========
>>> from sympy import Point3D, Line3D, Plane
>>> a = Plane(Point3D(1, 2, 3), normal_vector=(1, 1, 1))
>>> b = Point3D(1, 2, 3)
>>> a.intersection(b)
[Point3D(1, 2, 3)]
>>> c = Line3D(Point3D(1, 4, 7), Point3D(2, 2, 2))
>>> a.intersection(c)
[Point3D(2, 2, 2)]
>>> d = Plane(Point3D(6, 0, 0), normal_vector=(2, -5, 3))
>>> e = Plane(Point3D(2, 0, 0), normal_vector=(3, 4, -3))
>>> d.intersection(e)
[Line3D(Point3D(78/23, -24/23, 0), Point3D(147/23, 321/23, 23))]
"""
if not isinstance(o, GeometryEntity):
o = Point(o, dim=3)
if isinstance(o, Point):
if o in self:
return [o]
else:
return []
if isinstance(o, (LinearEntity, LinearEntity3D)):
# recast to 3D
p1, p2 = o.p1, o.p2
if isinstance(o, Segment):
o = Segment3D(p1, p2)
elif isinstance(o, Ray):
o = Ray3D(p1, p2)
elif isinstance(o, Line):
o = Line3D(p1, p2)
else:
raise ValueError('unhandled linear entity: %s' % o.func)
if o in self:
return [o]
else:
t = Dummy() # unnamed else it may clash with a symbol in o
a = Point3D(o.arbitrary_point(t))
p1, n = self.p1, Point3D(self.normal_vector)
# TODO: Replace solve with solveset, when this line is tested
c = solve((a - p1).dot(n), t)
if not c:
return []
else:
c = [i for i in c if i.is_real is not False]
if len(c) > 1:
c = [i for i in c if i.is_real]
if len(c) != 1:
raise Undecidable("not sure which point is real")
p = a.subs(t, c[0])
if p not in o:
return [] # e.g. a segment might not intersect a plane
return [p]
if isinstance(o, Plane):
if self.equals(o):
return [self]
if self.is_parallel(o):
return []
else:
x, y, z = map(Dummy, 'xyz')
a, b = Matrix([self.normal_vector]), Matrix([o.normal_vector])
c = list(a.cross(b))
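                # the cross product of the two normals is the direction of
                # the line of intersection; linsolve below gives a point
                # satisfying both plane equations, and any remaining free
                # coordinates are set to zero to pick a concrete point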
d = self.equation(x, y, z)
e = o.equation(x, y, z)
result = list(linsolve([d, e], x, y, z))[0]
for i in (x, y, z): result = result.subs(i, 0)
return [Line3D(Point3D(result), direction_ratio=c)]
def is_coplanar(self, o):
""" Returns True if `o` is coplanar with self, else False.
Examples
========
>>> from sympy import Plane
>>> o = (0, 0, 0)
>>> p = Plane(o, (1, 1, 1))
>>> p2 = Plane(o, (2, 2, 2))
>>> p == p2
False
>>> p.is_coplanar(p2)
True
"""
if isinstance(o, Plane):
x, y, z = map(Dummy, 'xyz')
return not cancel(self.equation(x, y, z)/o.equation(x, y, z)).has(x, y, z)
if isinstance(o, Point3D):
return o in self
elif isinstance(o, LinearEntity3D):
            return all(i in self for i in o.points)
elif isinstance(o, GeometryEntity): # XXX should only be handling 2D objects now
return all(i == 0 for i in self.normal_vector[:2])
def is_parallel(self, l):
"""Is the given geometric entity parallel to the plane?
Parameters
==========
LinearEntity3D or Plane
Returns
=======
Boolean
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(1,4,6), normal_vector=(2, 4, 6))
>>> b = Plane(Point3D(3,1,3), normal_vector=(4, 8, 12))
>>> a.is_parallel(b)
True
"""
if isinstance(l, LinearEntity3D):
a = l.direction_ratio
b = self.normal_vector
c = sum([i*j for i, j in zip(a, b)])
if c == 0:
return True
else:
return False
elif isinstance(l, Plane):
a = Matrix(l.normal_vector)
b = Matrix(self.normal_vector)
if a.cross(b).is_zero_matrix:
return True
else:
return False
def is_perpendicular(self, l):
"""is the given geometric entity perpendicualar to the given plane?
Parameters
==========
LinearEntity3D or Plane
Returns
=======
Boolean
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(1,4,6), normal_vector=(2, 4, 6))
>>> b = Plane(Point3D(2, 2, 2), normal_vector=(-1, 2, -1))
>>> a.is_perpendicular(b)
True
"""
if isinstance(l, LinearEntity3D):
a = Matrix(l.direction_ratio)
b = Matrix(self.normal_vector)
if a.cross(b).is_zero_matrix:
return True
else:
return False
elif isinstance(l, Plane):
a = Matrix(l.normal_vector)
b = Matrix(self.normal_vector)
if a.dot(b) == 0:
return True
else:
return False
else:
return False
@property
def normal_vector(self):
"""Normal vector of the given plane.
Examples
========
>>> from sympy import Point3D, Plane
>>> a = Plane(Point3D(1, 1, 1), Point3D(2, 3, 4), Point3D(2, 2, 2))
>>> a.normal_vector
(-1, 2, -1)
>>> a = Plane(Point3D(1, 1, 1), normal_vector=(1, 4, 7))
>>> a.normal_vector
(1, 4, 7)
"""
return self.args[1]
@property
def p1(self):
"""The only defining point of the plane. Others can be obtained from the
arbitrary_point method.
See Also
========
sympy.geometry.point.Point3D
Examples
========
>>> from sympy import Point3D, Plane
>>> a = Plane(Point3D(1, 1, 1), Point3D(2, 3, 4), Point3D(2, 2, 2))
>>> a.p1
Point3D(1, 1, 1)
"""
return self.args[0]
def parallel_plane(self, pt):
"""
Plane parallel to the given plane and passing through the point pt.
Parameters
==========
pt: Point3D
Returns
=======
Plane
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(1, 4, 6), normal_vector=(2, 4, 6))
>>> a.parallel_plane(Point3D(2, 3, 5))
Plane(Point3D(2, 3, 5), (2, 4, 6))
"""
a = self.normal_vector
return Plane(pt, normal_vector=a)
def perpendicular_line(self, pt):
"""A line perpendicular to the given plane.
Parameters
==========
pt: Point3D
Returns
=======
Line3D
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(1,4,6), normal_vector=(2, 4, 6))
>>> a.perpendicular_line(Point3D(9, 8, 7))
Line3D(Point3D(9, 8, 7), Point3D(11, 12, 13))
"""
a = self.normal_vector
return Line3D(pt, direction_ratio=a)
def perpendicular_plane(self, *pts):
"""
        Return a perpendicular plane passing through the given points. If the
direction ratio between the points is the same as the Plane's normal
vector then, to select from the infinite number of possible planes,
a third point will be chosen on the z-axis (or the y-axis
if the normal vector is already parallel to the z-axis). If less than
two points are given they will be supplied as follows: if no point is
given then pt1 will be self.p1; if a second point is not given it will
be a point through pt1 on a line parallel to the z-axis (if the normal
is not already the z-axis, otherwise on the line parallel to the
y-axis).
Parameters
==========
pts: 0, 1 or 2 Point3D
Returns
=======
Plane
Examples
========
>>> from sympy import Plane, Point3D
>>> a, b = Point3D(0, 0, 0), Point3D(0, 1, 0)
>>> Z = (0, 0, 1)
>>> p = Plane(a, normal_vector=Z)
>>> p.perpendicular_plane(a, b)
Plane(Point3D(0, 0, 0), (1, 0, 0))
"""
if len(pts) > 2:
raise ValueError('No more than 2 pts should be provided.')
pts = list(pts)
if len(pts) == 0:
pts.append(self.p1)
if len(pts) == 1:
x, y, z = self.normal_vector
if x == y == 0:
dir = (0, 1, 0)
else:
dir = (0, 0, 1)
pts.append(pts[0] + Point3D(*dir))
p1, p2 = [Point(i, dim=3) for i in pts]
l = Line3D(p1, p2)
n = Line3D(p1, direction_ratio=self.normal_vector)
if l in n: # XXX should an error be raised instead?
# there are infinitely many perpendicular planes;
x, y, z = self.normal_vector
if x == y == 0:
# the z axis is the normal so pick a pt on the y-axis
p3 = Point3D(0, 1, 0) # case 1
else:
# else pick a pt on the z axis
p3 = Point3D(0, 0, 1) # case 2
# in case that point is already given, move it a bit
if p3 in l:
p3 *= 2 # case 3
else:
p3 = p1 + Point3D(*self.normal_vector) # case 4
return Plane(p1, p2, p3)
def projection_line(self, line):
"""Project the given line onto the plane through the normal plane
containing the line.
Parameters
==========
LinearEntity or LinearEntity3D
Returns
=======
Point3D, Line3D, Ray3D or Segment3D
Notes
=====
For the interaction between 2D and 3D lines(segments, rays), you should
convert the line to 3D by using this method. For example for finding the
intersection between a 2D and a 3D line, convert the 2D line to a 3D line
by projecting it on a required plane and then proceed to find the
intersection between those lines.
Examples
========
>>> from sympy import Plane, Line, Line3D, Point3D
>>> a = Plane(Point3D(1, 1, 1), normal_vector=(1, 1, 1))
>>> b = Line(Point3D(1, 1), Point3D(2, 2))
>>> a.projection_line(b)
Line3D(Point3D(4/3, 4/3, 1/3), Point3D(5/3, 5/3, -1/3))
>>> c = Line3D(Point3D(1, 1, 1), Point3D(2, 2, 2))
>>> a.projection_line(c)
Point3D(1, 1, 1)
"""
if not isinstance(line, (LinearEntity, LinearEntity3D)):
raise NotImplementedError('Enter a linear entity only')
a, b = self.projection(line.p1), self.projection(line.p2)
if a == b:
# projection does not imply intersection so for
# this case (line parallel to plane's normal) we
# return the projection point
return a
if isinstance(line, (Line, Line3D)):
return Line3D(a, b)
if isinstance(line, (Ray, Ray3D)):
return Ray3D(a, b)
if isinstance(line, (Segment, Segment3D)):
return Segment3D(a, b)
def projection(self, pt):
"""Project the given point onto the plane along the plane normal.
Parameters
==========
Point or Point3D
Returns
=======
Point3D
Examples
========
>>> from sympy import Plane, Point3D
>>> A = Plane(Point3D(1, 1, 2), normal_vector=(1, 1, 1))
The projection is along the normal vector direction, not the z
axis, so (1, 1) does not project to (1, 1, 2) on the plane A:
>>> b = Point3D(1, 1)
>>> A.projection(b)
Point3D(5/3, 5/3, 2/3)
>>> _ in A
True
But the point (1, 1, 2) projects to (1, 1) on the XY-plane:
>>> XY = Plane((0, 0, 0), (0, 0, 1))
>>> XY.projection((1, 1, 2))
Point3D(1, 1, 0)
"""
rv = Point(pt, dim=3)
if rv in self:
return rv
return self.intersection(Line3D(rv, rv + Point3D(self.normal_vector)))[0]
def random_point(self, seed=None):
""" Returns a random point on the Plane.
Returns
=======
Point3D
Examples
========
>>> from sympy import Plane
>>> p = Plane((1, 0, 0), normal_vector=(0, 1, 0))
>>> r = p.random_point(seed=42) # seed value is optional
>>> r.n(3)
Point3D(2.29, 0, -1.35)
The random point can be moved to lie on the circle of radius
1 centered on p1:
>>> c = p.p1 + (r - p.p1).unit
>>> c.distance(p.p1).equals(1)
True
"""
if seed is not None:
rng = random.Random(seed)
else:
rng = random
u, v = Dummy('u'), Dummy('v')
params = {
u: 2*Rational(rng.gauss(0, 1)) - 1,
v: 2*Rational(rng.gauss(0, 1)) - 1}
return self.arbitrary_point(u, v).subs(params)
def parameter_value(self, other, u, v=None):
"""Return the parameter(s) corresponding to the given point.
Examples
========
>>> from sympy import pi
>>> from sympy.geometry import Plane
>>> from sympy.abc import t, u, v
>>> p = Plane((2, 0, 0), (0, 0, 1), (0, 1, 0))
By default, the parameter value returned defines a point
that is a distance of 1 from the Plane's p1 value and
in line with the given point:
>>> on_circle = p.arbitrary_point(t).subs(t, pi/4)
>>> on_circle.distance(p.p1)
1
>>> p.parameter_value(on_circle, t)
{t: pi/4}
Moving the point twice as far from p1 does not change
the parameter value:
>>> off_circle = p.p1 + (on_circle - p.p1)*2
>>> off_circle.distance(p.p1)
2
>>> p.parameter_value(off_circle, t)
{t: pi/4}
If the 2-value parameter is desired, supply the two
parameter symbols and a replacement dictionary will
be returned:
>>> p.parameter_value(on_circle, u, v)
{u: sqrt(10)/10, v: sqrt(10)/30}
>>> p.parameter_value(off_circle, u, v)
{u: sqrt(10)/5, v: sqrt(10)/15}
"""
from sympy.geometry.point import Point
if not isinstance(other, GeometryEntity):
other = Point(other, dim=self.ambient_dimension)
if not isinstance(other, Point):
raise ValueError("other must be a point")
if other == self.p1:
return other
if isinstance(u, Symbol) and v is None:
delta = self.arbitrary_point(u) - self.p1
eq = delta - (other - self.p1).unit
sol = solve(eq, u, dict=True)
elif isinstance(u, Symbol) and isinstance(v, Symbol):
pt = self.arbitrary_point(u, v)
sol = solve(pt - other, (u, v), dict=True)
else:
raise ValueError('expecting 1 or 2 symbols')
if not sol:
raise ValueError("Given point is not on %s" % func_name(self))
return sol[0] # {t: tval} or {u: uval, v: vval}
@property
def ambient_dimension(self):
return self.p1.ambient_dimension
|
444f3ad32d52b07f2318f164accc978e2ba03ac8c741ce9cfb44250e8bb0cf0f | """Elliptical geometrical entities.
Contains
* Ellipse
* Circle
"""
from sympy.core.expr import Expr
from sympy.core.relational import Eq
from sympy.core import S, pi, sympify
from sympy.core.evalf import N
from sympy.core.parameters import global_parameters
from sympy.core.logic import fuzzy_bool
from sympy.core.numbers import Rational, oo
from sympy.core.sorting import ordered
from sympy.core.symbol import Dummy, uniquely_named_symbol, _symbol
from sympy.simplify import simplify, trigsimp
from sympy.functions.elementary.miscellaneous import sqrt, Max
from sympy.functions.elementary.trigonometric import cos, sin
from sympy.functions.special.elliptic_integrals import elliptic_e
from .entity import GeometryEntity, GeometrySet
from .exceptions import GeometryError
from .line import Line, Segment, Ray2D, Segment2D, Line2D, LinearEntity3D
from .point import Point, Point2D, Point3D
from .util import idiff, find
from sympy.polys import DomainError, Poly, PolynomialError
from sympy.polys.polyutils import _not_a_coeff, _nsort
from sympy.solvers import solve
from sympy.solvers.solveset import linear_coeffs
from sympy.utilities.misc import filldedent, func_name
from mpmath.libmp.libmpf import prec_to_dps
import random
class Ellipse(GeometrySet):
"""An elliptical GeometryEntity.
Parameters
==========
center : Point, optional
Default value is Point(0, 0)
hradius : number or SymPy expression, optional
vradius : number or SymPy expression, optional
eccentricity : number or SymPy expression, optional
Two of `hradius`, `vradius` and `eccentricity` must be supplied to
create an Ellipse. The third is derived from the two supplied.
Attributes
==========
center
hradius
vradius
area
circumference
eccentricity
periapsis
apoapsis
focus_distance
foci
Raises
======
GeometryError
When `hradius`, `vradius` and `eccentricity` are incorrectly supplied
as parameters.
TypeError
When `center` is not a Point.
See Also
========
Circle
Notes
-----
Constructed from a center and two radii, the first being the horizontal
radius (along the x-axis) and the second being the vertical radius (along
the y-axis).
    When symbolic values for hradius and vradius are used, any calculation that
refers to the foci or the major or minor axis will assume that the ellipse
has its major radius on the x-axis. If this is not true then a manual
rotation is necessary.
Examples
========
>>> from sympy import Ellipse, Point, Rational
>>> e1 = Ellipse(Point(0, 0), 5, 1)
>>> e1.hradius, e1.vradius
(5, 1)
>>> e2 = Ellipse(Point(3, 1), hradius=3, eccentricity=Rational(4, 5))
>>> e2
Ellipse(Point2D(3, 1), 3, 9/5)
"""
def __contains__(self, o):
if isinstance(o, Point):
x = Dummy('x', real=True)
y = Dummy('y', real=True)
res = self.equation(x, y).subs({x: o.x, y: o.y})
return trigsimp(simplify(res)) is S.Zero
elif isinstance(o, Ellipse):
return self == o
return False
def __eq__(self, o):
"""Is the other GeometryEntity the same as this ellipse?"""
return isinstance(o, Ellipse) and (self.center == o.center and
self.hradius == o.hradius and
self.vradius == o.vradius)
def __hash__(self):
return super().__hash__()
def __new__(
cls, center=None, hradius=None, vradius=None, eccentricity=None, **kwargs):
hradius = sympify(hradius)
vradius = sympify(vradius)
eccentricity = sympify(eccentricity)
if center is None:
center = Point(0, 0)
else:
center = Point(center, dim=2)
if len(center) != 2:
raise ValueError('The center of "{}" must be a two dimensional point'.format(cls))
if len(list(filter(lambda x: x is not None, (hradius, vradius, eccentricity)))) != 2:
raise ValueError(filldedent('''
Exactly two arguments of "hradius", "vradius", and
"eccentricity" must not be None.'''))
if eccentricity is not None:
if eccentricity.is_negative:
raise GeometryError("Eccentricity of ellipse/circle should lie between [0, 1)")
elif hradius is None:
hradius = vradius / sqrt(1 - eccentricity**2)
elif vradius is None:
vradius = hradius * sqrt(1 - eccentricity**2)
if hradius == vradius:
return Circle(center, hradius, **kwargs)
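        # a zero radius collapses the ellipse to a segment along the
        # non-degenerate axis; radii that are known to be non-real are
        # rejected below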
if S.Zero in (hradius, vradius):
return Segment(Point(center[0] - hradius, center[1] - vradius), Point(center[0] + hradius, center[1] + vradius))
if hradius.is_real is False or vradius.is_real is False:
raise GeometryError("Invalid value encountered when computing hradius / vradius.")
return GeometryEntity.__new__(cls, center, hradius, vradius, **kwargs)
def _svg(self, scale_factor=1., fill_color="#66cc99"):
"""Returns SVG ellipse element for the Ellipse.
Parameters
==========
scale_factor : float
Multiplication factor for the SVG stroke-width. Default is 1.
fill_color : str, optional
Hex string for fill color. Default is "#66cc99".
"""
c = N(self.center)
h, v = N(self.hradius), N(self.vradius)
return (
'<ellipse fill="{1}" stroke="#555555" '
'stroke-width="{0}" opacity="0.6" cx="{2}" cy="{3}" rx="{4}" ry="{5}"/>'
).format(2. * scale_factor, fill_color, c.x, c.y, h, v)
@property
def ambient_dimension(self):
return 2
@property
def apoapsis(self):
"""The apoapsis of the ellipse.
The greatest distance between the focus and the contour.
Returns
=======
apoapsis : number
See Also
========
periapsis : Returns shortest distance between foci and contour
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.apoapsis
2*sqrt(2) + 3
"""
return self.major * (1 + self.eccentricity)
def arbitrary_point(self, parameter='t'):
"""A parameterized point on the ellipse.
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
arbitrary_point : Point
Raises
======
ValueError
            When ``parameter`` already appears among the free symbols of the Ellipse.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(0, 0), 3, 2)
>>> e1.arbitrary_point()
Point2D(3*cos(t), 2*sin(t))
"""
t = _symbol(parameter, real=True)
if t.name in (f.name for f in self.free_symbols):
raise ValueError(filldedent('Symbol %s already appears in object '
'and cannot be used as a parameter.' % t.name))
return Point(self.center.x + self.hradius*cos(t),
self.center.y + self.vradius*sin(t))
@property
def area(self):
"""The area of the ellipse.
Returns
=======
area : number
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.area
3*pi
"""
return simplify(S.Pi * self.hradius * self.vradius)
@property
def bounds(self):
"""Return a tuple (xmin, ymin, xmax, ymax) representing the bounding
rectangle for the geometric figure.
"""
h, v = self.hradius, self.vradius
return (self.center.x - h, self.center.y - v, self.center.x + h, self.center.y + v)
@property
def center(self):
"""The center of the ellipse.
Returns
=======
center : number
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.center
Point2D(0, 0)
"""
return self.args[0]
@property
def circumference(self):
"""The circumference of the ellipse.
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.circumference
12*elliptic_e(8/9)
"""
if self.eccentricity == 1:
# degenerate
return 4*self.major
elif self.eccentricity == 0:
# circle
return 2*pi*self.hradius
else:
return 4*self.major*elliptic_e(self.eccentricity**2)
@property
def eccentricity(self):
"""The eccentricity of the ellipse.
Returns
=======
eccentricity : number
Examples
========
>>> from sympy import Point, Ellipse, sqrt
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, sqrt(2))
>>> e1.eccentricity
sqrt(7)/3
"""
return self.focus_distance / self.major
def encloses_point(self, p):
"""
Return True if p is enclosed by (is inside of) self.
Notes
-----
Being on the border of self is considered False.
Parameters
==========
p : Point
Returns
=======
encloses_point : True, False or None
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Ellipse, S
>>> from sympy.abc import t
>>> e = Ellipse((0, 0), 3, 2)
>>> e.encloses_point((0, 0))
True
>>> e.encloses_point(e.arbitrary_point(t).subs(t, S.Half))
False
>>> e.encloses_point((4, 0))
False
"""
p = Point(p, dim=2)
if p in self:
return False
if len(self.foci) == 2:
# if the combined distance from the foci to p (h1 + h2) is less
# than the combined distance from the foci to the minor axis
# (which is the same as the major axis length) then p is inside
# the ellipse
h1, h2 = [f.distance(p) for f in self.foci]
test = 2*self.major - (h1 + h2)
else:
test = self.radius - self.center.distance(p)
return fuzzy_bool(test.is_positive)
def equation(self, x='x', y='y', _slope=None):
"""
Returns the equation of an ellipse aligned with the x and y axes;
when slope is given, the equation returned corresponds to an ellipse
with a major axis having that slope.
Parameters
==========
x : str, optional
Label for the x-axis. Default value is 'x'.
y : str, optional
Label for the y-axis. Default value is 'y'.
_slope : Expr, optional
The slope of the major axis. Ignored when 'None'.
Returns
=======
equation : SymPy expression
See Also
========
arbitrary_point : Returns parameterized point on ellipse
Examples
========
>>> from sympy import Point, Ellipse, pi
>>> from sympy.abc import x, y
>>> e1 = Ellipse(Point(1, 0), 3, 2)
>>> eq1 = e1.equation(x, y); eq1
y**2/4 + (x/3 - 1/3)**2 - 1
>>> eq2 = e1.equation(x, y, _slope=1); eq2
(-x + y + 1)**2/8 + (x + y - 1)**2/18 - 1
A point on e1 satisfies eq1. Let's use one on the x-axis:
>>> p1 = e1.center + Point(e1.major, 0)
>>> assert eq1.subs(x, p1.x).subs(y, p1.y) == 0
When rotated the same as the rotated ellipse, about the center
point of the ellipse, it will satisfy the rotated ellipse's
equation, too:
>>> r1 = p1.rotate(pi/4, e1.center)
>>> assert eq2.subs(x, r1.x).subs(y, r1.y) == 0
References
==========
.. [1] https://math.stackexchange.com/questions/108270/what-is-the-equation-of-an-ellipse-that-is-not-aligned-with-the-axis
.. [2] https://en.wikipedia.org/wiki/Ellipse#Equation_of_a_shifted_ellipse
"""
x = _symbol(x, real=True)
y = _symbol(y, real=True)
dx = x - self.center.x
dy = y - self.center.y
if _slope is not None:
L = (dy - _slope*dx)**2
l = (_slope*dy + dx)**2
h = 1 + _slope**2
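            # (dy - m*dx)**2/(1 + m**2) is the squared distance from (x, y)
            # to the major-axis line and (m*dy + dx)**2/(1 + m**2) the
            # squared distance to the minor-axis line (m = _slope); dividing
            # by minor**2 and major**2 respectively gives the rotated
            # ellipse equation returned below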
b = h*self.major**2
a = h*self.minor**2
return l/b + L/a - 1
else:
t1 = (dx/self.hradius)**2
t2 = (dy/self.vradius)**2
return t1 + t2 - 1
def evolute(self, x='x', y='y'):
"""The equation of evolute of the ellipse.
Parameters
==========
x : str, optional
Label for the x-axis. Default value is 'x'.
y : str, optional
Label for the y-axis. Default value is 'y'.
Returns
=======
equation : SymPy expression
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(1, 0), 3, 2)
>>> e1.evolute()
2**(2/3)*y**(2/3) + (3*x - 3)**(2/3) - 5**(2/3)
"""
if len(self.args) != 3:
raise NotImplementedError('Evolute of arbitrary Ellipse is not supported.')
x = _symbol(x, real=True)
y = _symbol(y, real=True)
t1 = (self.hradius*(x - self.center.x))**Rational(2, 3)
t2 = (self.vradius*(y - self.center.y))**Rational(2, 3)
return t1 + t2 - (self.hradius**2 - self.vradius**2)**Rational(2, 3)
@property
def foci(self):
"""The foci of the ellipse.
Notes
-----
The foci can only be calculated if the major/minor axes are known.
Raises
======
ValueError
When the major and minor axis cannot be determined.
See Also
========
sympy.geometry.point.Point
focus_distance : Returns the distance between focus and center
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.foci
(Point2D(-2*sqrt(2), 0), Point2D(2*sqrt(2), 0))
"""
c = self.center
hr, vr = self.hradius, self.vradius
if hr == vr:
return (c, c)
# calculate focus distance manually, since focus_distance calls this
# routine
fd = sqrt(self.major**2 - self.minor**2)
if hr == self.minor:
# foci on the y-axis
return (c + Point(0, -fd), c + Point(0, fd))
elif hr == self.major:
# foci on the x-axis
return (c + Point(-fd, 0), c + Point(fd, 0))
@property
def focus_distance(self):
"""The focal distance of the ellipse.
The distance between the center and one focus.
Returns
=======
focus_distance : number
See Also
========
foci
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.focus_distance
2*sqrt(2)
"""
return Point.distance(self.center, self.foci[0])
@property
def hradius(self):
"""The horizontal radius of the ellipse.
Returns
=======
hradius : number
See Also
========
vradius, major, minor
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.hradius
3
"""
return self.args[1]
def intersection(self, o):
"""The intersection of this ellipse and another geometrical entity
`o`.
Parameters
==========
o : GeometryEntity
Returns
=======
intersection : list of GeometryEntity objects
Notes
-----
Currently supports intersections with Point, Line, Segment, Ray,
Circle and Ellipse types.
See Also
========
sympy.geometry.entity.GeometryEntity
Examples
========
>>> from sympy import Ellipse, Point, Line
>>> e = Ellipse(Point(0, 0), 5, 7)
>>> e.intersection(Point(0, 0))
[]
>>> e.intersection(Point(5, 0))
[Point2D(5, 0)]
>>> e.intersection(Line(Point(0,0), Point(0, 1)))
[Point2D(0, -7), Point2D(0, 7)]
>>> e.intersection(Line(Point(5,0), Point(5, 1)))
[Point2D(5, 0)]
>>> e.intersection(Line(Point(6,0), Point(6, 1)))
[]
>>> e = Ellipse(Point(-1, 0), 4, 3)
>>> e.intersection(Ellipse(Point(1, 0), 4, 3))
[Point2D(0, -3*sqrt(15)/4), Point2D(0, 3*sqrt(15)/4)]
>>> e.intersection(Ellipse(Point(5, 0), 4, 3))
[Point2D(2, -3*sqrt(7)/4), Point2D(2, 3*sqrt(7)/4)]
>>> e.intersection(Ellipse(Point(100500, 0), 4, 3))
[]
>>> e.intersection(Ellipse(Point(0, 0), 3, 4))
[Point2D(3, 0), Point2D(-363/175, -48*sqrt(111)/175), Point2D(-363/175, 48*sqrt(111)/175)]
>>> e.intersection(Ellipse(Point(-1, 0), 3, 4))
[Point2D(-17/5, -12/5), Point2D(-17/5, 12/5), Point2D(7/5, -12/5), Point2D(7/5, 12/5)]
"""
# TODO: Replace solve with nonlinsolve, when nonlinsolve will be able to solve in real domain
x = Dummy('x', real=True)
y = Dummy('y', real=True)
if isinstance(o, Point):
if o in self:
return [o]
else:
return []
elif isinstance(o, (Segment2D, Ray2D)):
ellipse_equation = self.equation(x, y)
result = solve([ellipse_equation, Line(o.points[0], o.points[1]).equation(x, y)], [x, y])
return list(ordered([Point(i) for i in result if i in o]))
elif isinstance(o, Polygon):
return o.intersection(self)
elif isinstance(o, (Ellipse, Line2D)):
if o == self:
return self
else:
ellipse_equation = self.equation(x, y)
return list(ordered([Point(i) for i in solve([ellipse_equation, o.equation(x, y)], [x, y])]))
elif isinstance(o, LinearEntity3D):
raise TypeError('Entity must be two dimensional, not three dimensional')
else:
raise TypeError('Intersection not handled for %s' % func_name(o))
def is_tangent(self, o):
"""Is `o` tangent to the ellipse?
Parameters
==========
o : GeometryEntity
An Ellipse, LinearEntity or Polygon
Raises
======
NotImplementedError
When the wrong type of argument is supplied.
Returns
=======
is_tangent: boolean
True if o is tangent to the ellipse, False otherwise.
See Also
========
tangent_lines
Examples
========
>>> from sympy import Point, Ellipse, Line
>>> p0, p1, p2 = Point(0, 0), Point(3, 0), Point(3, 3)
>>> e1 = Ellipse(p0, 3, 2)
>>> l1 = Line(p1, p2)
>>> e1.is_tangent(l1)
True
"""
if isinstance(o, Point2D):
return False
elif isinstance(o, Ellipse):
intersect = self.intersection(o)
if isinstance(intersect, Ellipse):
return True
elif intersect:
return all((self.tangent_lines(i)[0]).equals(o.tangent_lines(i)[0]) for i in intersect)
else:
return False
elif isinstance(o, Line2D):
hit = self.intersection(o)
if not hit:
return False
if len(hit) == 1:
return True
# might return None if it can't decide
return hit[0].equals(hit[1])
elif isinstance(o, Ray2D):
intersect = self.intersection(o)
if len(intersect) == 1:
return intersect[0] != o.source and not self.encloses_point(o.source)
else:
return False
elif isinstance(o, (Segment2D, Polygon)):
all_tangents = False
segments = o.sides if isinstance(o, Polygon) else [o]
for segment in segments:
intersect = self.intersection(segment)
if len(intersect) == 1:
if not any(intersect[0] in i for i in segment.points) \
and not any(self.encloses_point(i) for i in segment.points):
all_tangents = True
continue
else:
return False
else:
return all_tangents
return all_tangents
elif isinstance(o, (LinearEntity3D, Point3D)):
raise TypeError('Entity must be two dimensional, not three dimensional')
else:
raise TypeError('Is_tangent not handled for %s' % func_name(o))
@property
def major(self):
"""Longer axis of the ellipse (if it can be determined) else hradius.
Returns
=======
major : number or expression
See Also
========
hradius, vradius, minor
Examples
========
>>> from sympy import Point, Ellipse, Symbol
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.major
3
>>> a = Symbol('a')
>>> b = Symbol('b')
>>> Ellipse(p1, a, b).major
a
>>> Ellipse(p1, b, a).major
b
>>> m = Symbol('m')
>>> M = m + 1
>>> Ellipse(p1, m, M).major
m + 1
"""
ab = self.args[1:3]
if len(ab) == 1:
return ab[0]
a, b = ab
o = b - a < 0
if o == True:
return a
elif o == False:
return b
return self.hradius
@property
def minor(self):
"""Shorter axis of the ellipse (if it can be determined) else vradius.
Returns
=======
minor : number or expression
See Also
========
hradius, vradius, major
Examples
========
>>> from sympy import Point, Ellipse, Symbol
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.minor
1
>>> a = Symbol('a')
>>> b = Symbol('b')
>>> Ellipse(p1, a, b).minor
b
>>> Ellipse(p1, b, a).minor
a
>>> m = Symbol('m')
>>> M = m + 1
>>> Ellipse(p1, m, M).minor
m
"""
ab = self.args[1:3]
if len(ab) == 1:
return ab[0]
a, b = ab
o = a - b < 0
if o == True:
return a
elif o == False:
return b
return self.vradius
def normal_lines(self, p, prec=None):
"""Normal lines between `p` and the ellipse.
Parameters
==========
p : Point
Returns
=======
normal_lines : list with 1, 2 or 4 Lines
Examples
========
>>> from sympy import Point, Ellipse
>>> e = Ellipse((0, 0), 2, 3)
>>> c = e.center
>>> e.normal_lines(c + Point(1, 0))
[Line2D(Point2D(0, 0), Point2D(1, 0))]
>>> e.normal_lines(c)
[Line2D(Point2D(0, 0), Point2D(0, 1)), Line2D(Point2D(0, 0), Point2D(1, 0))]
Off-axis points require the solution of a quartic equation. This
often leads to very large expressions that may be of little practical
use. An approximate solution of `prec` digits can be obtained by
passing in the desired value:
>>> e.normal_lines((3, 3), prec=2)
[Line2D(Point2D(-0.81, -2.7), Point2D(0.19, -1.2)),
Line2D(Point2D(1.5, -2.0), Point2D(2.5, -2.7))]
Whereas the above solution has an operation count of 12, the exact
solution has an operation count of 2020.
"""
p = Point(p, dim=2)
# XXX change True to something like self.angle == 0 if the arbitrarily
# rotated ellipse is introduced.
# https://github.com/sympy/sympy/issues/2815)
if True:
rv = []
if p.x == self.center.x:
rv.append(Line(self.center, slope=oo))
if p.y == self.center.y:
rv.append(Line(self.center, slope=0))
if rv:
# at these special orientations of p either 1 or 2 normals
# exist and we are done
return rv
# find the 4 normal points and construct lines through them with
# the corresponding slope
x, y = Dummy('x', real=True), Dummy('y', real=True)
eq = self.equation(x, y)
dydx = idiff(eq, y, x)
norm = -1/dydx
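            # the normal at (x, y) has slope -1/(dy/dx); a normal line
            # through p must have the same slope as the line joining p to
            # (x, y), which is the equation solved for below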
slope = Line(p, (x, y)).slope
seq = slope - norm
# TODO: Replace solve with solveset, when this line is tested
yis = solve(seq, y)[0]
xeq = eq.subs(y, yis).as_numer_denom()[0].expand()
if len(xeq.free_symbols) == 1:
try:
# this is so much faster, it's worth a try
xsol = Poly(xeq, x).real_roots()
except (DomainError, PolynomialError, NotImplementedError):
# TODO: Replace solve with solveset, when these lines are tested
xsol = _nsort(solve(xeq, x), separated=True)[0]
points = [Point(i, solve(eq.subs(x, i), y)[0]) for i in xsol]
else:
raise NotImplementedError(
'intersections for the general ellipse are not supported')
slopes = [norm.subs(zip((x, y), pt.args)) for pt in points]
if prec is not None:
points = [pt.n(prec) for pt in points]
slopes = [i if _not_a_coeff(i) else i.n(prec) for i in slopes]
return [Line(pt, slope=s) for pt, s in zip(points, slopes)]
@property
def periapsis(self):
"""The periapsis of the ellipse.
The shortest distance between the focus and the contour.
Returns
=======
periapsis : number
See Also
========
apoapsis : Returns greatest distance between focus and contour
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.periapsis
3 - 2*sqrt(2)
"""
return self.major * (1 - self.eccentricity)
@property
def semilatus_rectum(self):
"""
Calculates the semi-latus rectum of the Ellipse.
        The semi-latus rectum is defined as one half of the chord through a
        focus that is parallel to the directrix of the conic section.
Returns
=======
semilatus_rectum : number
See Also
========
apoapsis : Returns greatest distance between focus and contour
periapsis : The shortest distance between the focus and the contour
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.semilatus_rectum
1/3
References
==========
.. [1] http://mathworld.wolfram.com/SemilatusRectum.html
.. [2] https://en.wikipedia.org/wiki/Ellipse#Semi-latus_rectum
"""
return self.major * (1 - self.eccentricity ** 2)
def auxiliary_circle(self):
"""Returns a Circle whose diameter is the major axis of the ellipse.
Examples
========
>>> from sympy import Ellipse, Point, symbols
>>> c = Point(1, 2)
>>> Ellipse(c, 8, 7).auxiliary_circle()
Circle(Point2D(1, 2), 8)
>>> a, b = symbols('a b')
>>> Ellipse(c, a, b).auxiliary_circle()
Circle(Point2D(1, 2), Max(a, b))
"""
return Circle(self.center, Max(self.hradius, self.vradius))
def director_circle(self):
"""
Returns a Circle consisting of all points where two perpendicular
tangent lines to the ellipse cross each other.
Returns
=======
Circle
A director circle returned as a geometric object.
Examples
========
>>> from sympy import Ellipse, Point, symbols
>>> c = Point(3,8)
>>> Ellipse(c, 7, 9).director_circle()
Circle(Point2D(3, 8), sqrt(130))
>>> a, b = symbols('a b')
>>> Ellipse(c, a, b).director_circle()
Circle(Point2D(3, 8), sqrt(a**2 + b**2))
References
==========
.. [1] https://en.wikipedia.org/wiki/Director_circle
"""
return Circle(self.center, sqrt(self.hradius**2 + self.vradius**2))
def plot_interval(self, parameter='t'):
"""The plot interval for the default geometric plot of the Ellipse.
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
plot_interval : list
[parameter, lower_bound, upper_bound]
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(0, 0), 3, 2)
>>> e1.plot_interval()
[t, -pi, pi]
"""
t = _symbol(parameter, real=True)
return [t, -S.Pi, S.Pi]
def random_point(self, seed=None):
"""A random point on the ellipse.
Returns
=======
point : Point
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(0, 0), 3, 2)
>>> e1.random_point() # gives some random point
Point2D(...)
>>> p1 = e1.random_point(seed=0); p1.n(2)
Point2D(2.1, 1.4)
Notes
=====
When creating a random point, one may simply replace the
parameter with a random number. When doing so, however, the
random number should be made a Rational or else the point
may not test as being in the ellipse:
>>> from sympy.abc import t
>>> from sympy import Rational
>>> arb = e1.arbitrary_point(t); arb
Point2D(3*cos(t), 2*sin(t))
>>> arb.subs(t, .1) in e1
False
>>> arb.subs(t, Rational(.1)) in e1
True
>>> arb.subs(t, Rational('.1')) in e1
True
See Also
========
sympy.geometry.point.Point
arbitrary_point : Returns parameterized point on ellipse
"""
t = _symbol('t', real=True)
x, y = self.arbitrary_point(t).args
# get a random value in [-1, 1) corresponding to cos(t)
# and confirm that it will test as being in the ellipse
if seed is not None:
rng = random.Random(seed)
else:
rng = random
# simplify this now or else the Float will turn s into a Float
r = Rational(rng.random())
c = 2*r - 1
s = sqrt(1 - c**2)
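        # c and s satisfy c**2 + s**2 == 1 exactly, so substituting them for
        # cos(t) and sin(t) gives a point that lies exactly on the ellipse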
return Point(x.subs(cos(t), c), y.subs(sin(t), s))
def reflect(self, line):
"""Override GeometryEntity.reflect since the radius
is not a GeometryEntity.
Examples
========
>>> from sympy import Circle, Line
>>> Circle((0, 1), 1).reflect(Line((0, 0), (1, 1)))
Circle(Point2D(1, 0), -1)
>>> from sympy import Ellipse, Line, Point
>>> Ellipse(Point(3, 4), 1, 3).reflect(Line(Point(0, -4), Point(5, 0)))
Traceback (most recent call last):
...
NotImplementedError:
General Ellipse is not supported but the equation of the reflected
Ellipse is given by the zeros of: f(x, y) = (9*x/41 + 40*y/41 +
37/41)**2 + (40*x/123 - 3*y/41 - 364/123)**2 - 1
Notes
=====
Until the general ellipse (with no axis parallel to the x-axis) is
        supported, a NotImplementedError is raised and the equation whose
zeros define the rotated ellipse is given.
"""
if line.slope in (0, oo):
c = self.center
c = c.reflect(line)
return self.func(c, -self.hradius, self.vradius)
else:
x, y = [uniquely_named_symbol(
name, (self, line), modify=lambda s: '_' + s, real=True)
for name in 'xy']
expr = self.equation(x, y)
p = Point(x, y).reflect(line)
result = expr.subs(zip((x, y), p.args
), simultaneous=True)
raise NotImplementedError(filldedent(
'General Ellipse is not supported but the equation '
'of the reflected Ellipse is given by the zeros of: ' +
"f(%s, %s) = %s" % (str(x), str(y), str(result))))
def rotate(self, angle=0, pt=None):
"""Rotate ``angle`` radians counterclockwise about Point ``pt``.
Note: since the general ellipse is not supported, only rotations that
are integer multiples of pi/2 are allowed.
Examples
========
>>> from sympy import Ellipse, pi
>>> Ellipse((1, 0), 2, 1).rotate(pi/2)
Ellipse(Point2D(0, 1), 1, 2)
>>> Ellipse((1, 0), 2, 1).rotate(pi)
Ellipse(Point2D(-1, 0), 2, 1)
"""
if self.hradius == self.vradius:
return self.func(self.center.rotate(angle, pt), self.hradius)
if (angle/S.Pi).is_integer:
return super().rotate(angle, pt)
if (2*angle/S.Pi).is_integer:
return self.func(self.center.rotate(angle, pt), self.vradius, self.hradius)
        # XXX see https://github.com/sympy/sympy/issues/2815 for general ellipses
raise NotImplementedError('Only rotations of pi/2 are currently supported for Ellipse.')
def scale(self, x=1, y=1, pt=None):
"""Override GeometryEntity.scale since it is the major and minor
axes which must be scaled and they are not GeometryEntities.
Examples
========
>>> from sympy import Ellipse
>>> Ellipse((0, 0), 2, 1).scale(2, 4)
Circle(Point2D(0, 0), 4)
>>> Ellipse((0, 0), 2, 1).scale(2)
Ellipse(Point2D(0, 0), 4, 1)
"""
c = self.center
if pt:
pt = Point(pt, dim=2)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
h = self.hradius
v = self.vradius
return self.func(c.scale(x, y), hradius=h*x, vradius=v*y)
def tangent_lines(self, p):
"""Tangent lines between `p` and the ellipse.
If `p` is on the ellipse, returns the tangent line through point `p`.
Otherwise, returns the tangent line(s) from `p` to the ellipse, or
        an empty list if no tangent line is possible (e.g., `p` inside ellipse).
Parameters
==========
p : Point
Returns
=======
tangent_lines : list with 1 or 2 Lines
Raises
======
NotImplementedError
Can only find tangent lines for a point, `p`, on the ellipse.
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Line
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(0, 0), 3, 2)
>>> e1.tangent_lines(Point(3, 0))
[Line2D(Point2D(3, 0), Point2D(3, -12))]
"""
p = Point(p, dim=2)
if self.encloses_point(p):
return []
if p in self:
delta = self.center - p
rise = (self.vradius**2)*delta.x
run = -(self.hradius**2)*delta.y
p2 = Point(simplify(p.x + run),
simplify(p.y + rise))
return [Line(p, p2)]
else:
if len(self.foci) == 2:
f1, f2 = self.foci
maj = self.hradius
test = (2*maj -
Point.distance(f1, p) -
Point.distance(f2, p))
else:
test = self.radius - Point.distance(self.center, p)
if test.is_number and test.is_positive:
return []
# else p is outside the ellipse or we can't tell. In case of the
# latter, the solutions returned will only be valid if
# the point is not inside the ellipse; if it is, nan will result.
x, y = Dummy('x'), Dummy('y')
eq = self.equation(x, y)
dydx = idiff(eq, y, x)
slope = Line(p, Point(x, y)).slope
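            # a tangent point (x, y) is one where the line from p to (x, y)
            # has the same slope as the ellipse there, i.e. the slope equals
            # dy/dx obtained by implicit differentiation of the equation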
# TODO: Replace solve with solveset, when this line is tested
tangent_points = solve([slope - dydx, eq], [x, y])
# handle horizontal and vertical tangent lines
if len(tangent_points) == 1:
                if tangent_points[0][0] == p.x or tangent_points[0][1] == p.y:
return [Line(p, p + Point(1, 0)), Line(p, p + Point(0, 1))]
else:
return [Line(p, p + Point(0, 1)), Line(p, tangent_points[0])]
# others
return [Line(p, tangent_points[0]), Line(p, tangent_points[1])]
@property
def vradius(self):
"""The vertical radius of the ellipse.
Returns
=======
vradius : number
See Also
========
hradius, major, minor
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.vradius
1
"""
return self.args[2]
def second_moment_of_area(self, point=None):
"""Returns the second moment and product moment area of an ellipse.
Parameters
==========
point : Point, two-tuple of sympifiable objects, or None(default=None)
point is the point about which second moment of area is to be found.
If "point=None" it will be calculated about the axis passing through the
centroid of the ellipse.
Returns
=======
I_xx, I_yy, I_xy : number or SymPy expression
                           I_xx, I_yy are the second moments of area of the ellipse.
                           I_xy is the product moment of area of the ellipse.
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.second_moment_of_area()
(3*pi/4, 27*pi/4, 0)
References
==========
.. [1] https://en.wikipedia.org/wiki/List_of_second_moments_of_area
"""
I_xx = (S.Pi*(self.hradius)*(self.vradius**3))/4
I_yy = (S.Pi*(self.hradius**3)*(self.vradius))/4
I_xy = 0
if point is None:
return I_xx, I_yy, I_xy
# parallel axis theorem
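        # I' = I_c + A*d**2 for each axis (and A*d_x*d_y for the product moment),
        # where d is the distance between the centroidal axis and the new axis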
I_xx = I_xx + self.area*((point[1] - self.center.y)**2)
I_yy = I_yy + self.area*((point[0] - self.center.x)**2)
I_xy = I_xy + self.area*(point[0] - self.center.x)*(point[1] - self.center.y)
return I_xx, I_yy, I_xy
def polar_second_moment_of_area(self):
"""Returns the polar second moment of area of an Ellipse
It is a constituent of the second moment of area, linked through
the perpendicular axis theorem. While the planar second moment of
area describes an object's resistance to deflection (bending) when
subjected to a force applied to a plane parallel to the central
axis, the polar second moment of area describes an object's
resistance to deflection when subjected to a moment applied in a
plane perpendicular to the object's central axis (i.e. parallel to
        the cross-section).
Examples
========
>>> from sympy import symbols, Circle, Ellipse
>>> c = Circle((5, 5), 4)
>>> c.polar_second_moment_of_area()
128*pi
>>> a, b = symbols('a, b')
>>> e = Ellipse((0, 0), a, b)
>>> e.polar_second_moment_of_area()
pi*a**3*b/4 + pi*a*b**3/4
References
==========
.. [1] https://en.wikipedia.org/wiki/Polar_moment_of_inertia
"""
second_moment = self.second_moment_of_area()
return second_moment[0] + second_moment[1]
def section_modulus(self, point=None):
"""Returns a tuple with the section modulus of an ellipse
Section modulus is a geometric property of an ellipse defined as the
ratio of second moment of area to the distance of the extreme end of
the ellipse from the centroidal axis.
Parameters
==========
point : Point, two-tuple of sympifyable objects, or None(default=None)
point is the point at which section modulus is to be found.
If "point=None" section modulus will be calculated for the
point farthest from the centroidal axis of the ellipse.
Returns
=======
S_x, S_y: numbers or SymPy expressions
S_x is the section modulus with respect to the x-axis
S_y is the section modulus with respect to the y-axis
A negative sign indicates that the section modulus is
determined for a point below the centroidal axis.
Examples
========
>>> from sympy import Symbol, Ellipse, Circle, Point2D
>>> d = Symbol('d', positive=True)
>>> c = Circle((0, 0), d/2)
>>> c.section_modulus()
(pi*d**3/32, pi*d**3/32)
>>> e = Ellipse(Point2D(0, 0), 2, 4)
>>> e.section_modulus()
(8*pi, 4*pi)
>>> e.section_modulus((2, 2))
(16*pi, 4*pi)
References
==========
.. [1] https://en.wikipedia.org/wiki/Section_modulus
"""
x_c, y_c = self.center
if point is None:
# taking x and y as maximum distances from centroid
x_min, y_min, x_max, y_max = self.bounds
y = max(y_c - y_min, y_max - y_c)
x = max(x_c - x_min, x_max - x_c)
else:
# taking x and y as distances of the given point from the center
point = Point2D(point)
y = point.y - y_c
x = point.x - x_c
second_moment = self.second_moment_of_area()
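        # section modulus: S = I / c, where c is the distance from the
        # centroidal axis to the extreme fibre (or to the given point)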
S_x = second_moment[0]/y
S_y = second_moment[1]/x
return S_x, S_y
class Circle(Ellipse):
"""A circle in space.
Constructed simply from a center and a radius, from three
non-collinear points, or the equation of a circle.
Parameters
==========
center : Point
radius : number or SymPy expression
points : sequence of three Points
equation : equation of a circle
Attributes
==========
radius (synonymous with hradius, vradius, major and minor)
circumference
equation
Raises
======
GeometryError
When the given equation is not that of a circle.
When trying to construct circle from incorrect parameters.
See Also
========
Ellipse, sympy.geometry.point.Point
Examples
========
>>> from sympy import Eq
>>> from sympy.geometry import Point, Circle
>>> from sympy.abc import x, y, a, b
A circle constructed from a center and radius:
>>> c1 = Circle(Point(0, 0), 5)
>>> c1.hradius, c1.vradius, c1.radius
(5, 5, 5)
A circle constructed from three points:
>>> c2 = Circle(Point(0, 0), Point(1, 1), Point(1, 0))
>>> c2.hradius, c2.vradius, c2.radius, c2.center
(sqrt(2)/2, sqrt(2)/2, sqrt(2)/2, Point2D(1/2, 1/2))
A circle can be constructed from an equation in the form
    `a*x**2 + b*y**2 + g*x + h*y + c = 0`, too:
>>> Circle(x**2 + y**2 - 25)
Circle(Point2D(0, 0), 5)
If the variables corresponding to x and y are named something
else, their name or symbol can be supplied:
>>> Circle(Eq(a**2 + b**2, 25), x='a', y=b)
Circle(Point2D(0, 0), 5)
"""
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', global_parameters.evaluate)
if len(args) == 1 and isinstance(args[0], (Expr, Eq)):
x = kwargs.get('x', 'x')
y = kwargs.get('y', 'y')
equation = args[0]
if isinstance(equation, Eq):
equation = equation.lhs - equation.rhs
x = find(x, equation)
y = find(y, equation)
try:
a, b, c, d, e = linear_coeffs(equation, x**2, y**2, x, y)
except ValueError:
raise GeometryError("The given equation is not that of a circle.")
if S.Zero in (a, b) or a != b:
raise GeometryError("The given equation is not that of a circle.")
center_x = -c/a/2
center_y = -d/b/2
            r2 = (center_x**2) + (center_y**2) - e/a
return Circle((center_x, center_y), sqrt(r2), evaluate=evaluate)
else:
c, r = None, None
if len(args) == 3:
args = [Point(a, dim=2, evaluate=evaluate) for a in args]
t = Triangle(*args)
if not isinstance(t, Triangle):
return t
c = t.circumcenter
r = t.circumradius
elif len(args) == 2:
# Assume (center, radius) pair
c = Point(args[0], dim=2, evaluate=evaluate)
r = args[1]
# this will prohibit imaginary radius
try:
r = Point(r, 0, evaluate=evaluate).x
except ValueError:
raise GeometryError("Circle with imaginary radius is not permitted")
if not (c is None or r is None):
if r == 0:
return c
return GeometryEntity.__new__(cls, c, r, **kwargs)
raise GeometryError("Circle.__new__ received unknown arguments")
def _eval_evalf(self, prec=15, **options):
pt, r = self.args
dps = prec_to_dps(prec)
pt = pt.evalf(n=dps, **options)
r = r.evalf(n=dps, **options)
return self.func(pt, r, evaluate=False)
@property
def circumference(self):
"""The circumference of the circle.
Returns
=======
circumference : number or SymPy expression
Examples
========
>>> from sympy import Point, Circle
>>> c1 = Circle(Point(3, 4), 6)
>>> c1.circumference
12*pi
"""
return 2 * S.Pi * self.radius
def equation(self, x='x', y='y'):
"""The equation of the circle.
Parameters
==========
x : str or Symbol, optional
Default value is 'x'.
y : str or Symbol, optional
Default value is 'y'.
Returns
=======
equation : SymPy expression
Examples
========
>>> from sympy import Point, Circle
>>> c1 = Circle(Point(0, 0), 5)
>>> c1.equation()
x**2 + y**2 - 25
"""
x = _symbol(x, real=True)
y = _symbol(y, real=True)
t1 = (x - self.center.x)**2
t2 = (y - self.center.y)**2
return t1 + t2 - self.major**2
def intersection(self, o):
"""The intersection of this circle with another geometrical entity.
Parameters
==========
o : GeometryEntity
Returns
=======
intersection : list of GeometryEntities
Examples
========
>>> from sympy import Point, Circle, Line, Ray
>>> p1, p2, p3 = Point(0, 0), Point(5, 5), Point(6, 0)
>>> p4 = Point(5, 0)
>>> c1 = Circle(p1, 5)
>>> c1.intersection(p2)
[]
>>> c1.intersection(p4)
[Point2D(5, 0)]
>>> c1.intersection(Ray(p1, p2))
[Point2D(5*sqrt(2)/2, 5*sqrt(2)/2)]
>>> c1.intersection(Line(p2, p3))
[]
"""
return Ellipse.intersection(self, o)
@property
def radius(self):
"""The radius of the circle.
Returns
=======
radius : number or SymPy expression
See Also
========
Ellipse.major, Ellipse.minor, Ellipse.hradius, Ellipse.vradius
Examples
========
>>> from sympy import Point, Circle
>>> c1 = Circle(Point(3, 4), 6)
>>> c1.radius
6
"""
return self.args[1]
def reflect(self, line):
"""Override GeometryEntity.reflect since the radius
is not a GeometryEntity.
Examples
========
>>> from sympy import Circle, Line
>>> Circle((0, 1), 1).reflect(Line((0, 0), (1, 1)))
Circle(Point2D(1, 0), -1)
"""
c = self.center
c = c.reflect(line)
return self.func(c, -self.radius)
def scale(self, x=1, y=1, pt=None):
"""Override GeometryEntity.scale since the radius
is not a GeometryEntity.
Examples
========
>>> from sympy import Circle
>>> Circle((0, 0), 1).scale(2, 2)
Circle(Point2D(0, 0), 2)
>>> Circle((0, 0), 1).scale(2, 4)
Ellipse(Point2D(0, 0), 2, 4)
"""
c = self.center
if pt:
pt = Point(pt, dim=2)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
c = c.scale(x, y)
x, y = [abs(i) for i in (x, y)]
if x == y:
return self.func(c, x*self.radius)
h = v = self.radius
return Ellipse(c, hradius=h*x, vradius=v*y)
@property
def vradius(self):
"""
This Ellipse property is an alias for the Circle's radius.
Whereas hradius, major and minor can use Ellipse's conventions,
the vradius does not exist for a circle. It is always a positive
value in order that the Circle, like Polygons, will have an
area that can be positive or negative as determined by the sign
of the hradius.
Examples
========
>>> from sympy import Point, Circle
>>> c1 = Circle(Point(3, 4), 6)
>>> c1.vradius
6
"""
return abs(self.radius)
from .polygon import Polygon, Triangle
|
a4a5f82af496e7d25e00f411b63dadefa0b356f0ea93659a4c67429fb59255e1 | """The definition of the base geometrical entity with attributes common to
all derived geometrical entities.
Contains
========
GeometryEntity
GeometrySet
Notes
=====
A GeometryEntity is any object that has special geometric properties.
A GeometrySet is a superclass of any GeometryEntity that can also
be viewed as a sympy.sets.Set. In particular, points are the only
GeometryEntity not considered a Set.
Rn is a GeometrySet representing n-dimensional Euclidean space. R2 and
R3 are currently the only ambient spaces implemented.
"""
from sympy.core.basic import Basic
from sympy.core.containers import Tuple
from sympy.core.evalf import EvalfMixin, N
from sympy.core.numbers import oo
from sympy.core.symbol import Dummy
from sympy.core.sympify import sympify
from sympy.functions.elementary.trigonometric import cos, sin, atan
from sympy.matrices import eye
from sympy.multipledispatch import dispatch
from sympy.sets import Set, Union, FiniteSet
from sympy.sets.handlers.intersection import intersection_sets
from sympy.sets.handlers.union import union_sets
from sympy.utilities.misc import func_name
from sympy.utilities.iterables import is_sequence
# How entities are ordered; used by __cmp__ in GeometryEntity
ordering_of_classes = [
"Point2D",
"Point3D",
"Point",
"Segment2D",
"Ray2D",
"Line2D",
"Segment3D",
"Line3D",
"Ray3D",
"Segment",
"Ray",
"Line",
"Plane",
"Triangle",
"RegularPolygon",
"Polygon",
"Circle",
"Ellipse",
"Curve",
"Parabola"
]
class GeometryEntity(Basic, EvalfMixin):
"""The base class for all geometrical entities.
This class doesn't represent any particular geometric entity, it only
provides the implementation of some methods common to all subclasses.
"""
def __cmp__(self, other):
"""Comparison of two GeometryEntities."""
n1 = self.__class__.__name__
n2 = other.__class__.__name__
c = (n1 > n2) - (n1 < n2)
if not c:
return 0
i1 = -1
for cls in self.__class__.__mro__:
try:
i1 = ordering_of_classes.index(cls.__name__)
break
except ValueError:
i1 = -1
if i1 == -1:
return c
i2 = -1
for cls in other.__class__.__mro__:
try:
i2 = ordering_of_classes.index(cls.__name__)
break
except ValueError:
i2 = -1
if i2 == -1:
return c
return (i1 > i2) - (i1 < i2)
def __contains__(self, other):
"""Subclasses should implement this method for anything more complex than equality."""
if type(self) == type(other):
return self == other
raise NotImplementedError()
def __getnewargs__(self):
"""Returns a tuple that will be passed to __new__ on unpickling."""
return tuple(self.args)
def __ne__(self, o):
"""Test inequality of two geometrical entities."""
return not self == o
def __new__(cls, *args, **kwargs):
# Points are sequences, but they should not
# be converted to Tuples, so use this detection function instead.
def is_seq_and_not_point(a):
# we cannot use isinstance(a, Point) since we cannot import Point
if hasattr(a, 'is_Point') and a.is_Point:
return False
return is_sequence(a)
args = [Tuple(*a) if is_seq_and_not_point(a) else sympify(a) for a in args]
return Basic.__new__(cls, *args)
def __radd__(self, a):
"""Implementation of reverse add method."""
return a.__add__(self)
def __rtruediv__(self, a):
"""Implementation of reverse division method."""
return a.__truediv__(self)
def __repr__(self):
"""String representation of a GeometryEntity that can be evaluated
by sympy."""
return type(self).__name__ + repr(self.args)
def __rmul__(self, a):
"""Implementation of reverse multiplication method."""
return a.__mul__(self)
def __rsub__(self, a):
"""Implementation of reverse subtraction method."""
return a.__sub__(self)
def __str__(self):
"""String representation of a GeometryEntity."""
from sympy.printing import sstr
return type(self).__name__ + sstr(self.args)
def _eval_subs(self, old, new):
from sympy.geometry.point import Point, Point3D
if is_sequence(old) or is_sequence(new):
if isinstance(self, Point3D):
old = Point3D(old)
new = Point3D(new)
else:
old = Point(old)
new = Point(new)
return self._subs(old, new)
def _repr_svg_(self):
"""SVG representation of a GeometryEntity suitable for IPython"""
try:
bounds = self.bounds
except (NotImplementedError, TypeError):
# if we have no SVG representation, return None so IPython
# will fall back to the next representation
return None
if not all(x.is_number and x.is_finite for x in bounds):
return None
svg_top = '''<svg xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink"
width="{1}" height="{2}" viewBox="{0}"
preserveAspectRatio="xMinYMin meet">
<defs>
<marker id="markerCircle" markerWidth="8" markerHeight="8"
refx="5" refy="5" markerUnits="strokeWidth">
<circle cx="5" cy="5" r="1.5" style="stroke: none; fill:#000000;"/>
</marker>
<marker id="markerArrow" markerWidth="13" markerHeight="13" refx="2" refy="4"
orient="auto" markerUnits="strokeWidth">
<path d="M2,2 L2,6 L6,4" style="fill: #000000;" />
</marker>
<marker id="markerReverseArrow" markerWidth="13" markerHeight="13" refx="6" refy="4"
orient="auto" markerUnits="strokeWidth">
<path d="M6,2 L6,6 L2,4" style="fill: #000000;" />
</marker>
</defs>'''
# Establish SVG canvas that will fit all the data + small space
xmin, ymin, xmax, ymax = map(N, bounds)
if xmin == xmax and ymin == ymax:
# This is a point; buffer using an arbitrary size
xmin, ymin, xmax, ymax = xmin - .5, ymin -.5, xmax + .5, ymax + .5
else:
# Expand bounds by a fraction of the data ranges
expand = 0.1 # or 10%; this keeps arrowheads in view (R plots use 4%)
widest_part = max([xmax - xmin, ymax - ymin])
expand_amount = widest_part * expand
xmin -= expand_amount
ymin -= expand_amount
xmax += expand_amount
ymax += expand_amount
dx = xmax - xmin
dy = ymax - ymin
width = min([max([100., dx]), 300])
height = min([max([100., dy]), 300])
scale_factor = 1. if max(width, height) == 0 else max(dx, dy) / max(width, height)
try:
svg = self._svg(scale_factor)
except (NotImplementedError, TypeError):
# if we have no SVG representation, return None so IPython
# will fall back to the next representation
return None
view_box = "{} {} {} {}".format(xmin, ymin, dx, dy)
transform = "matrix(1,0,0,-1,0,{})".format(ymax + ymin)
svg_top = svg_top.format(view_box, width, height)
return svg_top + (
'<g transform="{}">{}</g></svg>'
).format(transform, svg)
def _svg(self, scale_factor=1., fill_color="#66cc99"):
"""Returns SVG path element for the GeometryEntity.
Parameters
==========
scale_factor : float
Multiplication factor for the SVG stroke-width. Default is 1.
fill_color : str, optional
Hex string for fill color. Default is "#66cc99".
"""
raise NotImplementedError()
def _sympy_(self):
return self
@property
def ambient_dimension(self):
"""What is the dimension of the space that the object is contained in?"""
raise NotImplementedError()
@property
def bounds(self):
"""Return a tuple (xmin, ymin, xmax, ymax) representing the bounding
rectangle for the geometric figure.
"""
raise NotImplementedError()
def encloses(self, o):
"""
Return True if o is inside (not on or outside) the boundaries of self.
The object will be decomposed into Points and individual Entities need
only define an encloses_point method for their class.
See Also
========
sympy.geometry.ellipse.Ellipse.encloses_point
sympy.geometry.polygon.Polygon.encloses_point
Examples
========
>>> from sympy import RegularPolygon, Point, Polygon
>>> t = Polygon(*RegularPolygon(Point(0, 0), 1, 3).vertices)
>>> t2 = Polygon(*RegularPolygon(Point(0, 0), 2, 3).vertices)
>>> t2.encloses(t)
True
>>> t.encloses(t2)
False
"""
from sympy.geometry.point import Point
from sympy.geometry.line import Segment, Ray, Line
from sympy.geometry.ellipse import Ellipse
from sympy.geometry.polygon import Polygon, RegularPolygon
if isinstance(o, Point):
return self.encloses_point(o)
elif isinstance(o, Segment):
return all(self.encloses_point(x) for x in o.points)
elif isinstance(o, (Ray, Line)):
return False
elif isinstance(o, Ellipse):
return self.encloses_point(o.center) and \
self.encloses_point(
Point(o.center.x + o.hradius, o.center.y)) and \
not self.intersection(o)
elif isinstance(o, Polygon):
if isinstance(o, RegularPolygon):
if not self.encloses_point(o.center):
return False
return all(self.encloses_point(v) for v in o.vertices)
raise NotImplementedError()
def equals(self, o):
return self == o
def intersection(self, o):
"""
Returns a list of all of the intersections of self with o.
Notes
=====
An entity is not required to implement this method.
If two different types of entities can intersect, the item with
higher index in ordering_of_classes should implement
intersections with anything having a lower index.
See Also
========
sympy.geometry.util.intersection
"""
raise NotImplementedError()
def is_similar(self, other):
"""Is this geometrical entity similar to another geometrical entity?
Two entities are similar if a uniform scaling (enlarging or
shrinking) of one of the entities will allow one to obtain the other.
Notes
=====
This method is not intended to be used directly but rather
through the `are_similar` function found in util.py.
An entity is not required to implement this method.
If two different types of entities can be similar, it is only
required that one of them be able to determine this.
See Also
========
scale
"""
raise NotImplementedError()
def reflect(self, line):
"""
Reflects an object across a line.
Parameters
==========
line: Line
Examples
========
>>> from sympy import pi, sqrt, Line, RegularPolygon
>>> l = Line((0, pi), slope=sqrt(2))
>>> pent = RegularPolygon((1, 2), 1, 5)
>>> rpent = pent.reflect(l)
>>> rpent
RegularPolygon(Point2D(-2*sqrt(2)*pi/3 - 1/3 + 4*sqrt(2)/3, 2/3 + 2*sqrt(2)/3 + 2*pi/3), -1, 5, -atan(2*sqrt(2)) + 3*pi/5)
>>> from sympy import pi, Line, Circle, Point
>>> l = Line((0, pi), slope=1)
>>> circ = Circle(Point(0, 0), 5)
>>> rcirc = circ.reflect(l)
>>> rcirc
Circle(Point2D(-pi, pi), -5)
"""
from sympy.geometry.point import Point
g = self
l = line
o = Point(0, 0)
if l.slope.is_zero:
y = l.args[0].y
if not y: # x-axis
return g.scale(y=-1)
reps = [(p, p.translate(y=2*(y - p.y))) for p in g.atoms(Point)]
elif l.slope is oo:
x = l.args[0].x
if not x: # y-axis
return g.scale(x=-1)
reps = [(p, p.translate(x=2*(x - p.x))) for p in g.atoms(Point)]
else:
if not hasattr(g, 'reflect') and not all(
isinstance(arg, Point) for arg in g.args):
raise NotImplementedError(
'reflect undefined or non-Point args in %s' % g)
a = atan(l.slope)
c = l.coefficients
d = -c[-1]/c[1] # y-intercept
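            # conjugate a y-flip by the rigid motion that carries the line onto
            # the x-axis: shift down by the intercept, rotate by -a, flip, undo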
# apply the transform to a single point
x, y = Dummy(), Dummy()
xf = Point(x, y)
xf = xf.translate(y=-d).rotate(-a, o).scale(y=-1
).rotate(a, o).translate(y=d)
# replace every point using that transform
reps = [(p, xf.xreplace({x: p.x, y: p.y})) for p in g.atoms(Point)]
return g.xreplace(dict(reps))
def rotate(self, angle, pt=None):
"""Rotate ``angle`` radians counterclockwise about Point ``pt``.
The default pt is the origin, Point(0, 0)
See Also
========
scale, translate
Examples
========
>>> from sympy import Point, RegularPolygon, Polygon, pi
>>> t = Polygon(*RegularPolygon(Point(0, 0), 1, 3).vertices)
>>> t # vertex on x axis
Triangle(Point2D(1, 0), Point2D(-1/2, sqrt(3)/2), Point2D(-1/2, -sqrt(3)/2))
>>> t.rotate(pi/2) # vertex on y axis now
Triangle(Point2D(0, 1), Point2D(-sqrt(3)/2, -1/2), Point2D(sqrt(3)/2, -1/2))
"""
newargs = []
for a in self.args:
if isinstance(a, GeometryEntity):
newargs.append(a.rotate(angle, pt))
else:
newargs.append(a)
return type(self)(*newargs)
def scale(self, x=1, y=1, pt=None):
"""Scale the object by multiplying the x,y-coordinates by x and y.
If pt is given, the scaling is done relative to that point; the
object is shifted by -pt, scaled, and shifted by pt.
See Also
========
rotate, translate
Examples
========
>>> from sympy import RegularPolygon, Point, Polygon
>>> t = Polygon(*RegularPolygon(Point(0, 0), 1, 3).vertices)
>>> t
Triangle(Point2D(1, 0), Point2D(-1/2, sqrt(3)/2), Point2D(-1/2, -sqrt(3)/2))
>>> t.scale(2)
Triangle(Point2D(2, 0), Point2D(-1, sqrt(3)/2), Point2D(-1, -sqrt(3)/2))
>>> t.scale(2, 2)
Triangle(Point2D(2, 0), Point2D(-1, sqrt(3)), Point2D(-1, -sqrt(3)))
"""
from sympy.geometry.point import Point
if pt:
pt = Point(pt, dim=2)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
return type(self)(*[a.scale(x, y) for a in self.args]) # if this fails, override this class
def translate(self, x=0, y=0):
"""Shift the object by adding to the x,y-coordinates the values x and y.
See Also
========
rotate, scale
Examples
========
>>> from sympy import RegularPolygon, Point, Polygon
>>> t = Polygon(*RegularPolygon(Point(0, 0), 1, 3).vertices)
>>> t
Triangle(Point2D(1, 0), Point2D(-1/2, sqrt(3)/2), Point2D(-1/2, -sqrt(3)/2))
>>> t.translate(2)
Triangle(Point2D(3, 0), Point2D(3/2, sqrt(3)/2), Point2D(3/2, -sqrt(3)/2))
>>> t.translate(2, 2)
Triangle(Point2D(3, 2), Point2D(3/2, sqrt(3)/2 + 2), Point2D(3/2, 2 - sqrt(3)/2))
"""
newargs = []
for a in self.args:
if isinstance(a, GeometryEntity):
newargs.append(a.translate(x, y))
else:
newargs.append(a)
return self.func(*newargs)
def parameter_value(self, other, t):
"""Return the parameter corresponding to the given point.
Evaluating an arbitrary point of the entity at this parameter
value will return the given point.
Examples
========
>>> from sympy import Line, Point
>>> from sympy.abc import t
>>> a = Point(0, 0)
>>> b = Point(2, 2)
>>> Line(a, b).parameter_value((1, 1), t)
{t: 1/2}
>>> Line(a, b).arbitrary_point(t).subs(_)
Point2D(1, 1)
"""
from sympy.geometry.point import Point
from sympy.solvers.solvers import solve
if not isinstance(other, GeometryEntity):
other = Point(other, dim=self.ambient_dimension)
if not isinstance(other, Point):
raise ValueError("other must be a point")
T = Dummy('t', real=True)
sol = solve(self.arbitrary_point(T) - other, T, dict=True)
if not sol:
raise ValueError("Given point is not on %s" % func_name(self))
return {t: sol[0][T]}
class GeometrySet(GeometryEntity, Set):
"""Parent class of all GeometryEntity that are also Sets
(compatible with sympy.sets)
"""
def _contains(self, other):
"""sympy.sets uses the _contains method, so include it for compatibility."""
if isinstance(other, Set) and other.is_FiniteSet:
return all(self.__contains__(i) for i in other)
return self.__contains__(other)
@dispatch(GeometrySet, Set) # type:ignore # noqa:F811
def union_sets(self, o): # noqa:F811
""" Returns the union of self and o
for use with sympy.sets.Set, if possible. """
    # if it's a FiniteSet, merge any points
# we contain and return a union with the rest
if o.is_FiniteSet:
other_points = [p for p in o if not self._contains(p)]
if len(other_points) == len(o):
return None
return Union(self, FiniteSet(*other_points))
if self._contains(o):
return self
return None
@dispatch(GeometrySet, Set) # type: ignore # noqa:F811
def intersection_sets(self, o): # noqa:F811
""" Returns a sympy.sets.Set of intersection objects,
if possible. """
from sympy.geometry import Point
try:
# if o is a FiniteSet, find the intersection directly
# to avoid infinite recursion
if o.is_FiniteSet:
inter = FiniteSet(*(p for p in o if self.contains(p)))
else:
inter = self.intersection(o)
except NotImplementedError:
# sympy.sets.Set.reduce expects None if an object
# doesn't know how to simplify
return None
# put the points in a FiniteSet
points = FiniteSet(*[p for p in inter if isinstance(p, Point)])
non_points = [p for p in inter if not isinstance(p, Point)]
return Union(*(non_points + [points]))
def translate(x, y):
"""Return the matrix to translate a 2-D point by x and y."""
rv = eye(3)
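    # these matrices act on row vectors, [x, y, 1]*M (as in Point.transform),
    # so the translation terms go in the bottom row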
rv[2, 0] = x
rv[2, 1] = y
return rv
def scale(x, y, pt=None):
"""Return the matrix to multiply a 2-D point's coordinates by x and y.
If pt is given, the scaling is done relative to that point."""
rv = eye(3)
rv[0, 0] = x
rv[1, 1] = y
if pt:
from sympy.geometry.point import Point
pt = Point(pt, dim=2)
tr1 = translate(*(-pt).args)
tr2 = translate(*pt.args)
return tr1*rv*tr2
return rv
def rotate(th):
"""Return the matrix to rotate a 2-D point about the origin by ``angle``.
The angle is measured in radians. To Point a point about a point other
then the origin, translate the Point, do the rotation, and
translate it back:
>>> from sympy.geometry.entity import rotate, translate
>>> from sympy import Point, pi
>>> rot_about_11 = translate(-1, -1)*rotate(pi/2)*translate(1, 1)
>>> Point(1, 1).transform(rot_about_11)
Point2D(1, 1)
>>> Point(0, 0).transform(rot_about_11)
Point2D(2, 0)
"""
s = sin(th)
rv = eye(3)*cos(th)
rv[0, 1] = s
rv[1, 0] = -s
rv[2, 2] = 1
return rv
|
08514cb8a514a4a832851963f397f0daab22674266935db27bbae2ace733361b | """Utility functions for geometrical entities.
Contains
========
intersection
convex_hull
closest_points
farthest_points
are_coplanar
are_similar
"""
from .point import Point, Point2D
from sympy.core.containers import OrderedSet
from sympy.core.function import Function
from sympy.core.sorting import ordered
from sympy.core.symbol import Symbol
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.solvers.solvers import solve
from sympy.utilities.iterables import is_sequence
def find(x, equation):
"""
Checks whether the parameter 'x' is present in 'equation' or not.
    If it is present then it returns the passed parameter 'x' as a free
    symbol, else it raises a ValueError.
"""
free = equation.free_symbols
xs = [i for i in free if (i.name if isinstance(x, str) else i) == x]
if not xs:
raise ValueError('could not find %s' % x)
if len(xs) != 1:
raise ValueError('ambiguous %s' % x)
return xs[0]
def _ordered_points(p):
"""Return the tuple of points sorted numerically according to args"""
return tuple(sorted(p, key=lambda x: x.args))
def are_coplanar(*e):
""" Returns True if the given entities are coplanar otherwise False
Parameters
==========
e: entities to be checked for being coplanar
Returns
=======
Boolean
Examples
========
>>> from sympy import Point3D, Line3D
>>> from sympy.geometry.util import are_coplanar
>>> a = Line3D(Point3D(5, 0, 0), Point3D(1, -1, 1))
>>> b = Line3D(Point3D(0, -2, 0), Point3D(3, 1, 1))
>>> c = Line3D(Point3D(0, -1, 0), Point3D(5, -1, 9))
>>> are_coplanar(a, b, c)
False
"""
from sympy.geometry.line import LinearEntity3D
from sympy.geometry.entity import GeometryEntity
from sympy.geometry.point import Point3D
from sympy.geometry.plane import Plane
# XXX update tests for coverage
e = set(e)
# first work with a Plane if present
for i in list(e):
if isinstance(i, Plane):
e.remove(i)
return all(p.is_coplanar(i) for p in e)
if all(isinstance(i, Point3D) for i in e):
if len(e) < 3:
return False
# remove pts that are collinear with 2 pts
a, b = e.pop(), e.pop()
for i in list(e):
if Point3D.are_collinear(a, b, i):
e.remove(i)
if not e:
return False
else:
# define a plane
p = Plane(a, b, e.pop())
for i in e:
if i not in p:
return False
return True
else:
pt3d = []
for i in e:
if isinstance(i, Point3D):
pt3d.append(i)
elif isinstance(i, LinearEntity3D):
pt3d.extend(i.args)
elif isinstance(i, GeometryEntity): # XXX we should have a GeometryEntity3D class so we can tell the difference between 2D and 3D -- here we just want to deal with 2D objects; if new 3D objects are encountered that we didn't handle above, an error should be raised
# all 2D objects have some Point that defines them; so convert those points to 3D pts by making z=0
for p in i.args:
if isinstance(p, Point):
pt3d.append(Point3D(*(p.args + (0,))))
return are_coplanar(*pt3d)
def are_similar(e1, e2):
"""Are two geometrical entities similar.
Can one geometrical entity be uniformly scaled to the other?
Parameters
==========
e1 : GeometryEntity
e2 : GeometryEntity
Returns
=======
are_similar : boolean
Raises
======
GeometryError
When `e1` and `e2` cannot be compared.
Notes
=====
If the two objects are equal then they are similar.
See Also
========
sympy.geometry.entity.GeometryEntity.is_similar
Examples
========
>>> from sympy import Point, Circle, Triangle, are_similar
>>> c1, c2 = Circle(Point(0, 0), 4), Circle(Point(1, 4), 3)
>>> t1 = Triangle(Point(0, 0), Point(1, 0), Point(0, 1))
>>> t2 = Triangle(Point(0, 0), Point(2, 0), Point(0, 2))
>>> t3 = Triangle(Point(0, 0), Point(3, 0), Point(0, 1))
>>> are_similar(t1, t2)
True
>>> are_similar(t1, t3)
False
"""
from .exceptions import GeometryError
if e1 == e2:
return True
is_similar1 = getattr(e1, 'is_similar', None)
if is_similar1:
return is_similar1(e2)
is_similar2 = getattr(e2, 'is_similar', None)
if is_similar2:
return is_similar2(e1)
n1 = e1.__class__.__name__
n2 = e2.__class__.__name__
raise GeometryError(
"Cannot test similarity between %s and %s" % (n1, n2))
def centroid(*args):
"""Find the centroid (center of mass) of the collection containing only Points,
Segments or Polygons. The centroid is the weighted average of the individual centroid
where the weights are the lengths (of segments) or areas (of polygons).
Overlapping regions will add to the weight of that region.
If there are no objects (or a mixture of objects) then None is returned.
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Segment,
sympy.geometry.polygon.Polygon
Examples
========
>>> from sympy import Point, Segment, Polygon
>>> from sympy.geometry.util import centroid
>>> p = Polygon((0, 0), (10, 0), (10, 10))
>>> q = p.translate(0, 20)
>>> p.centroid, q.centroid
(Point2D(20/3, 10/3), Point2D(20/3, 70/3))
>>> centroid(p, q)
Point2D(20/3, 40/3)
>>> p, q = Segment((0, 0), (2, 0)), Segment((0, 0), (2, 2))
>>> centroid(p, q)
Point2D(1, 2 - sqrt(2))
>>> centroid(Point(0, 0), Point(2, 0))
Point2D(1, 0)
Stacking 3 polygons on top of each other effectively triples the
weight of that polygon:
>>> p = Polygon((0, 0), (1, 0), (1, 1), (0, 1))
>>> q = Polygon((1, 0), (3, 0), (3, 1), (1, 1))
>>> centroid(p, q)
Point2D(3/2, 1/2)
>>> centroid(p, p, p, q) # centroid x-coord shifts left
Point2D(11/10, 1/2)
Stacking the squares vertically above and below p has the same
effect:
>>> centroid(p, p.translate(0, 1), p.translate(0, -1), q)
Point2D(11/10, 1/2)
"""
from sympy.geometry import Polygon, Segment, Point
if args:
if all(isinstance(g, Point) for g in args):
c = Point(0, 0)
for g in args:
c += g
den = len(args)
elif all(isinstance(g, Segment) for g in args):
c = Point(0, 0)
L = 0
for g in args:
l = g.length
c += g.midpoint*l
L += l
den = L
elif all(isinstance(g, Polygon) for g in args):
c = Point(0, 0)
A = 0
for g in args:
a = g.area
c += g.centroid*a
A += a
den = A
c /= den
return c.func(*[i.simplify() for i in c.args])
def closest_points(*args):
"""Return the subset of points from a set of points that were
the closest to each other in the 2D plane.
Parameters
==========
args : a collection of Points on 2D plane.
Notes
=====
This can only be performed on a set of points whose coordinates can
be ordered on the number line. If there are no ties then a single
pair of Points will be in the set.
Examples
========
>>> from sympy.geometry import closest_points, Triangle
>>> Triangle(sss=(3, 4, 5)).args
(Point2D(0, 0), Point2D(3, 0), Point2D(3, 4))
>>> closest_points(*_)
{(Point2D(0, 0), Point2D(3, 0))}
References
==========
.. [1] http://www.cs.mcgill.ca/~cs251/ClosestPair/ClosestPairPS.html
.. [2] Sweep line algorithm
https://en.wikipedia.org/wiki/Sweep_line_algorithm
"""
from collections import deque
from math import sqrt as _sqrt
p = [Point2D(i) for i in set(args)]
if len(p) < 2:
raise ValueError('At least 2 distinct points must be given.')
try:
p.sort(key=lambda x: x.args)
except TypeError:
raise ValueError("The points could not be sorted.")
if not all(i.is_Rational for j in p for i in j.args):
def hypot(x, y):
arg = x*x + y*y
if arg.is_Rational:
return _sqrt(arg)
return sqrt(arg)
else:
from math import hypot
rv = [(0, 1)]
best_dist = hypot(p[1].x - p[0].x, p[1].y - p[0].y)
i = 2
left = 0
box = deque([0, 1])
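    # sweep the sorted points left to right; ``box`` holds only candidates
    # whose x-distance from the current point is at most best_dist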
while i < len(p):
while left < i and p[i][0] - p[left][0] > best_dist:
box.popleft()
left += 1
for j in box:
d = hypot(p[i].x - p[j].x, p[i].y - p[j].y)
if d < best_dist:
rv = [(j, i)]
elif d == best_dist:
rv.append((j, i))
else:
continue
best_dist = d
box.append(i)
i += 1
return {tuple([p[i] for i in pair]) for pair in rv}
def convex_hull(*args, polygon=True):
"""The convex hull surrounding the Points contained in the list of entities.
Parameters
==========
args : a collection of Points, Segments and/or Polygons
Optional parameters
===================
    polygon : Boolean. If True, returns a Polygon; if False, returns a tuple, see below.
Default is True.
Returns
=======
convex_hull : Polygon if ``polygon`` is True else as a tuple `(U, L)` where
``L`` and ``U`` are the lower and upper hulls, respectively.
Notes
=====
This can only be performed on a set of points whose coordinates can
be ordered on the number line.
See Also
========
sympy.geometry.point.Point, sympy.geometry.polygon.Polygon
Examples
========
>>> from sympy.geometry import convex_hull
>>> points = [(1, 1), (1, 2), (3, 1), (-5, 2), (15, 4)]
>>> convex_hull(*points)
Polygon(Point2D(-5, 2), Point2D(1, 1), Point2D(3, 1), Point2D(15, 4))
>>> convex_hull(*points, **dict(polygon=False))
([Point2D(-5, 2), Point2D(15, 4)],
[Point2D(-5, 2), Point2D(1, 1), Point2D(3, 1), Point2D(15, 4)])
References
==========
.. [1] https://en.wikipedia.org/wiki/Graham_scan
.. [2] Andrew's Monotone Chain Algorithm
(A.M. Andrew,
"Another Efficient Algorithm for Convex Hulls in Two Dimensions", 1979)
http://geomalgorithms.com/a10-_hull-1.html
"""
from .entity import GeometryEntity
from .point import Point
from .line import Segment
from .polygon import Polygon
p = OrderedSet()
for e in args:
if not isinstance(e, GeometryEntity):
try:
e = Point(e)
except NotImplementedError:
raise ValueError('%s is not a GeometryEntity and cannot be made into Point' % str(e))
if isinstance(e, Point):
p.add(e)
elif isinstance(e, Segment):
p.update(e.points)
elif isinstance(e, Polygon):
p.update(e.vertices)
else:
raise NotImplementedError(
'Convex hull for %s not implemented.' % type(e))
# make sure all our points are of the same dimension
if any(len(x) != 2 for x in p):
raise ValueError('Can only compute the convex hull in two dimensions')
p = list(p)
if len(p) == 1:
return p[0] if polygon else (p[0], None)
elif len(p) == 2:
s = Segment(p[0], p[1])
return s if polygon else (s, None)
def _orientation(p, q, r):
'''Return positive if p-q-r are clockwise, neg if ccw, zero if
collinear.'''
return (q.y - p.y)*(r.x - p.x) - (q.x - p.x)*(r.y - p.y)
# scan to find upper and lower convex hulls of a set of 2d points.
U = []
L = []
try:
p.sort(key=lambda x: x.args)
except TypeError:
raise ValueError("The points could not be sorted.")
for p_i in p:
while len(U) > 1 and _orientation(U[-2], U[-1], p_i) <= 0:
U.pop()
while len(L) > 1 and _orientation(L[-2], L[-1], p_i) >= 0:
L.pop()
U.append(p_i)
L.append(p_i)
U.reverse()
convexHull = tuple(L + U[1:-1])
if len(convexHull) == 2:
s = Segment(convexHull[0], convexHull[1])
return s if polygon else (s, None)
if polygon:
return Polygon(*convexHull)
else:
U.reverse()
return (U, L)
def farthest_points(*args):
"""Return the subset of points from a set of points that were
the furthest apart from each other in the 2D plane.
Parameters
==========
args : a collection of Points on 2D plane.
Notes
=====
This can only be performed on a set of points whose coordinates can
be ordered on the number line. If there are no ties then a single
pair of Points will be in the set.
Examples
========
>>> from sympy.geometry import farthest_points, Triangle
>>> Triangle(sss=(3, 4, 5)).args
(Point2D(0, 0), Point2D(3, 0), Point2D(3, 4))
>>> farthest_points(*_)
{(Point2D(0, 0), Point2D(3, 4))}
References
==========
.. [1] http://code.activestate.com/recipes/117225-convex-hull-and-diameter-of-2d-point-sets/
.. [2] Rotating Callipers Technique
https://en.wikipedia.org/wiki/Rotating_calipers
"""
from math import sqrt as _sqrt
def rotatingCalipers(Points):
U, L = convex_hull(*Points, **dict(polygon=False))
if L is None:
if isinstance(U, Point):
raise ValueError('At least two distinct points must be given.')
yield U.args
else:
i = 0
j = len(L) - 1
while i < len(U) - 1 or j > 0:
yield U[i], L[j]
# if all the way through one side of hull, advance the other side
if i == len(U) - 1:
j -= 1
elif j == 0:
i += 1
# still points left on both lists, compare slopes of next hull edges
# being careful to avoid divide-by-zero in slope calculation
elif (U[i+1].y - U[i].y) * (L[j].x - L[j-1].x) > \
(L[j].y - L[j-1].y) * (U[i+1].x - U[i].x):
i += 1
else:
j -= 1
p = [Point2D(i) for i in set(args)]
if not all(i.is_Rational for j in p for i in j.args):
def hypot(x, y):
arg = x*x + y*y
if arg.is_Rational:
return _sqrt(arg)
return sqrt(arg)
else:
from math import hypot
rv = []
diam = 0
for pair in rotatingCalipers(args):
h, q = _ordered_points(pair)
d = hypot(h.x - q.x, h.y - q.y)
if d > diam:
rv = [(h, q)]
elif d == diam:
rv.append((h, q))
else:
continue
diam = d
return set(rv)
def idiff(eq, y, x, n=1):
"""Return ``dy/dx`` assuming that ``eq == 0``.
Parameters
==========
y : the dependent variable or a list of dependent variables (with y first)
x : the variable that the derivative is being taken with respect to
n : the order of the derivative (default is 1)
Examples
========
>>> from sympy.abc import x, y, a
>>> from sympy.geometry.util import idiff
>>> circ = x**2 + y**2 - 4
>>> idiff(circ, y, x)
-x/y
>>> idiff(circ, y, x, 2).simplify()
(-x**2 - y**2)/y**3
Here, ``a`` is assumed to be independent of ``x``:
>>> idiff(x + a + y, y, x)
-1
Now the x-dependence of ``a`` is made explicit by listing ``a`` after
``y`` in a list.
>>> idiff(x + a + y, [y, a], x)
-Derivative(a, x) - 1
See Also
========
sympy.core.function.Derivative: represents unevaluated derivatives
sympy.core.function.diff: explicitly differentiates wrt symbols
"""
if is_sequence(y):
dep = set(y)
y = y[0]
elif isinstance(y, Symbol):
dep = {y}
elif isinstance(y, Function):
pass
else:
raise ValueError("expecting x-dependent symbol(s) or function(s) but got: %s" % y)
f = {s: Function(s.name)(x) for s in eq.free_symbols
if s != x and s in dep}
if isinstance(y, Symbol):
dydx = Function(y.name)(x).diff(x)
else:
dydx = y.diff(x)
eq = eq.subs(f)
derivs = {}
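    # differentiate the substituted equation and solve for dy/dx; repeat,
    # substituting lower-order results, to reach the requested order n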
for i in range(n):
yp = solve(eq.diff(x), dydx)[0].subs(derivs)
if i == n - 1:
return yp.subs([(v, k) for k, v in f.items()])
derivs[dydx] = yp
eq = dydx - yp
dydx = dydx.diff(x)
def intersection(*entities, pairwise=False, **kwargs):
"""The intersection of a collection of GeometryEntity instances.
Parameters
==========
entities : sequence of GeometryEntity
pairwise (keyword argument) : Can be either True or False
Returns
=======
intersection : list of GeometryEntity
Raises
======
NotImplementedError
When unable to calculate intersection.
Notes
=====
The intersection of any geometrical entity with itself should return
a list with one item: the entity in question.
An intersection requires two or more entities. If only a single
entity is given then the function will return an empty list.
It is possible for `intersection` to miss intersections that one
    knows exist because the required quantities were not fully
simplified internally.
Reals should be converted to Rationals, e.g. Rational(str(real_num))
or else failures due to floating point issues may result.
Case 1: When the keyword argument 'pairwise' is False (default value):
In this case, the function returns a list of intersections common to
all entities.
Case 2: When the keyword argument 'pairwise' is True:
    In this case, the function returns a list of intersections that occur
    between any pair of entities.
See Also
========
sympy.geometry.entity.GeometryEntity.intersection
Examples
========
>>> from sympy.geometry import Ray, Circle, intersection
>>> c = Circle((0, 1), 1)
>>> intersection(c, c.center)
[]
>>> right = Ray((0, 0), (1, 0))
>>> up = Ray((0, 0), (0, 1))
>>> intersection(c, right, up)
[Point2D(0, 0)]
>>> intersection(c, right, up, pairwise=True)
[Point2D(0, 0), Point2D(0, 2)]
>>> left = Ray((1, 0), (0, 0))
>>> intersection(right, left)
[Segment2D(Point2D(0, 0), Point2D(1, 0))]
"""
from .entity import GeometryEntity
from .point import Point
if len(entities) <= 1:
return []
# entities may be an immutable tuple
entities = list(entities)
for i, e in enumerate(entities):
if not isinstance(e, GeometryEntity):
entities[i] = Point(e)
if not pairwise:
# find the intersection common to all objects
res = entities[0].intersection(entities[1])
for entity in entities[2:]:
newres = []
for x in res:
newres.extend(x.intersection(entity))
res = newres
return res
# find all pairwise intersections
ans = []
for j in range(0, len(entities)):
for k in range(j + 1, len(entities)):
ans.extend(intersection(entities[j], entities[k]))
return list(ordered(set(ans)))
|
b084014eb0a5de2741a346a04f894ec5d1ae73bcac312234a51261cc3eca9220 | """Line-like geometrical entities.
Contains
========
LinearEntity
Line
Ray
Segment
LinearEntity2D
Line2D
Ray2D
Segment2D
LinearEntity3D
Line3D
Ray3D
Segment3D
"""
from sympy.core.containers import Tuple
from sympy.core.evalf import N
from sympy.core.expr import Expr
from sympy.core.numbers import Rational, oo
from sympy.core.relational import Eq
from sympy.core.singleton import S
from sympy.core.sorting import ordered
from sympy.core.symbol import _symbol, Dummy, uniquely_named_symbol
from sympy.core.sympify import sympify
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.elementary.trigonometric import (_pi_coeff as pi_coeff, acos, tan, atan2)
from .entity import GeometryEntity, GeometrySet
from .exceptions import GeometryError
from .point import Point, Point3D
from .util import find, intersection
from sympy.logic.boolalg import And
from sympy.matrices import Matrix
from sympy.sets.sets import Intersection
from sympy.simplify.simplify import simplify
from sympy.solvers.solveset import linear_coeffs
from sympy.utilities.decorator import deprecated
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.misc import Undecidable, filldedent
import random
class LinearEntity(GeometrySet):
"""A base class for all linear entities (Line, Ray and Segment)
in n-dimensional Euclidean space.
Attributes
==========
ambient_dimension
direction
length
p1
p2
points
Notes
=====
This is an abstract class and is not meant to be instantiated.
See Also
========
sympy.geometry.entity.GeometryEntity
"""
def __new__(cls, p1, p2=None, **kwargs):
p1, p2 = Point._normalize_dimension(p1, p2)
if p1 == p2:
# sometimes we return a single point if we are not given two unique
# points. This is done in the specific subclass
raise ValueError(
"%s.__new__ requires two unique Points." % cls.__name__)
if len(p1) != len(p2):
raise ValueError(
"%s.__new__ requires two Points of equal dimension." % cls.__name__)
return GeometryEntity.__new__(cls, p1, p2, **kwargs)
def __contains__(self, other):
"""Return a definitive answer or else raise an error if it cannot
be determined that other is on the boundaries of self."""
result = self.contains(other)
if result is not None:
return result
else:
raise Undecidable(
"Cannot decide whether '%s' contains '%s'" % (self, other))
def _span_test(self, other):
"""Test whether the point `other` lies in the positive span of `self`.
        A point x is 'in front' of a point y if x.dot(y) >= 0. Return
        -1 if `other` is behind `self.p1`, 0 if `other` is `self.p1`,
        and 1 if `other` is in front of `self.p1`."""
if self.p1 == other:
return 0
rel_pos = other - self.p1
d = self.direction
if d.dot(rel_pos) > 0:
return 1
return -1
@property
def ambient_dimension(self):
"""A property method that returns the dimension of LinearEntity
object.
Parameters
==========
p1 : LinearEntity
Returns
=======
dimension : integer
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(0, 0), Point(1, 1)
>>> l1 = Line(p1, p2)
>>> l1.ambient_dimension
2
>>> from sympy import Point, Line
>>> p1, p2 = Point(0, 0, 0), Point(1, 1, 1)
>>> l1 = Line(p1, p2)
>>> l1.ambient_dimension
3
"""
return len(self.p1)
def angle_between(l1, l2):
"""Return the non-reflex angle formed by rays emanating from
the origin with directions the same as the direction vectors
of the linear entities.
Parameters
==========
l1 : LinearEntity
l2 : LinearEntity
Returns
=======
angle : angle in radians
Notes
=====
From the dot product of vectors v1 and v2 it is known that:
``dot(v1, v2) = |v1|*|v2|*cos(A)``
where A is the angle formed between the two vectors. We can
get the directional vectors of the two lines and readily
find the angle between the two using the above formula.
See Also
========
is_perpendicular, Ray2D.closing_angle
Examples
========
>>> from sympy import Line
>>> e = Line((0, 0), (1, 0))
>>> ne = Line((0, 0), (1, 1))
>>> sw = Line((1, 1), (0, 0))
>>> ne.angle_between(e)
pi/4
>>> sw.angle_between(e)
3*pi/4
To obtain the non-obtuse angle at the intersection of lines, use
the ``smallest_angle_between`` method:
>>> sw.smallest_angle_between(e)
pi/4
>>> from sympy import Point3D, Line3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(1, 1, 1), Point3D(-1, 2, 0)
>>> l1, l2 = Line3D(p1, p2), Line3D(p2, p3)
>>> l1.angle_between(l2)
acos(-sqrt(2)/3)
>>> l1.smallest_angle_between(l2)
acos(sqrt(2)/3)
"""
        if not isinstance(l1, LinearEntity) or not isinstance(l2, LinearEntity):
raise TypeError('Must pass only LinearEntity objects')
v1, v2 = l1.direction, l2.direction
return acos(v1.dot(v2)/(abs(v1)*abs(v2)))
def smallest_angle_between(l1, l2):
"""Return the smallest angle formed at the intersection of the
lines containing the linear entities.
Parameters
==========
l1 : LinearEntity
l2 : LinearEntity
Returns
=======
angle : angle in radians
See Also
========
angle_between, is_perpendicular, Ray2D.closing_angle
Examples
========
>>> from sympy import Point, Line
>>> p1, p2, p3 = Point(0, 0), Point(0, 4), Point(2, -2)
>>> l1, l2 = Line(p1, p2), Line(p1, p3)
>>> l1.smallest_angle_between(l2)
pi/4
"""
        if not isinstance(l1, LinearEntity) or not isinstance(l2, LinearEntity):
raise TypeError('Must pass only LinearEntity objects')
v1, v2 = l1.direction, l2.direction
return acos(abs(v1.dot(v2))/(abs(v1)*abs(v2)))
def arbitrary_point(self, parameter='t'):
"""A parameterized point on the Line.
Parameters
==========
parameter : str, optional
The name of the parameter which will be used for the parametric
point. The default value is 't'. When this parameter is 0, the
first point used to define the line will be returned, and when
it is 1 the second point will be returned.
Returns
=======
point : Point
Raises
======
ValueError
When ``parameter`` already appears in the Line's definition.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(1, 0), Point(5, 3)
>>> l1 = Line(p1, p2)
>>> l1.arbitrary_point()
Point2D(4*t + 1, 3*t)
>>> from sympy import Point3D, Line3D
>>> p1, p2 = Point3D(1, 0, 0), Point3D(5, 3, 1)
>>> l1 = Line3D(p1, p2)
>>> l1.arbitrary_point()
Point3D(4*t + 1, 3*t, t)
"""
t = _symbol(parameter, real=True)
if t.name in (f.name for f in self.free_symbols):
raise ValueError(filldedent('''
Symbol %s already appears in object
and cannot be used as a parameter.
''' % t.name))
# multiply on the right so the variable gets
# combined with the coordinates of the point
return self.p1 + (self.p2 - self.p1)*t
@staticmethod
def are_concurrent(*lines):
"""Is a sequence of linear entities concurrent?
Two or more linear entities are concurrent if they all
intersect at a single point.
Parameters
==========
lines : a sequence of linear entities.
Returns
=======
True : if the set of linear entities intersect in one point
False : otherwise.
See Also
========
sympy.geometry.util.intersection
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(0, 0), Point(3, 5)
>>> p3, p4 = Point(-2, -2), Point(0, 2)
>>> l1, l2, l3 = Line(p1, p2), Line(p1, p3), Line(p1, p4)
>>> Line.are_concurrent(l1, l2, l3)
True
>>> l4 = Line(p2, p3)
>>> Line.are_concurrent(l2, l3, l4)
False
>>> from sympy import Point3D, Line3D
>>> p1, p2 = Point3D(0, 0, 0), Point3D(3, 5, 2)
>>> p3, p4 = Point3D(-2, -2, -2), Point3D(0, 2, 1)
>>> l1, l2, l3 = Line3D(p1, p2), Line3D(p1, p3), Line3D(p1, p4)
>>> Line3D.are_concurrent(l1, l2, l3)
True
>>> l4 = Line3D(p2, p3)
>>> Line3D.are_concurrent(l2, l3, l4)
False
"""
common_points = Intersection(*lines)
if common_points.is_FiniteSet and len(common_points) == 1:
return True
return False
def contains(self, other):
"""Subclasses should implement this method and should return
True if other is on the boundaries of self;
False if not on the boundaries of self;
None if a determination cannot be made."""
raise NotImplementedError()
@property
def direction(self):
"""The direction vector of the LinearEntity.
Returns
=======
p : a Point; the ray from the origin to this point is the
direction of `self`
Examples
========
>>> from sympy.geometry import Line
>>> a, b = (1, 1), (1, 3)
>>> Line(a, b).direction
Point2D(0, 2)
>>> Line(b, a).direction
Point2D(0, -2)
        This can be normalized so the distance from the origin is 1:
>>> Line(b, a).direction.unit
Point2D(0, -1)
See Also
========
sympy.geometry.point.Point.unit
"""
return self.p2 - self.p1
def intersection(self, other):
"""The intersection with another geometrical entity.
Parameters
==========
o : Point or LinearEntity
Returns
=======
intersection : list of geometrical entities
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Line, Segment
>>> p1, p2, p3 = Point(0, 0), Point(1, 1), Point(7, 7)
>>> l1 = Line(p1, p2)
>>> l1.intersection(p3)
[Point2D(7, 7)]
>>> p4, p5 = Point(5, 0), Point(0, 3)
>>> l2 = Line(p4, p5)
>>> l1.intersection(l2)
[Point2D(15/8, 15/8)]
>>> p6, p7 = Point(0, 5), Point(2, 6)
>>> s1 = Segment(p6, p7)
>>> l1.intersection(s1)
[]
>>> from sympy import Point3D, Line3D, Segment3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(1, 1, 1), Point3D(7, 7, 7)
>>> l1 = Line3D(p1, p2)
>>> l1.intersection(p3)
[Point3D(7, 7, 7)]
>>> l1 = Line3D(Point3D(4,19,12), Point3D(5,25,17))
>>> l2 = Line3D(Point3D(-3, -15, -19), direction_ratio=[2,8,8])
>>> l1.intersection(l2)
[Point3D(1, 1, -3)]
>>> p6, p7 = Point3D(0, 5, 2), Point3D(2, 6, 3)
>>> s1 = Segment3D(p6, p7)
>>> l1.intersection(s1)
[]
"""
def intersect_parallel_rays(ray1, ray2):
if ray1.direction.dot(ray2.direction) > 0:
# rays point in the same direction
# so return the one that is "in front"
return [ray2] if ray1._span_test(ray2.p1) >= 0 else [ray1]
else:
# rays point in opposite directions
st = ray1._span_test(ray2.p1)
if st < 0:
return []
elif st == 0:
return [ray2.p1]
return [Segment(ray1.p1, ray2.p1)]
def intersect_parallel_ray_and_segment(ray, seg):
st1, st2 = ray._span_test(seg.p1), ray._span_test(seg.p2)
if st1 < 0 and st2 < 0:
return []
elif st1 >= 0 and st2 >= 0:
return [seg]
elif st1 >= 0: # st2 < 0:
return [Segment(ray.p1, seg.p1)]
else: # st1 < 0 and st2 >= 0:
return [Segment(ray.p1, seg.p2)]
def intersect_parallel_segments(seg1, seg2):
if seg1.contains(seg2):
return [seg2]
if seg2.contains(seg1):
return [seg1]
# direct the segments so they're oriented the same way
if seg1.direction.dot(seg2.direction) < 0:
seg2 = Segment(seg2.p2, seg2.p1)
# order the segments so seg1 is "behind" seg2
if seg1._span_test(seg2.p1) < 0:
seg1, seg2 = seg2, seg1
if seg2._span_test(seg1.p2) < 0:
return []
return [Segment(seg2.p1, seg1.p2)]
if not isinstance(other, GeometryEntity):
other = Point(other, dim=self.ambient_dimension)
if other.is_Point:
if self.contains(other):
return [other]
else:
return []
elif isinstance(other, LinearEntity):
# break into cases based on whether
# the lines are parallel, non-parallel intersecting, or skew
pts = Point._normalize_dimension(self.p1, self.p2, other.p1, other.p2)
rank = Point.affine_rank(*pts)
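            # rank 1: all four defining points are collinear; rank 2: they are
            # coplanar but not collinear; any higher rank means the lines are skew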
if rank == 1:
# we're collinear
if isinstance(self, Line):
return [other]
if isinstance(other, Line):
return [self]
if isinstance(self, Ray) and isinstance(other, Ray):
return intersect_parallel_rays(self, other)
if isinstance(self, Ray) and isinstance(other, Segment):
return intersect_parallel_ray_and_segment(self, other)
if isinstance(self, Segment) and isinstance(other, Ray):
return intersect_parallel_ray_and_segment(other, self)
if isinstance(self, Segment) and isinstance(other, Segment):
return intersect_parallel_segments(self, other)
elif rank == 2:
# we're in the same plane
l1 = Line(*pts[:2])
l2 = Line(*pts[2:])
# check to see if we're parallel. If we are, we can't
# be intersecting, since the collinear case was already
# handled
if l1.direction.is_scalar_multiple(l2.direction):
return []
# find the intersection as if everything were lines
# by solving the equation t*d + p1 == s*d' + p1'
m = Matrix([l1.direction, -l2.direction]).transpose()
v = Matrix([l2.p1 - l1.p1]).transpose()
# we cannot use m.solve(v) because that only works for square matrices
m_rref, pivots = m.col_insert(2, v).rref(simplify=True)
# rank == 2 ensures we have 2 pivots, but let's check anyway
if len(pivots) != 2:
raise GeometryError("Failed when solving Mx=b when M={} and b={}".format(m, v))
coeff = m_rref[0, 2]
line_intersection = l1.direction*coeff + self.p1
# if we're both lines, we can skip a containment check
if isinstance(self, Line) and isinstance(other, Line):
return [line_intersection]
if ((isinstance(self, Line) or
self.contains(line_intersection)) and
other.contains(line_intersection)):
return [line_intersection]
return []
else:
# we're skew
return []
return other.intersection(self)
def is_parallel(l1, l2):
"""Are two linear entities parallel?
Parameters
==========
l1 : LinearEntity
l2 : LinearEntity
Returns
=======
True : if l1 and l2 are parallel,
False : otherwise.
See Also
========
coefficients
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(0, 0), Point(1, 1)
>>> p3, p4 = Point(3, 4), Point(6, 7)
>>> l1, l2 = Line(p1, p2), Line(p3, p4)
>>> Line.is_parallel(l1, l2)
True
>>> p5 = Point(6, 6)
>>> l3 = Line(p3, p5)
>>> Line.is_parallel(l1, l3)
False
>>> from sympy import Point3D, Line3D
>>> p1, p2 = Point3D(0, 0, 0), Point3D(3, 4, 5)
>>> p3, p4 = Point3D(2, 1, 1), Point3D(8, 9, 11)
>>> l1, l2 = Line3D(p1, p2), Line3D(p3, p4)
>>> Line3D.is_parallel(l1, l2)
True
>>> p5 = Point3D(6, 6, 6)
>>> l3 = Line3D(p3, p5)
>>> Line3D.is_parallel(l1, l3)
False
"""
        if not isinstance(l1, LinearEntity) or not isinstance(l2, LinearEntity):
raise TypeError('Must pass only LinearEntity objects')
return l1.direction.is_scalar_multiple(l2.direction)
def is_perpendicular(l1, l2):
"""Are two linear entities perpendicular?
Parameters
==========
l1 : LinearEntity
l2 : LinearEntity
Returns
=======
True : if l1 and l2 are perpendicular,
False : otherwise.
See Also
========
coefficients
Examples
========
>>> from sympy import Point, Line
>>> p1, p2, p3 = Point(0, 0), Point(1, 1), Point(-1, 1)
>>> l1, l2 = Line(p1, p2), Line(p1, p3)
>>> l1.is_perpendicular(l2)
True
>>> p4 = Point(5, 3)
>>> l3 = Line(p1, p4)
>>> l1.is_perpendicular(l3)
False
>>> from sympy import Point3D, Line3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(1, 1, 1), Point3D(-1, 2, 0)
>>> l1, l2 = Line3D(p1, p2), Line3D(p2, p3)
>>> l1.is_perpendicular(l2)
False
>>> p4 = Point3D(5, 3, 7)
>>> l3 = Line3D(p1, p4)
>>> l1.is_perpendicular(l3)
False
"""
        if not isinstance(l1, LinearEntity) or not isinstance(l2, LinearEntity):
raise TypeError('Must pass only LinearEntity objects')
return S.Zero.equals(l1.direction.dot(l2.direction))
def is_similar(self, other):
"""
Return True if self and other are contained in the same line.
Examples
========
>>> from sympy import Point, Line
>>> p1, p2, p3 = Point(0, 1), Point(3, 4), Point(2, 3)
>>> l1 = Line(p1, p2)
>>> l2 = Line(p1, p3)
>>> l1.is_similar(l2)
True
"""
l = Line(self.p1, self.p2)
return l.contains(other)
@property
def length(self):
"""
The length of the line.
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(0, 0), Point(3, 5)
>>> l1 = Line(p1, p2)
>>> l1.length
oo
"""
return S.Infinity
@property
def p1(self):
"""The first defining point of a linear entity.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(0, 0), Point(5, 3)
>>> l = Line(p1, p2)
>>> l.p1
Point2D(0, 0)
"""
return self.args[0]
@property
def p2(self):
"""The second defining point of a linear entity.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(0, 0), Point(5, 3)
>>> l = Line(p1, p2)
>>> l.p2
Point2D(5, 3)
"""
return self.args[1]
def parallel_line(self, p):
"""Create a new Line parallel to this linear entity which passes
through the point `p`.
Parameters
==========
p : Point
Returns
=======
line : Line
See Also
========
is_parallel
Examples
========
>>> from sympy import Point, Line
>>> p1, p2, p3 = Point(0, 0), Point(2, 3), Point(-2, 2)
>>> l1 = Line(p1, p2)
>>> l2 = l1.parallel_line(p3)
>>> p3 in l2
True
>>> l1.is_parallel(l2)
True
>>> from sympy import Point3D, Line3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(2, 3, 4), Point3D(-2, 2, 0)
>>> l1 = Line3D(p1, p2)
>>> l2 = l1.parallel_line(p3)
>>> p3 in l2
True
>>> l1.is_parallel(l2)
True
"""
p = Point(p, dim=self.ambient_dimension)
return Line(p, p + self.direction)
def perpendicular_line(self, p):
"""Create a new Line perpendicular to this linear entity which passes
through the point `p`.
Parameters
==========
p : Point
Returns
=======
line : Line
See Also
========
sympy.geometry.line.LinearEntity.is_perpendicular, perpendicular_segment
Examples
========
>>> from sympy import Point, Line
>>> p1, p2, p3 = Point(0, 0), Point(2, 3), Point(-2, 2)
>>> l1 = Line(p1, p2)
>>> l2 = l1.perpendicular_line(p3)
>>> p3 in l2
True
>>> l1.is_perpendicular(l2)
True
>>> from sympy import Point3D, Line3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(2, 3, 4), Point3D(-2, 2, 0)
>>> l1 = Line3D(p1, p2)
>>> l2 = l1.perpendicular_line(p3)
>>> p3 in l2
True
>>> l1.is_perpendicular(l2)
True
"""
p = Point(p, dim=self.ambient_dimension)
if p in self:
p = p + self.direction.orthogonal_direction
return Line(p, self.projection(p))
def perpendicular_segment(self, p):
"""Create a perpendicular line segment from `p` to this line.
        The endpoints of the segment are ``p`` and the closest point on
the line containing self. (If self is not a line, the point might
not be in self.)
Parameters
==========
p : Point
Returns
=======
segment : Segment
Notes
=====
Returns `p` itself if `p` is on this linear entity.
See Also
========
perpendicular_line
Examples
========
>>> from sympy import Point, Line
>>> p1, p2, p3 = Point(0, 0), Point(1, 1), Point(0, 2)
>>> l1 = Line(p1, p2)
>>> s1 = l1.perpendicular_segment(p3)
>>> l1.is_perpendicular(s1)
True
>>> p3 in s1
True
>>> l1.perpendicular_segment(Point(4, 0))
Segment2D(Point2D(4, 0), Point2D(2, 2))
>>> from sympy import Point3D, Line3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(1, 1, 1), Point3D(0, 2, 0)
>>> l1 = Line3D(p1, p2)
>>> s1 = l1.perpendicular_segment(p3)
>>> l1.is_perpendicular(s1)
True
>>> p3 in s1
True
>>> l1.perpendicular_segment(Point3D(4, 0, 0))
Segment3D(Point3D(4, 0, 0), Point3D(4/3, 4/3, 4/3))
"""
p = Point(p, dim=self.ambient_dimension)
if p in self:
return p
l = self.perpendicular_line(p)
# The intersection should be unique, so unpack the singleton
p2, = Intersection(Line(self.p1, self.p2), l)
return Segment(p, p2)
@property
def points(self):
"""The two points used to define this linear entity.
Returns
=======
points : tuple of Points
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(0, 0), Point(5, 11)
>>> l1 = Line(p1, p2)
>>> l1.points
(Point2D(0, 0), Point2D(5, 11))
"""
return (self.p1, self.p2)
def projection(self, other):
"""Project a point, line, ray, or segment onto this linear entity.
Parameters
==========
other : Point or LinearEntity (Line, Ray, Segment)
Returns
=======
projection : Point or LinearEntity (Line, Ray, Segment)
The return type matches the type of the parameter ``other``.
Raises
======
GeometryError
When method is unable to perform projection.
Notes
=====
A projection involves taking the two points that define
the linear entity and projecting those points onto a
Line and then reforming the linear entity using these
projections.
A point P is projected onto a line L by finding the point
on L that is closest to P. This point is the intersection
of L and the line perpendicular to L that passes through P.
See Also
========
sympy.geometry.point.Point, perpendicular_line
Examples
========
>>> from sympy import Point, Line, Segment, Rational
>>> p1, p2, p3 = Point(0, 0), Point(1, 1), Point(Rational(1, 2), 0)
>>> l1 = Line(p1, p2)
>>> l1.projection(p3)
Point2D(1/4, 1/4)
>>> p4, p5 = Point(10, 0), Point(12, 1)
>>> s1 = Segment(p4, p5)
>>> l1.projection(s1)
Segment2D(Point2D(5, 5), Point2D(13/2, 13/2))
>>> p1, p2, p3 = Point(0, 0, 1), Point(1, 1, 2), Point(2, 0, 1)
>>> l1 = Line(p1, p2)
>>> l1.projection(p3)
Point3D(2/3, 2/3, 5/3)
>>> p4, p5 = Point(10, 0, 1), Point(12, 1, 3)
>>> s1 = Segment(p4, p5)
>>> l1.projection(s1)
Segment3D(Point3D(10/3, 10/3, 13/3), Point3D(5, 5, 6))
"""
if not isinstance(other, GeometryEntity):
other = Point(other, dim=self.ambient_dimension)
def proj_point(p):
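            # translate so that self.p1 is the origin, project the
            # displacement onto the direction vector, then translate back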
return Point.project(p - self.p1, self.direction) + self.p1
if isinstance(other, Point):
return proj_point(other)
elif isinstance(other, LinearEntity):
p1, p2 = proj_point(other.p1), proj_point(other.p2)
# test to see if we're degenerate
if p1 == p2:
return p1
projected = other.__class__(p1, p2)
projected = Intersection(self, projected)
# if we happen to have intersected in only a point, return that
if projected.is_FiniteSet and len(projected) == 1:
# projected is a set of size 1, so unpack it in `a`
a, = projected
return a
# order args so projection is in the same direction as self
if self.direction.dot(projected.direction) < 0:
p1, p2 = projected.args
projected = projected.func(p2, p1)
return projected
raise GeometryError(
"Do not know how to project %s onto %s" % (other, self))
def random_point(self, seed=None):
"""A random point on a LinearEntity.
Returns
=======
point : Point
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Line, Ray, Segment
>>> p1, p2 = Point(0, 0), Point(5, 3)
>>> line = Line(p1, p2)
>>> r = line.random_point(seed=42) # seed value is optional
>>> r.n(3)
Point2D(-0.72, -0.432)
>>> r in line
True
>>> Ray(p1, p2).random_point(seed=42).n(3)
Point2D(0.72, 0.432)
>>> Segment(p1, p2).random_point(seed=42).n(3)
Point2D(3.2, 1.92)
"""
if seed is not None:
rng = random.Random(seed)
else:
rng = random
t = Dummy()
pt = self.arbitrary_point(t)
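        # pick the parameter value to match the entity: any real value for a
        # Line, a non-negative value for a Ray, and a value in [0, 1] for a
        # Segment, since arbitrary_point(t) is p1 + t*(p2 - p1)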
if isinstance(self, Ray):
v = abs(rng.gauss(0, 1))
elif isinstance(self, Segment):
v = rng.random()
elif isinstance(self, Line):
v = rng.gauss(0, 1)
else:
raise NotImplementedError('unhandled line type')
return pt.subs(t, Rational(v))
def bisectors(self, other):
"""Returns the perpendicular lines which pass through the intersections
of self and other that are in the same plane.
Parameters
==========
        other : LinearEntity
Returns
=======
list: two Line instances
Examples
========
>>> from sympy.geometry import Point3D, Line3D
>>> r1 = Line3D(Point3D(0, 0, 0), Point3D(1, 0, 0))
>>> r2 = Line3D(Point3D(0, 0, 0), Point3D(0, 1, 0))
>>> r1.bisectors(r2)
[Line3D(Point3D(0, 0, 0), Point3D(1, 1, 0)), Line3D(Point3D(0, 0, 0), Point3D(1, -1, 0))]
"""
if not isinstance(other, LinearEntity):
raise GeometryError("Expecting LinearEntity, not %s" % other)
l1, l2 = self, other
# make sure dimensions match or else a warning will rise from
# intersection calculation
if l1.p1.ambient_dimension != l2.p1.ambient_dimension:
if isinstance(l1, Line2D):
l1, l2 = l2, l1
_, p1 = Point._normalize_dimension(l1.p1, l2.p1, on_morph='ignore')
_, p2 = Point._normalize_dimension(l1.p2, l2.p2, on_morph='ignore')
l2 = Line(p1, p2)
point = intersection(l1, l2)
# Three cases: Lines may intersect in a point, may be equal or may not intersect.
if not point:
raise GeometryError("The lines do not intersect")
else:
pt = point[0]
if isinstance(pt, Line):
# Intersection is a line because both lines are coincident
return [self]
d1 = l1.direction.unit
d2 = l2.direction.unit
bis1 = Line(pt, pt + d1 + d2)
bis2 = Line(pt, pt + d1 - d2)
return [bis1, bis2]
class Line(LinearEntity):
"""An infinite line in space.
    A 2D line is declared with two distinct points, a point and slope, or
an equation. A 3D line may be defined with a point and a direction ratio.
Parameters
==========
p1 : Point
p2 : Point
slope : SymPy expression
direction_ratio : list
equation : equation of a line
Notes
=====
`Line` will automatically subclass to `Line2D` or `Line3D` based
on the dimension of `p1`. The `slope` argument is only relevant
for `Line2D` and the `direction_ratio` argument is only relevant
for `Line3D`.
See Also
========
sympy.geometry.point.Point
sympy.geometry.line.Line2D
sympy.geometry.line.Line3D
Examples
========
>>> from sympy import Point, Eq
>>> from sympy.geometry import Line, Segment
>>> from sympy.abc import x, y, a, b
>>> L = Line(Point(2,3), Point(3,5))
>>> L
Line2D(Point2D(2, 3), Point2D(3, 5))
>>> L.points
(Point2D(2, 3), Point2D(3, 5))
>>> L.equation()
-2*x + y + 1
>>> L.coefficients
(-2, 1, 1)
Instantiate with keyword ``slope``:
>>> Line(Point(0, 0), slope=0)
Line2D(Point2D(0, 0), Point2D(1, 0))
Instantiate with another linear object
>>> s = Segment((0, 0), (0, 1))
>>> Line(s).equation()
x
    The line corresponding to an equation in the form `ax + by + c = 0`
    can be entered:
>>> Line(3*x + y + 18)
Line2D(Point2D(0, -18), Point2D(1, -21))
If `x` or `y` has a different name, then they can be specified, too,
as a string (to match the name) or symbol:
>>> Line(Eq(3*a + b, -18), x='a', y=b)
Line2D(Point2D(0, -18), Point2D(1, -21))
"""
def __new__(cls, *args, **kwargs):
if len(args) == 1 and isinstance(args[0], (Expr, Eq)):
missing = uniquely_named_symbol('?', args).name
if not kwargs:
x = 'x'
y = 'y'
else:
x = kwargs.pop('x', missing)
y = kwargs.pop('y', missing)
if kwargs:
raise ValueError('expecting only x and y as keywords')
equation = args[0]
if isinstance(equation, Eq):
equation = equation.lhs - equation.rhs
def find_or_missing(x):
try:
return find(x, equation)
except ValueError:
return missing
x = find_or_missing(x)
y = find_or_missing(y)
a, b, c = linear_coeffs(equation, x, y)
if b:
return Line((0, -c/b), slope=-a/b)
if a:
return Line((-c/a, 0), slope=oo)
raise ValueError('not found in equation: %s' % (set('xy') - {x, y}))
else:
if len(args) > 0:
p1 = args[0]
if len(args) > 1:
p2 = args[1]
else:
p2 = None
if isinstance(p1, LinearEntity):
if p2:
raise ValueError('If p1 is a LinearEntity, p2 must be None.')
dim = len(p1.p1)
else:
p1 = Point(p1)
dim = len(p1)
if p2 is not None or isinstance(p2, Point) and p2.ambient_dimension != dim:
p2 = Point(p2)
if dim == 2:
return Line2D(p1, p2, **kwargs)
elif dim == 3:
return Line3D(p1, p2, **kwargs)
return LinearEntity.__new__(cls, p1, p2, **kwargs)
def contains(self, other):
"""
Return True if `other` is on this Line, or False otherwise.
Examples
========
>>> from sympy import Line,Point
>>> p1, p2 = Point(0, 1), Point(3, 4)
>>> l = Line(p1, p2)
>>> l.contains(p1)
True
>>> l.contains((0, 1))
True
>>> l.contains((0, 0))
False
>>> a = (0, 0, 0)
>>> b = (1, 1, 1)
>>> c = (2, 2, 2)
>>> l1 = Line(a, b)
>>> l2 = Line(b, a)
>>> l1 == l2
False
>>> l1 in l2
True
"""
if not isinstance(other, GeometryEntity):
other = Point(other, dim=self.ambient_dimension)
if isinstance(other, Point):
return Point.is_collinear(other, self.p1, self.p2)
if isinstance(other, LinearEntity):
return Point.is_collinear(self.p1, self.p2, other.p1, other.p2)
return False
def distance(self, other):
"""
Finds the shortest distance between a line and a point.
Raises
======
NotImplementedError is raised if `other` is not a Point
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(0, 0), Point(1, 1)
>>> s = Line(p1, p2)
>>> s.distance(Point(-1, 1))
sqrt(2)
>>> s.distance((-1, 2))
3*sqrt(2)/2
>>> p1, p2 = Point(0, 0, 0), Point(1, 1, 1)
>>> s = Line(p1, p2)
>>> s.distance(Point(-1, 1, 1))
2*sqrt(6)/3
>>> s.distance((-1, 1, 1))
2*sqrt(6)/3
"""
if not isinstance(other, GeometryEntity):
other = Point(other, dim=self.ambient_dimension)
if self.contains(other):
return S.Zero
return self.perpendicular_segment(other).length
@deprecated(useinstead="equals", issue=12860, deprecated_since_version="1.0")
def equal(self, other):
return self.equals(other)
def equals(self, other):
"""Returns True if self and other are the same mathematical entities"""
if not isinstance(other, Line):
return False
return Point.is_collinear(self.p1, other.p1, self.p2, other.p2)
def plot_interval(self, parameter='t'):
"""The plot interval for the default geometric plot of line. Gives
values that will produce a line that is +/- 5 units long (where a
unit is the distance between the two points that define the line).
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
plot_interval : list (plot interval)
[parameter, lower_bound, upper_bound]
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(0, 0), Point(5, 3)
>>> l1 = Line(p1, p2)
>>> l1.plot_interval()
[t, -5, 5]
"""
t = _symbol(parameter, real=True)
return [t, -5, 5]
class Ray(LinearEntity):
"""A Ray is a semi-line in the space with a source point and a direction.
Parameters
==========
p1 : Point
The source of the Ray
p2 : Point or radian value
This point determines the direction in which the Ray propagates.
If given as an angle it is interpreted in radians with the positive
direction being ccw.
Attributes
==========
source
See Also
========
sympy.geometry.line.Ray2D
sympy.geometry.line.Ray3D
sympy.geometry.point.Point
sympy.geometry.line.Line
Notes
=====
`Ray` will automatically subclass to `Ray2D` or `Ray3D` based on the
dimension of `p1`.
Examples
========
>>> from sympy import Point, pi
>>> from sympy.geometry import Ray
>>> r = Ray(Point(2, 3), Point(3, 5))
>>> r
Ray2D(Point2D(2, 3), Point2D(3, 5))
>>> r.points
(Point2D(2, 3), Point2D(3, 5))
>>> r.source
Point2D(2, 3)
>>> r.xdirection
oo
>>> r.ydirection
oo
>>> r.slope
2
>>> Ray(Point(0, 0), angle=pi/4).slope
1
"""
def __new__(cls, p1, p2=None, **kwargs):
p1 = Point(p1)
if p2 is not None:
p1, p2 = Point._normalize_dimension(p1, Point(p2))
dim = len(p1)
if dim == 2:
return Ray2D(p1, p2, **kwargs)
elif dim == 3:
return Ray3D(p1, p2, **kwargs)
return LinearEntity.__new__(cls, p1, p2, **kwargs)
def _svg(self, scale_factor=1., fill_color="#66cc99"):
"""Returns SVG path element for the LinearEntity.
Parameters
==========
scale_factor : float
Multiplication factor for the SVG stroke-width. Default is 1.
fill_color : str, optional
Hex string for fill color. Default is "#66cc99".
"""
verts = (N(self.p1), N(self.p2))
coords = ["{},{}".format(p.x, p.y) for p in verts]
path = "M {} L {}".format(coords[0], " L ".join(coords[1:]))
return (
'<path fill-rule="evenodd" fill="{2}" stroke="#555555" '
'stroke-width="{0}" opacity="0.6" d="{1}" '
'marker-start="url(#markerCircle)" marker-end="url(#markerArrow)"/>'
).format(2.*scale_factor, path, fill_color)
def contains(self, other):
"""
Is other GeometryEntity contained in this Ray?
Examples
========
>>> from sympy import Ray,Point,Segment
>>> p1, p2 = Point(0, 0), Point(4, 4)
>>> r = Ray(p1, p2)
>>> r.contains(p1)
True
>>> r.contains((1, 1))
True
>>> r.contains((1, 3))
False
>>> s = Segment((1, 1), (2, 2))
>>> r.contains(s)
True
>>> s = Segment((1, 2), (2, 5))
>>> r.contains(s)
False
>>> r1 = Ray((2, 2), (3, 3))
>>> r.contains(r1)
True
>>> r1 = Ray((2, 2), (3, 5))
>>> r.contains(r1)
False
"""
if not isinstance(other, GeometryEntity):
other = Point(other, dim=self.ambient_dimension)
if isinstance(other, Point):
if Point.is_collinear(self.p1, self.p2, other):
# if we're in the direction of the ray, our
# direction vector dot the ray's direction vector
# should be non-negative
return bool((self.p2 - self.p1).dot(other - self.p1) >= S.Zero)
return False
elif isinstance(other, Ray):
if Point.is_collinear(self.p1, self.p2, other.p1, other.p2):
return bool((self.p2 - self.p1).dot(other.p2 - other.p1) > S.Zero)
return False
elif isinstance(other, Segment):
return other.p1 in self and other.p2 in self
# No other known entity can be contained in a Ray
return False
def distance(self, other):
"""
Finds the shortest distance between the ray and a point.
Raises
======
NotImplementedError is raised if `other` is not a Point
Examples
========
>>> from sympy import Point, Ray
>>> p1, p2 = Point(0, 0), Point(1, 1)
>>> s = Ray(p1, p2)
>>> s.distance(Point(-1, -1))
sqrt(2)
>>> s.distance((-1, 2))
3*sqrt(2)/2
>>> p1, p2 = Point(0, 0, 0), Point(1, 1, 2)
>>> s = Ray(p1, p2)
>>> s
Ray3D(Point3D(0, 0, 0), Point3D(1, 1, 2))
>>> s.distance(Point(-1, -1, 2))
4*sqrt(3)/3
>>> s.distance((-1, -1, 2))
4*sqrt(3)/3
"""
if not isinstance(other, GeometryEntity):
other = Point(other, dim=self.ambient_dimension)
if self.contains(other):
return S.Zero
proj = Line(self.p1, self.p2).projection(other)
if self.contains(proj):
return abs(other - proj)
else:
return abs(other - self.source)
def equals(self, other):
"""Returns True if self and other are the same mathematical entities"""
if not isinstance(other, Ray):
return False
return self.source == other.source and other.p2 in self
def plot_interval(self, parameter='t'):
"""The plot interval for the default geometric plot of the Ray. Gives
values that will produce a ray that is 10 units long (where a unit is
the distance between the two points that define the ray).
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
plot_interval : list
[parameter, lower_bound, upper_bound]
Examples
========
>>> from sympy import Ray, pi
>>> r = Ray((0, 0), angle=pi/4)
>>> r.plot_interval()
[t, 0, 10]
"""
t = _symbol(parameter, real=True)
return [t, 0, 10]
@property
def source(self):
"""The point from which the ray emanates.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Ray
>>> p1, p2 = Point(0, 0), Point(4, 1)
>>> r1 = Ray(p1, p2)
>>> r1.source
Point2D(0, 0)
>>> p1, p2 = Point(0, 0, 0), Point(4, 1, 5)
>>> r1 = Ray(p2, p1)
>>> r1.source
Point3D(4, 1, 5)
"""
return self.p1
class Segment(LinearEntity):
"""A line segment in space.
Parameters
==========
p1 : Point
p2 : Point
Attributes
==========
length : number or SymPy expression
midpoint : Point
See Also
========
sympy.geometry.line.Segment2D
sympy.geometry.line.Segment3D
sympy.geometry.point.Point
sympy.geometry.line.Line
Notes
=====
If 2D or 3D points are used to define `Segment`, it will
be automatically subclassed to `Segment2D` or `Segment3D`.
Examples
========
>>> from sympy import Point
>>> from sympy.geometry import Segment
>>> Segment((1, 0), (1, 1)) # tuples are interpreted as pts
Segment2D(Point2D(1, 0), Point2D(1, 1))
>>> s = Segment(Point(4, 3), Point(1, 1))
>>> s.points
(Point2D(4, 3), Point2D(1, 1))
>>> s.slope
2/3
>>> s.length
sqrt(13)
>>> s.midpoint
Point2D(5/2, 2)
>>> Segment((1, 0, 0), (1, 1, 1)) # tuples are interpreted as pts
Segment3D(Point3D(1, 0, 0), Point3D(1, 1, 1))
>>> s = Segment(Point(4, 3, 9), Point(1, 1, 7)); s
Segment3D(Point3D(4, 3, 9), Point3D(1, 1, 7))
>>> s.points
(Point3D(4, 3, 9), Point3D(1, 1, 7))
>>> s.length
sqrt(17)
>>> s.midpoint
Point3D(5/2, 2, 8)
"""
def __new__(cls, p1, p2, **kwargs):
p1, p2 = Point._normalize_dimension(Point(p1), Point(p2))
dim = len(p1)
if dim == 2:
return Segment2D(p1, p2, **kwargs)
elif dim == 3:
return Segment3D(p1, p2, **kwargs)
return LinearEntity.__new__(cls, p1, p2, **kwargs)
def contains(self, other):
"""
Is the other GeometryEntity contained within this Segment?
Examples
========
>>> from sympy import Point, Segment
>>> p1, p2 = Point(0, 1), Point(3, 4)
>>> s = Segment(p1, p2)
>>> s2 = Segment(p2, p1)
>>> s.contains(s2)
True
>>> from sympy import Point3D, Segment3D
>>> p1, p2 = Point3D(0, 1, 1), Point3D(3, 4, 5)
>>> s = Segment3D(p1, p2)
>>> s2 = Segment3D(p2, p1)
>>> s.contains(s2)
True
>>> s.contains((p1 + p2)/2)
True
"""
if not isinstance(other, GeometryEntity):
other = Point(other, dim=self.ambient_dimension)
if isinstance(other, Point):
if Point.is_collinear(other, self.p1, self.p2):
if isinstance(self, Segment2D):
# if it is collinear and is in the bounding box of the
# segment then it must be on the segment
vert = (1/self.slope).equals(0)
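                    # `vert` is True when the segment is vertical (infinite
                    # slope, so 1/slope simplifies to 0), in which case the
                    # bounding check below uses y-coordinates instead of x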
if vert is False:
isin = (self.p1.x - other.x)*(self.p2.x - other.x) <= 0
if isin in (True, False):
return isin
if vert is True:
isin = (self.p1.y - other.y)*(self.p2.y - other.y) <= 0
if isin in (True, False):
return isin
# use the triangle inequality
d1, d2 = other - self.p1, other - self.p2
d = self.p2 - self.p1
# without the call to simplify, SymPy cannot tell that an expression
# like (a+b)*(a/2+b/2) is always non-negative. If it cannot be
# determined, raise an Undecidable error
try:
# the triangle inequality says that |d1|+|d2| >= |d| and is strict
# only if other lies in the line segment
return bool(simplify(Eq(abs(d1) + abs(d2) - abs(d), 0)))
except TypeError:
raise Undecidable("Cannot determine if {} is in {}".format(other, self))
if isinstance(other, Segment):
return other.p1 in self and other.p2 in self
return False
def equals(self, other):
"""Returns True if self and other are the same mathematical entities"""
return isinstance(other, self.func) and list(
ordered(self.args)) == list(ordered(other.args))
def distance(self, other):
"""
Finds the shortest distance between a line segment and a point.
Raises
======
NotImplementedError is raised if `other` is not a Point
Examples
========
>>> from sympy import Point, Segment
>>> p1, p2 = Point(0, 1), Point(3, 4)
>>> s = Segment(p1, p2)
>>> s.distance(Point(10, 15))
sqrt(170)
>>> s.distance((0, 12))
sqrt(73)
>>> from sympy import Point3D, Segment3D
>>> p1, p2 = Point3D(0, 0, 3), Point3D(1, 1, 4)
>>> s = Segment3D(p1, p2)
>>> s.distance(Point3D(10, 15, 12))
sqrt(341)
>>> s.distance((10, 15, 12))
sqrt(341)
"""
if not isinstance(other, GeometryEntity):
other = Point(other, dim=self.ambient_dimension)
if isinstance(other, Point):
vp1 = other - self.p1
vp2 = other - self.p2
dot_prod_sign_1 = self.direction.dot(vp1) >= 0
dot_prod_sign_2 = self.direction.dot(vp2) <= 0
if dot_prod_sign_1 and dot_prod_sign_2:
return Line(self.p1, self.p2).distance(other)
if dot_prod_sign_1 and not dot_prod_sign_2:
return abs(vp2)
if not dot_prod_sign_1 and dot_prod_sign_2:
return abs(vp1)
raise NotImplementedError()
@property
def length(self):
"""The length of the line segment.
See Also
========
sympy.geometry.point.Point.distance
Examples
========
>>> from sympy import Point, Segment
>>> p1, p2 = Point(0, 0), Point(4, 3)
>>> s1 = Segment(p1, p2)
>>> s1.length
5
>>> from sympy import Point3D, Segment3D
>>> p1, p2 = Point3D(0, 0, 0), Point3D(4, 3, 3)
>>> s1 = Segment3D(p1, p2)
>>> s1.length
sqrt(34)
"""
return Point.distance(self.p1, self.p2)
@property
def midpoint(self):
"""The midpoint of the line segment.
See Also
========
sympy.geometry.point.Point.midpoint
Examples
========
>>> from sympy import Point, Segment
>>> p1, p2 = Point(0, 0), Point(4, 3)
>>> s1 = Segment(p1, p2)
>>> s1.midpoint
Point2D(2, 3/2)
>>> from sympy import Point3D, Segment3D
>>> p1, p2 = Point3D(0, 0, 0), Point3D(4, 3, 3)
>>> s1 = Segment3D(p1, p2)
>>> s1.midpoint
Point3D(2, 3/2, 3/2)
"""
return Point.midpoint(self.p1, self.p2)
def perpendicular_bisector(self, p=None):
"""The perpendicular bisector of this segment.
If no point is specified or the point specified is not on the
bisector then the bisector is returned as a Line. Otherwise a
Segment is returned that joins the point specified and the
intersection of the bisector and the segment.
Parameters
==========
p : Point
Returns
=======
bisector : Line or Segment
See Also
========
LinearEntity.perpendicular_segment
Examples
========
>>> from sympy import Point, Segment
>>> p1, p2, p3 = Point(0, 0), Point(6, 6), Point(5, 1)
>>> s1 = Segment(p1, p2)
>>> s1.perpendicular_bisector()
Line2D(Point2D(3, 3), Point2D(-3, 9))
>>> s1.perpendicular_bisector(p3)
Segment2D(Point2D(5, 1), Point2D(3, 3))
"""
l = self.perpendicular_line(self.midpoint)
if p is not None:
p2 = Point(p, dim=self.ambient_dimension)
if p2 in l:
return Segment(p2, self.midpoint)
return l
def plot_interval(self, parameter='t'):
"""The plot interval for the default geometric plot of the Segment gives
values that will produce the full segment in a plot.
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
plot_interval : list
[parameter, lower_bound, upper_bound]
Examples
========
>>> from sympy import Point, Segment
>>> p1, p2 = Point(0, 0), Point(5, 3)
>>> s1 = Segment(p1, p2)
>>> s1.plot_interval()
[t, 0, 1]
"""
t = _symbol(parameter, real=True)
return [t, 0, 1]
class LinearEntity2D(LinearEntity):
"""A base class for all linear entities (line, ray and segment)
in a 2-dimensional Euclidean space.
Attributes
==========
p1
p2
coefficients
slope
points
Notes
=====
This is an abstract class and is not meant to be instantiated.
See Also
========
sympy.geometry.entity.GeometryEntity
"""
@property
def bounds(self):
"""Return a tuple (xmin, ymin, xmax, ymax) representing the bounding
rectangle for the geometric figure.
"""
verts = self.points
xs = [p.x for p in verts]
ys = [p.y for p in verts]
return (min(xs), min(ys), max(xs), max(ys))
def perpendicular_line(self, p):
"""Create a new Line perpendicular to this linear entity which passes
through the point `p`.
Parameters
==========
p : Point
Returns
=======
line : Line
See Also
========
sympy.geometry.line.LinearEntity.is_perpendicular, perpendicular_segment
Examples
========
>>> from sympy import Point, Line
>>> p1, p2, p3 = Point(0, 0), Point(2, 3), Point(-2, 2)
>>> l1 = Line(p1, p2)
>>> l2 = l1.perpendicular_line(p3)
>>> p3 in l2
True
>>> l1.is_perpendicular(l2)
True
"""
p = Point(p, dim=self.ambient_dimension)
# any two lines in R^2 intersect, so blindly making
# a line through p in an orthogonal direction will work
return Line(p, p + self.direction.orthogonal_direction)
@property
def slope(self):
"""The slope of this linear entity, or infinity if vertical.
Returns
=======
slope : number or SymPy expression
See Also
========
coefficients
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(0, 0), Point(3, 5)
>>> l1 = Line(p1, p2)
>>> l1.slope
5/3
>>> p3 = Point(0, 4)
>>> l2 = Line(p1, p3)
>>> l2.slope
oo
"""
d1, d2 = (self.p1 - self.p2).args
if d1 == 0:
return S.Infinity
return simplify(d2/d1)
class Line2D(LinearEntity2D, Line):
"""An infinite line in space 2D.
A line is declared with two distinct points or a point and slope
as defined using keyword `slope`.
Parameters
==========
p1 : Point
pt : Point
slope : SymPy expression
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point
>>> from sympy.geometry import Line, Segment
>>> L = Line(Point(2,3), Point(3,5))
>>> L
Line2D(Point2D(2, 3), Point2D(3, 5))
>>> L.points
(Point2D(2, 3), Point2D(3, 5))
>>> L.equation()
-2*x + y + 1
>>> L.coefficients
(-2, 1, 1)
Instantiate with keyword ``slope``:
>>> Line(Point(0, 0), slope=0)
Line2D(Point2D(0, 0), Point2D(1, 0))
Instantiate with another linear object
>>> s = Segment((0, 0), (0, 1))
>>> Line(s).equation()
x
"""
def __new__(cls, p1, pt=None, slope=None, **kwargs):
if isinstance(p1, LinearEntity):
if pt is not None:
raise ValueError('When p1 is a LinearEntity, pt should be None')
p1, pt = Point._normalize_dimension(*p1.args, dim=2)
else:
p1 = Point(p1, dim=2)
if pt is not None and slope is None:
try:
p2 = Point(pt, dim=2)
except (NotImplementedError, TypeError, ValueError):
raise ValueError(filldedent('''
The 2nd argument was not a valid Point.
If it was a slope, enter it with keyword "slope".
'''))
elif slope is not None and pt is None:
slope = sympify(slope)
if slope.is_finite is False:
# when infinite slope, don't change x
dx = 0
dy = 1
else:
# go over 1 up slope
dx = 1
dy = slope
# XXX avoiding simplification by adding to coords directly
p2 = Point(p1.x + dx, p1.y + dy, evaluate=False)
else:
raise ValueError('A 2nd Point or keyword "slope" must be used.')
return LinearEntity2D.__new__(cls, p1, p2, **kwargs)
def _svg(self, scale_factor=1., fill_color="#66cc99"):
"""Returns SVG path element for the LinearEntity.
Parameters
==========
scale_factor : float
Multiplication factor for the SVG stroke-width. Default is 1.
fill_color : str, optional
Hex string for fill color. Default is "#66cc99".
"""
verts = (N(self.p1), N(self.p2))
coords = ["{},{}".format(p.x, p.y) for p in verts]
path = "M {} L {}".format(coords[0], " L ".join(coords[1:]))
return (
'<path fill-rule="evenodd" fill="{2}" stroke="#555555" '
'stroke-width="{0}" opacity="0.6" d="{1}" '
'marker-start="url(#markerReverseArrow)" marker-end="url(#markerArrow)"/>'
).format(2.*scale_factor, path, fill_color)
@property
def coefficients(self):
"""The coefficients (`a`, `b`, `c`) for `ax + by + c = 0`.
See Also
========
sympy.geometry.line.Line2D.equation
Examples
========
>>> from sympy import Point, Line
>>> from sympy.abc import x, y
>>> p1, p2 = Point(0, 0), Point(5, 3)
>>> l = Line(p1, p2)
>>> l.coefficients
(-3, 5, 0)
>>> p3 = Point(x, y)
>>> l2 = Line(p1, p3)
>>> l2.coefficients
(-y, x, 0)
"""
p1, p2 = self.points
if p1.x == p2.x:
return (S.One, S.Zero, -p1.x)
elif p1.y == p2.y:
return (S.Zero, S.One, -p1.y)
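        # general case: expanding the determinant |x y 1; x1 y1 1; x2 y2 1| = 0
        # gives a = y1 - y2, b = x2 - x1, c = x1*y2 - x2*y1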
return tuple([simplify(i) for i in
(self.p1.y - self.p2.y,
self.p2.x - self.p1.x,
self.p1.x*self.p2.y - self.p1.y*self.p2.x)])
def equation(self, x='x', y='y'):
"""The equation of the line: ax + by + c.
Parameters
==========
x : str, optional
The name to use for the x-axis, default value is 'x'.
y : str, optional
The name to use for the y-axis, default value is 'y'.
Returns
=======
equation : SymPy expression
See Also
========
sympy.geometry.line.Line2D.coefficients
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(1, 0), Point(5, 3)
>>> l1 = Line(p1, p2)
>>> l1.equation()
-3*x + 4*y + 3
"""
x = _symbol(x, real=True)
y = _symbol(y, real=True)
p1, p2 = self.points
if p1.x == p2.x:
return x - p1.x
elif p1.y == p2.y:
return y - p1.y
a, b, c = self.coefficients
return a*x + b*y + c
class Ray2D(LinearEntity2D, Ray):
"""
A Ray is a semi-line in the space with a source point and a direction.
Parameters
==========
p1 : Point
The source of the Ray
p2 : Point or radian value
This point determines the direction in which the Ray propagates.
If given as an angle it is interpreted in radians with the positive
direction being ccw.
Attributes
==========
source
xdirection
ydirection
See Also
========
sympy.geometry.point.Point, Line
Examples
========
>>> from sympy import Point, pi
>>> from sympy.geometry import Ray
>>> r = Ray(Point(2, 3), Point(3, 5))
>>> r
Ray2D(Point2D(2, 3), Point2D(3, 5))
>>> r.points
(Point2D(2, 3), Point2D(3, 5))
>>> r.source
Point2D(2, 3)
>>> r.xdirection
oo
>>> r.ydirection
oo
>>> r.slope
2
>>> Ray(Point(0, 0), angle=pi/4).slope
1
"""
def __new__(cls, p1, pt=None, angle=None, **kwargs):
p1 = Point(p1, dim=2)
if pt is not None and angle is None:
try:
p2 = Point(pt, dim=2)
except (NotImplementedError, TypeError, ValueError):
raise ValueError(filldedent('''
The 2nd argument was not a valid Point; if
it was meant to be an angle it should be
given with keyword "angle".'''))
if p1 == p2:
raise ValueError('A Ray requires two distinct points.')
elif angle is not None and pt is None:
# we need to know if the angle is an odd multiple of pi/2
c = pi_coeff(sympify(angle))
p2 = None
if c is not None:
if c.is_Rational:
if c.q == 2:
if c.p == 1:
p2 = p1 + Point(0, 1)
elif c.p == 3:
p2 = p1 + Point(0, -1)
elif c.q == 1:
if c.p == 0:
p2 = p1 + Point(1, 0)
elif c.p == 1:
p2 = p1 + Point(-1, 0)
if p2 is None:
c *= S.Pi
else:
c = angle % (2*S.Pi)
if not p2:
m = 2*c/S.Pi
left = And(1 < m, m < 3) # is it in quadrant 2 or 3?
x = Piecewise((-1, left), (Piecewise((0, Eq(m % 1, 0)), (1, True)), True))
y = Piecewise((-tan(c), left), (Piecewise((1, Eq(m, 1)), (-1, Eq(m, 3)), (tan(c), True)), True))
p2 = p1 + Point(x, y)
else:
raise ValueError('A 2nd point or keyword "angle" must be used.')
return LinearEntity2D.__new__(cls, p1, p2, **kwargs)
@property
def xdirection(self):
"""The x direction of the ray.
Positive infinity if the ray points in the positive x direction,
negative infinity if the ray points in the negative x direction,
or 0 if the ray is vertical.
See Also
========
ydirection
Examples
========
>>> from sympy import Point, Ray
>>> p1, p2, p3 = Point(0, 0), Point(1, 1), Point(0, -1)
>>> r1, r2 = Ray(p1, p2), Ray(p1, p3)
>>> r1.xdirection
oo
>>> r2.xdirection
0
"""
if self.p1.x < self.p2.x:
return S.Infinity
elif self.p1.x == self.p2.x:
return S.Zero
else:
return S.NegativeInfinity
@property
def ydirection(self):
"""The y direction of the ray.
Positive infinity if the ray points in the positive y direction,
negative infinity if the ray points in the negative y direction,
or 0 if the ray is horizontal.
See Also
========
xdirection
Examples
========
>>> from sympy import Point, Ray
>>> p1, p2, p3 = Point(0, 0), Point(-1, -1), Point(-1, 0)
>>> r1, r2 = Ray(p1, p2), Ray(p1, p3)
>>> r1.ydirection
-oo
>>> r2.ydirection
0
"""
if self.p1.y < self.p2.y:
return S.Infinity
elif self.p1.y == self.p2.y:
return S.Zero
else:
return S.NegativeInfinity
def closing_angle(r1, r2):
"""Return the angle by which r2 must be rotated so it faces the same
direction as r1.
Parameters
==========
r1 : Ray2D
r2 : Ray2D
Returns
=======
angle : angle in radians (ccw angle is positive)
See Also
========
LinearEntity.angle_between
Examples
========
>>> from sympy import Ray, pi
>>> r1 = Ray((0, 0), (1, 0))
>>> r2 = r1.rotate(-pi/2)
>>> angle = r1.closing_angle(r2); angle
pi/2
>>> r2.rotate(angle).direction.unit == r1.direction.unit
True
>>> r2.closing_angle(r1)
-pi/2
"""
if not all(isinstance(r, Ray2D) for r in (r1, r2)):
# although the direction property is defined for
# all linear entities, only the Ray is truly a
# directed object
raise TypeError('Both arguments must be Ray2D objects.')
a1 = atan2(*list(reversed(r1.direction.args)))
a2 = atan2(*list(reversed(r2.direction.args)))
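        # atan2 gives angles in (-pi, pi]; when the two angles have opposite
        # signs, shift the negative one by 2*pi so the difference below is
        # not distorted by the branch cut at +/- pi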
if a1*a2 < 0:
a1 = 2*S.Pi + a1 if a1 < 0 else a1
a2 = 2*S.Pi + a2 if a2 < 0 else a2
return a1 - a2
class Segment2D(LinearEntity2D, Segment):
"""A line segment in 2D space.
Parameters
==========
p1 : Point
p2 : Point
Attributes
==========
length : number or SymPy expression
midpoint : Point
See Also
========
sympy.geometry.point.Point, Line
Examples
========
>>> from sympy import Point
>>> from sympy.geometry import Segment
>>> Segment((1, 0), (1, 1)) # tuples are interpreted as pts
Segment2D(Point2D(1, 0), Point2D(1, 1))
>>> s = Segment(Point(4, 3), Point(1, 1)); s
Segment2D(Point2D(4, 3), Point2D(1, 1))
>>> s.points
(Point2D(4, 3), Point2D(1, 1))
>>> s.slope
2/3
>>> s.length
sqrt(13)
>>> s.midpoint
Point2D(5/2, 2)
"""
def __new__(cls, p1, p2, **kwargs):
p1 = Point(p1, dim=2)
p2 = Point(p2, dim=2)
if p1 == p2:
return p1
return LinearEntity2D.__new__(cls, p1, p2, **kwargs)
def _svg(self, scale_factor=1., fill_color="#66cc99"):
"""Returns SVG path element for the LinearEntity.
Parameters
==========
scale_factor : float
Multiplication factor for the SVG stroke-width. Default is 1.
fill_color : str, optional
Hex string for fill color. Default is "#66cc99".
"""
verts = (N(self.p1), N(self.p2))
coords = ["{},{}".format(p.x, p.y) for p in verts]
path = "M {} L {}".format(coords[0], " L ".join(coords[1:]))
return (
'<path fill-rule="evenodd" fill="{2}" stroke="#555555" '
'stroke-width="{0}" opacity="0.6" d="{1}" />'
).format(2.*scale_factor, path, fill_color)
class LinearEntity3D(LinearEntity):
"""An base class for all linear entities (line, ray and segment)
in a 3-dimensional Euclidean space.
Attributes
==========
p1
p2
direction_ratio
direction_cosine
points
Notes
=====
This is a base class and is not meant to be instantiated.
"""
def __new__(cls, p1, p2, **kwargs):
p1 = Point3D(p1, dim=3)
p2 = Point3D(p2, dim=3)
if p1 == p2:
# if it makes sense to return a Point, handle in subclass
raise ValueError(
"%s.__new__ requires two unique Points." % cls.__name__)
return GeometryEntity.__new__(cls, p1, p2, **kwargs)
ambient_dimension = 3
@property
def direction_ratio(self):
"""The direction ratio of a given line in 3D.
See Also
========
sympy.geometry.line.Line3D.equation
Examples
========
>>> from sympy import Point3D, Line3D
>>> p1, p2 = Point3D(0, 0, 0), Point3D(5, 3, 1)
>>> l = Line3D(p1, p2)
>>> l.direction_ratio
[5, 3, 1]
"""
p1, p2 = self.points
return p1.direction_ratio(p2)
@property
def direction_cosine(self):
"""The normalized direction ratio of a given line in 3D.
See Also
========
sympy.geometry.line.Line3D.equation
Examples
========
>>> from sympy import Point3D, Line3D
>>> p1, p2 = Point3D(0, 0, 0), Point3D(5, 3, 1)
>>> l = Line3D(p1, p2)
>>> l.direction_cosine
[sqrt(35)/7, 3*sqrt(35)/35, sqrt(35)/35]
>>> sum(i**2 for i in _)
1
"""
p1, p2 = self.points
return p1.direction_cosine(p2)
class Line3D(LinearEntity3D, Line):
"""An infinite 3D line in space.
A line is declared with two distinct points or a point and direction_ratio
as defined using keyword `direction_ratio`.
Parameters
==========
p1 : Point3D
pt : Point3D
direction_ratio : list
See Also
========
sympy.geometry.point.Point3D
sympy.geometry.line.Line
sympy.geometry.line.Line2D
Examples
========
>>> from sympy import Point3D
>>> from sympy.geometry import Line3D
>>> L = Line3D(Point3D(2, 3, 4), Point3D(3, 5, 1))
>>> L
Line3D(Point3D(2, 3, 4), Point3D(3, 5, 1))
>>> L.points
(Point3D(2, 3, 4), Point3D(3, 5, 1))
"""
def __new__(cls, p1, pt=None, direction_ratio=(), **kwargs):
if isinstance(p1, LinearEntity3D):
if pt is not None:
raise ValueError('if p1 is a LinearEntity, pt must be None.')
p1, pt = p1.args
else:
p1 = Point(p1, dim=3)
if pt is not None and len(direction_ratio) == 0:
pt = Point(pt, dim=3)
elif len(direction_ratio) == 3 and pt is None:
pt = Point3D(p1.x + direction_ratio[0], p1.y + direction_ratio[1],
p1.z + direction_ratio[2])
else:
raise ValueError('A 2nd Point or keyword "direction_ratio" must '
'be used.')
return LinearEntity3D.__new__(cls, p1, pt, **kwargs)
def equation(self, x='x', y='y', z='z', k=None):
"""Return the equations that define the line in 3D.
Parameters
==========
x : str, optional
The name to use for the x-axis, default value is 'x'.
y : str, optional
The name to use for the y-axis, default value is 'y'.
z : str, optional
The name to use for the z-axis, default value is 'z'.
Returns
=======
equation : Tuple of simultaneous equations
Examples
========
>>> from sympy import Point3D, Line3D, solve
>>> from sympy.abc import x, y, z
>>> p1, p2 = Point3D(1, 0, 0), Point3D(5, 3, 0)
>>> l1 = Line3D(p1, p2)
>>> eq = l1.equation(x, y, z); eq
(-3*x + 4*y + 3, z)
>>> solve(eq.subs(z, 0), (x, y, z))
{x: 4*y/3 + 1}
"""
if k is not None:
SymPyDeprecationWarning(
feature="equation() no longer needs 'k'",
issue=13742,
deprecated_since_version="1.2").warn()
from sympy.solvers.solvers import solve
x, y, z, k = [_symbol(i, real=True) for i in (x, y, z, 'k')]
p1, p2 = self.points
d1, d2, d3 = p1.direction_ratio(p2)
x1, y1, z1 = p1
eqs = [-d1*k + x - x1, -d2*k + y - y1, -d3*k + z - z1]
# eliminate k from equations by solving first eq with k for k
for i, e in enumerate(eqs):
if e.has(k):
kk = solve(eqs[i], k)[0]
eqs.pop(i)
break
return Tuple(*[i.subs(k, kk).as_numer_denom()[0] for i in eqs])
class Ray3D(LinearEntity3D, Ray):
"""
A Ray is a semi-line in the space with a source point and a direction.
Parameters
==========
p1 : Point3D
The source of the Ray
p2 : Point or a direction vector
direction_ratio: Determines the direction in which the Ray propagates.
Attributes
==========
source
xdirection
ydirection
zdirection
See Also
========
sympy.geometry.point.Point3D, Line3D
Examples
========
>>> from sympy import Point3D
>>> from sympy.geometry import Ray3D
>>> r = Ray3D(Point3D(2, 3, 4), Point3D(3, 5, 0))
>>> r
Ray3D(Point3D(2, 3, 4), Point3D(3, 5, 0))
>>> r.points
(Point3D(2, 3, 4), Point3D(3, 5, 0))
>>> r.source
Point3D(2, 3, 4)
>>> r.xdirection
oo
>>> r.ydirection
oo
>>> r.direction_ratio
[1, 2, -4]
"""
def __new__(cls, p1, pt=None, direction_ratio=(), **kwargs):
if isinstance(p1, LinearEntity3D):
if pt is not None:
raise ValueError('If p1 is a LinearEntity, pt must be None')
p1, pt = p1.args
else:
p1 = Point(p1, dim=3)
if pt is not None and len(direction_ratio) == 0:
pt = Point(pt, dim=3)
elif len(direction_ratio) == 3 and pt is None:
pt = Point3D(p1.x + direction_ratio[0], p1.y + direction_ratio[1],
p1.z + direction_ratio[2])
else:
raise ValueError(filldedent('''
A 2nd Point or keyword "direction_ratio" must be used.
'''))
return LinearEntity3D.__new__(cls, p1, pt, **kwargs)
@property
def xdirection(self):
"""The x direction of the ray.
Positive infinity if the ray points in the positive x direction,
negative infinity if the ray points in the negative x direction,
        or 0 if the ray is parallel to the yz-plane.
See Also
========
ydirection
Examples
========
>>> from sympy import Point3D, Ray3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(1, 1, 1), Point3D(0, -1, 0)
>>> r1, r2 = Ray3D(p1, p2), Ray3D(p1, p3)
>>> r1.xdirection
oo
>>> r2.xdirection
0
"""
if self.p1.x < self.p2.x:
return S.Infinity
elif self.p1.x == self.p2.x:
return S.Zero
else:
return S.NegativeInfinity
@property
def ydirection(self):
"""The y direction of the ray.
Positive infinity if the ray points in the positive y direction,
negative infinity if the ray points in the negative y direction,
        or 0 if the ray is parallel to the xz-plane.
See Also
========
xdirection
Examples
========
>>> from sympy import Point3D, Ray3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(-1, -1, -1), Point3D(-1, 0, 0)
>>> r1, r2 = Ray3D(p1, p2), Ray3D(p1, p3)
>>> r1.ydirection
-oo
>>> r2.ydirection
0
"""
if self.p1.y < self.p2.y:
return S.Infinity
elif self.p1.y == self.p2.y:
return S.Zero
else:
return S.NegativeInfinity
@property
def zdirection(self):
"""The z direction of the ray.
Positive infinity if the ray points in the positive z direction,
negative infinity if the ray points in the negative z direction,
        or 0 if the ray is parallel to the xy-plane.
See Also
========
xdirection
Examples
========
>>> from sympy import Point3D, Ray3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(-1, -1, -1), Point3D(-1, 0, 0)
>>> r1, r2 = Ray3D(p1, p2), Ray3D(p1, p3)
>>> r1.ydirection
-oo
>>> r2.ydirection
0
>>> r2.zdirection
0
"""
if self.p1.z < self.p2.z:
return S.Infinity
elif self.p1.z == self.p2.z:
return S.Zero
else:
return S.NegativeInfinity
class Segment3D(LinearEntity3D, Segment):
"""A line segment in a 3D space.
Parameters
==========
p1 : Point3D
p2 : Point3D
Attributes
==========
length : number or SymPy expression
midpoint : Point3D
See Also
========
sympy.geometry.point.Point3D, Line3D
Examples
========
>>> from sympy import Point3D
>>> from sympy.geometry import Segment3D
>>> Segment3D((1, 0, 0), (1, 1, 1)) # tuples are interpreted as pts
Segment3D(Point3D(1, 0, 0), Point3D(1, 1, 1))
>>> s = Segment3D(Point3D(4, 3, 9), Point3D(1, 1, 7)); s
Segment3D(Point3D(4, 3, 9), Point3D(1, 1, 7))
>>> s.points
(Point3D(4, 3, 9), Point3D(1, 1, 7))
>>> s.length
sqrt(17)
>>> s.midpoint
Point3D(5/2, 2, 8)
"""
def __new__(cls, p1, p2, **kwargs):
p1 = Point(p1, dim=3)
p2 = Point(p2, dim=3)
if p1 == p2:
return p1
return LinearEntity3D.__new__(cls, p1, p2, **kwargs)
ef02e38f8b463639aa92e338f62f6b1dfaae6da6a4111a89ea634b3b035f723f
from sympy.core import Expr, S, oo, pi, sympify
from sympy.core.evalf import N
from sympy.core.sorting import default_sort_key, ordered
from sympy.core.symbol import _symbol, Dummy, symbols, Symbol
from sympy.functions.elementary.complexes import sign
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.elementary.trigonometric import cos, sin, tan
from .ellipse import Circle
from .entity import GeometryEntity, GeometrySet
from .exceptions import GeometryError
from .line import Line, Segment, Ray
from .point import Point
from sympy.logic import And
from sympy.matrices import Matrix
from sympy.simplify.simplify import simplify
from sympy.solvers.solvers import solve
from sympy.utilities.iterables import has_dups, has_variety, uniq, rotate_left, least_rotation
from sympy.utilities.misc import as_int, func_name
from mpmath.libmp.libmpf import prec_to_dps
import warnings
class Polygon(GeometrySet):
"""A two-dimensional polygon.
A simple polygon in space. Can be constructed from a sequence of points
or from a center, radius, number of sides and rotation angle.
Parameters
==========
vertices : sequence of Points
Optional parameters
==========
n : If > 0, an n-sided RegularPolygon is created. See below.
Default value is 0.
Attributes
==========
area
angles
perimeter
vertices
centroid
sides
Raises
======
GeometryError
If all parameters are not Points.
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Segment, Triangle
Notes
=====
Polygons are treated as closed paths rather than 2D areas so
    some calculations can be negative or positive (e.g., area)
based on the orientation of the points.
Any consecutive identical points are reduced to a single point
and any points collinear and between two points will be removed
unless they are needed to define an explicit intersection (see examples).
A Triangle, Segment or Point will be returned when there are 3 or
fewer points provided.
Examples
========
>>> from sympy import Polygon, pi
>>> p1, p2, p3, p4, p5 = [(0, 0), (1, 0), (5, 1), (0, 1), (3, 0)]
>>> Polygon(p1, p2, p3, p4)
Polygon(Point2D(0, 0), Point2D(1, 0), Point2D(5, 1), Point2D(0, 1))
>>> Polygon(p1, p2)
Segment2D(Point2D(0, 0), Point2D(1, 0))
>>> Polygon(p1, p2, p5)
Segment2D(Point2D(0, 0), Point2D(3, 0))
The area of a polygon is calculated as positive when vertices are
traversed in a ccw direction. When the sides of a polygon cross the
area will have positive and negative contributions. The following
defines a Z shape where the bottom right connects back to the top
left.
>>> Polygon((0, 2), (2, 2), (0, 0), (2, 0)).area
0
When the keyword `n` is used to define the number of sides of the
Polygon then a RegularPolygon is created and the other arguments are
interpreted as center, radius and rotation. The unrotated RegularPolygon
will always have a vertex at Point(r, 0) where `r` is the radius of the
circle that circumscribes the RegularPolygon. Its method `spin` can be
used to increment that angle.
>>> p = Polygon((0,0), 1, n=3)
>>> p
RegularPolygon(Point2D(0, 0), 1, 3, 0)
>>> p.vertices[0]
Point2D(1, 0)
>>> p.args[0]
Point2D(0, 0)
>>> p.spin(pi/2)
>>> p.vertices[0]
Point2D(0, 1)
"""
def __new__(cls, *args, n = 0, **kwargs):
if n:
args = list(args)
# return a virtual polygon with n sides
if len(args) == 2: # center, radius
args.append(n)
elif len(args) == 3: # center, radius, rotation
args.insert(2, n)
return RegularPolygon(*args, **kwargs)
vertices = [Point(a, dim=2, **kwargs) for a in args]
# remove consecutive duplicates
nodup = []
for p in vertices:
if nodup and p == nodup[-1]:
continue
nodup.append(p)
if len(nodup) > 1 and nodup[-1] == nodup[0]:
nodup.pop() # last point was same as first
# remove collinear points
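        # starting at i = -3 lets the wrap-around triples that straddle the
        # closing edge of the polygon be checked as well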
i = -3
while i < len(nodup) - 3 and len(nodup) > 2:
a, b, c = nodup[i], nodup[i + 1], nodup[i + 2]
if Point.is_collinear(a, b, c):
nodup.pop(i + 1)
if a == c:
nodup.pop(i)
else:
i += 1
vertices = list(nodup)
if len(vertices) > 3:
return GeometryEntity.__new__(cls, *vertices, **kwargs)
elif len(vertices) == 3:
return Triangle(*vertices, **kwargs)
elif len(vertices) == 2:
return Segment(*vertices, **kwargs)
else:
return Point(*vertices, **kwargs)
@property
def area(self):
"""
The area of the polygon.
Notes
=====
The area calculation can be positive or negative based on the
orientation of the points. If any side of the polygon crosses
any other side, there will be areas having opposite signs.
See Also
========
sympy.geometry.ellipse.Ellipse.area
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.area
3
In the Z shaped polygon (with the lower right connecting back
to the upper left) the areas cancel out:
>>> Z = Polygon((0, 1), (1, 1), (0, 0), (1, 0))
>>> Z.area
0
In the M shaped polygon, areas do not cancel because no side
crosses any other (though there is a point of contact).
>>> M = Polygon((0, 0), (0, 1), (2, 0), (3, 1), (3, 0))
>>> M.area
-3/2
"""
area = 0
args = self.args
for i in range(len(args)):
x1, y1 = args[i - 1].args
x2, y2 = args[i].args
area += x1*y2 - x2*y1
return simplify(area) / 2
@staticmethod
def _isright(a, b, c):
"""Return True/False for cw/ccw orientation.
Examples
========
>>> from sympy import Point, Polygon
>>> a, b, c = [Point(i) for i in [(0, 0), (1, 1), (1, 0)]]
>>> Polygon._isright(a, b, c)
True
>>> Polygon._isright(a, c, b)
False
"""
ba = b - a
ca = c - a
t_area = simplify(ba.x*ca.y - ca.x*ba.y)
res = t_area.is_nonpositive
if res is None:
raise ValueError("Can't determine orientation")
return res
@property
def angles(self):
"""The internal angle at each vertex.
Returns
=======
angles : dict
A dictionary where each key is a vertex and each value is the
internal angle at that vertex. The vertices are represented as
Points.
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.LinearEntity.angle_between
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.angles[p1]
pi/2
>>> poly.angles[p2]
acos(-4*sqrt(17)/17)
"""
# Determine orientation of points
args = self.vertices
cw = self._isright(args[-1], args[0], args[1])
ret = {}
for i in range(len(args)):
a, b, c = args[i - 2], args[i - 1], args[i]
ang = Ray(b, a).angle_between(Ray(b, c))
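            # when the turn at b disagrees with the overall orientation, the
            # interior angle at b is reflex, so report 2*pi - ang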
if cw ^ self._isright(a, b, c):
ret[b] = 2*S.Pi - ang
else:
ret[b] = ang
return ret
@property
def ambient_dimension(self):
return self.vertices[0].ambient_dimension
@property
def perimeter(self):
"""The perimeter of the polygon.
Returns
=======
perimeter : number or Basic instance
See Also
========
sympy.geometry.line.Segment.length
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.perimeter
sqrt(17) + 7
"""
p = 0
args = self.vertices
for i in range(len(args)):
p += args[i - 1].distance(args[i])
return simplify(p)
@property
def vertices(self):
"""The vertices of the polygon.
Returns
=======
vertices : list of Points
Notes
=====
When iterating over the vertices, it is more efficient to index self
rather than to request the vertices and index them. Only use the
vertices when you want to process all of them at once. This is even
more important with RegularPolygons that calculate each vertex.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.vertices
[Point2D(0, 0), Point2D(1, 0), Point2D(5, 1), Point2D(0, 1)]
>>> poly.vertices[0]
Point2D(0, 0)
"""
return list(self.args)
@property
def centroid(self):
"""The centroid of the polygon.
Returns
=======
centroid : Point
See Also
========
sympy.geometry.point.Point, sympy.geometry.util.centroid
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.centroid
Point2D(31/18, 11/18)
"""
A = 1/(6*self.area)
cx, cy = 0, 0
args = self.args
for i in range(len(args)):
x1, y1 = args[i - 1].args
x2, y2 = args[i].args
v = x1*y2 - x2*y1
cx += v*(x1 + x2)
cy += v*(y1 + y2)
return Point(simplify(A*cx), simplify(A*cy))
def second_moment_of_area(self, point=None):
"""Returns the second moment and product moment of area of a two dimensional polygon.
Parameters
==========
point : Point, two-tuple of sympifyable objects, or None(default=None)
point is the point about which second moment of area is to be found.
If "point=None" it will be calculated about the axis passing through the
centroid of the polygon.
Returns
=======
I_xx, I_yy, I_xy : number or SymPy expression
I_xx, I_yy are second moment of area of a two dimensional polygon.
I_xy is product moment of area of a two dimensional polygon.
Examples
========
>>> from sympy import Polygon, symbols
>>> a, b = symbols('a, b')
>>> p1, p2, p3, p4, p5 = [(0, 0), (a, 0), (a, b), (0, b), (a/3, b/3)]
>>> rectangle = Polygon(p1, p2, p3, p4)
>>> rectangle.second_moment_of_area()
(a*b**3/12, a**3*b/12, 0)
>>> rectangle.second_moment_of_area(p5)
(a*b**3/9, a**3*b/9, a**2*b**2/36)
References
==========
.. [1] https://en.wikipedia.org/wiki/Second_moment_of_area
"""
I_xx, I_yy, I_xy = 0, 0, 0
args = self.vertices
for i in range(len(args)):
x1, y1 = args[i-1].args
x2, y2 = args[i].args
v = x1*y2 - x2*y1
I_xx += (y1**2 + y1*y2 + y2**2)*v
I_yy += (x1**2 + x1*x2 + x2**2)*v
I_xy += (x1*y2 + 2*x1*y1 + 2*x2*y2 + x2*y1)*v
A = self.area
c_x = self.centroid[0]
c_y = self.centroid[1]
# parallel axis theorem
I_xx_c = (I_xx/12) - (A*(c_y**2))
I_yy_c = (I_yy/12) - (A*(c_x**2))
I_xy_c = (I_xy/24) - (A*(c_x*c_y))
if point is None:
return I_xx_c, I_yy_c, I_xy_c
I_xx = (I_xx_c + A*((point[1]-c_y)**2))
I_yy = (I_yy_c + A*((point[0]-c_x)**2))
I_xy = (I_xy_c + A*((point[0]-c_x)*(point[1]-c_y)))
return I_xx, I_yy, I_xy
def first_moment_of_area(self, point=None):
"""
Returns the first moment of area of a two-dimensional polygon with
respect to a certain point of interest.
First moment of area is a measure of the distribution of the area
of a polygon in relation to an axis. The first moment of area of
the entire polygon about its own centroid is always zero. Therefore,
here it is calculated for an area, above or below a certain point
of interest, that makes up a smaller portion of the polygon. This
area is bounded by the point of interest and the extreme end
(top or bottom) of the polygon. The first moment for this area is
        then determined about the centroidal axis of the initial polygon.
References
==========
.. [1] https://skyciv.com/docs/tutorials/section-tutorials/calculating-the-statical-or-first-moment-of-area-of-beam-sections/?cc=BMD
.. [2] https://mechanicalc.com/reference/cross-sections
Parameters
==========
point: Point, two-tuple of sympifyable objects, or None (default=None)
point is the point above or below which the area of interest lies
If ``point=None`` then the centroid acts as the point of interest.
Returns
=======
Q_x, Q_y: number or SymPy expressions
Q_x is the first moment of area about the x-axis
Q_y is the first moment of area about the y-axis
            A negative sign indicates that the first moment of area is
determined for a section below (or left of) the centroidal axis
Examples
========
>>> from sympy import Point, Polygon
>>> a, b = 50, 10
>>> p1, p2, p3, p4 = [(0, b), (0, 0), (a, 0), (a, b)]
>>> p = Polygon(p1, p2, p3, p4)
>>> p.first_moment_of_area()
(625, 3125)
>>> p.first_moment_of_area(point=Point(30, 7))
(525, 3000)
"""
if point:
xc, yc = self.centroid
else:
point = self.centroid
xc, yc = point
h_line = Line(point, slope=0)
v_line = Line(point, slope=S.Infinity)
h_poly = self.cut_section(h_line)
v_poly = self.cut_section(v_line)
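        # cut_section returns the two pieces on either side of each cutting
        # line; the smaller piece is used for each axis and its first moment
        # is taken about the centroidal axes of the full polygon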
poly_1 = h_poly[0] if h_poly[0].area <= h_poly[1].area else h_poly[1]
poly_2 = v_poly[0] if v_poly[0].area <= v_poly[1].area else v_poly[1]
Q_x = (poly_1.centroid.y - yc)*poly_1.area
Q_y = (poly_2.centroid.x - xc)*poly_2.area
return Q_x, Q_y
def polar_second_moment_of_area(self):
"""Returns the polar modulus of a two-dimensional polygon
It is a constituent of the second moment of area, linked through
the perpendicular axis theorem. While the planar second moment of
area describes an object's resistance to deflection (bending) when
subjected to a force applied to a plane parallel to the central
axis, the polar second moment of area describes an object's
resistance to deflection when subjected to a moment applied in a
plane perpendicular to the object's central axis (i.e. parallel to
the cross-section).
Examples
========
>>> from sympy import Polygon, symbols
>>> a, b = symbols('a, b')
>>> rectangle = Polygon((0, 0), (a, 0), (a, b), (0, b))
>>> rectangle.polar_second_moment_of_area()
a**3*b/12 + a*b**3/12
References
==========
.. [1] https://en.wikipedia.org/wiki/Polar_moment_of_inertia
"""
second_moment = self.second_moment_of_area()
return second_moment[0] + second_moment[1]
def section_modulus(self, point=None):
"""Returns a tuple with the section modulus of a two-dimensional
polygon.
Section modulus is a geometric property of a polygon defined as the
ratio of second moment of area to the distance of the extreme end of
the polygon from the centroidal axis.
Parameters
==========
point : Point, two-tuple of sympifyable objects, or None(default=None)
point is the point at which section modulus is to be found.
If "point=None" it will be calculated for the point farthest from the
centroidal axis of the polygon.
Returns
=======
S_x, S_y: numbers or SymPy expressions
S_x is the section modulus with respect to the x-axis
S_y is the section modulus with respect to the y-axis
A negative sign indicates that the section modulus is
determined for a point below the centroidal axis
Examples
========
>>> from sympy import symbols, Polygon, Point
>>> a, b = symbols('a, b', positive=True)
>>> rectangle = Polygon((0, 0), (a, 0), (a, b), (0, b))
>>> rectangle.section_modulus()
(a*b**2/6, a**2*b/6)
>>> rectangle.section_modulus(Point(a/4, b/4))
(-a*b**2/3, -a**2*b/3)
References
==========
.. [1] https://en.wikipedia.org/wiki/Section_modulus
"""
x_c, y_c = self.centroid
if point is None:
# taking x and y as maximum distances from centroid
x_min, y_min, x_max, y_max = self.bounds
y = max(y_c - y_min, y_max - y_c)
x = max(x_c - x_min, x_max - x_c)
else:
# taking x and y as distances of the given point from the centroid
y = point.y - y_c
x = point.x - x_c
second_moment = self.second_moment_of_area()
S_x = second_moment[0]/y
S_y = second_moment[1]/x
return S_x, S_y
@property
def sides(self):
"""The directed line segments that form the sides of the polygon.
Returns
=======
sides : list of sides
Each side is a directed Segment.
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Segment
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.sides
[Segment2D(Point2D(0, 0), Point2D(1, 0)),
Segment2D(Point2D(1, 0), Point2D(5, 1)),
Segment2D(Point2D(5, 1), Point2D(0, 1)), Segment2D(Point2D(0, 1), Point2D(0, 0))]
"""
res = []
args = self.vertices
for i in range(-len(args), 0):
res.append(Segment(args[i], args[i + 1]))
return res
@property
def bounds(self):
"""Return a tuple (xmin, ymin, xmax, ymax) representing the bounding
rectangle for the geometric figure.
"""
verts = self.vertices
xs = [p.x for p in verts]
ys = [p.y for p in verts]
return (min(xs), min(ys), max(xs), max(ys))
def is_convex(self):
"""Is the polygon convex?
A polygon is convex if all its interior angles are less than 180
degrees and there are no intersections between sides.
Returns
=======
is_convex : boolean
True if this polygon is convex, False otherwise.
See Also
========
sympy.geometry.util.convex_hull
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.is_convex()
True
"""
# Determine orientation of points
args = self.vertices
cw = self._isright(args[-2], args[-1], args[0])
for i in range(1, len(args)):
if cw ^ self._isright(args[i - 2], args[i - 1], args[i]):
return False
# check for intersecting sides
sides = self.sides
for i, si in enumerate(sides):
pts = si.args
# exclude the sides connected to si
for j in range(1 if i == len(sides) - 1 else 0, i - 1):
sj = sides[j]
if sj.p1 not in pts and sj.p2 not in pts:
hit = si.intersection(sj)
if hit:
return False
return True
def encloses_point(self, p):
"""
Return True if p is enclosed by (is inside of) self.
Notes
=====
Being on the border of self is considered False.
Parameters
==========
p : Point
Returns
=======
encloses_point : True, False or None
See Also
========
sympy.geometry.point.Point, sympy.geometry.ellipse.Ellipse.encloses_point
Examples
========
>>> from sympy import Polygon, Point
>>> p = Polygon((0, 0), (4, 0), (4, 4))
>>> p.encloses_point(Point(2, 1))
True
>>> p.encloses_point(Point(2, 2))
False
>>> p.encloses_point(Point(5, 5))
False
References
==========
.. [1] http://paulbourke.net/geometry/polygonmesh/#insidepoly
"""
p = Point(p, dim=2)
if p in self.vertices or any(p in s for s in self.sides):
return False
# move to p, checking that the result is numeric
lit = []
for v in self.vertices:
lit.append(v - p) # the difference is simplified
if lit[-1].free_symbols:
return None
poly = Polygon(*lit)
# polygon closure is assumed in the following test but Polygon removes duplicate pts so
# the last point has to be added so all sides are computed. Using Polygon.sides is
# not good since Segments are unordered.
args = poly.args
indices = list(range(-len(args), 1))
if poly.is_convex():
orientation = None
for i in indices:
a = args[i]
b = args[i + 1]
test = ((-a.y)*(b.x - a.x) - (-a.x)*(b.y - a.y)).is_negative
if orientation is None:
orientation = test
elif test is not orientation:
return False
return True
hit_odd = False
p1x, p1y = args[0].args
for i in indices[1:]:
p2x, p2y = args[i].args
if 0 > min(p1y, p2y):
if 0 <= max(p1y, p2y):
if 0 <= max(p1x, p2x):
if p1y != p2y:
xinters = (-p1y)*(p2x - p1x)/(p2y - p1y) + p1x
if p1x == p2x or 0 <= xinters:
hit_odd = not hit_odd
p1x, p1y = p2x, p2y
return hit_odd
def arbitrary_point(self, parameter='t'):
"""A parameterized point on the polygon.
The parameter, varying from 0 to 1, assigns points to the position on
the perimeter that is that fraction of the total perimeter. So the
point evaluated at t=1/2 would return the point from the first vertex
that is 1/2 way around the polygon.
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
arbitrary_point : Point
Raises
======
ValueError
When `parameter` already appears in the Polygon's definition.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Polygon, Symbol
>>> t = Symbol('t', real=True)
>>> tri = Polygon((0, 0), (1, 0), (1, 1))
>>> p = tri.arbitrary_point('t')
>>> perimeter = tri.perimeter
>>> s1, s2 = [s.length for s in tri.sides[:2]]
>>> p.subs(t, (s1 + s2/2)/perimeter)
Point2D(1, 1/2)
"""
t = _symbol(parameter, real=True)
if t.name in (f.name for f in self.free_symbols):
raise ValueError('Symbol %s already appears in object and cannot be used as a parameter.' % t.name)
sides = []
perimeter = self.perimeter
perim_fraction_start = 0
for s in self.sides:
side_perim_fraction = s.length/perimeter
perim_fraction_end = perim_fraction_start + side_perim_fraction
pt = s.arbitrary_point(parameter).subs(
t, (t - perim_fraction_start)/side_perim_fraction)
sides.append(
(pt, (And(perim_fraction_start <= t, t < perim_fraction_end))))
perim_fraction_start = perim_fraction_end
return Piecewise(*sides)
def parameter_value(self, other, t):
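"""Return the parameter value corresponding to a given point on the polygon.
The polygon is parameterized as in ``arbitrary_point``: the parameter runs
from 0 to 1 and measures the fraction of the perimeter traversed from the
first vertex. A ValueError is raised if the point is not on the polygon.
Examples
========
>>> from sympy import Polygon, Symbol
>>> t = Symbol('t', real=True)
>>> p = Polygon((0, 0), (4, 0), (4, 4), (0, 4))
>>> p.parameter_value((4, 0), t)
{t: 1/4}
"""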
if not isinstance(other, GeometryEntity):
other = Point(other, dim=self.ambient_dimension)
if not isinstance(other, Point):
raise ValueError("other must be a point")
if other.free_symbols:
raise NotImplementedError('non-numeric coordinates')
unknown = False
T = Dummy('t', real=True)
p = self.arbitrary_point(T)
for pt, cond in p.args:
sol = solve(pt - other, T, dict=True)
if not sol:
continue
value = sol[0][T]
if simplify(cond.subs(T, value)) == True:
return {t: value}
unknown = True
if unknown:
raise ValueError("Given point may not be on %s" % func_name(self))
raise ValueError("Given point is not on %s" % func_name(self))
def plot_interval(self, parameter='t'):
"""The plot interval for the default geometric plot of the polygon.
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
plot_interval : list (plot interval)
[parameter, lower_bound, upper_bound]
Examples
========
>>> from sympy import Polygon
>>> p = Polygon((0, 0), (1, 0), (1, 1))
>>> p.plot_interval()
[t, 0, 1]
"""
t = Symbol(parameter, real=True)
return [t, 0, 1]
def intersection(self, o):
"""The intersection of polygon and geometry entity.
The intersection may be empty and can contain individual Points and
complete Line Segments.
Parameters
==========
other: GeometryEntity
Returns
=======
intersection : list
The list of Segments and Points
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Segment
Examples
========
>>> from sympy import Point, Polygon, Line
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly1 = Polygon(p1, p2, p3, p4)
>>> p5, p6, p7 = map(Point, [(3, 2), (1, -1), (0, 2)])
>>> poly2 = Polygon(p5, p6, p7)
>>> poly1.intersection(poly2)
[Point2D(1/3, 1), Point2D(2/3, 0), Point2D(9/5, 1/5), Point2D(7/3, 1)]
>>> poly1.intersection(Line(p1, p2))
[Segment2D(Point2D(0, 0), Point2D(1, 0))]
>>> poly1.intersection(p1)
[Point2D(0, 0)]
"""
intersection_result = []
k = o.sides if isinstance(o, Polygon) else [o]
for side in self.sides:
for side1 in k:
intersection_result.extend(side.intersection(side1))
intersection_result = list(uniq(intersection_result))
points = [entity for entity in intersection_result if isinstance(entity, Point)]
segments = [entity for entity in intersection_result if isinstance(entity, Segment)]
if points and segments:
points_in_segments = list(uniq([point for point in points for segment in segments if point in segment]))
if points_in_segments:
for i in points_in_segments:
points.remove(i)
return list(ordered(segments + points))
else:
return list(ordered(intersection_result))
def cut_section(self, line):
"""
Returns a tuple of two polygon segments that lie above and below
the intersecting line respectively.
Parameters
==========
line: Line object of geometry module
line which cuts the Polygon. The part of the Polygon that lies
above and below this line is returned.
Returns
=======
upper_polygon, lower_polygon: Polygon objects or None
upper_polygon is the polygon that lies above the given line.
lower_polygon is the polygon that lies below the given line.
upper_polygon and lower_polygon are ``None`` when no polygon
exists above the line or below the line.
Raises
======
ValueError: When the line does not intersect the polygon
Examples
========
>>> from sympy import Polygon, Line
>>> a, b = 20, 10
>>> p1, p2, p3, p4 = [(0, b), (0, 0), (a, 0), (a, b)]
>>> rectangle = Polygon(p1, p2, p3, p4)
>>> t = rectangle.cut_section(Line((0, 5), slope=0))
>>> t
(Polygon(Point2D(0, 10), Point2D(0, 5), Point2D(20, 5), Point2D(20, 10)),
Polygon(Point2D(0, 5), Point2D(0, 0), Point2D(20, 0), Point2D(20, 5)))
>>> upper_segment, lower_segment = t
>>> upper_segment.area
100
>>> upper_segment.centroid
Point2D(10, 15/2)
>>> lower_segment.centroid
Point2D(10, 5/2)
References
==========
.. [1] https://github.com/sympy/sympy/wiki/A-method-to-return-a-cut-section-of-any-polygon-geometry
"""
intersection_points = self.intersection(line)
if not intersection_points:
raise ValueError("This line does not intersect the polygon")
points = list(self.vertices)
points.append(points[0])
x, y = symbols('x, y', real=True, cls=Dummy)
eq = line.equation(x, y)
# considering equation of line to be `ax +by + c`
a = eq.coeff(x)
b = eq.coeff(y)
upper_vertices = []
lower_vertices = []
# prev is true when previous point is above the line
prev = True
prev_point = None
for point in points:
# when coefficient of y is 0, right side of the line is
# considered
compare = eq.subs({x: point.x, y: point.y})/b if b \
else eq.subs(x, point.x)/a
# if point lies above line
if compare > 0:
if not prev:
# if previous point lies below the line, the intersection
# point of the polygon edge and the line has to be included
edge = Line(point, prev_point)
new_point = edge.intersection(line)
upper_vertices.append(new_point[0])
lower_vertices.append(new_point[0])
upper_vertices.append(point)
prev = True
else:
if prev and prev_point:
edge = Line(point, prev_point)
new_point = edge.intersection(line)
upper_vertices.append(new_point[0])
lower_vertices.append(new_point[0])
lower_vertices.append(point)
prev = False
prev_point = point
upper_polygon, lower_polygon = None, None
if upper_vertices and isinstance(Polygon(*upper_vertices), Polygon):
upper_polygon = Polygon(*upper_vertices)
if lower_vertices and isinstance(Polygon(*lower_vertices), Polygon):
lower_polygon = Polygon(*lower_vertices)
return upper_polygon, lower_polygon
def distance(self, o):
"""
Returns the shortest distance between self and o.
If o is a point, then self does not need to be convex.
If o is another polygon self and o must be convex.
Examples
========
>>> from sympy import Point, Polygon, RegularPolygon
>>> p1, p2 = map(Point, [(0, 0), (7, 5)])
>>> poly = Polygon(*RegularPolygon(p1, 1, 3).vertices)
>>> poly.distance(p2)
sqrt(61)
"""
if isinstance(o, Point):
dist = oo
for side in self.sides:
current = side.distance(o)
if current == 0:
return S.Zero
elif current < dist:
dist = current
return dist
elif isinstance(o, Polygon) and self.is_convex() and o.is_convex():
return self._do_poly_distance(o)
raise NotImplementedError()
def _do_poly_distance(self, e2):
"""
Calculates the least distance between the exteriors of two
convex polygons e1 and e2. Does not check for the convexity
of the polygons as this is checked by Polygon.distance.
Notes
=====
- Prints a warning if the two polygons possibly intersect as the return
value will not be valid in such a case. For a more thorough test of
intersection use intersection().
See Also
========
sympy.geometry.point.Point.distance
Examples
========
>>> from sympy.geometry import Point, Polygon
>>> square = Polygon(Point(0, 0), Point(0, 1), Point(1, 1), Point(1, 0))
>>> triangle = Polygon(Point(1, 2), Point(2, 2), Point(2, 1))
>>> square._do_poly_distance(triangle)
sqrt(2)/2
Description of method used
==========================
Method:
[1] http://cgm.cs.mcgill.ca/~orm/mind2p.html
Uses rotating calipers:
[2] https://en.wikipedia.org/wiki/Rotating_calipers
and antipodal points:
[3] https://en.wikipedia.org/wiki/Antipodal_point
"""
e1 = self
'''Tests for a possible intersection between the polygons and outputs a warning'''
e1_center = e1.centroid
e2_center = e2.centroid
e1_max_radius = S.Zero
e2_max_radius = S.Zero
for vertex in e1.vertices:
r = Point.distance(e1_center, vertex)
if e1_max_radius < r:
e1_max_radius = r
for vertex in e2.vertices:
r = Point.distance(e2_center, vertex)
if e2_max_radius < r:
e2_max_radius = r
center_dist = Point.distance(e1_center, e2_center)
if center_dist <= e1_max_radius + e2_max_radius:
warnings.warn("Polygons may intersect producing erroneous output")
'''
Find the upper rightmost vertex of e1 and the lowest leftmost vertex of e2
'''
e1_ymax = Point(0, -oo)
e2_ymin = Point(0, oo)
for vertex in e1.vertices:
if vertex.y > e1_ymax.y or (vertex.y == e1_ymax.y and vertex.x > e1_ymax.x):
e1_ymax = vertex
for vertex in e2.vertices:
if vertex.y < e2_ymin.y or (vertex.y == e2_ymin.y and vertex.x < e2_ymin.x):
e2_ymin = vertex
min_dist = Point.distance(e1_ymax, e2_ymin)
'''
Produce a dictionary with vertices of e1 as the keys and, for each vertex, the points
to which the vertex is connected as its value. The same is then done for e2.
'''
e1_connections = {}
e2_connections = {}
for side in e1.sides:
if side.p1 in e1_connections:
e1_connections[side.p1].append(side.p2)
else:
e1_connections[side.p1] = [side.p2]
if side.p2 in e1_connections:
e1_connections[side.p2].append(side.p1)
else:
e1_connections[side.p2] = [side.p1]
for side in e2.sides:
if side.p1 in e2_connections:
e2_connections[side.p1].append(side.p2)
else:
e2_connections[side.p1] = [side.p2]
if side.p2 in e2_connections:
e2_connections[side.p2].append(side.p1)
else:
e2_connections[side.p2] = [side.p1]
e1_current = e1_ymax
e2_current = e2_ymin
support_line = Line(Point(S.Zero, S.Zero), Point(S.One, S.Zero))
'''
Determine which point in e1 and e2 will be selected after e2_ymin and e1_ymax,
this information combined with the above produced dictionaries determines the
path that will be taken around the polygons
'''
point1 = e1_connections[e1_ymax][0]
point2 = e1_connections[e1_ymax][1]
angle1 = support_line.angle_between(Line(e1_ymax, point1))
angle2 = support_line.angle_between(Line(e1_ymax, point2))
if angle1 < angle2:
e1_next = point1
elif angle2 < angle1:
e1_next = point2
elif Point.distance(e1_ymax, point1) > Point.distance(e1_ymax, point2):
e1_next = point2
else:
e1_next = point1
point1 = e2_connections[e2_ymin][0]
point2 = e2_connections[e2_ymin][1]
angle1 = support_line.angle_between(Line(e2_ymin, point1))
angle2 = support_line.angle_between(Line(e2_ymin, point2))
if angle1 > angle2:
e2_next = point1
elif angle2 > angle1:
e2_next = point2
elif Point.distance(e2_ymin, point1) > Point.distance(e2_ymin, point2):
e2_next = point2
else:
e2_next = point1
'''
Loop which determines the distance between anti-podal pairs and updates the
minimum distance accordingly. It repeats until it reaches the starting position.
'''
while True:
e1_angle = support_line.angle_between(Line(e1_current, e1_next))
e2_angle = pi - support_line.angle_between(Line(
e2_current, e2_next))
if (e1_angle < e2_angle) is True:
support_line = Line(e1_current, e1_next)
e1_segment = Segment(e1_current, e1_next)
min_dist_current = e1_segment.distance(e2_current)
if min_dist_current.evalf() < min_dist.evalf():
min_dist = min_dist_current
if e1_connections[e1_next][0] != e1_current:
e1_current = e1_next
e1_next = e1_connections[e1_next][0]
else:
e1_current = e1_next
e1_next = e1_connections[e1_next][1]
elif (e1_angle > e2_angle) is True:
support_line = Line(e2_next, e2_current)
e2_segment = Segment(e2_current, e2_next)
min_dist_current = e2_segment.distance(e1_current)
if min_dist_current.evalf() < min_dist.evalf():
min_dist = min_dist_current
if e2_connections[e2_next][0] != e2_current:
e2_current = e2_next
e2_next = e2_connections[e2_next][0]
else:
e2_current = e2_next
e2_next = e2_connections[e2_next][1]
else:
support_line = Line(e1_current, e1_next)
e1_segment = Segment(e1_current, e1_next)
e2_segment = Segment(e2_current, e2_next)
min1 = e1_segment.distance(e2_next)
min2 = e2_segment.distance(e1_next)
min_dist_current = min(min1, min2)
if min_dist_current.evalf() < min_dist.evalf():
min_dist = min_dist_current
if e1_connections[e1_next][0] != e1_current:
e1_current = e1_next
e1_next = e1_connections[e1_next][0]
else:
e1_current = e1_next
e1_next = e1_connections[e1_next][1]
if e2_connections[e2_next][0] != e2_current:
e2_current = e2_next
e2_next = e2_connections[e2_next][0]
else:
e2_current = e2_next
e2_next = e2_connections[e2_next][1]
if e1_current == e1_ymax and e2_current == e2_ymin:
break
return min_dist
def _svg(self, scale_factor=1., fill_color="#66cc99"):
"""Returns SVG path element for the Polygon.
Parameters
==========
scale_factor : float
Multiplication factor for the SVG stroke-width. Default is 1.
fill_color : str, optional
Hex string for fill color. Default is "#66cc99".
"""
verts = map(N, self.vertices)
coords = ["{},{}".format(p.x, p.y) for p in verts]
path = "M {} L {} z".format(coords[0], " L ".join(coords[1:]))
return (
'<path fill-rule="evenodd" fill="{2}" stroke="#555555" '
'stroke-width="{0}" opacity="0.6" d="{1}" />'
).format(2. * scale_factor, path, fill_color)
def _hashable_content(self):
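# Canonicalize the vertex list so that equality and hashing do not depend on
# the starting vertex or traversal direction: vertices are relabelled, rotated
# to their lexicographically least rotation, and the smaller of the forward and
# reversed forms is kept. For example,
# Polygon((0, 0), (2, 0), (2, 2), (0, 2)) == Polygon((2, 2), (0, 2), (0, 0), (2, 0))
# evaluates to True.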
D = {}
def ref_list(point_list):
kee = {}
for i, p in enumerate(ordered(set(point_list))):
kee[p] = i
D[i] = p
return [kee[p] for p in point_list]
S1 = ref_list(self.args)
r_nor = rotate_left(S1, least_rotation(S1))
S2 = ref_list(list(reversed(self.args)))
r_rev = rotate_left(S2, least_rotation(S2))
if r_nor < r_rev:
r = r_nor
else:
r = r_rev
canonical_args = [ D[order] for order in r ]
return tuple(canonical_args)
def __contains__(self, o):
"""
Return True if o is contained within the boundary lines of self.
Parameters
==========
other : GeometryEntity
Returns
=======
contained in : bool
The points (and sides, if applicable) are contained in self.
See Also
========
sympy.geometry.entity.GeometryEntity.encloses
Examples
========
>>> from sympy import Line, Segment, Point
>>> p = Point(0, 0)
>>> q = Point(1, 1)
>>> s = Segment(p, q*2)
>>> l = Line(p, q)
>>> p in q
False
>>> p in s
True
>>> q*3 in s
False
>>> s in l
True
"""
if isinstance(o, Polygon):
return self == o
elif isinstance(o, Segment):
return any(o in s for s in self.sides)
elif isinstance(o, Point):
if o in self.vertices:
return True
for side in self.sides:
if o in side:
return True
return False
def bisectors(p, prec=None):
"""Returns angle bisectors of a polygon. If prec is given
then approximate the point defining the ray to that precision.
The distance between the points defining the bisector ray is 1.
Examples
========
>>> from sympy import Polygon, Point
>>> p = Polygon(Point(0, 0), Point(2, 0), Point(1, 1), Point(0, 3))
>>> p.bisectors(2)
{Point2D(0, 0): Ray2D(Point2D(0, 0), Point2D(0.71, 0.71)),
Point2D(0, 3): Ray2D(Point2D(0, 3), Point2D(0.23, 2.0)),
Point2D(1, 1): Ray2D(Point2D(1, 1), Point2D(0.19, 0.42)),
Point2D(2, 0): Ray2D(Point2D(2, 0), Point2D(1.1, 0.38))}
"""
b = {}
pts = list(p.args)
pts.append(pts[0]) # close it
cw = Polygon._isright(*pts[:3])
if cw:
pts = list(reversed(pts))
for v, a in p.angles.items():
i = pts.index(v)
p1, p2 = Point._normalize_dimension(pts[i], pts[i + 1])
ray = Ray(p1, p2).rotate(a/2, v)
dir = ray.direction
ray = Ray(ray.p1, ray.p1 + dir/dir.distance((0, 0)))
if prec is not None:
ray = Ray(ray.p1, ray.p2.n(prec))
b[v] = ray
return b
class RegularPolygon(Polygon):
"""
A regular polygon.
Such a polygon has all internal angles equal and all sides the same length.
Parameters
==========
center : Point
radius : number or Basic instance
The distance from the center to a vertex
n : int
The number of sides
Attributes
==========
vertices
center
radius
rotation
apothem
interior_angle
exterior_angle
circumcircle
incircle
angles
Raises
======
GeometryError
If the `center` is not a Point, or the `radius` is not a number or Basic
instance, or the number of sides, `n`, is less than three.
Notes
=====
A RegularPolygon can be instantiated with Polygon with the kwarg n.
Regular polygons are instantiated with a center, radius, number of sides
and a rotation angle. Whereas the arguments of a Polygon are vertices, the
vertices of the RegularPolygon must be obtained with the vertices method.
See Also
========
sympy.geometry.point.Point, Polygon
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> r = RegularPolygon(Point(0, 0), 5, 3)
>>> r
RegularPolygon(Point2D(0, 0), 5, 3, 0)
>>> r.vertices[0]
Point2D(5, 0)
"""
__slots__ = ('_n', '_center', '_radius', '_rot')
def __new__(self, c, r, n, rot=0, **kwargs):
r, n, rot = map(sympify, (r, n, rot))
c = Point(c, dim=2, **kwargs)
if not isinstance(r, Expr):
raise GeometryError("r must be an Expr object, not %s" % r)
if n.is_Number:
as_int(n) # let an error raise if necessary
if n < 3:
raise GeometryError("n must be a >= 3, not %s" % n)
obj = GeometryEntity.__new__(self, c, r, n, **kwargs)
obj._n = n
obj._center = c
obj._radius = r
obj._rot = rot % (2*S.Pi/n) if rot.is_number else rot
return obj
def _eval_evalf(self, prec=15, **options):
c, r, n, a = self.args
dps = prec_to_dps(prec)
c, r, a = [i.evalf(n=dps, **options) for i in (c, r, a)]
return self.func(c, r, n, a)
@property
def args(self):
"""
Returns the center point, the radius,
the number of sides, and the orientation angle.
Examples
========
>>> from sympy import RegularPolygon, Point
>>> r = RegularPolygon(Point(0, 0), 5, 3)
>>> r.args
(Point2D(0, 0), 5, 3, 0)
"""
return self._center, self._radius, self._n, self._rot
def __str__(self):
return 'RegularPolygon(%s, %s, %s, %s)' % tuple(self.args)
def __repr__(self):
return 'RegularPolygon(%s, %s, %s, %s)' % tuple(self.args)
@property
def area(self):
"""Returns the area.
Examples
========
>>> from sympy.geometry import RegularPolygon
>>> square = RegularPolygon((0, 0), 1, 4)
>>> square.area
2
>>> _ == square.length**2
True
"""
c, r, n, rot = self.args
return sign(r)*n*self.length**2/(4*tan(pi/n))
@property
def length(self):
"""Returns the length of the sides.
The half-length of the side and the apothem form two legs
of a right triangle whose hypotenuse is the radius of the
regular polygon.
Examples
========
>>> from sympy.geometry import RegularPolygon
>>> from sympy import sqrt
>>> s = square_in_unit_circle = RegularPolygon((0, 0), 1, 4)
>>> s.length
sqrt(2)
>>> sqrt((_/2)**2 + s.apothem**2) == s.radius
True
"""
return self.radius*2*sin(pi/self._n)
@property
def center(self):
"""The center of the RegularPolygon
This is also the center of the circumscribing circle.
Returns
=======
center : Point
See Also
========
sympy.geometry.point.Point, sympy.geometry.ellipse.Ellipse.center
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 5, 4)
>>> rp.center
Point2D(0, 0)
"""
return self._center
centroid = center
@property
def circumcenter(self):
"""
Alias for center.
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 5, 4)
>>> rp.circumcenter
Point2D(0, 0)
"""
return self.center
@property
def radius(self):
"""Radius of the RegularPolygon
This is also the radius of the circumscribing circle.
Returns
=======
radius : number or instance of Basic
See Also
========
sympy.geometry.line.Segment.length, sympy.geometry.ellipse.Circle.radius
Examples
========
>>> from sympy import Symbol
>>> from sympy.geometry import RegularPolygon, Point
>>> radius = Symbol('r')
>>> rp = RegularPolygon(Point(0, 0), radius, 4)
>>> rp.radius
r
"""
return self._radius
@property
def circumradius(self):
"""
Alias for radius.
Examples
========
>>> from sympy import Symbol
>>> from sympy.geometry import RegularPolygon, Point
>>> radius = Symbol('r')
>>> rp = RegularPolygon(Point(0, 0), radius, 4)
>>> rp.circumradius
r
"""
return self.radius
@property
def rotation(self):
"""CCW angle by which the RegularPolygon is rotated
Returns
=======
rotation : number or instance of Basic
Examples
========
>>> from sympy import pi
>>> from sympy.abc import a
>>> from sympy.geometry import RegularPolygon, Point
>>> RegularPolygon(Point(0, 0), 3, 4, pi/4).rotation
pi/4
Numerical rotation angles are made canonical:
>>> RegularPolygon(Point(0, 0), 3, 4, a).rotation
a
>>> RegularPolygon(Point(0, 0), 3, 4, pi).rotation
0
"""
return self._rot
@property
def apothem(self):
"""The inradius of the RegularPolygon.
The apothem/inradius is the radius of the inscribed circle.
Returns
=======
apothem : number or instance of Basic
See Also
========
sympy.geometry.line.Segment.length, sympy.geometry.ellipse.Circle.radius
Examples
========
>>> from sympy import Symbol
>>> from sympy.geometry import RegularPolygon, Point
>>> radius = Symbol('r')
>>> rp = RegularPolygon(Point(0, 0), radius, 4)
>>> rp.apothem
sqrt(2)*r/2
"""
return self.radius * cos(S.Pi/self._n)
@property
def inradius(self):
"""
Alias for apothem.
Examples
========
>>> from sympy import Symbol
>>> from sympy.geometry import RegularPolygon, Point
>>> radius = Symbol('r')
>>> rp = RegularPolygon(Point(0, 0), radius, 4)
>>> rp.inradius
sqrt(2)*r/2
"""
return self.apothem
@property
def interior_angle(self):
"""Measure of the interior angles.
Returns
=======
interior_angle : number
See Also
========
sympy.geometry.line.LinearEntity.angle_between
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 4, 8)
>>> rp.interior_angle
3*pi/4
"""
return (self._n - 2)*S.Pi/self._n
@property
def exterior_angle(self):
"""Measure of the exterior angles.
Returns
=======
exterior_angle : number
See Also
========
sympy.geometry.line.LinearEntity.angle_between
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 4, 8)
>>> rp.exterior_angle
pi/4
"""
return 2*S.Pi/self._n
@property
def circumcircle(self):
"""The circumcircle of the RegularPolygon.
Returns
=======
circumcircle : Circle
See Also
========
circumcenter, sympy.geometry.ellipse.Circle
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 4, 8)
>>> rp.circumcircle
Circle(Point2D(0, 0), 4)
"""
return Circle(self.center, self.radius)
@property
def incircle(self):
"""The incircle of the RegularPolygon.
Returns
=======
incircle : Circle
See Also
========
inradius, sympy.geometry.ellipse.Circle
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 4, 7)
>>> rp.incircle
Circle(Point2D(0, 0), 4*cos(pi/7))
"""
return Circle(self.center, self.apothem)
@property
def angles(self):
"""
Returns a dictionary with keys, the vertices of the Polygon,
and values, the interior angle at each vertex.
Examples
========
>>> from sympy import RegularPolygon, Point
>>> r = RegularPolygon(Point(0, 0), 5, 3)
>>> r.angles
{Point2D(-5/2, -5*sqrt(3)/2): pi/3,
Point2D(-5/2, 5*sqrt(3)/2): pi/3,
Point2D(5, 0): pi/3}
"""
ret = {}
ang = self.interior_angle
for v in self.vertices:
ret[v] = ang
return ret
def encloses_point(self, p):
"""
Return True if p is enclosed by (is inside of) self.
Notes
=====
Being on the border of self is considered False.
The general Polygon.encloses_point method is called only if
a point is not within or beyond the incircle or circumcircle,
respectively.
Parameters
==========
p : Point
Returns
=======
encloses_point : True, False or None
See Also
========
sympy.geometry.ellipse.Ellipse.encloses_point
Examples
========
>>> from sympy import RegularPolygon, S, Point, Symbol
>>> p = RegularPolygon((0, 0), 3, 4)
>>> p.encloses_point(Point(0, 0))
True
>>> r, R = p.inradius, p.circumradius
>>> p.encloses_point(Point((r + R)/2, 0))
True
>>> p.encloses_point(Point(R/2, R/2 + (R - r)/10))
False
>>> t = Symbol('t', real=True)
>>> p.encloses_point(p.arbitrary_point().subs(t, S.Half))
False
>>> p.encloses_point(Point(5, 5))
False
"""
c = self.center
d = Segment(c, p).length
if d >= self.radius:
return False
elif d < self.inradius:
return True
else:
# now enumerate the RegularPolygon like a general polygon.
return Polygon.encloses_point(self, p)
def spin(self, angle):
"""Increment *in place* the virtual Polygon's rotation by ccw angle.
See also: rotate method which moves the center.
>>> from sympy import Polygon, Point, pi
>>> r = Polygon(Point(0,0), 1, n=3)
>>> r.vertices[0]
Point2D(1, 0)
>>> r.spin(pi/6)
>>> r.vertices[0]
Point2D(sqrt(3)/2, 1/2)
See Also
========
rotation
rotate : Creates a copy of the RegularPolygon rotated about a Point
"""
self._rot += angle
def rotate(self, angle, pt=None):
"""Override GeometryEntity.rotate to first rotate the RegularPolygon
about its center.
>>> from sympy import Point, RegularPolygon, pi
>>> t = RegularPolygon(Point(1, 0), 1, 3)
>>> t.vertices[0] # vertex on x-axis
Point2D(2, 0)
>>> t.rotate(pi/2).vertices[0] # vertex on y axis now
Point2D(0, 2)
See Also
========
rotation
spin : Rotates a RegularPolygon in place
"""
r = type(self)(*self.args) # need a copy or else changes are in-place
r._rot += angle
return GeometryEntity.rotate(r, angle, pt)
def scale(self, x=1, y=1, pt=None):
"""Override GeometryEntity.scale since it is the radius that must be
scaled (if x == y) or else a new Polygon must be returned.
>>> from sympy import RegularPolygon
Symmetric scaling returns a RegularPolygon:
>>> RegularPolygon((0, 0), 1, 4).scale(2, 2)
RegularPolygon(Point2D(0, 0), 2, 4, 0)
Asymmetric scaling returns a kite as a Polygon:
>>> RegularPolygon((0, 0), 1, 4).scale(2, 1)
Polygon(Point2D(2, 0), Point2D(0, 1), Point2D(-2, 0), Point2D(0, -1))
"""
if pt:
pt = Point(pt, dim=2)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
if x != y:
return Polygon(*self.vertices).scale(x, y)
c, r, n, rot = self.args
r *= x
return self.func(c, r, n, rot)
def reflect(self, line):
"""Override GeometryEntity.reflect since this is not made of only
points.
Examples
========
>>> from sympy import RegularPolygon, Line
>>> RegularPolygon((0, 0), 1, 4).reflect(Line((0, 1), slope=-2))
RegularPolygon(Point2D(4/5, 2/5), -1, 4, atan(4/3))
"""
c, r, n, rot = self.args
v = self.vertices[0]
d = v - c
cc = c.reflect(line)
vv = v.reflect(line)
dd = vv - cc
# calculate rotation about the new center
# which will align the vertices
l1 = Ray((0, 0), dd)
l2 = Ray((0, 0), d)
ang = l1.closing_angle(l2)
rot += ang
# change sign of radius as point traversal is reversed
return self.func(cc, -r, n, rot)
@property
def vertices(self):
"""The vertices of the RegularPolygon.
Returns
=======
vertices : list
Each vertex is a Point.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 5, 4)
>>> rp.vertices
[Point2D(5, 0), Point2D(0, 5), Point2D(-5, 0), Point2D(0, -5)]
"""
c = self._center
r = abs(self._radius)
rot = self._rot
v = 2*S.Pi/self._n
return [Point(c.x + r*cos(k*v + rot), c.y + r*sin(k*v + rot))
for k in range(self._n)]
def __eq__(self, o):
if not isinstance(o, Polygon):
return False
elif not isinstance(o, RegularPolygon):
return Polygon.__eq__(o, self)
return self.args == o.args
def __hash__(self):
return super().__hash__()
class Triangle(Polygon):
"""
A polygon with three vertices and three sides.
Parameters
==========
points : sequence of Points
keyword: asa, sas, or sss to specify sides/angles of the triangle
Attributes
==========
vertices
altitudes
orthocenter
circumcenter
circumradius
circumcircle
inradius
incircle
exradii
medians
medial
nine_point_circle
Raises
======
GeometryError
If the number of vertices is not equal to three, or one of the vertices
is not a Point, or a valid keyword is not given.
See Also
========
sympy.geometry.point.Point, Polygon
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> Triangle(Point(0, 0), Point(4, 0), Point(4, 3))
Triangle(Point2D(0, 0), Point2D(4, 0), Point2D(4, 3))
Keywords sss, sas, or asa can be used to give the desired
side lengths (in order) and interior angles (in degrees) that
define the triangle:
>>> Triangle(sss=(3, 4, 5))
Triangle(Point2D(0, 0), Point2D(3, 0), Point2D(3, 4))
>>> Triangle(asa=(30, 1, 30))
Triangle(Point2D(0, 0), Point2D(1, 0), Point2D(1/2, sqrt(3)/6))
>>> Triangle(sas=(1, 45, 2))
Triangle(Point2D(0, 0), Point2D(2, 0), Point2D(sqrt(2)/2, sqrt(2)/2))
"""
def __new__(cls, *args, **kwargs):
if len(args) != 3:
if 'sss' in kwargs:
return _sss(*[simplify(a) for a in kwargs['sss']])
if 'asa' in kwargs:
return _asa(*[simplify(a) for a in kwargs['asa']])
if 'sas' in kwargs:
return _sas(*[simplify(a) for a in kwargs['sas']])
msg = "Triangle instantiates with three points or a valid keyword."
raise GeometryError(msg)
vertices = [Point(a, dim=2, **kwargs) for a in args]
# remove consecutive duplicates
nodup = []
for p in vertices:
if nodup and p == nodup[-1]:
continue
nodup.append(p)
if len(nodup) > 1 and nodup[-1] == nodup[0]:
nodup.pop() # last point was same as first
# remove collinear points
i = -3
while i < len(nodup) - 3 and len(nodup) > 2:
a, b, c = sorted(
[nodup[i], nodup[i + 1], nodup[i + 2]], key=default_sort_key)
if Point.is_collinear(a, b, c):
nodup[i] = a
nodup[i + 1] = None
nodup.pop(i + 1)
i += 1
vertices = list(filter(lambda x: x is not None, nodup))
if len(vertices) == 3:
return GeometryEntity.__new__(cls, *vertices, **kwargs)
elif len(vertices) == 2:
return Segment(*vertices, **kwargs)
else:
return Point(*vertices, **kwargs)
@property
def vertices(self):
"""The triangle's vertices
Returns
=======
vertices : tuple
Each element in the tuple is a Point
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t = Triangle(Point(0, 0), Point(4, 0), Point(4, 3))
>>> t.vertices
(Point2D(0, 0), Point2D(4, 0), Point2D(4, 3))
"""
return self.args
def is_similar(t1, t2):
"""Is another triangle similar to this one.
Two triangles are similar if one can be uniformly scaled to the other.
Parameters
==========
other: Triangle
Returns
=======
is_similar : boolean
See Also
========
sympy.geometry.entity.GeometryEntity.is_similar
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(4, 3))
>>> t2 = Triangle(Point(0, 0), Point(-4, 0), Point(-4, -3))
>>> t1.is_similar(t2)
True
>>> t2 = Triangle(Point(0, 0), Point(-4, 0), Point(-4, -4))
>>> t1.is_similar(t2)
False
"""
if not isinstance(t2, Polygon):
return False
s1_1, s1_2, s1_3 = [side.length for side in t1.sides]
s2 = [side.length for side in t2.sides]
def _are_similar(u1, u2, u3, v1, v2, v3):
e1 = simplify(u1/v1)
e2 = simplify(u2/v2)
e3 = simplify(u3/v3)
return bool(e1 == e2) and bool(e2 == e3)
# There's only 6 permutations, so write them out
return _are_similar(s1_1, s1_2, s1_3, *s2) or \
_are_similar(s1_1, s1_3, s1_2, *s2) or \
_are_similar(s1_2, s1_1, s1_3, *s2) or \
_are_similar(s1_2, s1_3, s1_1, *s2) or \
_are_similar(s1_3, s1_1, s1_2, *s2) or \
_are_similar(s1_3, s1_2, s1_1, *s2)
def is_equilateral(self):
"""Are all the sides the same length?
Returns
=======
is_equilateral : boolean
See Also
========
sympy.geometry.entity.GeometryEntity.is_similar, RegularPolygon
is_isosceles, is_right, is_scalene
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(4, 3))
>>> t1.is_equilateral()
False
>>> from sympy import sqrt
>>> t2 = Triangle(Point(0, 0), Point(10, 0), Point(5, 5*sqrt(3)))
>>> t2.is_equilateral()
True
"""
return not has_variety(s.length for s in self.sides)
def is_isosceles(self):
"""Are two or more of the sides the same length?
Returns
=======
is_isosceles : boolean
See Also
========
is_equilateral, is_right, is_scalene
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(2, 4))
>>> t1.is_isosceles()
True
"""
return has_dups(s.length for s in self.sides)
def is_scalene(self):
"""Are all the sides of the triangle of different lengths?
Returns
=======
is_scalene : boolean
See Also
========
is_equilateral, is_isosceles, is_right
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(1, 4))
>>> t1.is_scalene()
True
"""
return not has_dups(s.length for s in self.sides)
def is_right(self):
"""Is the triangle right-angled.
Returns
=======
is_right : boolean
See Also
========
sympy.geometry.line.LinearEntity.is_perpendicular
is_equilateral, is_isosceles, is_scalene
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(4, 3))
>>> t1.is_right()
True
"""
s = self.sides
return Segment.is_perpendicular(s[0], s[1]) or \
Segment.is_perpendicular(s[1], s[2]) or \
Segment.is_perpendicular(s[0], s[2])
@property
def altitudes(self):
"""The altitudes of the triangle.
An altitude of a triangle is a segment through a vertex,
perpendicular to the opposite side, with length being the
height of the vertex measured from the line containing the side.
Returns
=======
altitudes : dict
The dictionary consists of keys which are vertices and values
which are Segments.
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Segment.length
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.altitudes[p1]
Segment2D(Point2D(0, 0), Point2D(1/2, 1/2))
"""
s = self.sides
v = self.vertices
return {v[0]: s[1].perpendicular_segment(v[0]),
v[1]: s[2].perpendicular_segment(v[1]),
v[2]: s[0].perpendicular_segment(v[2])}
@property
def orthocenter(self):
"""The orthocenter of the triangle.
The orthocenter is the intersection of the altitudes of a triangle.
It may lie inside, outside or on the triangle.
Returns
=======
orthocenter : Point
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.orthocenter
Point2D(0, 0)
"""
a = self.altitudes
v = self.vertices
return Line(a[v[0]]).intersection(Line(a[v[1]]))[0]
@property
def circumcenter(self):
"""The circumcenter of the triangle
The circumcenter is the center of the circumcircle.
Returns
=======
circumcenter : Point
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.circumcenter
Point2D(1/2, 1/2)
"""
a, b, c = [x.perpendicular_bisector() for x in self.sides]
return a.intersection(b)[0]
@property
def circumradius(self):
"""The radius of the circumcircle of the triangle.
Returns
=======
circumradius : number or Basic instance
See Also
========
sympy.geometry.ellipse.Circle.radius
Examples
========
>>> from sympy import Symbol
>>> from sympy.geometry import Point, Triangle
>>> a = Symbol('a')
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, a)
>>> t = Triangle(p1, p2, p3)
>>> t.circumradius
sqrt(a**2/4 + 1/4)
"""
return Point.distance(self.circumcenter, self.vertices[0])
@property
def circumcircle(self):
"""The circle which passes through the three vertices of the triangle.
Returns
=======
circumcircle : Circle
See Also
========
sympy.geometry.ellipse.Circle
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.circumcircle
Circle(Point2D(1/2, 1/2), sqrt(2)/2)
"""
return Circle(self.circumcenter, self.circumradius)
def bisectors(self):
"""The angle bisectors of the triangle.
An angle bisector of a triangle is a straight line through a vertex
which cuts the corresponding angle in half.
Returns
=======
bisectors : dict
Each key is a vertex (Point) and each value is the corresponding
bisector (Segment).
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Segment
Examples
========
>>> from sympy.geometry import Point, Triangle, Segment
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> from sympy import sqrt
>>> t.bisectors()[p2] == Segment(Point(1, 0), Point(0, sqrt(2) - 1))
True
"""
# use lines containing sides so containment check during
# intersection calculation can be avoided, thus reducing
# the processing time for calculating the bisectors
s = [Line(l) for l in self.sides]
v = self.vertices
c = self.incenter
l1 = Segment(v[0], Line(v[0], c).intersection(s[1])[0])
l2 = Segment(v[1], Line(v[1], c).intersection(s[2])[0])
l3 = Segment(v[2], Line(v[2], c).intersection(s[0])[0])
return {v[0]: l1, v[1]: l2, v[2]: l3}
@property
def incenter(self):
"""The center of the incircle.
The incircle is the circle which lies inside the triangle and touches
all three sides.
Returns
=======
incenter : Point
See Also
========
incircle, sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.incenter
Point2D(1 - sqrt(2)/2, 1 - sqrt(2)/2)
"""
s = self.sides
l = Matrix([s[i].length for i in [1, 2, 0]])
p = sum(l)
v = self.vertices
x = simplify(l.dot(Matrix([vi.x for vi in v]))/p)
y = simplify(l.dot(Matrix([vi.y for vi in v]))/p)
return Point(x, y)
@property
def inradius(self):
"""The radius of the incircle.
Returns
=======
inradius : number or Basic instance
See Also
========
incircle, sympy.geometry.ellipse.Circle.radius
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(4, 0), Point(0, 3)
>>> t = Triangle(p1, p2, p3)
>>> t.inradius
1
"""
return simplify(2 * self.area / self.perimeter)
@property
def incircle(self):
"""The incircle of the triangle.
The incircle is the circle which lies inside the triangle and touches
all three sides.
Returns
=======
incircle : Circle
See Also
========
sympy.geometry.ellipse.Circle
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(2, 0), Point(0, 2)
>>> t = Triangle(p1, p2, p3)
>>> t.incircle
Circle(Point2D(2 - sqrt(2), 2 - sqrt(2)), 2 - sqrt(2))
"""
return Circle(self.incenter, self.inradius)
@property
def exradii(self):
"""The radius of excircles of a triangle.
An excircle of the triangle is a circle lying outside the triangle,
tangent to one of its sides and tangent to the extensions of the
other two.
Returns
=======
exradii : dict
See Also
========
sympy.geometry.polygon.Triangle.inradius
Examples
========
Each exradius is keyed to the side of the triangle that its excircle
touches, e.g. the exradius of the excircle touching side 2 is:
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(6, 0), Point(0, 2)
>>> t = Triangle(p1, p2, p3)
>>> t.exradii[t.sides[2]]
-2 + sqrt(10)
References
==========
.. [1] http://mathworld.wolfram.com/Exradius.html
.. [2] http://mathworld.wolfram.com/Excircles.html
"""
side = self.sides
a = side[0].length
b = side[1].length
c = side[2].length
s = (a+b+c)/2
area = self.area
exradii = {self.sides[0]: simplify(area/(s-a)),
self.sides[1]: simplify(area/(s-b)),
self.sides[2]: simplify(area/(s-c))}
return exradii
@property
def excenters(self):
"""Excenters of the triangle.
An excenter is the center of a circle that is tangent to a side of the
triangle and the extensions of the other two sides.
Returns
=======
excenters : dict
Examples
========
The excenters are keyed to the side of the triangle to which their
corresponding excircle is tangent, e.g. the excenter of the excircle
touching side 0 is:
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(6, 0), Point(0, 2)
>>> t = Triangle(p1, p2, p3)
>>> t.excenters[t.sides[0]]
Point2D(12*sqrt(10), 2/3 + sqrt(10)/3)
See Also
========
sympy.geometry.polygon.Triangle.exradii
References
==========
.. [1] http://mathworld.wolfram.com/Excircles.html
"""
s = self.sides
v = self.vertices
a = s[0].length
b = s[1].length
c = s[2].length
x = [v[0].x, v[1].x, v[2].x]
y = [v[0].y, v[1].y, v[2].y]
exc_coords = {
"x1": simplify(-a*x[0]+b*x[1]+c*x[2]/(-a+b+c)),
"x2": simplify(a*x[0]-b*x[1]+c*x[2]/(a-b+c)),
"x3": simplify(a*x[0]+b*x[1]-c*x[2]/(a+b-c)),
"y1": simplify(-a*y[0]+b*y[1]+c*y[2]/(-a+b+c)),
"y2": simplify(a*y[0]-b*y[1]+c*y[2]/(a-b+c)),
"y3": simplify(a*y[0]+b*y[1]-c*y[2]/(a+b-c))
}
excenters = {
s[0]: Point(exc_coords["x1"], exc_coords["y1"]),
s[1]: Point(exc_coords["x2"], exc_coords["y2"]),
s[2]: Point(exc_coords["x3"], exc_coords["y3"])
}
return excenters
@property
def medians(self):
"""The medians of the triangle.
A median of a triangle is a straight line through a vertex and the
midpoint of the opposite side, and divides the triangle into two
equal areas.
Returns
=======
medians : dict
Each key is a vertex (Point) and each value is the median (Segment)
at that point.
See Also
========
sympy.geometry.point.Point.midpoint, sympy.geometry.line.Segment.midpoint
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.medians[p1]
Segment2D(Point2D(0, 0), Point2D(1/2, 1/2))
"""
s = self.sides
v = self.vertices
return {v[0]: Segment(v[0], s[1].midpoint),
v[1]: Segment(v[1], s[2].midpoint),
v[2]: Segment(v[2], s[0].midpoint)}
@property
def medial(self):
"""The medial triangle of the triangle.
The triangle which is formed from the midpoints of the three sides.
Returns
=======
medial : Triangle
See Also
========
sympy.geometry.line.Segment.midpoint
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.medial
Triangle(Point2D(1/2, 0), Point2D(1/2, 1/2), Point2D(0, 1/2))
"""
s = self.sides
return Triangle(s[0].midpoint, s[1].midpoint, s[2].midpoint)
@property
def nine_point_circle(self):
"""The nine-point circle of the triangle.
Nine-point circle is the circumcircle of the medial triangle, which
passes through the feet of altitudes and the middle points of segments
connecting the vertices and the orthocenter.
Returns
=======
nine_point_circle : Circle
See also
========
sympy.geometry.line.Segment.midpoint
sympy.geometry.polygon.Triangle.medial
sympy.geometry.polygon.Triangle.orthocenter
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.nine_point_circle
Circle(Point2D(1/4, 1/4), sqrt(2)/4)
"""
return Circle(*self.medial.vertices)
@property
def eulerline(self):
"""The Euler line of the triangle.
The line which passes through circumcenter, centroid and orthocenter.
Returns
=======
eulerline : Line (or Point for equilateral triangles in which case all
centers coincide)
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.eulerline
Line2D(Point2D(0, 0), Point2D(1/2, 1/2))
"""
if self.is_equilateral():
return self.orthocenter
return Line(self.orthocenter, self.circumcenter)
def rad(d):
"""Return the radian value for the given degrees (pi = 180 degrees)."""
return d*pi/180
def deg(r):
"""Return the degree value for the given radians (pi = 180 degrees)."""
return r/pi*180
def _slope(d):
rv = tan(rad(d))
return rv
def _asa(d1, l, d2):
"""Return triangle having side with length l on the x-axis."""
xy = Line((0, 0), slope=_slope(d1)).intersection(
Line((l, 0), slope=_slope(180 - d2)))[0]
return Triangle((0, 0), (l, 0), xy)
def _sss(l1, l2, l3):
"""Return triangle having side of length l1 on the x-axis."""
c1 = Circle((0, 0), l3)
c2 = Circle((l1, 0), l2)
inter = [a for a in c1.intersection(c2) if a.y.is_nonnegative]
if not inter:
return None
pt = inter[0]
return Triangle((0, 0), (l1, 0), pt)
def _sas(l1, d, l2):
"""Return triangle having side with length l2 on the x-axis."""
p1 = Point(0, 0)
p2 = Point(l2, 0)
p3 = Point(cos(rad(d))*l1, sin(rad(d))*l1)
return Triangle(p1, p2, p3)
|
d1ce69c1eb18ef3297d746b1b5d41e8e1b75a0ea77263cd5fd50502a62fb2818 | """Recurrence Operators"""
from sympy.core.singleton import S
from sympy.core.symbol import (Symbol, symbols)
from sympy.printing import sstr
from sympy.core.sympify import sympify
def RecurrenceOperators(base, generator):
"""
Returns an Algebra of Recurrence Operators and the operator for
shifting i.e. the `Sn` operator.
The first argument needs to be the base polynomial ring for the algebra
and the second argument must be a generator which can be either a
noncommutative Symbol or a string.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy import symbols
>>> from sympy.holonomic.recurrence import RecurrenceOperators
>>> n = symbols('n', integer=True)
>>> R, Sn = RecurrenceOperators(ZZ.old_poly_ring(n), 'Sn')
"""
ring = RecurrenceOperatorAlgebra(base, generator)
return (ring, ring.shift_operator)
class RecurrenceOperatorAlgebra:
"""
A Recurrence Operator Algebra is a set of noncommutative polynomials
in intermediate `Sn` and coefficients in a base ring A. It follows the
commutation rule:
Sn * a(n) = a(n + 1) * Sn
This class represents a Recurrence Operator Algebra and serves as the parent ring
for Recurrence Operators.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy import symbols
>>> from sympy.holonomic.recurrence import RecurrenceOperators
>>> n = symbols('n', integer=True)
>>> R, Sn = RecurrenceOperators(ZZ.old_poly_ring(n), 'Sn')
>>> R
Univariate Recurrence Operator Algebra in intermediate Sn over the base ring
ZZ[n]
See Also
========
RecurrenceOperator
"""
def __init__(self, base, generator):
# the base ring for the algebra
self.base = base
# the operator representing shift i.e. `Sn`
self.shift_operator = RecurrenceOperator(
[base.zero, base.one], self)
if generator is None:
self.gen_symbol = symbols('Sn', commutative=False)
else:
if isinstance(generator, str):
self.gen_symbol = symbols(generator, commutative=False)
elif isinstance(generator, Symbol):
self.gen_symbol = generator
def __str__(self):
string = 'Univariate Recurrence Operator Algebra in intermediate '\
+ sstr(self.gen_symbol) + ' over the base ring ' + \
(self.base).__str__()
return string
__repr__ = __str__
def __eq__(self, other):
if self.base == other.base and self.gen_symbol == other.gen_symbol:
return True
else:
return False
def _add_lists(list1, list2):
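# Pairwise-add two coefficient lists of possibly different lengths, keeping the
# tail of the longer one, e.g. _add_lists([1, 2], [3, 4, 5]) == [4, 6, 5].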
if len(list1) <= len(list2):
sol = [a + b for a, b in zip(list1, list2)] + list2[len(list1):]
else:
sol = [a + b for a, b in zip(list1, list2)] + list1[len(list2):]
return sol
class RecurrenceOperator:
"""
The Recurrence Operators are defined by a list of polynomials
in the base ring and the parent ring of the Operator.
Explanation
===========
Takes a list of polynomials for each power of Sn and the
parent ring which must be an instance of RecurrenceOperatorAlgebra.
A Recurrence Operator can be created easily using
the operator `Sn`. See examples below.
Examples
========
>>> from sympy.holonomic.recurrence import RecurrenceOperator, RecurrenceOperators
>>> from sympy.polys.domains import ZZ
>>> from sympy import symbols
>>> n = symbols('n', integer=True)
>>> R, Sn = RecurrenceOperators(ZZ.old_poly_ring(n),'Sn')
>>> RecurrenceOperator([0, 1, n**2], R)
(1)Sn + (n**2)Sn**2
>>> Sn*n
(n + 1)Sn
>>> n*Sn*n + 1 - Sn**2*n
(1) + (n**2 + n)Sn + (-n - 2)Sn**2
See Also
========
DifferentialOperatorAlgebra
"""
_op_priority = 20
def __init__(self, list_of_poly, parent):
# the parent ring for this operator
# must be a RecurrenceOperatorAlgebra object
self.parent = parent
# sequence of polynomials in n for each power of Sn
# represents the operator
# convert the expressions into ring elements using from_sympy
if isinstance(list_of_poly, list):
for i, j in enumerate(list_of_poly):
if isinstance(j, int):
list_of_poly[i] = self.parent.base.from_sympy(S(j))
elif not isinstance(j, self.parent.base.dtype):
list_of_poly[i] = self.parent.base.from_sympy(j)
self.listofpoly = list_of_poly
self.order = len(self.listofpoly) - 1
def __mul__(self, other):
"""
Multiplies two Operators and returns another
RecurrenceOperator instance using the commutation rule
Sn * a(n) = a(n + 1) * Sn
"""
listofself = self.listofpoly
base = self.parent.base
if not isinstance(other, RecurrenceOperator):
if not isinstance(other, self.parent.base.dtype):
listofother = [self.parent.base.from_sympy(sympify(other))]
else:
listofother = [other]
else:
listofother = other.listofpoly
# multiply a polynomial `b` with a list of polynomials
def _mul_dmp_diffop(b, listofother):
if isinstance(listofother, list):
sol = []
for i in listofother:
sol.append(i * b)
return sol
else:
return [b * listofother]
sol = _mul_dmp_diffop(listofself[0], listofother)
# compute Sn^i * b
def _mul_Sni_b(b):
sol = [base.zero]
if isinstance(b, list):
for i in b:
j = base.to_sympy(i).subs(base.gens[0], base.gens[0] + S.One)
sol.append(base.from_sympy(j))
else:
j = b.subs(base.gens[0], base.gens[0] + S.One)
sol.append(base.from_sympy(j))
return sol
for i in range(1, len(listofself)):
# find Sn^i * b in ith iteration
listofother = _mul_Sni_b(listofother)
# solution = solution + listofself[i] * (Sn^i * b)
sol = _add_lists(sol, _mul_dmp_diffop(listofself[i], listofother))
return RecurrenceOperator(sol, self.parent)
def __rmul__(self, other):
if not isinstance(other, RecurrenceOperator):
if isinstance(other, int):
other = S(other)
if not isinstance(other, self.parent.base.dtype):
other = (self.parent.base).from_sympy(other)
sol = []
for j in self.listofpoly:
sol.append(other * j)
return RecurrenceOperator(sol, self.parent)
def __add__(self, other):
if isinstance(other, RecurrenceOperator):
sol = _add_lists(self.listofpoly, other.listofpoly)
return RecurrenceOperator(sol, self.parent)
else:
if isinstance(other, int):
other = S(other)
list_self = self.listofpoly
if not isinstance(other, self.parent.base.dtype):
list_other = [((self.parent).base).from_sympy(other)]
else:
list_other = [other]
sol = []
sol.append(list_self[0] + list_other[0])
sol += list_self[1:]
return RecurrenceOperator(sol, self.parent)
__radd__ = __add__
def __sub__(self, other):
return self + (-1) * other
def __rsub__(self, other):
return (-1) * self + other
def __pow__(self, n):
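# Powers of the plain shift operator are built directly as a coefficient list
# with a single trailing one (e.g. Sn**3 has the list [0, 0, 0, 1] and prints
# as (1)Sn**3); any other operator is raised to a power by repeated squaring.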
if n == 1:
return self
if n == 0:
return RecurrenceOperator([self.parent.base.one], self.parent)
# if self is `Sn`
if self.listofpoly == self.parent.shift_operator.listofpoly:
sol = []
for i in range(0, n):
sol.append(self.parent.base.zero)
sol.append(self.parent.base.one)
return RecurrenceOperator(sol, self.parent)
else:
if n % 2 == 1:
powreduce = self**(n - 1)
return powreduce * self
elif n % 2 == 0:
powreduce = self**(n / 2)
return powreduce * powreduce
def __str__(self):
listofpoly = self.listofpoly
print_str = ''
for i, j in enumerate(listofpoly):
if j == self.parent.base.zero:
continue
if i == 0:
print_str += '(' + sstr(j) + ')'
continue
if print_str:
print_str += ' + '
if i == 1:
print_str += '(' + sstr(j) + ')Sn'
continue
print_str += '(' + sstr(j) + ')' + 'Sn**' + sstr(i)
return print_str
__repr__ = __str__
def __eq__(self, other):
if isinstance(other, RecurrenceOperator):
if self.listofpoly == other.listofpoly and self.parent == other.parent:
return True
else:
return False
else:
if self.listofpoly[0] == other:
for i in self.listofpoly[1:]:
if i is not self.parent.base.zero:
return False
return True
else:
return False
class HolonomicSequence:
"""
A Holonomic Sequence is a type of sequence satisfying a linear homogeneous
    recurrence relation with polynomial coefficients. Alternatively, a sequence
    is Holonomic if and only if its generating function is a Holonomic Function.
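    For example, the factorial sequence ``u(n) = n!`` satisfies the recurrence
    ``u(n + 1) - (n + 1)*u(n) = 0`` and can be represented as follows
    (an illustrative sketch):
    >>> from sympy.holonomic.recurrence import RecurrenceOperators, HolonomicSequence
    >>> from sympy.polys.domains import QQ
    >>> from sympy import symbols
    >>> n = symbols('n')
    >>> R, Sn = RecurrenceOperators(QQ.old_poly_ring(n), 'Sn')
    >>> HolonomicSequence(Sn - (n + 1), [1])  # doctest: +SKIP
    HolonomicSequence((-n - 1) + (1)Sn, n), u(0) = 1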
"""
def __init__(self, recurrence, u0=[]):
self.recurrence = recurrence
if not isinstance(u0, list):
self.u0 = [u0]
else:
self.u0 = u0
if len(self.u0) == 0:
self._have_init_cond = False
else:
self._have_init_cond = True
self.n = recurrence.parent.base.gens[0]
def __repr__(self):
str_sol = 'HolonomicSequence(%s, %s)' % ((self.recurrence).__repr__(), sstr(self.n))
if not self._have_init_cond:
return str_sol
else:
cond_str = ''
seq_str = 0
for i in self.u0:
cond_str += ', u(%s) = %s' % (sstr(seq_str), sstr(i))
seq_str += 1
sol = str_sol + cond_str
return sol
__str__ = __repr__
def __eq__(self, other):
if self.recurrence == other.recurrence:
if self.n == other.n:
if self._have_init_cond and other._have_init_cond:
if self.u0 == other.u0:
return True
else:
return False
else:
return True
else:
return False
else:
return False
|
3cdb44d3720eaedfea6d5a6e23f6df785c1ff900961cab29d53f981981cc5c93 | """
This module implements Holonomic Functions and
various operations on them.
"""
from sympy.core import Add, Mul, Pow
from sympy.core.numbers import NaN, Infinity, NegativeInfinity, Float, I, pi
from sympy.core.singleton import S
from sympy.core.sorting import ordered
from sympy.core.symbol import Dummy, Symbol
from sympy.core.sympify import sympify
from sympy.functions.combinatorial.factorials import binomial, factorial, rf
from sympy.functions.elementary.exponential import exp_polar, exp, log
from sympy.functions.elementary.hyperbolic import (cosh, sinh)
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import (cos, sin, sinc)
from sympy.functions.special.error_functions import (Ci, Shi, Si, erf, erfc, erfi)
from sympy.functions.special.gamma_functions import gamma
from sympy.functions.special.hyper import hyper, meijerg
from sympy.integrals import meijerint
from sympy.matrices import Matrix
from sympy.polys.rings import PolyElement
from sympy.polys.fields import FracElement
from sympy.polys.domains import QQ, RR
from sympy.polys.polyclasses import DMF
from sympy.polys.polyroots import roots
from sympy.polys.polytools import Poly
from sympy.polys.matrices import DomainMatrix
from sympy.printing import sstr
from sympy.series.limits import limit
from sympy.series.order import Order
from sympy.simplify.hyperexpand import hyperexpand
from sympy.simplify.simplify import nsimplify
from sympy.solvers.solvers import solve
from .recurrence import HolonomicSequence, RecurrenceOperator, RecurrenceOperators
from .holonomicerrors import (NotPowerSeriesError, NotHyperSeriesError,
SingularityError, NotHolonomicError)
def _find_nonzero_solution(r, homosys):
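    # return one solution of r*X = homosys: the particular solution plus the
    # sum of the nullspace basis vectors (nonzero whenever the nullspace is
    # nontrivial)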
ones = lambda shape: DomainMatrix.ones(shape, r.domain)
particular, nullspace = r._solve(homosys)
nullity = nullspace.shape[0]
nullpart = ones((1, nullity)) * nullspace
sol = (particular + nullpart).transpose()
return sol
def DifferentialOperators(base, generator):
r"""
This function is used to create annihilators using ``Dx``.
Explanation
===========
Returns an Algebra of Differential Operators also called Weyl Algebra
and the operator for differentiation i.e. the ``Dx`` operator.
Parameters
==========
base:
Base polynomial ring for the algebra.
The base polynomial ring is the ring of polynomials in :math:`x` that
will appear as coefficients in the operators.
generator:
Generator of the algebra which can
be either a noncommutative ``Symbol`` or a string. e.g. "Dx" or "D".
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.abc import x
>>> from sympy.holonomic.holonomic import DifferentialOperators
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
>>> R
Univariate Differential Operator Algebra in intermediate Dx over the base ring ZZ[x]
>>> Dx*x
(1) + (x)*Dx
"""
ring = DifferentialOperatorAlgebra(base, generator)
return (ring, ring.derivative_operator)
class DifferentialOperatorAlgebra:
r"""
An Ore Algebra is a set of noncommutative polynomials in the
intermediate ``Dx`` and coefficients in a base polynomial ring :math:`A`.
It follows the commutation rule:
.. math ::
Dxa = \sigma(a)Dx + \delta(a)
    for :math:`a \in A`,
    where :math:`\sigma: A \rightarrow A` is an endomorphism and :math:`\delta: A \rightarrow A`
is a skew-derivation i.e. :math:`\delta(ab) = \delta(a) b + \sigma(a) \delta(b)`.
If one takes the sigma as identity map and delta as the standard derivation
then it becomes the algebra of Differential Operators also called
a Weyl Algebra i.e. an algebra whose elements are Differential Operators.
This class represents a Weyl Algebra and serves as the parent ring for
Differential Operators.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy import symbols
>>> from sympy.holonomic.holonomic import DifferentialOperators
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
>>> R
Univariate Differential Operator Algebra in intermediate Dx over the base ring
ZZ[x]
See Also
========
DifferentialOperator
"""
def __init__(self, base, generator):
# the base polynomial ring for the algebra
self.base = base
# the operator representing differentiation i.e. `Dx`
self.derivative_operator = DifferentialOperator(
[base.zero, base.one], self)
if generator is None:
self.gen_symbol = Symbol('Dx', commutative=False)
else:
if isinstance(generator, str):
self.gen_symbol = Symbol(generator, commutative=False)
elif isinstance(generator, Symbol):
self.gen_symbol = generator
def __str__(self):
string = 'Univariate Differential Operator Algebra in intermediate '\
+ sstr(self.gen_symbol) + ' over the base ring ' + \
(self.base).__str__()
return string
__repr__ = __str__
def __eq__(self, other):
if self.base == other.base and self.gen_symbol == other.gen_symbol:
return True
else:
return False
class DifferentialOperator:
"""
Differential Operators are elements of Weyl Algebra. The Operators
are defined by a list of polynomials in the base ring and the
parent ring of the Operator i.e. the algebra it belongs to.
Explanation
===========
Takes a list of polynomials for each power of ``Dx`` and the
parent ring which must be an instance of DifferentialOperatorAlgebra.
A Differential Operator can be created easily using
the operator ``Dx``. See examples below.
Examples
========
>>> from sympy.holonomic.holonomic import DifferentialOperator, DifferentialOperators
>>> from sympy.polys.domains import ZZ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
>>> DifferentialOperator([0, 1, x**2], R)
(1)*Dx + (x**2)*Dx**2
>>> (x*Dx*x + 1 - Dx**2)**2
(2*x**2 + 2*x + 1) + (4*x**3 + 2*x**2 - 4)*Dx + (x**4 - 6*x - 2)*Dx**2 + (-2*x**2)*Dx**3 + (1)*Dx**4
See Also
========
DifferentialOperatorAlgebra
"""
_op_priority = 20
def __init__(self, list_of_poly, parent):
"""
Parameters
==========
list_of_poly:
List of polynomials belonging to the base ring of the algebra.
parent:
Parent algebra of the operator.
"""
# the parent ring for this operator
        # must be a DifferentialOperatorAlgebra object
self.parent = parent
base = self.parent.base
self.x = base.gens[0] if isinstance(base.gens[0], Symbol) else base.gens[0][0]
# sequence of polynomials in x for each power of Dx
# the list should not have trailing zeroes
# represents the operator
# convert the expressions into ring elements using from_sympy
for i, j in enumerate(list_of_poly):
if not isinstance(j, base.dtype):
list_of_poly[i] = base.from_sympy(sympify(j))
else:
list_of_poly[i] = base.from_sympy(base.to_sympy(j))
self.listofpoly = list_of_poly
# highest power of `Dx`
self.order = len(self.listofpoly) - 1
def __mul__(self, other):
"""
        Multiplies two DifferentialOperators and returns another
DifferentialOperator instance using the commutation rule
Dx*a = a*Dx + a'
"""
listofself = self.listofpoly
if not isinstance(other, DifferentialOperator):
if not isinstance(other, self.parent.base.dtype):
listofother = [self.parent.base.from_sympy(sympify(other))]
else:
listofother = [other]
else:
listofother = other.listofpoly
# multiplies a polynomial `b` with a list of polynomials
def _mul_dmp_diffop(b, listofother):
if isinstance(listofother, list):
sol = []
for i in listofother:
sol.append(i * b)
return sol
else:
return [b * listofother]
sol = _mul_dmp_diffop(listofself[0], listofother)
# compute Dx^i * b
def _mul_Dxi_b(b):
sol1 = [self.parent.base.zero]
sol2 = []
if isinstance(b, list):
for i in b:
sol1.append(i)
sol2.append(i.diff())
else:
sol1.append(self.parent.base.from_sympy(b))
sol2.append(self.parent.base.from_sympy(b).diff())
return _add_lists(sol1, sol2)
for i in range(1, len(listofself)):
# find Dx^i * b in ith iteration
listofother = _mul_Dxi_b(listofother)
# solution = solution + listofself[i] * (Dx^i * b)
sol = _add_lists(sol, _mul_dmp_diffop(listofself[i], listofother))
return DifferentialOperator(sol, self.parent)
def __rmul__(self, other):
if not isinstance(other, DifferentialOperator):
if not isinstance(other, self.parent.base.dtype):
other = (self.parent.base).from_sympy(sympify(other))
sol = []
for j in self.listofpoly:
sol.append(other * j)
return DifferentialOperator(sol, self.parent)
def __add__(self, other):
if isinstance(other, DifferentialOperator):
sol = _add_lists(self.listofpoly, other.listofpoly)
return DifferentialOperator(sol, self.parent)
else:
list_self = self.listofpoly
if not isinstance(other, self.parent.base.dtype):
list_other = [((self.parent).base).from_sympy(sympify(other))]
else:
list_other = [other]
sol = []
sol.append(list_self[0] + list_other[0])
sol += list_self[1:]
return DifferentialOperator(sol, self.parent)
__radd__ = __add__
def __sub__(self, other):
return self + (-1) * other
def __rsub__(self, other):
return (-1) * self + other
def __neg__(self):
return -1 * self
def __truediv__(self, other):
return self * (S.One / other)
def __pow__(self, n):
if n == 1:
return self
if n == 0:
return DifferentialOperator([self.parent.base.one], self.parent)
# if self is `Dx`
if self.listofpoly == self.parent.derivative_operator.listofpoly:
sol = []
for i in range(0, n):
sol.append(self.parent.base.zero)
sol.append(self.parent.base.one)
return DifferentialOperator(sol, self.parent)
# the general case
else:
if n % 2 == 1:
powreduce = self**(n - 1)
return powreduce * self
elif n % 2 == 0:
powreduce = self**(n / 2)
return powreduce * powreduce
def __str__(self):
listofpoly = self.listofpoly
print_str = ''
for i, j in enumerate(listofpoly):
if j == self.parent.base.zero:
continue
if i == 0:
print_str += '(' + sstr(j) + ')'
continue
if print_str:
print_str += ' + '
if i == 1:
print_str += '(' + sstr(j) + ')*%s' %(self.parent.gen_symbol)
continue
print_str += '(' + sstr(j) + ')' + '*%s**' %(self.parent.gen_symbol) + sstr(i)
return print_str
__repr__ = __str__
def __eq__(self, other):
if isinstance(other, DifferentialOperator):
if self.listofpoly == other.listofpoly and self.parent == other.parent:
return True
else:
return False
else:
if self.listofpoly[0] == other:
for i in self.listofpoly[1:]:
if i is not self.parent.base.zero:
return False
return True
else:
return False
def is_singular(self, x0):
"""
Checks if the differential equation is singular at x0.
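        The equation is singular at ``x0`` when the leading coefficient of the
        operator vanishes there; a quick illustrative check:
        >>> from sympy.holonomic.holonomic import DifferentialOperators
        >>> from sympy.polys.domains import ZZ
        >>> from sympy import symbols
        >>> x = symbols('x')
        >>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
        >>> (x*Dx + 1).is_singular(0)
        True
        >>> (Dx - 1).is_singular(0)
        False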
"""
base = self.parent.base
return x0 in roots(base.to_sympy(self.listofpoly[-1]), self.x)
class HolonomicFunction:
r"""
A Holonomic Function is a solution to a linear homogeneous ordinary
differential equation with polynomial coefficients. This differential
equation can also be represented by an annihilator i.e. a Differential
Operator ``L`` such that :math:`L.f = 0`. For uniqueness of these functions,
initial conditions can also be provided along with the annihilator.
Explanation
===========
    Holonomic functions have closure properties and thus form a ring.
    Given two Holonomic Functions f and g, their sum, product,
    integral and derivative are also Holonomic Functions.
    For ordinary points the initial condition should be a vector of values of
    the derivatives, i.e. :math:`[y(x_0), y'(x_0), y''(x_0), \dots ]`.
For regular singular points initial conditions can also be provided in this
format:
:math:`{s0: [C_0, C_1, ...], s1: [C^1_0, C^1_1, ...], ...}`
    where s0, s1, ... are the roots of the indicial equation and the vectors
    :math:`[C_0, C_1, ...], [C^1_0, C^1_1, ...], ...` are the corresponding initial
terms of the associated power series. See Examples below.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import QQ
>>> from sympy import symbols, S
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> p = HolonomicFunction(Dx - 1, x, 0, [1]) # e^x
>>> q = HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]) # sin(x)
>>> p + q # annihilator of e^x + sin(x)
HolonomicFunction((-1) + (1)*Dx + (-1)*Dx**2 + (1)*Dx**3, x, 0, [1, 2, 1])
>>> p * q # annihilator of e^x * sin(x)
HolonomicFunction((2) + (-2)*Dx + (1)*Dx**2, x, 0, [0, 1])
An example of initial conditions for regular singular points,
the indicial equation has only one root `1/2`.
>>> HolonomicFunction(-S(1)/2 + x*Dx, x, 0, {S(1)/2: [1]})
HolonomicFunction((-1/2) + (x)*Dx, x, 0, {1/2: [1]})
>>> HolonomicFunction(-S(1)/2 + x*Dx, x, 0, {S(1)/2: [1]}).to_expr()
sqrt(x)
To plot a Holonomic Function, one can use `.evalf()` for numerical
computation. Here's an example on `sin(x)**2/x` using numpy and matplotlib.
>>> import sympy.holonomic # doctest: +SKIP
>>> from sympy import var, sin # doctest: +SKIP
>>> import matplotlib.pyplot as plt # doctest: +SKIP
>>> import numpy as np # doctest: +SKIP
>>> var("x") # doctest: +SKIP
>>> r = np.linspace(1, 5, 100) # doctest: +SKIP
>>> y = sympy.holonomic.expr_to_holonomic(sin(x)**2/x, x0=1).evalf(r) # doctest: +SKIP
>>> plt.plot(r, y, label="holonomic function") # doctest: +SKIP
>>> plt.show() # doctest: +SKIP
"""
_op_priority = 20
def __init__(self, annihilator, x, x0=0, y0=None):
"""
Parameters
==========
annihilator:
Annihilator of the Holonomic Function, represented by a
`DifferentialOperator` object.
x:
Variable of the function.
x0:
The point at which initial conditions are stored.
Generally an integer.
y0:
The initial condition. The proper format for the initial condition
is described in class docstring. To make the function unique,
length of the vector `y0` should be equal to or greater than the
order of differential equation.
"""
# initial condition
self.y0 = y0
# the point for initial conditions, default is zero.
self.x0 = x0
# differential operator L such that L.f = 0
self.annihilator = annihilator
self.x = x
def __str__(self):
if self._have_init_cond():
str_sol = 'HolonomicFunction(%s, %s, %s, %s)' % (str(self.annihilator),\
sstr(self.x), sstr(self.x0), sstr(self.y0))
else:
str_sol = 'HolonomicFunction(%s, %s)' % (str(self.annihilator),\
sstr(self.x))
return str_sol
__repr__ = __str__
def unify(self, other):
"""
        Unifies the base polynomial rings of two given Holonomic
        Functions.
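        For instance, a function defined over ``ZZ[x]`` and one defined over
        ``QQ[x]`` are both brought to ``QQ[x]`` (an illustrative sketch):
        >>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
        >>> from sympy.polys.domains import ZZ, QQ
        >>> from sympy import symbols
        >>> x = symbols('x')
        >>> R1, Dx1 = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
        >>> R2, Dx2 = DifferentialOperators(QQ.old_poly_ring(x), 'Dx')
        >>> p = HolonomicFunction(Dx1 - 1, x, 0, [1])
        >>> q = HolonomicFunction(Dx2**2 + 1, x, 0, [0, 1])
        >>> a, b = p.unify(q)
        >>> print(a.annihilator.parent.base)  # doctest: +SKIP
        QQ[x]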
"""
R1 = self.annihilator.parent.base
R2 = other.annihilator.parent.base
dom1 = R1.dom
dom2 = R2.dom
if R1 == R2:
return (self, other)
R = (dom1.unify(dom2)).old_poly_ring(self.x)
newparent, _ = DifferentialOperators(R, str(self.annihilator.parent.gen_symbol))
sol1 = [R1.to_sympy(i) for i in self.annihilator.listofpoly]
sol2 = [R2.to_sympy(i) for i in other.annihilator.listofpoly]
sol1 = DifferentialOperator(sol1, newparent)
sol2 = DifferentialOperator(sol2, newparent)
sol1 = HolonomicFunction(sol1, self.x, self.x0, self.y0)
sol2 = HolonomicFunction(sol2, other.x, other.x0, other.y0)
return (sol1, sol2)
def is_singularics(self):
"""
        Returns True if the function has a singular initial condition
        in the dictionary format.
        Returns False if the function has an ordinary initial condition
        in the list format.
Returns None for all other cases.
"""
if isinstance(self.y0, dict):
return True
elif isinstance(self.y0, list):
return False
def _have_init_cond(self):
"""
        Checks if the function has an initial condition.
"""
return bool(self.y0)
def _singularics_to_ord(self):
"""
Converts a singular initial condition to ordinary if possible.
"""
a = list(self.y0)[0]
b = self.y0[a]
if len(self.y0) == 1 and a == int(a) and a > 0:
y0 = []
a = int(a)
for i in range(a):
y0.append(S.Zero)
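            # the initial terms describe y = Sum(b[i]*(x - x0)**(a + i)), so the
            # (a + i)-th derivative at x0 is b[i]*(a + i)!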
y0 += [j * factorial(a + i) for i, j in enumerate(b)]
return HolonomicFunction(self.annihilator, self.x, self.x0, y0)
def __add__(self, other):
# if the ground domains are different
if self.annihilator.parent.base != other.annihilator.parent.base:
a, b = self.unify(other)
return a + b
deg1 = self.annihilator.order
deg2 = other.annihilator.order
dim = max(deg1, deg2)
R = self.annihilator.parent.base
K = R.get_field()
rowsself = [self.annihilator]
rowsother = [other.annihilator]
gen = self.annihilator.parent.derivative_operator
# constructing annihilators up to order dim
for i in range(dim - deg1):
diff1 = (gen * rowsself[-1])
rowsself.append(diff1)
for i in range(dim - deg2):
diff2 = (gen * rowsother[-1])
rowsother.append(diff2)
row = rowsself + rowsother
# constructing the matrix of the ansatz
r = []
for expr in row:
p = []
for i in range(dim + 1):
if i >= len(expr.listofpoly):
p.append(K.zero)
else:
p.append(K.new(expr.listofpoly[i].rep))
r.append(p)
# solving the linear system using gauss jordan solver
r = DomainMatrix(r, (len(row), dim+1), K).transpose()
homosys = DomainMatrix.zeros((dim+1, 1), K)
sol = _find_nonzero_solution(r, homosys)
        # if a solution is not obtained, increase the order by 1 in each
        # iteration
while sol.is_zero_matrix:
dim += 1
diff1 = (gen * rowsself[-1])
rowsself.append(diff1)
diff2 = (gen * rowsother[-1])
rowsother.append(diff2)
row = rowsself + rowsother
r = []
for expr in row:
p = []
for i in range(dim + 1):
if i >= len(expr.listofpoly):
p.append(K.zero)
else:
p.append(K.new(expr.listofpoly[i].rep))
r.append(p)
# solving the linear system using gauss jordan solver
r = DomainMatrix(r, (len(row), dim+1), K).transpose()
homosys = DomainMatrix.zeros((dim+1, 1), K)
sol = _find_nonzero_solution(r, homosys)
# taking only the coefficients needed to multiply with `self`
        # can also be done the other way by taking the R.H.S. and multiplying with
# `other`
sol = sol.flat()[:dim + 1 - deg1]
sol1 = _normalize(sol, self.annihilator.parent)
# annihilator of the solution
sol = sol1 * (self.annihilator)
sol = _normalize(sol.listofpoly, self.annihilator.parent, negative=False)
if not (self._have_init_cond() and other._have_init_cond()):
return HolonomicFunction(sol, self.x)
# both the functions have ordinary initial conditions
if self.is_singularics() == False and other.is_singularics() == False:
# directly add the corresponding value
if self.x0 == other.x0:
                # try to extend the initial conditions
# using the annihilator
y1 = _extend_y0(self, sol.order)
y2 = _extend_y0(other, sol.order)
y0 = [a + b for a, b in zip(y1, y2)]
return HolonomicFunction(sol, self.x, self.x0, y0)
else:
                # change the initial conditions to the same point
selfat0 = self.annihilator.is_singular(0)
otherat0 = other.annihilator.is_singular(0)
if self.x0 == 0 and not selfat0 and not otherat0:
return self + other.change_ics(0)
elif other.x0 == 0 and not selfat0 and not otherat0:
return self.change_ics(0) + other
else:
selfatx0 = self.annihilator.is_singular(self.x0)
otheratx0 = other.annihilator.is_singular(self.x0)
if not selfatx0 and not otheratx0:
return self + other.change_ics(self.x0)
else:
return self.change_ics(other.x0) + other
if self.x0 != other.x0:
return HolonomicFunction(sol, self.x)
# if the functions have singular_ics
y1 = None
y2 = None
if self.is_singularics() == False and other.is_singularics() == True:
# convert the ordinary initial condition to singular.
_y0 = [j / factorial(i) for i, j in enumerate(self.y0)]
y1 = {S.Zero: _y0}
y2 = other.y0
elif self.is_singularics() == True and other.is_singularics() == False:
_y0 = [j / factorial(i) for i, j in enumerate(other.y0)]
y1 = self.y0
y2 = {S.Zero: _y0}
elif self.is_singularics() == True and other.is_singularics() == True:
y1 = self.y0
y2 = other.y0
# computing singular initial condition for the result
# taking union of the series terms of both functions
y0 = {}
for i in y1:
# add corresponding initial terms if the power
# on `x` is same
if i in y2:
y0[i] = [a + b for a, b in zip(y1[i], y2[i])]
else:
y0[i] = y1[i]
for i in y2:
if not i in y1:
y0[i] = y2[i]
return HolonomicFunction(sol, self.x, self.x0, y0)
def integrate(self, limits, initcond=False):
"""
Integrates the given holonomic function.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(Dx - 1, x, 0, [1]).integrate((x, 0, x)) # e^x - 1
HolonomicFunction((-1)*Dx + (1)*Dx**2, x, 0, [0, 1])
>>> HolonomicFunction(Dx**2 + 1, x, 0, [1, 0]).integrate((x, 0, x))
HolonomicFunction((1)*Dx + (1)*Dx**3, x, 0, [0, 1, 0])
"""
# to get the annihilator, just multiply by Dx from right
D = self.annihilator.parent.derivative_operator
        # if the function has initial conditions in the series format
if self.is_singularics() == True:
r = self._singularics_to_ord()
if r:
return r.integrate(limits, initcond=initcond)
# computing singular initial condition for the function
# produced after integration.
y0 = {}
for i in self.y0:
c = self.y0[i]
c2 = []
for j in range(len(c)):
if c[j] == 0:
c2.append(S.Zero)
# if power on `x` is -1, the integration becomes log(x)
# TODO: Implement this case
elif i + j + 1 == 0:
raise NotImplementedError("logarithmic terms in the series are not supported")
else:
c2.append(c[j] / S(i + j + 1))
y0[i + 1] = c2
if hasattr(limits, "__iter__"):
raise NotImplementedError("Definite integration for singular initial conditions")
return HolonomicFunction(self.annihilator * D, self.x, self.x0, y0)
# if no initial conditions are available for the function
if not self._have_init_cond():
if initcond:
return HolonomicFunction(self.annihilator * D, self.x, self.x0, [S.Zero])
return HolonomicFunction(self.annihilator * D, self.x)
# definite integral
# initial conditions for the answer will be stored at point `a`,
        # where `a` is the lower limit of integration
if hasattr(limits, "__iter__"):
if len(limits) == 3 and limits[0] == self.x:
x0 = self.x0
a = limits[1]
b = limits[2]
definite = True
else:
definite = False
y0 = [S.Zero]
y0 += self.y0
indefinite_integral = HolonomicFunction(self.annihilator * D, self.x, self.x0, y0)
if not definite:
return indefinite_integral
# use evalf to get the values at `a`
if x0 != a:
try:
indefinite_expr = indefinite_integral.to_expr()
except (NotHyperSeriesError, NotPowerSeriesError):
indefinite_expr = None
if indefinite_expr:
lower = indefinite_expr.subs(self.x, a)
if isinstance(lower, NaN):
lower = indefinite_expr.limit(self.x, a)
else:
lower = indefinite_integral.evalf(a)
if b == self.x:
y0[0] = y0[0] - lower
return HolonomicFunction(self.annihilator * D, self.x, x0, y0)
elif S(b).is_Number:
if indefinite_expr:
upper = indefinite_expr.subs(self.x, b)
if isinstance(upper, NaN):
upper = indefinite_expr.limit(self.x, b)
else:
upper = indefinite_integral.evalf(b)
return upper - lower
# if the upper limit is `x`, the answer will be a function
if b == self.x:
return HolonomicFunction(self.annihilator * D, self.x, a, y0)
        # if the upper limit is a Number, a numerical value will be returned
elif S(b).is_Number:
try:
s = HolonomicFunction(self.annihilator * D, self.x, a,\
y0).to_expr()
indefinite = s.subs(self.x, b)
if not isinstance(indefinite, NaN):
return indefinite
else:
return s.limit(self.x, b)
except (NotHyperSeriesError, NotPowerSeriesError):
return HolonomicFunction(self.annihilator * D, self.x, a, y0).evalf(b)
return HolonomicFunction(self.annihilator * D, self.x)
def diff(self, *args, **kwargs):
r"""
Differentiation of the given Holonomic function.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]).diff().to_expr()
cos(x)
>>> HolonomicFunction(Dx - 2, x, 0, [1]).diff().to_expr()
2*exp(2*x)
See Also
========
.integrate()
"""
kwargs.setdefault('evaluate', True)
if args:
if args[0] != self.x:
return S.Zero
elif len(args) == 2:
sol = self
for i in range(args[1]):
sol = sol.diff(args[0])
return sol
ann = self.annihilator
# if the function is constant.
if ann.listofpoly[0] == ann.parent.base.zero and ann.order == 1:
return S.Zero
# if the coefficient of y in the differential equation is zero.
# a shifting is done to compute the answer in this case.
elif ann.listofpoly[0] == ann.parent.base.zero:
sol = DifferentialOperator(ann.listofpoly[1:], ann.parent)
if self._have_init_cond():
# if ordinary initial condition
if self.is_singularics() == False:
return HolonomicFunction(sol, self.x, self.x0, self.y0[1:])
# TODO: support for singular initial condition
return HolonomicFunction(sol, self.x)
else:
return HolonomicFunction(sol, self.x)
# the general algorithm
R = ann.parent.base
K = R.get_field()
seq_dmf = [K.new(i.rep) for i in ann.listofpoly]
# -y = a1*y'/a0 + a2*y''/a0 ... + an*y^n/a0
rhs = [i / seq_dmf[0] for i in seq_dmf[1:]]
rhs.insert(0, K.zero)
# differentiate both lhs and rhs
sol = _derivate_diff_eq(rhs)
# add the term y' in lhs to rhs
sol = _add_lists(sol, [K.zero, K.one])
sol = _normalize(sol[1:], self.annihilator.parent, negative=False)
if not self._have_init_cond() or self.is_singularics() == True:
return HolonomicFunction(sol, self.x)
y0 = _extend_y0(self, sol.order + 1)[1:]
return HolonomicFunction(sol, self.x, self.x0, y0)
def __eq__(self, other):
if self.annihilator == other.annihilator:
if self.x == other.x:
if self._have_init_cond() and other._have_init_cond():
if self.x0 == other.x0 and self.y0 == other.y0:
return True
else:
return False
else:
return True
else:
return False
else:
return False
def __mul__(self, other):
ann_self = self.annihilator
if not isinstance(other, HolonomicFunction):
other = sympify(other)
if other.has(self.x):
                raise NotImplementedError("Cannot multiply a HolonomicFunction with expressions/functions.")
if not self._have_init_cond():
return self
else:
y0 = _extend_y0(self, ann_self.order)
y1 = []
for j in y0:
y1.append((Poly.new(j, self.x) * other).rep)
return HolonomicFunction(ann_self, self.x, self.x0, y1)
if self.annihilator.parent.base != other.annihilator.parent.base:
a, b = self.unify(other)
return a * b
ann_other = other.annihilator
list_self = []
list_other = []
a = ann_self.order
b = ann_other.order
R = ann_self.parent.base
K = R.get_field()
for j in ann_self.listofpoly:
list_self.append(K.new(j.rep))
for j in ann_other.listofpoly:
list_other.append(K.new(j.rep))
# will be used to reduce the degree
self_red = [-list_self[i] / list_self[a] for i in range(a)]
other_red = [-list_other[i] / list_other[b] for i in range(b)]
        # coeff_mul[i][j] is the coefficient of Dx^i(f) * Dx^j(g)
coeff_mul = [[K.zero for i in range(b + 1)] for j in range(a + 1)]
coeff_mul[0][0] = K.one
# making the ansatz
lin_sys_elements = [[coeff_mul[i][j] for i in range(a) for j in range(b)]]
lin_sys = DomainMatrix(lin_sys_elements, (1, a*b), K).transpose()
homo_sys = DomainMatrix.zeros((a*b, 1), K)
sol = _find_nonzero_solution(lin_sys, homo_sys)
# until a non trivial solution is found
while sol.is_zero_matrix:
# updating the coefficients Dx^i(f).Dx^j(g) for next degree
for i in range(a - 1, -1, -1):
for j in range(b - 1, -1, -1):
coeff_mul[i][j + 1] += coeff_mul[i][j]
coeff_mul[i + 1][j] += coeff_mul[i][j]
if isinstance(coeff_mul[i][j], K.dtype):
coeff_mul[i][j] = DMFdiff(coeff_mul[i][j])
else:
coeff_mul[i][j] = coeff_mul[i][j].diff(self.x)
# reduce the terms to lower power using annihilators of f, g
for i in range(a + 1):
if not coeff_mul[i][b].is_zero:
for j in range(b):
coeff_mul[i][j] += other_red[j] * \
coeff_mul[i][b]
coeff_mul[i][b] = K.zero
            # j only goes up to b - 1; coeff_mul[a][b] was already reduced in the previous loop
for j in range(b):
if not coeff_mul[a][j] == 0:
for i in range(a):
coeff_mul[i][j] += self_red[i] * \
coeff_mul[a][j]
coeff_mul[a][j] = K.zero
lin_sys_elements.append([coeff_mul[i][j] for i in range(a) for j in range(b)])
lin_sys = DomainMatrix(lin_sys_elements, (len(lin_sys_elements), a*b), K).transpose()
sol = _find_nonzero_solution(lin_sys, homo_sys)
sol_ann = _normalize(sol.flat(), self.annihilator.parent, negative=False)
if not (self._have_init_cond() and other._have_init_cond()):
return HolonomicFunction(sol_ann, self.x)
if self.is_singularics() == False and other.is_singularics() == False:
# if both the conditions are at same point
if self.x0 == other.x0:
# try to find more initial conditions
y0_self = _extend_y0(self, sol_ann.order)
y0_other = _extend_y0(other, sol_ann.order)
# h(x0) = f(x0) * g(x0)
y0 = [y0_self[0] * y0_other[0]]
# coefficient of Dx^j(f)*Dx^i(g) in Dx^i(fg)
for i in range(1, min(len(y0_self), len(y0_other))):
coeff = [[0 for i in range(i + 1)] for j in range(i + 1)]
for j in range(i + 1):
for k in range(i + 1):
if j + k == i:
coeff[j][k] = binomial(i, j)
sol = 0
for j in range(i + 1):
for k in range(i + 1):
sol += coeff[j][k]* y0_self[j] * y0_other[k]
y0.append(sol)
return HolonomicFunction(sol_ann, self.x, self.x0, y0)
            # if the points are different, bring the initial conditions to a common point
else:
selfat0 = self.annihilator.is_singular(0)
otherat0 = other.annihilator.is_singular(0)
if self.x0 == 0 and not selfat0 and not otherat0:
return self * other.change_ics(0)
elif other.x0 == 0 and not selfat0 and not otherat0:
return self.change_ics(0) * other
else:
selfatx0 = self.annihilator.is_singular(self.x0)
otheratx0 = other.annihilator.is_singular(self.x0)
if not selfatx0 and not otheratx0:
return self * other.change_ics(self.x0)
else:
return self.change_ics(other.x0) * other
if self.x0 != other.x0:
return HolonomicFunction(sol_ann, self.x)
# if the functions have singular_ics
y1 = None
y2 = None
if self.is_singularics() == False and other.is_singularics() == True:
_y0 = [j / factorial(i) for i, j in enumerate(self.y0)]
y1 = {S.Zero: _y0}
y2 = other.y0
elif self.is_singularics() == True and other.is_singularics() == False:
_y0 = [j / factorial(i) for i, j in enumerate(other.y0)]
y1 = self.y0
y2 = {S.Zero: _y0}
elif self.is_singularics() == True and other.is_singularics() == True:
y1 = self.y0
y2 = other.y0
y0 = {}
# multiply every possible pair of the series terms
for i in y1:
for j in y2:
k = min(len(y1[i]), len(y2[j]))
c = []
for a in range(k):
s = S.Zero
for b in range(a + 1):
s += y1[i][b] * y2[j][a - b]
c.append(s)
if not i + j in y0:
y0[i + j] = c
else:
y0[i + j] = [a + b for a, b in zip(c, y0[i + j])]
return HolonomicFunction(sol_ann, self.x, self.x0, y0)
__rmul__ = __mul__
def __sub__(self, other):
return self + other * -1
def __rsub__(self, other):
return self * -1 + other
def __neg__(self):
return -1 * self
def __truediv__(self, other):
return self * (S.One / other)
def __pow__(self, n):
if self.annihilator.order <= 1:
ann = self.annihilator
parent = ann.parent
if self.y0 is None:
y0 = None
else:
y0 = [list(self.y0)[0] ** n]
p0 = ann.listofpoly[0]
p1 = ann.listofpoly[1]
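            # f satisfies p0*f + p1*Dx(f) = 0, so f**n satisfies
            # n*p0*(f**n) + p1*Dx(f**n) = 0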
p0 = (Poly.new(p0, self.x) * n).rep
sol = [parent.base.to_sympy(i) for i in [p0, p1]]
dd = DifferentialOperator(sol, parent)
return HolonomicFunction(dd, self.x, self.x0, y0)
if n < 0:
raise NotHolonomicError("Negative Power on a Holonomic Function")
if n == 0:
Dx = self.annihilator.parent.derivative_operator
return HolonomicFunction(Dx, self.x, S.Zero, [S.One])
if n == 1:
return self
else:
if n % 2 == 1:
powreduce = self**(n - 1)
return powreduce * self
elif n % 2 == 0:
powreduce = self**(n / 2)
return powreduce * powreduce
def degree(self):
"""
Returns the highest power of `x` in the annihilator.
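        For example (an illustrative check):
        >>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
        >>> from sympy.polys.domains import QQ
        >>> from sympy import symbols
        >>> x = symbols('x')
        >>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
        >>> HolonomicFunction(x**2*Dx**2 + x*Dx + (x**2 - 1), x).degree()
        2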
"""
sol = [i.degree() for i in self.annihilator.listofpoly]
return max(sol)
def composition(self, expr, *args, **kwargs):
"""
        Returns the function obtained by composing a holonomic
        function with an algebraic function. The method cannot compute
        initial conditions for the result by itself, so they can also be
        provided.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(Dx - 1, x).composition(x**2, 0, [1]) # e^(x**2)
HolonomicFunction((-2*x) + (1)*Dx, x, 0, [1])
>>> HolonomicFunction(Dx**2 + 1, x).composition(x**2 - 1, 1, [1, 0])
HolonomicFunction((4*x**3) + (-1)*Dx + (x)*Dx**2, x, 1, [1, 0])
See Also
========
from_hyper()
"""
R = self.annihilator.parent
a = self.annihilator.order
diff = expr.diff(self.x)
listofpoly = self.annihilator.listofpoly
for i, j in enumerate(listofpoly):
if isinstance(j, self.annihilator.parent.base.dtype):
listofpoly[i] = self.annihilator.parent.base.to_sympy(j)
r = listofpoly[a].subs({self.x:expr})
subs = [-listofpoly[i].subs({self.x:expr}) / r for i in range (a)]
coeffs = [S.Zero for i in range(a)] # coeffs[i] == coeff of (D^i f)(a) in D^k (f(a))
coeffs[0] = S.One
system = [coeffs]
homogeneous = Matrix([[S.Zero for i in range(a)]]).transpose()
while True:
coeffs_next = [p.diff(self.x) for p in coeffs]
for i in range(a - 1):
coeffs_next[i + 1] += (coeffs[i] * diff)
for i in range(a):
coeffs_next[i] += (coeffs[-1] * subs[i] * diff)
coeffs = coeffs_next
# check for linear relations
system.append(coeffs)
sol, taus = (Matrix(system).transpose()
).gauss_jordan_solve(homogeneous)
if sol.is_zero_matrix is not True:
break
tau = list(taus)[0]
sol = sol.subs(tau, 1)
sol = _normalize(sol[0:], R, negative=False)
# if initial conditions are given for the resulting function
if args:
return HolonomicFunction(sol, self.x, args[0], args[1])
return HolonomicFunction(sol, self.x)
def to_sequence(self, lb=True):
r"""
Finds recurrence relation for the coefficients in the series expansion
of the function about :math:`x_0`, where :math:`x_0` is the point at
which the initial condition is stored.
Explanation
===========
If the point :math:`x_0` is ordinary, solution of the form :math:`[(R, n_0)]`
is returned. Where :math:`R` is the recurrence relation and :math:`n_0` is the
smallest ``n`` for which the recurrence holds true.
If the point :math:`x_0` is regular singular, a list of solutions in
the format :math:`(R, p, n_0)` is returned, i.e. `[(R, p, n_0), ... ]`.
Each tuple in this vector represents a recurrence relation :math:`R`
associated with a root of the indicial equation ``p``. Conditions of
a different format can also be provided in this case, see the
docstring of HolonomicFunction class.
        If it's not possible to numerically compute an initial condition,
it is returned as a symbol :math:`C_j`, denoting the coefficient of
:math:`(x - x_0)^j` in the power series about :math:`x_0`.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import QQ
>>> from sympy import symbols, S
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(Dx - 1, x, 0, [1]).to_sequence()
[(HolonomicSequence((-1) + (n + 1)Sn, n), u(0) = 1, 0)]
>>> HolonomicFunction((1 + x)*Dx**2 + Dx, x, 0, [0, 1]).to_sequence()
[(HolonomicSequence((n**2) + (n**2 + n)Sn, n), u(0) = 0, u(1) = 1, u(2) = -1/2, 2)]
>>> HolonomicFunction(-S(1)/2 + x*Dx, x, 0, {S(1)/2: [1]}).to_sequence()
[(HolonomicSequence((n), n), u(0) = 1, 1/2, 1)]
See Also
========
HolonomicFunction.series()
References
==========
.. [1] https://hal.inria.fr/inria-00070025/document
.. [2] http://www.risc.jku.at/publications/download/risc_2244/DIPLFORM.pdf
"""
if self.x0 != 0:
return self.shift_x(self.x0).to_sequence()
# check whether a power series exists if the point is singular
if self.annihilator.is_singular(self.x0):
return self._frobenius(lb=lb)
dict1 = {}
n = Symbol('n', integer=True)
dom = self.annihilator.parent.base.dom
R, _ = RecurrenceOperators(dom.old_poly_ring(n), 'Sn')
# substituting each term of the form `x^k Dx^j` in the
# annihilator, according to the formula below:
# x^k Dx^j = Sum(rf(n + 1 - k, j) * a(n + j - k) * x^n, (n, k, oo))
# for explanation see [2].
for i, j in enumerate(self.annihilator.listofpoly):
listofdmp = j.all_coeffs()
degree = len(listofdmp) - 1
for k in range(degree + 1):
coeff = listofdmp[degree - k]
if coeff == 0:
continue
if (i - k, k) in dict1:
dict1[(i - k, k)] += (dom.to_sympy(coeff) * rf(n - k + 1, i))
else:
dict1[(i - k, k)] = (dom.to_sympy(coeff) * rf(n - k + 1, i))
sol = []
keylist = [i[0] for i in dict1]
lower = min(keylist)
upper = max(keylist)
degree = self.degree()
# the recurrence relation holds for all values of
# n greater than smallest_n, i.e. n >= smallest_n
smallest_n = lower + degree
dummys = {}
eqs = []
unknowns = []
# an appropriate shift of the recurrence
for j in range(lower, upper + 1):
if j in keylist:
temp = S.Zero
for k in dict1.keys():
if k[0] == j:
temp += dict1[k].subs(n, n - lower)
sol.append(temp)
else:
sol.append(S.Zero)
# the recurrence relation
sol = RecurrenceOperator(sol, R)
# computing the initial conditions for recurrence
order = sol.order
all_roots = roots(R.base.to_sympy(sol.listofpoly[-1]), n, filter='Z')
all_roots = all_roots.keys()
if all_roots:
max_root = max(all_roots) + 1
smallest_n = max(max_root, smallest_n)
order += smallest_n
y0 = _extend_y0(self, order)
u0 = []
# u(n) = y^n(0)/factorial(n)
for i, j in enumerate(y0):
u0.append(j / factorial(i))
# if sufficient conditions can't be computed then
# try to use the series method i.e.
# equate the coefficients of x^k in the equation formed by
# substituting the series in differential equation, to zero.
if len(u0) < order:
for i in range(degree):
eq = S.Zero
for j in dict1:
if i + j[0] < 0:
dummys[i + j[0]] = S.Zero
elif i + j[0] < len(u0):
dummys[i + j[0]] = u0[i + j[0]]
elif not i + j[0] in dummys:
dummys[i + j[0]] = Symbol('C_%s' %(i + j[0]))
unknowns.append(dummys[i + j[0]])
if j[1] <= i:
eq += dict1[j].subs(n, i) * dummys[i + j[0]]
eqs.append(eq)
# solve the system of equations formed
soleqs = solve(eqs, *unknowns)
if isinstance(soleqs, dict):
for i in range(len(u0), order):
if i not in dummys:
dummys[i] = Symbol('C_%s' %i)
if dummys[i] in soleqs:
u0.append(soleqs[dummys[i]])
else:
u0.append(dummys[i])
if lb:
return [(HolonomicSequence(sol, u0), smallest_n)]
return [HolonomicSequence(sol, u0)]
for i in range(len(u0), order):
if i not in dummys:
dummys[i] = Symbol('C_%s' %i)
s = False
for j in soleqs:
if dummys[i] in j:
u0.append(j[dummys[i]])
s = True
if not s:
u0.append(dummys[i])
if lb:
return [(HolonomicSequence(sol, u0), smallest_n)]
return [HolonomicSequence(sol, u0)]
def _frobenius(self, lb=True):
# compute the roots of indicial equation
indicialroots = self._indicial()
reals = []
compl = []
for i in ordered(indicialroots.keys()):
if i.is_real:
reals.extend([i] * indicialroots[i])
else:
a, b = i.as_real_imag()
compl.extend([(i, a, b)] * indicialroots[i])
# sort the roots for a fixed ordering of solution
compl.sort(key=lambda x : x[1])
compl.sort(key=lambda x : x[2])
reals.sort()
        # group the roots; roots that differ by an integer are put in the same group.
grp = []
for i in reals:
intdiff = False
if len(grp) == 0:
grp.append([i])
continue
for j in grp:
if int(j[0] - i) == j[0] - i:
j.append(i)
intdiff = True
break
if not intdiff:
grp.append([i])
        # True if no two roots differ by an integer i.e.
        # each group has only one member
independent = True if all(len(i) == 1 for i in grp) else False
allpos = all(i >= 0 for i in reals)
allint = all(int(i) == i for i in reals)
# if initial conditions are provided
# then use them.
if self.is_singularics() == True:
rootstoconsider = []
for i in ordered(self.y0.keys()):
for j in ordered(indicialroots.keys()):
if j == i:
rootstoconsider.append(i)
elif allpos and allint:
rootstoconsider = [min(reals)]
elif independent:
rootstoconsider = [i[0] for i in grp] + [j[0] for j in compl]
elif not allint:
rootstoconsider = []
for i in reals:
if not int(i) == i:
rootstoconsider.append(i)
elif not allpos:
if not self._have_init_cond() or S(self.y0[0]).is_finite == False:
rootstoconsider = [min(reals)]
else:
posroots = []
for i in reals:
if i >= 0:
posroots.append(i)
rootstoconsider = [min(posroots)]
n = Symbol('n', integer=True)
dom = self.annihilator.parent.base.dom
R, _ = RecurrenceOperators(dom.old_poly_ring(n), 'Sn')
finalsol = []
char = ord('C')
for p in rootstoconsider:
dict1 = {}
for i, j in enumerate(self.annihilator.listofpoly):
listofdmp = j.all_coeffs()
degree = len(listofdmp) - 1
for k in range(degree + 1):
coeff = listofdmp[degree - k]
if coeff == 0:
continue
if (i - k, k - i) in dict1:
dict1[(i - k, k - i)] += (dom.to_sympy(coeff) * rf(n - k + 1 + p, i))
else:
dict1[(i - k, k - i)] = (dom.to_sympy(coeff) * rf(n - k + 1 + p, i))
sol = []
keylist = [i[0] for i in dict1]
lower = min(keylist)
upper = max(keylist)
degree = max([i[1] for i in dict1])
degree2 = min([i[1] for i in dict1])
smallest_n = lower + degree
dummys = {}
eqs = []
unknowns = []
for j in range(lower, upper + 1):
if j in keylist:
temp = S.Zero
for k in dict1.keys():
if k[0] == j:
temp += dict1[k].subs(n, n - lower)
sol.append(temp)
else:
sol.append(S.Zero)
# the recurrence relation
sol = RecurrenceOperator(sol, R)
# computing the initial conditions for recurrence
order = sol.order
all_roots = roots(R.base.to_sympy(sol.listofpoly[-1]), n, filter='Z')
all_roots = all_roots.keys()
if all_roots:
max_root = max(all_roots) + 1
smallest_n = max(max_root, smallest_n)
order += smallest_n
u0 = []
if self.is_singularics() == True:
u0 = self.y0[p]
elif self.is_singularics() == False and p >= 0 and int(p) == p and len(rootstoconsider) == 1:
y0 = _extend_y0(self, order + int(p))
# u(n) = y^n(0)/factorial(n)
if len(y0) > int(p):
for i in range(int(p), len(y0)):
u0.append(y0[i] / factorial(i))
if len(u0) < order:
for i in range(degree2, degree):
eq = S.Zero
for j in dict1:
if i + j[0] < 0:
dummys[i + j[0]] = S.Zero
elif i + j[0] < len(u0):
dummys[i + j[0]] = u0[i + j[0]]
elif not i + j[0] in dummys:
letter = chr(char) + '_%s' %(i + j[0])
dummys[i + j[0]] = Symbol(letter)
unknowns.append(dummys[i + j[0]])
if j[1] <= i:
eq += dict1[j].subs(n, i) * dummys[i + j[0]]
eqs.append(eq)
# solve the system of equations formed
soleqs = solve(eqs, *unknowns)
if isinstance(soleqs, dict):
for i in range(len(u0), order):
if i not in dummys:
letter = chr(char) + '_%s' %i
dummys[i] = Symbol(letter)
if dummys[i] in soleqs:
u0.append(soleqs[dummys[i]])
else:
u0.append(dummys[i])
if lb:
finalsol.append((HolonomicSequence(sol, u0), p, smallest_n))
continue
else:
finalsol.append((HolonomicSequence(sol, u0), p))
continue
for i in range(len(u0), order):
if i not in dummys:
letter = chr(char) + '_%s' %i
dummys[i] = Symbol(letter)
s = False
for j in soleqs:
if dummys[i] in j:
u0.append(j[dummys[i]])
s = True
if not s:
u0.append(dummys[i])
if lb:
finalsol.append((HolonomicSequence(sol, u0), p, smallest_n))
else:
finalsol.append((HolonomicSequence(sol, u0), p))
char += 1
return finalsol
def series(self, n=6, coefficient=False, order=True, _recur=None):
r"""
Finds the power series expansion of given holonomic function about :math:`x_0`.
Explanation
===========
        A list of series might be returned if :math:`x_0` is a regular singular
        point with multiple roots of the indicial equation.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(Dx - 1, x, 0, [1]).series() # e^x
1 + x + x**2/2 + x**3/6 + x**4/24 + x**5/120 + O(x**6)
>>> HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]).series(n=8) # sin(x)
x - x**3/6 + x**5/120 - x**7/5040 + O(x**8)
See Also
========
HolonomicFunction.to_sequence()
"""
if _recur is None:
recurrence = self.to_sequence()
else:
recurrence = _recur
if isinstance(recurrence, tuple) and len(recurrence) == 2:
recurrence = recurrence[0]
constantpower = 0
elif isinstance(recurrence, tuple) and len(recurrence) == 3:
constantpower = recurrence[1]
recurrence = recurrence[0]
elif len(recurrence) == 1 and len(recurrence[0]) == 2:
recurrence = recurrence[0][0]
constantpower = 0
elif len(recurrence) == 1 and len(recurrence[0]) == 3:
constantpower = recurrence[0][1]
recurrence = recurrence[0][0]
else:
sol = []
for i in recurrence:
sol.append(self.series(_recur=i))
return sol
n = n - int(constantpower)
l = len(recurrence.u0) - 1
k = recurrence.recurrence.order
x = self.x
x0 = self.x0
seq_dmp = recurrence.recurrence.listofpoly
R = recurrence.recurrence.parent.base
K = R.get_field()
seq = []
for i, j in enumerate(seq_dmp):
seq.append(K.new(j.rep))
sub = [-seq[i] / seq[k] for i in range(k)]
sol = [i for i in recurrence.u0]
if l + 1 >= n:
pass
else:
# use the initial conditions to find the next term
for i in range(l + 1 - k, n - k):
coeff = S.Zero
for j in range(k):
if i + j >= 0:
coeff += DMFsubs(sub[j], i) * sol[i + j]
sol.append(coeff)
if coefficient:
return sol
ser = S.Zero
for i, j in enumerate(sol):
ser += x**(i + constantpower) * j
if order:
ser += Order(x**(n + int(constantpower)), x)
if x0 != 0:
return ser.subs(x, x - x0)
return ser
def _indicial(self):
"""
Computes roots of the Indicial equation.
"""
if self.x0 != 0:
return self.shift_x(self.x0)._indicial()
list_coeff = self.annihilator.listofpoly
R = self.annihilator.parent.base
x = self.x
s = R.zero
y = R.one
def _pole_degree(poly):
root_all = roots(R.to_sympy(poly), x, filter='Z')
if 0 in root_all.keys():
return root_all[0]
else:
return 0
degree = [j.degree() for j in list_coeff]
degree = max(degree)
inf = 10 * (max(1, degree) + max(1, self.annihilator.order))
deg = lambda q: inf if q.is_zero else _pole_degree(q)
b = deg(list_coeff[0])
for j in range(1, len(list_coeff)):
b = min(b, deg(list_coeff[j]) - j)
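        # build the indicial polynomial: for each coefficient p_i of Dx**i take
        # the coefficient of x**(i + b) and multiply it by the falling factorial
        # x*(x - 1)*...*(x - i + 1)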
for i, j in enumerate(list_coeff):
listofdmp = j.all_coeffs()
degree = len(listofdmp) - 1
if - i - b <= 0 and degree - i - b >= 0:
s = s + listofdmp[degree - i - b] * y
y *= x - i
return roots(R.to_sympy(s), x)
def evalf(self, points, method='RK4', h=0.05, derivatives=False):
r"""
        Finds the numerical value of a holonomic function using numerical methods
        (RK4 by default). A list of points (real or complex) must be provided,
        which will be the path for the numerical integration.
Explanation
===========
The path should be given as a list :math:`[x_1, x_2, \dots x_n]`. The numerical
values will be computed at each point in this order
:math:`x_1 \rightarrow x_2 \rightarrow x_3 \dots \rightarrow x_n`.
Returns values of the function at :math:`x_1, x_2, \dots x_n` in a list.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
        A straight line on the real axis from 0 to 1
>>> r = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
Runge-Kutta 4th order on e^x from 0.1 to 1.
Exact solution at 1 is 2.71828182845905
>>> HolonomicFunction(Dx - 1, x, 0, [1]).evalf(r)
[1.10517083333333, 1.22140257085069, 1.34985849706254, 1.49182424008069,
1.64872063859684, 1.82211796209193, 2.01375162659678, 2.22553956329232,
2.45960141378007, 2.71827974413517]
Euler's method for the same
>>> HolonomicFunction(Dx - 1, x, 0, [1]).evalf(r, method='Euler')
[1.1, 1.21, 1.331, 1.4641, 1.61051, 1.771561, 1.9487171, 2.14358881,
2.357947691, 2.5937424601]
One can also observe that the value obtained using Runge-Kutta 4th order
is much more accurate than Euler's method.
"""
from sympy.holonomic.numerical import _evalf
lp = False
# if a point `b` is given instead of a mesh
if not hasattr(points, "__iter__"):
lp = True
b = S(points)
if self.x0 == b:
return _evalf(self, [b], method=method, derivatives=derivatives)[-1]
if not b.is_Number:
raise NotImplementedError
a = self.x0
if a > b:
h = -h
n = int((b - a) / h)
points = [a + h]
for i in range(n - 1):
points.append(points[-1] + h)
for i in roots(self.annihilator.parent.base.to_sympy(self.annihilator.listofpoly[-1]), self.x):
if i == self.x0 or i in points:
raise SingularityError(self, i)
if lp:
return _evalf(self, points, method=method, derivatives=derivatives)[-1]
return _evalf(self, points, method=method, derivatives=derivatives)
def change_x(self, z):
"""
Changes only the variable of Holonomic Function, for internal
purposes. For composition use HolonomicFunction.composition()
"""
dom = self.annihilator.parent.base.dom
R = dom.old_poly_ring(z)
parent, _ = DifferentialOperators(R, 'Dx')
sol = []
for j in self.annihilator.listofpoly:
sol.append(R(j.rep))
sol = DifferentialOperator(sol, parent)
return HolonomicFunction(sol, z, self.x0, self.y0)
def shift_x(self, a):
"""
Substitute `x + a` for `x`.
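        For example, shifting ``exp(x)`` leaves its constant-coefficient
        annihilator unchanged and moves the point of the initial conditions
        from ``x0`` to ``x0 - a`` (an illustrative sketch):
        >>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
        >>> from sympy.polys.domains import QQ
        >>> from sympy import symbols
        >>> x = symbols('x')
        >>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
        >>> HolonomicFunction(Dx - 1, x, 0, [1]).shift_x(1)  # doctest: +SKIP
        HolonomicFunction((-1) + (1)*Dx, x, -1, [1])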
"""
x = self.x
listaftershift = self.annihilator.listofpoly
base = self.annihilator.parent.base
sol = [base.from_sympy(base.to_sympy(i).subs(x, x + a)) for i in listaftershift]
sol = DifferentialOperator(sol, self.annihilator.parent)
x0 = self.x0 - a
if not self._have_init_cond():
return HolonomicFunction(sol, x)
return HolonomicFunction(sol, x, x0, self.y0)
def to_hyper(self, as_list=False, _recur=None):
r"""
Returns a hypergeometric function (or linear combination of them)
representing the given holonomic function.
Explanation
===========
Returns an answer of the form:
        :math:`a_1 \cdot x^{b_1} \cdot \text{hyper}() + a_2 \cdot x^{b_2} \cdot \text{hyper}() + \dots`
This is very useful as one can now use ``hyperexpand`` to find the
symbolic expressions/functions.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
>>> # sin(x)
>>> HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]).to_hyper()
x*hyper((), (3/2,), -x**2/4)
>>> # exp(x)
>>> HolonomicFunction(Dx - 1, x, 0, [1]).to_hyper()
hyper((), (), x)
See Also
========
from_hyper, from_meijerg
"""
if _recur is None:
recurrence = self.to_sequence()
else:
recurrence = _recur
if isinstance(recurrence, tuple) and len(recurrence) == 2:
smallest_n = recurrence[1]
recurrence = recurrence[0]
constantpower = 0
elif isinstance(recurrence, tuple) and len(recurrence) == 3:
smallest_n = recurrence[2]
constantpower = recurrence[1]
recurrence = recurrence[0]
elif len(recurrence) == 1 and len(recurrence[0]) == 2:
smallest_n = recurrence[0][1]
recurrence = recurrence[0][0]
constantpower = 0
elif len(recurrence) == 1 and len(recurrence[0]) == 3:
smallest_n = recurrence[0][2]
constantpower = recurrence[0][1]
recurrence = recurrence[0][0]
else:
sol = self.to_hyper(as_list=as_list, _recur=recurrence[0])
for i in recurrence[1:]:
sol += self.to_hyper(as_list=as_list, _recur=i)
return sol
u0 = recurrence.u0
r = recurrence.recurrence
x = self.x
x0 = self.x0
# order of the recurrence relation
m = r.order
        # when no recurrence exists and the power series has finitely many terms
if m == 0:
nonzeroterms = roots(r.parent.base.to_sympy(r.listofpoly[0]), recurrence.n, filter='R')
sol = S.Zero
for j, i in enumerate(nonzeroterms):
if i < 0 or int(i) != i:
continue
i = int(i)
if i < len(u0):
if isinstance(u0[i], (PolyElement, FracElement)):
u0[i] = u0[i].as_expr()
sol += u0[i] * x**i
else:
sol += Symbol('C_%s' %j) * x**i
if isinstance(sol, (PolyElement, FracElement)):
sol = sol.as_expr() * x**constantpower
else:
sol = sol * x**constantpower
if as_list:
if x0 != 0:
return [(sol.subs(x, x - x0), )]
return [(sol, )]
if x0 != 0:
return sol.subs(x, x - x0)
return sol
if smallest_n + m > len(u0):
raise NotImplementedError("Can't compute sufficient Initial Conditions")
# check if the recurrence represents a hypergeometric series
is_hyper = True
for i in range(1, len(r.listofpoly)-1):
if r.listofpoly[i] != r.parent.base.zero:
is_hyper = False
break
if not is_hyper:
raise NotHyperSeriesError(self, self.x0)
a = r.listofpoly[0]
b = r.listofpoly[-1]
        # the constant multiple in the argument of the hypergeometric function
if isinstance(a.rep[0], (PolyElement, FracElement)):
c = - (S(a.rep[0].as_expr()) * m**(a.degree())) / (S(b.rep[0].as_expr()) * m**(b.degree()))
else:
c = - (S(a.rep[0]) * m**(a.degree())) / (S(b.rep[0]) * m**(b.degree()))
sol = 0
arg1 = roots(r.parent.base.to_sympy(a), recurrence.n)
arg2 = roots(r.parent.base.to_sympy(b), recurrence.n)
# iterate through the initial conditions to find
# the hypergeometric representation of the given
# function.
# The answer will be a linear combination
        # of different hypergeometric series which satisfy
# the recurrence.
if as_list:
listofsol = []
for i in range(smallest_n + m):
# if the recurrence relation doesn't hold for `n = i`,
# then a Hypergeometric representation doesn't exist.
# add the algebraic term a * x**i to the solution,
# where a is u0[i]
if i < smallest_n:
if as_list:
listofsol.append(((S(u0[i]) * x**(i+constantpower)).subs(x, x-x0), ))
else:
sol += S(u0[i]) * x**i
continue
# if the coefficient u0[i] is zero, then the
            # independent hypergeometric series starting with
# x**i is not a part of the answer.
if S(u0[i]) == 0:
continue
ap = []
bq = []
# substitute m * n + i for n
for k in ordered(arg1.keys()):
ap.extend([nsimplify((i - k) / m)] * arg1[k])
for k in ordered(arg2.keys()):
bq.extend([nsimplify((i - k) / m)] * arg2[k])
# convention of (k + 1) in the denominator
if 1 in bq:
bq.remove(1)
else:
ap.append(1)
if as_list:
listofsol.append(((S(u0[i])*x**(i+constantpower)).subs(x, x-x0), (hyper(ap, bq, c*x**m)).subs(x, x-x0)))
else:
sol += S(u0[i]) * hyper(ap, bq, c * x**m) * x**i
if as_list:
return listofsol
sol = sol * x**constantpower
if x0 != 0:
return sol.subs(x, x - x0)
return sol
def to_expr(self):
"""
Converts a Holonomic Function back to elementary functions.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ
>>> from sympy import symbols, S
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(x**2*Dx**2 + x*Dx + (x**2 - 1), x, 0, [0, S(1)/2]).to_expr()
besselj(1, x)
>>> HolonomicFunction((1 + x)*Dx**3 + Dx**2, x, 0, [1, 1, 1]).to_expr()
x*log(x + 1) + log(x + 1) + 1
"""
return hyperexpand(self.to_hyper()).simplify()
def change_ics(self, b, lenics=None):
"""
Changes the point `x0` to ``b`` for initial conditions.
Examples
========
>>> from sympy.holonomic import expr_to_holonomic
>>> from sympy import symbols, sin, exp
>>> x = symbols('x')
>>> expr_to_holonomic(sin(x)).change_ics(1)
HolonomicFunction((1) + (1)*Dx**2, x, 1, [sin(1), cos(1)])
>>> expr_to_holonomic(exp(x)).change_ics(2)
HolonomicFunction((-1) + (1)*Dx, x, 2, [exp(2)])
"""
symbolic = True
if lenics is None and len(self.y0) > self.annihilator.order:
lenics = len(self.y0)
dom = self.annihilator.parent.base.domain
try:
sol = expr_to_holonomic(self.to_expr(), x=self.x, x0=b, lenics=lenics, domain=dom)
except (NotPowerSeriesError, NotHyperSeriesError):
symbolic = False
if symbolic and sol.x0 == b:
return sol
y0 = self.evalf(b, derivatives=True)
return HolonomicFunction(self.annihilator, self.x, b, y0)
def to_meijerg(self):
"""
Returns a linear combination of Meijer G-functions.
Examples
========
>>> from sympy.holonomic import expr_to_holonomic
>>> from sympy import sin, cos, hyperexpand, log, symbols
>>> x = symbols('x')
>>> hyperexpand(expr_to_holonomic(cos(x) + sin(x)).to_meijerg())
sin(x) + cos(x)
>>> hyperexpand(expr_to_holonomic(log(x)).to_meijerg()).simplify()
log(x)
See Also
========
to_hyper()
"""
# convert to hypergeometric first
rep = self.to_hyper(as_list=True)
sol = S.Zero
for i in rep:
if len(i) == 1:
sol += i[0]
elif len(i) == 2:
sol += i[0] * _hyper_to_meijerg(i[1])
return sol
def from_hyper(func, x0=0, evalf=False):
r"""
Converts a hypergeometric function to holonomic.
``func`` is the Hypergeometric Function and ``x0`` is the point at
which initial conditions are required.
Examples
========
>>> from sympy.holonomic.holonomic import from_hyper
>>> from sympy import symbols, hyper, S
>>> x = symbols('x')
>>> from_hyper(hyper([], [S(3)/2], x**2/4))
HolonomicFunction((-x) + (2)*Dx + (x)*Dx**2, x, 1, [sinh(1), -sinh(1) + cosh(1)])
"""
a = func.ap
b = func.bq
z = func.args[2]
x = z.atoms(Symbol).pop()
R, Dx = DifferentialOperators(QQ.old_poly_ring(x), 'Dx')
# generalized hypergeometric differential equation
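# The generalized hypergeometric function y = pFq(a; b; x) satisfies
#     [theta*prod(theta + b_j - 1) - x*prod(theta + a_i)] y = 0,  theta = x*Dx;
# dividing out an overall factor of x gives the operator built below:
#     sol = prod(x*Dx + a_i) - Dx*prod(x*Dx + b_j - 1)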
r1 = 1
for i in range(len(a)):
r1 = r1 * (x * Dx + a[i])
r2 = Dx
for i in range(len(b)):
r2 = r2 * (x * Dx + b[i] - 1)
sol = r1 - r2
simp = hyperexpand(func)
if simp in (Infinity, NegativeInfinity):
return HolonomicFunction(sol, x).composition(z)
def _find_conditions(simp, x, x0, order, evalf=False):
y0 = []
for i in range(order):
if evalf:
val = simp.subs(x, x0).evalf()
else:
val = simp.subs(x, x0)
# return None if the value is infinite or NaN
if val.is_finite is False or isinstance(val, NaN):
return None
y0.append(val)
simp = simp.diff(x)
return y0
# if the function is known symbolically
if not isinstance(simp, hyper):
y0 = _find_conditions(simp, x, x0, sol.order)
while not y0:
# if values don't exist at 0, then try to find initial
# conditions at 1. If it doesn't exist at 1 too then
# try 2 and so on.
x0 += 1
y0 = _find_conditions(simp, x, x0, sol.order)
return HolonomicFunction(sol, x).composition(z, x0, y0)
if isinstance(simp, hyper):
x0 = 1
# use evalf if the function can't be simplified
y0 = _find_conditions(simp, x, x0, sol.order, evalf)
while not y0:
x0 += 1
y0 = _find_conditions(simp, x, x0, sol.order, evalf)
return HolonomicFunction(sol, x).composition(z, x0, y0)
return HolonomicFunction(sol, x).composition(z)
def from_meijerg(func, x0=0, evalf=False, initcond=True, domain=QQ):
"""
Converts a Meijer G-function to Holonomic.
``func`` is the G-Function and ``x0`` is the point at
which initial conditions are required.
Examples
========
>>> from sympy.holonomic.holonomic import from_meijerg
>>> from sympy import symbols, meijerg, S
>>> x = symbols('x')
>>> from_meijerg(meijerg(([], []), ([S(1)/2], [0]), x**2/4))
HolonomicFunction((1) + (1)*Dx**2, x, 0, [0, 1/sqrt(pi)])
"""
a = func.ap
b = func.bq
n = len(func.an)
m = len(func.bm)
p = len(a)
z = func.args[2]
x = z.atoms(Symbol).pop()
R, Dx = DifferentialOperators(domain.old_poly_ring(x), 'Dx')
# compute the differential equation satisfied by the
# Meijer G-function.
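# A Meijer G-function G^{m,n}_{p,q}(a; b | x) satisfies the ODE
#     [(-1)**(m + n - p) * x * prod(x*Dx + 1 - a_i) - prod(x*Dx - b_j)] y = 0,
# which is exactly the operator r1 - r2 constructed below.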
mnp = (-1)**(m + n - p)
r1 = x * mnp
for i in range(len(a)):
r1 *= x * Dx + 1 - a[i]
r2 = 1
for i in range(len(b)):
r2 *= x * Dx - b[i]
sol = r1 - r2
if not initcond:
return HolonomicFunction(sol, x).composition(z)
simp = hyperexpand(func)
if simp in (Infinity, NegativeInfinity):
return HolonomicFunction(sol, x).composition(z)
def _find_conditions(simp, x, x0, order, evalf=False):
y0 = []
for i in range(order):
if evalf:
val = simp.subs(x, x0).evalf()
else:
val = simp.subs(x, x0)
if val.is_finite is False or isinstance(val, NaN):
return None
y0.append(val)
simp = simp.diff(x)
return y0
# computing initial conditions
if not isinstance(simp, meijerg):
y0 = _find_conditions(simp, x, x0, sol.order)
while not y0:
x0 += 1
y0 = _find_conditions(simp, x, x0, sol.order)
return HolonomicFunction(sol, x).composition(z, x0, y0)
if isinstance(simp, meijerg):
x0 = 1
y0 = _find_conditions(simp, x, x0, sol.order, evalf)
while not y0:
x0 += 1
y0 = _find_conditions(simp, x, x0, sol.order, evalf)
return HolonomicFunction(sol, x).composition(z, x0, y0)
return HolonomicFunction(sol, x).composition(z)
x_1 = Dummy('x_1')
_lookup_table = None
domain_for_table = None
from sympy.integrals.meijerint import _mytype
def expr_to_holonomic(func, x=None, x0=0, y0=None, lenics=None, domain=None, initcond=True):
"""
Converts a function or an expression to a holonomic function.
Parameters
==========
func:
The expression to be converted.
x:
variable for the function.
x0:
point at which initial condition must be computed.
y0:
One can optionally provide initial condition if the method
isn't able to do it automatically.
lenics:
Number of terms in the initial condition. By default it is
equal to the order of the annihilator.
domain:
Ground domain for the polynomials in ``x`` appearing as coefficients
in the annihilator.
initcond:
Set it false if you do not want the initial conditions to be computed.
Examples
========
>>> from sympy.holonomic.holonomic import expr_to_holonomic
>>> from sympy import sin, exp, symbols
>>> x = symbols('x')
>>> expr_to_holonomic(sin(x))
HolonomicFunction((1) + (1)*Dx**2, x, 0, [0, 1])
>>> expr_to_holonomic(exp(x))
HolonomicFunction((-1) + (1)*Dx, x, 0, [1])
See Also
========
sympy.integrals.meijerint._rewrite1, _convert_poly_rat_alg, _create_table
"""
func = sympify(func)
syms = func.free_symbols
if not x:
if len(syms) == 1:
x = syms.pop()
else:
raise ValueError("Specify the variable for the function")
elif x in syms:
syms.remove(x)
extra_syms = list(syms)
if domain is None:
if func.has(Float):
domain = RR
else:
domain = QQ
if len(extra_syms) != 0:
domain = domain[extra_syms].get_field()
# try to convert if the function is polynomial or rational
solpoly = _convert_poly_rat_alg(func, x, x0=x0, y0=y0, lenics=lenics, domain=domain, initcond=initcond)
if solpoly:
return solpoly
# create the lookup table
global _lookup_table, domain_for_table
if not _lookup_table:
domain_for_table = domain
_lookup_table = {}
_create_table(_lookup_table, domain=domain)
elif domain != domain_for_table:
domain_for_table = domain
_lookup_table = {}
_create_table(_lookup_table, domain=domain)
# use the table directly to convert to Holonomic
if func.is_Function:
f = func.subs(x, x_1)
t = _mytype(f, x_1)
if t in _lookup_table:
l = _lookup_table[t]
sol = l[0][1].change_x(x)
else:
sol = _convert_meijerint(func, x, initcond=False, domain=domain)
if not sol:
raise NotImplementedError
if y0:
sol.y0 = y0
if y0 or not initcond:
sol.x0 = x0
return sol
if not lenics:
lenics = sol.annihilator.order
_y0 = _find_conditions(func, x, x0, lenics)
while not _y0:
x0 += 1
_y0 = _find_conditions(func, x, x0, lenics)
return HolonomicFunction(sol.annihilator, x, x0, _y0)
if y0 or not initcond:
sol = sol.composition(func.args[0])
if y0:
sol.y0 = y0
sol.x0 = x0
return sol
if not lenics:
lenics = sol.annihilator.order
_y0 = _find_conditions(func, x, x0, lenics)
while not _y0:
x0 += 1
_y0 = _find_conditions(func, x, x0, lenics)
return sol.composition(func.args[0], x0, _y0)
# iterate through the expression recursively
args = func.args
f = func.func
sol = expr_to_holonomic(args[0], x=x, initcond=False, domain=domain)
if f is Add:
for i in range(1, len(args)):
sol += expr_to_holonomic(args[i], x=x, initcond=False, domain=domain)
elif f is Mul:
for i in range(1, len(args)):
sol *= expr_to_holonomic(args[i], x=x, initcond=False, domain=domain)
elif f is Pow:
sol = sol**args[1]
sol.x0 = x0
if not sol:
raise NotImplementedError
if y0:
sol.y0 = y0
if y0 or not initcond:
return sol
if sol.y0:
return sol
if not lenics:
lenics = sol.annihilator.order
if sol.annihilator.is_singular(x0):
r = sol._indicial()
l = list(r)
if len(r) == 1 and r[l[0]] == S.One:
r = l[0]
g = func / (x - x0)**r
singular_ics = _find_conditions(g, x, x0, lenics)
singular_ics = [j / factorial(i) for i, j in enumerate(singular_ics)]
y0 = {r:singular_ics}
return HolonomicFunction(sol.annihilator, x, x0, y0)
_y0 = _find_conditions(func, x, x0, lenics)
while not _y0:
x0 += 1
_y0 = _find_conditions(func, x, x0, lenics)
return HolonomicFunction(sol.annihilator, x, x0, _y0)
## Some helper functions ##
def _normalize(list_of, parent, negative=True):
"""
Normalize a given annihilator: clear denominators from its coefficients and
divide out the gcd of the numerators so that all coefficients are polynomials.
"""
num = []
denom = []
base = parent.base
K = base.get_field()
lcm_denom = base.from_sympy(S.One)
list_of_coeff = []
# convert polynomials to the elements of associated
# fraction field
for i, j in enumerate(list_of):
if isinstance(j, base.dtype):
list_of_coeff.append(K.new(j.rep))
elif not isinstance(j, K.dtype):
list_of_coeff.append(K.from_sympy(sympify(j)))
else:
list_of_coeff.append(j)
# corresponding numerators of the sequence of polynomials
num.append(list_of_coeff[i].numer())
# corresponding denominators
denom.append(list_of_coeff[i].denom())
# lcm of denominators in the coefficients
for i in denom:
lcm_denom = i.lcm(lcm_denom)
if negative:
lcm_denom = -lcm_denom
lcm_denom = K.new(lcm_denom.rep)
# multiply the coefficients with lcm
for i, j in enumerate(list_of_coeff):
list_of_coeff[i] = j * lcm_denom
gcd_numer = base((list_of_coeff[-1].numer() / list_of_coeff[-1].denom()).rep)
# gcd of numerators in the coefficients
for i in num:
gcd_numer = i.gcd(gcd_numer)
gcd_numer = K.new(gcd_numer.rep)
# divide all the coefficients by the gcd
for i, j in enumerate(list_of_coeff):
frac_ans = j / gcd_numer
list_of_coeff[i] = base((frac_ans.numer() / frac_ans.denom()).rep)
return DifferentialOperator(list_of_coeff, parent)
def _derivate_diff_eq(listofpoly):
"""
Let a0(x)y(x) + a1(x)y'(x) + ... + ak(x)y^(k)(x) = 0 be a differential
equation, where a0, a1, ... are polynomials or rational functions. The
function returns b0, b1, ..., b(k+1) such that differentiating the
equation once gives b0(x)y(x) + b1(x)y'(x) + ... + b(k+1)(x)y^(k+1)(x) = 0,
i.e. b0 = a0', bi = a(i-1) + ai' for 1 <= i <= k, and b(k+1) = ak.
"""
sol = []
a = len(listofpoly) - 1
sol.append(DMFdiff(listofpoly[0]))
for i, j in enumerate(listofpoly[1:]):
sol.append(DMFdiff(j) + listofpoly[i])
sol.append(listofpoly[a])
return sol
def _hyper_to_meijerg(func):
"""
Converts a ``hyper`` function to an equivalent ``meijerg`` function.
"""
ap = func.ap
bq = func.bq
ispoly = any(i <= 0 and int(i) == i for i in ap)
if ispoly:
return hyperexpand(func)
z = func.args[2]
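# Slater's identity (used for the non-polynomial cases handled here):
#     pFq(a; b; z) = [prod(gamma(b_j)) / prod(gamma(a_i))]
#                    * G^{1,p}_{p,q+1}((1 - a_i), (); (0), (1 - b_j) | -z)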
# parameters of the `meijerg` function.
an = (1 - i for i in ap)
anp = ()
bm = (S.Zero, )
bmq = (1 - i for i in bq)
k = S.One
for i in bq:
k = k * gamma(i)
for i in ap:
k = k / gamma(i)
return k * meijerg(an, anp, bm, bmq, -z)
def _add_lists(list1, list2):
"""Takes polynomial sequences of two annihilators a and b and returns
the list of polynomials of sum of a and b.
"""
if len(list1) <= len(list2):
sol = [a + b for a, b in zip(list1, list2)] + list2[len(list1):]
else:
sol = [a + b for a, b in zip(list1, list2)] + list1[len(list2):]
return sol
def _extend_y0(Holonomic, n):
"""
Tries to find more initial conditions by substituting the initial
value point in the differential equation.
"""
if Holonomic.annihilator.is_singular(Holonomic.x0) or Holonomic.is_singularics() == True:
return Holonomic.y0
annihilator = Holonomic.annihilator
a = annihilator.order
listofpoly = []
y0 = Holonomic.y0
R = annihilator.parent.base
K = R.get_field()
for i, j in enumerate(annihilator.listofpoly):
if isinstance(j, annihilator.parent.base.dtype):
listofpoly.append(K.new(j.rep))
if len(y0) < a or n <= len(y0):
return y0
else:
list_red = [-listofpoly[i] / listofpoly[a]
for i in range(a)]
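# the annihilator gives p_0*y + p_1*y' + ... + p_a*y^(a) = 0, hence
#     y^(a)(x0) = -sum_{i < a} (p_i(x0)/p_a(x0)) * y^(i)(x0);
# differentiating this relation repeatedly yields higher derivatives.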
if len(y0) > a:
y1 = [y0[i] for i in range(a)]
else:
y1 = [i for i in y0]
for i in range(n - a):
sol = 0
for a, b in zip(y1, list_red):
r = DMFsubs(b, Holonomic.x0)
if not getattr(r, 'is_finite', True):
return y0
if isinstance(r, (PolyElement, FracElement)):
r = r.as_expr()
sol += a * r
y1.append(sol)
list_red = _derivate_diff_eq(list_red)
return y0 + y1[len(y0):]
def DMFdiff(frac):
# differentiate a DMF object represented as p/q
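# quotient rule: (p/q)' = (p'*q - p*q')/q**2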
if not isinstance(frac, DMF):
return frac.diff()
K = frac.ring
p = K.numer(frac)
q = K.denom(frac)
sol_num = - p * q.diff() + q * p.diff()
sol_denom = q**2
return K((sol_num.rep, sol_denom.rep))
def DMFsubs(frac, x0, mpm=False):
# substitute the point x0 into a DMF object of the form p/q
if not isinstance(frac, DMF):
return frac
p = frac.num
q = frac.den
sol_p = S.Zero
sol_q = S.Zero
if mpm:
from mpmath import mp
for i, j in enumerate(reversed(p)):
if mpm:
j = sympify(j)._to_mpmath(mp.prec)
sol_p += j * x0**i
for i, j in enumerate(reversed(q)):
if mpm:
j = sympify(j)._to_mpmath(mp.prec)
sol_q += j * x0**i
if isinstance(sol_p, (PolyElement, FracElement)):
sol_p = sol_p.as_expr()
if isinstance(sol_q, (PolyElement, FracElement)):
sol_q = sol_q.as_expr()
return sol_p / sol_q
def _convert_poly_rat_alg(func, x, x0=0, y0=None, lenics=None, domain=QQ, initcond=True):
"""
Converts polynomials, rationals and algebraic functions to holonomic.
"""
ispoly = func.is_polynomial()
if not ispoly:
israt = func.is_rational_function()
else:
israt = True
if not (ispoly or israt):
basepoly, ratexp = func.as_base_exp()
if basepoly.is_polynomial() and ratexp.is_Number:
if isinstance(ratexp, Float):
ratexp = nsimplify(ratexp)
m, n = ratexp.p, ratexp.q
is_alg = True
else:
is_alg = False
else:
is_alg = True
if not (ispoly or israt or is_alg):
return None
R = domain.old_poly_ring(x)
_, Dx = DifferentialOperators(R, 'Dx')
# if the function is constant
if not func.has(x):
return HolonomicFunction(Dx, x, 0, [func])
if ispoly:
# differential equation satisfied by polynomial
sol = func * Dx - func.diff(x)
sol = _normalize(sol.listofpoly, sol.parent, negative=False)
is_singular = sol.is_singular(x0)
# try to compute the conditions for singular points
if y0 is None and x0 == 0 and is_singular:
rep = R.from_sympy(func).rep
for i, j in enumerate(reversed(rep)):
if j == 0:
continue
else:
coeff = list(reversed(rep))[i:]
indicial = i
break
for i, j in enumerate(coeff):
if isinstance(j, (PolyElement, FracElement)):
coeff[i] = j.as_expr()
y0 = {indicial: S(coeff)}
elif israt:
p, q = func.as_numer_denom()
# differential equation satisfied by rational
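# y = p/q satisfies p*q*y' + (p*q' - q*p')*y = 0 (since y' = (p'*q - p*q')/q**2)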
sol = p * q * Dx + p * q.diff(x) - q * p.diff(x)
sol = _normalize(sol.listofpoly, sol.parent, negative=False)
elif is_alg:
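# y = x**(m/n) satisfies (n*x/m)*y' - y = 0; composing with ``basepoly``
# below annihilates basepoly**(m/n)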
sol = n * (x / m) * Dx - 1
sol = HolonomicFunction(sol, x).composition(basepoly).annihilator
is_singular = sol.is_singular(x0)
# try to compute the conditions for singular points
if y0 is None and x0 == 0 and is_singular and \
(lenics is None or lenics <= 1):
rep = R.from_sympy(basepoly).rep
for i, j in enumerate(reversed(rep)):
if j == 0:
continue
if isinstance(j, (PolyElement, FracElement)):
j = j.as_expr()
coeff = S(j)**ratexp
indicial = S(i) * ratexp
break
if isinstance(coeff, (PolyElement, FracElement)):
coeff = coeff.as_expr()
y0 = {indicial: S([coeff])}
if y0 or not initcond:
return HolonomicFunction(sol, x, x0, y0)
if not lenics:
lenics = sol.order
if sol.is_singular(x0):
r = HolonomicFunction(sol, x, x0)._indicial()
l = list(r)
if len(r) == 1 and r[l[0]] == S.One:
r = l[0]
g = func / (x - x0)**r
singular_ics = _find_conditions(g, x, x0, lenics)
singular_ics = [j / factorial(i) for i, j in enumerate(singular_ics)]
y0 = {r:singular_ics}
return HolonomicFunction(sol, x, x0, y0)
y0 = _find_conditions(func, x, x0, lenics)
while not y0:
x0 += 1
y0 = _find_conditions(func, x, x0, lenics)
return HolonomicFunction(sol, x, x0, y0)
def _convert_meijerint(func, x, initcond=True, domain=QQ):
args = meijerint._rewrite1(func, x)
if args:
fac, po, g, _ = args
else:
return None
# lists for sum of meijerg functions
fac_list = [fac * i[0] for i in g]
t = po.as_base_exp()
s = t[1] if t[0] == x else S.Zero
po_list = [s + i[1] for i in g]
G_list = [i[2] for i in g]
# finds meijerg representation of x**s * meijerg(a1 ... ap, b1 ... bq, z)
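# uses the shift property z**r * G(a_p; b_q | z) = G(a_p + r; b_q + r | z):
# writing z = a*x**b and r = s/b gives x**s = a**(-r) * z**r, so
# x**s * G(z) = a**(-r) * G(a_p + r; b_q + r | z)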
def _shift(func, s):
z = func.args[-1]
if z.has(I):
z = z.subs(exp_polar, exp)
d = z.collect(x, evaluate=False)
b = list(d)[0]
a = d[b]
t = b.as_base_exp()
b = t[1] if t[0] == x else S.Zero
r = s / b
an = (i + r for i in func.args[0][0])
ap = (i + r for i in func.args[0][1])
bm = (i + r for i in func.args[1][0])
bq = (i + r for i in func.args[1][1])
return a**-r, meijerg((an, ap), (bm, bq), z)
coeff, m = _shift(G_list[0], po_list[0])
sol = fac_list[0] * coeff * from_meijerg(m, initcond=initcond, domain=domain)
# add all the meijerg functions after converting to holonomic
for i in range(1, len(G_list)):
coeff, m = _shift(G_list[i], po_list[i])
sol += fac_list[i] * coeff * from_meijerg(m, initcond=initcond, domain=domain)
return sol
def _create_table(table, domain=QQ):
"""
Creates the look-up table. For a similar implementation
see meijerint._create_lookup_table.
"""
def add(formula, annihilator, arg, x0=0, y0=()):
"""
Adds a formula to the lookup table
"""
table.setdefault(_mytype(formula, x_1), []).append((formula,
HolonomicFunction(annihilator, arg, x0, y0)))
R = domain.old_poly_ring(x_1)
_, Dx = DifferentialOperators(R, 'Dx')
# add some basic functions
add(sin(x_1), Dx**2 + 1, x_1, 0, [0, 1])
add(cos(x_1), Dx**2 + 1, x_1, 0, [1, 0])
add(exp(x_1), Dx - 1, x_1, 0, 1)
add(log(x_1), Dx + x_1*Dx**2, x_1, 1, [0, 1])
add(erf(x_1), 2*x_1*Dx + Dx**2, x_1, 0, [0, 2/sqrt(pi)])
add(erfc(x_1), 2*x_1*Dx + Dx**2, x_1, 0, [1, -2/sqrt(pi)])
add(erfi(x_1), -2*x_1*Dx + Dx**2, x_1, 0, [0, 2/sqrt(pi)])
add(sinh(x_1), Dx**2 - 1, x_1, 0, [0, 1])
add(cosh(x_1), Dx**2 - 1, x_1, 0, [1, 0])
add(sinc(x_1), x_1 + 2*Dx + x_1*Dx**2, x_1)
add(Si(x_1), x_1*Dx + 2*Dx**2 + x_1*Dx**3, x_1)
add(Ci(x_1), x_1*Dx + 2*Dx**2 + x_1*Dx**3, x_1)
add(Shi(x_1), -x_1*Dx + 2*Dx**2 + x_1*Dx**3, x_1)
def _find_conditions(func, x, x0, order):
y0 = []
for i in range(order):
val = func.subs(x, x0)
if isinstance(val, NaN):
val = limit(func, x, x0)
if val.is_finite is False or isinstance(val, NaN):
return None
y0.append(val)
func = func.diff(x)
return y0
|
248c7fe281e515ccb34a4ed2df2c436d48eee282f69bd48cbdc6d824f3cdb4ec | from sympy.printing import pycode, ccode, fcode
from sympy.external import import_module
from sympy.utilities.decorator import doctest_depends_on
lfortran = import_module('lfortran')
cin = import_module('clang.cindex', import_kwargs = {'fromlist': ['cindex']})
if lfortran:
from sympy.parsing.fortran.fortran_parser import src_to_sympy
if cin:
from sympy.parsing.c.c_parser import parse_c
@doctest_depends_on(modules=['lfortran', 'clang.cindex'])
class SymPyExpression: # type: ignore
"""Class to store and handle SymPy expressions
This class will hold SymPy Expressions and handle the API for the
conversion to and from different languages.
It works with the C and the Fortran Parser to generate SymPy expressions
which are stored here and which can be converted to multiple language's
source code.
Notes
=====
The module and its API are currently under development and experimental
and can be changed during development.
The Fortran parser does not support numeric assignments, so all the
variables have been initialized to zero.
The module also depends on external dependencies:
- LFortran which is required to use the Fortran parser
- Clang which is required for the C parser
Examples
========
Example of parsing C code:
>>> from sympy.parsing.sym_expr import SymPyExpression
>>> src = '''
... int a,b;
... float c = 2, d =4;
... '''
>>> a = SymPyExpression(src, 'c')
>>> a.return_expr()
[Declaration(Variable(a, type=intc)),
Declaration(Variable(b, type=intc)),
Declaration(Variable(c, type=float32, value=2.0)),
Declaration(Variable(d, type=float32, value=4.0))]
An example of variable definition:
>>> from sympy.parsing.sym_expr import SymPyExpression
>>> src2 = '''
... integer :: a, b, c, d
... real :: p, q, r, s
... '''
>>> p = SymPyExpression()
>>> p.convert_to_expr(src2, 'f')
>>> p.convert_to_c()
['int a = 0', 'int b = 0', 'int c = 0', 'int d = 0', 'double p = 0.0', 'double q = 0.0', 'double r = 0.0', 'double s = 0.0']
An example of Assignment:
>>> from sympy.parsing.sym_expr import SymPyExpression
>>> src3 = '''
... integer :: a, b, c, d, e
... d = a + b - c
... e = b * d + c * e / a
... '''
>>> p = SymPyExpression(src3, 'f')
>>> p.convert_to_python()
['a = 0', 'b = 0', 'c = 0', 'd = 0', 'e = 0', 'd = a + b - c', 'e = b*d + c*e/a']
An example of function definition:
>>> from sympy.parsing.sym_expr import SymPyExpression
>>> src = '''
... integer function f(a,b)
... integer, intent(in) :: a, b
... integer :: r
... end function
... '''
>>> a = SymPyExpression(src, 'f')
>>> a.convert_to_python()
['def f(a, b):\\n f = 0\\n r = 0\\n return f']
"""
def __init__(self, source_code = None, mode = None):
"""Constructor for SymPyExpression class"""
super().__init__()
if not(mode or source_code):
self._expr = []
elif mode:
if source_code:
if mode.lower() == 'f':
if lfortran:
self._expr = src_to_sympy(source_code)
else:
raise ImportError("LFortran is not installed, cannot parse Fortran code")
elif mode.lower() == 'c':
if cin:
self._expr = parse_c(source_code)
else:
raise ImportError("Clang is not installed, cannot parse C code")
else:
raise NotImplementedError(
'Parser for specified language is not implemented'
)
else:
raise ValueError('Source code not present')
else:
raise ValueError('Please specify a mode for conversion')
def convert_to_expr(self, src_code, mode):
"""Converts the given source code to SymPy Expressions
Parameters
==========
src_code : String
the source code or filename of the source code that is to be
converted
mode: String
the mode to determine which parser is to be used according to
the language of the source code
f or F for Fortran
c or C for C/C++
Examples
========
>>> from sympy.parsing.sym_expr import SymPyExpression
>>> src3 = '''
... integer function f(a,b) result(r)
... integer, intent(in) :: a, b
... integer :: x
... r = a + b -x
... end function
... '''
>>> p = SymPyExpression()
>>> p.convert_to_expr(src3, 'f')
>>> p.return_expr()
[FunctionDefinition(integer, name=f, parameters=(Variable(a), Variable(b)), body=CodeBlock(
Declaration(Variable(r, type=integer, value=0)),
Declaration(Variable(x, type=integer, value=0)),
Assignment(Variable(r), a + b - x),
Return(Variable(r))
))]
"""
if mode.lower() == 'f':
if lfortran:
self._expr = src_to_sympy(src_code)
else:
raise ImportError("LFortran is not installed, cannot parse Fortran code")
elif mode.lower() == 'c':
if cin:
self._expr = parse_c(src_code)
else:
raise ImportError("Clang is not installed, cannot parse C code")
else:
raise NotImplementedError(
"Parser for specified language has not been implemented"
)
def convert_to_python(self):
"""Returns a list with Python code for the SymPy expressions
Examples
========
>>> from sympy.parsing.sym_expr import SymPyExpression
>>> src2 = '''
... integer :: a, b, c, d
... real :: p, q, r, s
... c = a/b
... d = c/a
... s = p/q
... r = q/p
... '''
>>> p = SymPyExpression(src2, 'f')
>>> p.convert_to_python()
['a = 0', 'b = 0', 'c = 0', 'd = 0', 'p = 0.0', 'q = 0.0', 'r = 0.0', 's = 0.0', 'c = a/b', 'd = c/a', 's = p/q', 'r = q/p']
"""
self._pycode = []
for iter in self._expr:
self._pycode.append(pycode(iter))
return self._pycode
def convert_to_c(self):
"""Returns a list with the c source code for the SymPy expressions
Examples
========
>>> from sympy.parsing.sym_expr import SymPyExpression
>>> src2 = '''
... integer :: a, b, c, d
... real :: p, q, r, s
... c = a/b
... d = c/a
... s = p/q
... r = q/p
... '''
>>> p = SymPyExpression()
>>> p.convert_to_expr(src2, 'f')
>>> p.convert_to_c()
['int a = 0', 'int b = 0', 'int c = 0', 'int d = 0', 'double p = 0.0', 'double q = 0.0', 'double r = 0.0', 'double s = 0.0', 'c = a/b;', 'd = c/a;', 's = p/q;', 'r = q/p;']
"""
self._ccode = []
for iter in self._expr:
self._ccode.append(ccode(iter))
return self._ccode
def convert_to_fortran(self):
"""Returns a list with the fortran source code for the SymPy expressions
Examples
========
>>> from sympy.parsing.sym_expr import SymPyExpression
>>> src2 = '''
... integer :: a, b, c, d
... real :: p, q, r, s
... c = a/b
... d = c/a
... s = p/q
... r = q/p
... '''
>>> p = SymPyExpression(src2, 'f')
>>> p.convert_to_fortran()
[' integer*4 a', ' integer*4 b', ' integer*4 c', ' integer*4 d', ' real*8 p', ' real*8 q', ' real*8 r', ' real*8 s', ' c = a/b', ' d = c/a', ' s = p/q', ' r = q/p']
"""
self._fcode = []
for iter in self._expr:
self._fcode.append(fcode(iter))
return self._fcode
def return_expr(self):
"""Returns the expression list
Examples
========
>>> from sympy.parsing.sym_expr import SymPyExpression
>>> src3 = '''
... integer function f(a,b)
... integer, intent(in) :: a, b
... integer :: r
... r = a+b
... f = r
... end function
... '''
>>> p = SymPyExpression()
>>> p.convert_to_expr(src3, 'f')
>>> p.return_expr()
[FunctionDefinition(integer, name=f, parameters=(Variable(a), Variable(b)), body=CodeBlock(
Declaration(Variable(f, type=integer, value=0)),
Declaration(Variable(r, type=integer, value=0)),
Assignment(Variable(f), Variable(r)),
Return(Variable(f))
))]
"""
return self._expr
|
0e14e75d148071e8a3c80d4e026f3a73a4f25bdf9a79456864410b1aed2866d6 | import re
from sympy.concrete.products import product
from sympy.concrete.summations import Sum
from sympy.core.sympify import sympify
from sympy.functions.elementary.trigonometric import (cos, sin)
class MaximaHelpers:
def maxima_expand(expr):
return expr.expand()
def maxima_float(expr):
return expr.evalf()
def maxima_trigexpand(expr):
return expr.expand(trig=True)
def maxima_sum(a1, a2, a3, a4):
return Sum(a1, (a2, a3, a4)).doit()
def maxima_product(a1, a2, a3, a4):
return product(a1, (a2, a3, a4))
def maxima_csc(expr):
return 1/sin(expr)
def maxima_sec(expr):
return 1/cos(expr)
sub_dict = {
'pi': re.compile(r'%pi'),
'E': re.compile(r'%e'),
'I': re.compile(r'%i'),
'**': re.compile(r'\^'),
'oo': re.compile(r'\binf\b'),
'-oo': re.compile(r'\bminf\b'),
"'-'": re.compile(r'\bminus\b'),
'maxima_expand': re.compile(r'\bexpand\b'),
'maxima_float': re.compile(r'\bfloat\b'),
'maxima_trigexpand': re.compile(r'\btrigexpand'),
'maxima_sum': re.compile(r'\bsum\b'),
'maxima_product': re.compile(r'\bproduct\b'),
'cancel': re.compile(r'\bratsimp\b'),
'maxima_csc': re.compile(r'\bcsc\b'),
'maxima_sec': re.compile(r'\bsec\b')
}
var_name = re.compile(r'^\s*(\w+)\s*:')
def parse_maxima(str, globals=None, name_dict={}):
str = str.strip()
str = str.rstrip('; ')
for k, v in sub_dict.items():
str = v.sub(k, str)
assign_var = None
var_match = var_name.search(str)
if var_match:
assign_var = var_match.group(1)
str = str[var_match.end():].strip()
dct = MaximaHelpers.__dict__.copy()
dct.update(name_dict)
obj = sympify(str, locals=dct)
if assign_var and globals:
globals[assign_var] = obj
return obj
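# A rough usage sketch (illustrative only, not a doctest):
#   parse_maxima('2*%pi + x^2')          -> 2*pi + x**2
#   parse_maxima('csc(%pi/2)')           -> 1  (via maxima_csc -> 1/sin)
#   parse_maxima('a: %e^2', globals())   -> exp(2), and also binds ``a`` in the
#                                           supplied ``globals`` dictionary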
|
5a08f6f74c4ea41f49aa4693ced57882d62b409ca57d5210dc906007f3a5d3c7 | from typing import Any, Dict as tDict, Tuple as tTuple
from itertools import product
import re
from sympy.core.sympify import sympify
def mathematica(s, additional_translations=None):
'''
Users can add their own translation dictionary;
a variable-length argument needs the '*' character.
Examples
========
>>> from sympy.parsing.mathematica import mathematica
>>> mathematica('Log3[9]', {'Log3[x]':'log(x,3)'})
2
>>> mathematica('F[7,5,3]', {'F[*x]':'Max(*x)*Min(*x)'})
21
'''
parser = MathematicaParser(additional_translations)
return sympify(parser.parse(s))
def _deco(cls):
cls._initialize_class()
return cls
@_deco
class MathematicaParser:
'''An instance of this class converts a string containing a basic
Mathematica expression to SymPy style. The output is a string.'''
# left: Mathematica, right: SymPy
CORRESPONDENCES = {
'Sqrt[x]': 'sqrt(x)',
'Exp[x]': 'exp(x)',
'Log[x]': 'log(x)',
'Log[x,y]': 'log(y,x)',
'Log2[x]': 'log(x,2)',
'Log10[x]': 'log(x,10)',
'Mod[x,y]': 'Mod(x,y)',
'Max[*x]': 'Max(*x)',
'Min[*x]': 'Min(*x)',
'Pochhammer[x,y]':'rf(x,y)',
'ArcTan[x,y]':'atan2(y,x)',
'ExpIntegralEi[x]': 'Ei(x)',
'SinIntegral[x]': 'Si(x)',
'CosIntegral[x]': 'Ci(x)',
'AiryAi[x]': 'airyai(x)',
'AiryAiPrime[x]': 'airyaiprime(x)',
'AiryBi[x]' :'airybi(x)',
'AiryBiPrime[x]' :'airybiprime(x)',
'LogIntegral[x]':' li(x)',
'PrimePi[x]': 'primepi(x)',
'Prime[x]': 'prime(x)',
'PrimeQ[x]': 'isprime(x)'
}
# trigonometric functions, etc.
for arc, tri, h in product(('', 'Arc'), (
'Sin', 'Cos', 'Tan', 'Cot', 'Sec', 'Csc'), ('', 'h')):
fm = arc + tri + h + '[x]'
if arc: # arc func
fs = 'a' + tri.lower() + h + '(x)'
else: # non-arc func
fs = tri.lower() + h + '(x)'
CORRESPONDENCES.update({fm: fs})
REPLACEMENTS = {
' ': '',
'^': '**',
'{': '[',
'}': ']',
}
RULES = {
# a single whitespace to '*'
'whitespace': (
re.compile(r'''
(?<=[a-zA-Z\d]) # a letter or a number
\ # a whitespace
(?=[a-zA-Z\d]) # a letter or a number
''', re.VERBOSE),
'*'),
# add omitted '*' character
'add*_1': (
re.compile(r'''
(?<=[])\d]) # ], ) or a number
# ''
(?=[(a-zA-Z]) # ( or a single letter
''', re.VERBOSE),
'*'),
# add omitted '*' character (variable letter preceding)
'add*_2': (
re.compile(r'''
(?<=[a-zA-Z]) # a letter
\( # ( as a character
(?=.) # any characters
''', re.VERBOSE),
'*('),
# convert 'Pi' to 'pi'
'Pi': (
re.compile(r'''
(?:
\A|(?<=[^a-zA-Z])
)
Pi # 'Pi' is 3.14159... in Mathematica
(?=[^a-zA-Z])
''', re.VERBOSE),
'pi'),
}
# Mathematica function name pattern
FM_PATTERN = re.compile(r'''
(?:
\A|(?<=[^a-zA-Z]) # at the top or a non-letter
)
[A-Z][a-zA-Z\d]* # Function
(?=\[) # [ as a character
''', re.VERBOSE)
# list or matrix pattern (for future usage)
ARG_MTRX_PATTERN = re.compile(r'''
\{.*\}
''', re.VERBOSE)
# regex string for function argument pattern
ARGS_PATTERN_TEMPLATE = r'''
(?:
\A|(?<=[^a-zA-Z])
)
{arguments} # model argument like x, y,...
(?=[^a-zA-Z])
'''
# will contain transformed CORRESPONDENCES dictionary
TRANSLATIONS = {} # type: tDict[tTuple[str, int], tDict[str, Any]]
# cache for a raw users' translation dictionary
cache_original = {} # type: tDict[tTuple[str, int], tDict[str, Any]]
# cache for a compiled users' translation dictionary
cache_compiled = {} # type: tDict[tTuple[str, int], tDict[str, Any]]
@classmethod
def _initialize_class(cls):
# get a transformed CORRESPONDENCES dictionary
d = cls._compile_dictionary(cls.CORRESPONDENCES)
cls.TRANSLATIONS.update(d)
def __init__(self, additional_translations=None):
self.translations = {}
# update with TRANSLATIONS (class constant)
self.translations.update(self.TRANSLATIONS)
if additional_translations is None:
additional_translations = {}
# check the latest added translations
if self.__class__.cache_original != additional_translations:
if not isinstance(additional_translations, dict):
raise ValueError('The argument must be dict type')
# get a transformed additional_translations dictionary
d = self._compile_dictionary(additional_translations)
# update cache
self.__class__.cache_original = additional_translations
self.__class__.cache_compiled = d
# merge user's own translations
self.translations.update(self.__class__.cache_compiled)
@classmethod
def _compile_dictionary(cls, dic):
# for return
d = {}
for fm, fs in dic.items():
# check function form
cls._check_input(fm)
cls._check_input(fs)
# uncover '*' hiding behind a whitespace
fm = cls._apply_rules(fm, 'whitespace')
fs = cls._apply_rules(fs, 'whitespace')
# remove whitespace(s)
fm = cls._replace(fm, ' ')
fs = cls._replace(fs, ' ')
# search Mathematica function name
m = cls.FM_PATTERN.search(fm)
# if no-hit
if m is None:
err = "'{f}' function form is invalid.".format(f=fm)
raise ValueError(err)
# get Mathematica function name like 'Log'
fm_name = m.group()
# get arguments of Mathematica function
args, end = cls._get_args(m)
# the function must span the whole string, e.g. '2*Func[x]' is invalid
if m.start() != 0 or end != len(fm):
err = "'{f}' function form is invalid.".format(f=fm)
raise ValueError(err)
# check the last argument's 1st character
if args[-1][0] == '*':
key_arg = '*'
else:
key_arg = len(args)
key = (fm_name, key_arg)
# convert '*x' to '\\*x' for regex
re_args = [x if x[0] != '*' else '\\' + x for x in args]
# for regex. Example: (?:(x|y|z))
xyz = '(?:(' + '|'.join(re_args) + '))'
# string for regex compile
patStr = cls.ARGS_PATTERN_TEMPLATE.format(arguments=xyz)
pat = re.compile(patStr, re.VERBOSE)
# update dictionary
d[key] = {}
d[key]['fs'] = fs # SymPy function template
d[key]['args'] = args # args are ['x', 'y'] for example
d[key]['pat'] = pat
return d
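# For example, the entry 'Log[x,y]': 'log(y,x)' is compiled roughly to
#   {('Log', 2): {'fs': 'log(y,x)', 'args': ['x', 'y'], 'pat': <compiled regex>}},
# while a variable-length form such as 'Max[*x]' gets the key ('Max', '*').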
def _convert_function(self, s):
'''Parse Mathematica function to SymPy one'''
# compiled regex object
pat = self.FM_PATTERN
scanned = '' # converted string
cur = 0 # position cursor
while True:
m = pat.search(s)
if m is None:
# append the rest of string
scanned += s
break
# get Mathematica function name
fm = m.group()
# get arguments, and the end position of fm function
args, end = self._get_args(m)
# the start position of fm function
bgn = m.start()
# convert Mathematica function to SymPy one
s = self._convert_one_function(s, fm, args, bgn, end)
# update cursor
cur = bgn
# append converted part
scanned += s[:cur]
# shrink s
s = s[cur:]
return scanned
def _convert_one_function(self, s, fm, args, bgn, end):
# no variable-length argument
if (fm, len(args)) in self.translations:
key = (fm, len(args))
# x, y,... model arguments
x_args = self.translations[key]['args']
# make CORRESPONDENCES between model arguments and actual ones
d = {k: v for k, v in zip(x_args, args)}
# with variable-length argument
elif (fm, '*') in self.translations:
key = (fm, '*')
# x, y,..*args (model arguments)
x_args = self.translations[key]['args']
# make CORRESPONDENCES between model arguments and actual ones
d = {}
for i, x in enumerate(x_args):
if x[0] == '*':
d[x] = ','.join(args[i:])
break
d[x] = args[i]
# out of self.translations
else:
err = "'{f}' is out of the whitelist.".format(f=fm)
raise ValueError(err)
# template string of converted function
template = self.translations[key]['fs']
# regex pattern for x_args
pat = self.translations[key]['pat']
scanned = ''
cur = 0
while True:
m = pat.search(template)
if m is None:
scanned += template
break
# get model argument
x = m.group()
# get a start position of the model argument
xbgn = m.start()
# add the corresponding actual argument
scanned += template[:xbgn] + d[x]
# update cursor to the end of the model argument
cur = m.end()
# shrink template
template = template[cur:]
# update to swapped string
s = s[:bgn] + scanned + s[end:]
return s
@classmethod
def _get_args(cls, m):
'''Get arguments of a Mathematica function'''
s = m.string # whole string
anc = m.end() + 1 # points to the first letter of the arguments
square, curly = [], [] # stacks for brackets
args = []
# current cursor
cur = anc
for i, c in enumerate(s[anc:], anc):
# extract one argument
if c == ',' and (not square) and (not curly):
args.append(s[cur:i]) # add an argument
cur = i + 1 # move cursor
# handle list or matrix (for future usage)
if c == '{':
curly.append(c)
elif c == '}':
curly.pop()
# seek the corresponding ']', skipping irrelevant ones
if c == '[':
square.append(c)
elif c == ']':
if square:
square.pop()
else: # empty stack
args.append(s[cur:i])
break
# the next position to ']' bracket (the function end)
func_end = i + 1
return args, func_end
@classmethod
def _replace(cls, s, bef):
aft = cls.REPLACEMENTS[bef]
s = s.replace(bef, aft)
return s
@classmethod
def _apply_rules(cls, s, bef):
pat, aft = cls.RULES[bef]
return pat.sub(aft, s)
@classmethod
def _check_input(cls, s):
for bracket in (('[', ']'), ('{', '}'), ('(', ')')):
if s.count(bracket[0]) != s.count(bracket[1]):
err = "'{f}' function form is invalid.".format(f=s)
raise ValueError(err)
if '{' in s:
err = "Currently list is not supported."
raise ValueError(err)
def parse(self, s):
# input check
self._check_input(s)
# uncover '*' hiding behind a whitespace
s = self._apply_rules(s, 'whitespace')
# remove whitespace(s)
s = self._replace(s, ' ')
# add omitted '*' character
s = self._apply_rules(s, 'add*_1')
s = self._apply_rules(s, 'add*_2')
# translate function
s = self._convert_function(s)
# '^' to '**'
s = self._replace(s, '^')
# 'Pi' to 'pi'
s = self._apply_rules(s, 'Pi')
# '{', '}' to '[', ']', respectively
# s = cls._replace(s, '{') # currently list is not taken into account
# s = cls._replace(s, '}')
return s
|
0b16e2c9bdfa8cdd41e265c9f8d5330c26b3fdb9a7d8147a451b8e7ce4a38186 | """Transform a string with Python-like source code into SymPy expression. """
from tokenize import (generate_tokens, untokenize, TokenError,
NUMBER, STRING, NAME, OP, ENDMARKER, ERRORTOKEN, NEWLINE)
from keyword import iskeyword
import ast
import unicodedata
from io import StringIO
from sympy.assumptions.ask import AssumptionKeys
from sympy.core.basic import Basic
from sympy.core import Symbol
from sympy.core.function import arity, Function
from sympy.utilities.iterables import iterable
from sympy.utilities.misc import filldedent, func_name
def _token_splittable(token):
"""
Predicate for whether a token name can be split into multiple tokens.
A token is splittable if it does not contain an underscore character and
it is not the name of a Greek letter. This is used to implicitly convert
expressions like 'xyz' into 'x*y*z'.
"""
if '_' in token:
return False
else:
try:
return not unicodedata.lookup('GREEK SMALL LETTER ' + token)
except KeyError:
pass
if len(token) > 1:
return True
return False
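# For example (illustrative):
#   _token_splittable('xyz')   -> True   ('xyz' may become 'x*y*z')
#   _token_splittable('x_1')   -> False  (contains an underscore)
#   _token_splittable('alpha') -> False  (name of a Greek letter)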
def _token_callable(token, local_dict, global_dict, nextToken=None):
"""
Predicate for whether a token name represents a callable function.
Essentially wraps ``callable``, but looks up the token name in the
locals and globals.
"""
func = local_dict.get(token[1])
if not func:
func = global_dict.get(token[1])
return callable(func) and not isinstance(func, Symbol)
def _add_factorial_tokens(name, result):
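# wrap the last complete operand in ``result``, e.g. 'x!' -> 'factorial(x)'
# and '5!!' -> 'factorial2(5)' (the name is passed in by factorial_notation)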
if result == [] or result[-1][1] == '(':
raise TokenError()
beginning = [(NAME, name), (OP, '(')]
end = [(OP, ')')]
diff = 0
length = len(result)
for index, token in enumerate(result[::-1]):
toknum, tokval = token
i = length - index - 1
if tokval == ')':
diff += 1
elif tokval == '(':
diff -= 1
if diff == 0:
if i - 1 >= 0 and result[i - 1][0] == NAME:
return result[:i - 1] + beginning + result[i - 1:] + end
else:
return result[:i] + beginning + result[i:] + end
return result
class AppliedFunction:
"""
A group of tokens representing a function and its arguments.
`exponent` is for handling the shorthand sin^2, ln^2, etc.
"""
def __init__(self, function, args, exponent=None):
if exponent is None:
exponent = []
self.function = function
self.args = args
self.exponent = exponent
self.items = ['function', 'args', 'exponent']
def expand(self):
"""Return a list of tokens representing the function"""
result = []
result.append(self.function)
result.extend(self.args)
return result
def __getitem__(self, index):
return getattr(self, self.items[index])
def __repr__(self):
return "AppliedFunction(%s, %s, %s)" % (self.function, self.args,
self.exponent)
class ParenthesisGroup(list):
"""List of tokens representing an expression in parentheses."""
pass
def _flatten(result):
result2 = []
for tok in result:
if isinstance(tok, AppliedFunction):
result2.extend(tok.expand())
else:
result2.append(tok)
return result2
def _group_parentheses(recursor):
def _inner(tokens, local_dict, global_dict):
"""Group tokens between parentheses with ParenthesisGroup.
Also processes those tokens recursively.
"""
result = []
stacks = []
stacklevel = 0
for token in tokens:
if token[0] == OP:
if token[1] == '(':
stacks.append(ParenthesisGroup([]))
stacklevel += 1
elif token[1] == ')':
stacks[-1].append(token)
stack = stacks.pop()
if len(stacks) > 0:
# We don't recurse here since the upper-level stack
# would reprocess these tokens
stacks[-1].extend(stack)
else:
# Recurse here to handle nested parentheses
# Strip off the outer parentheses to avoid an infinite loop
inner = stack[1:-1]
inner = recursor(inner,
local_dict,
global_dict)
parenGroup = [stack[0]] + inner + [stack[-1]]
result.append(ParenthesisGroup(parenGroup))
stacklevel -= 1
continue
if stacklevel:
stacks[-1].append(token)
else:
result.append(token)
if stacklevel:
raise TokenError("Mismatched parentheses")
return result
return _inner
def _apply_functions(tokens, local_dict, global_dict):
"""Convert a NAME token + ParenthesisGroup into an AppliedFunction.
Note that ParenthesisGroups, if not applied to any function, are
converted back into lists of tokens.
"""
result = []
symbol = None
for tok in tokens:
if tok[0] == NAME:
symbol = tok
result.append(tok)
elif isinstance(tok, ParenthesisGroup):
if symbol and _token_callable(symbol, local_dict, global_dict):
result[-1] = AppliedFunction(symbol, tok)
symbol = None
else:
result.extend(tok)
else:
symbol = None
result.append(tok)
return result
def _implicit_multiplication(tokens, local_dict, global_dict):
"""Implicitly adds '*' tokens.
Cases:
- Two AppliedFunctions next to each other ("sin(x)cos(x)")
- AppliedFunction next to an open parenthesis ("sin x (cos x + 1)")
- A close parenthesis next to an AppliedFunction ("(x+2)sin x")
- A close parenthesis next to an open parenthesis ("(x+2)(x+3)")
- AppliedFunction next to an implicitly applied function ("sin(x)cos x")
"""
result = []
skip = False
for tok, nextTok in zip(tokens, tokens[1:]):
result.append(tok)
if skip:
skip = False
continue
if tok[0] == OP and tok[1] == '.' and nextTok[0] == NAME:
# Dotted name. Do not do implicit multiplication
skip = True
continue
if (isinstance(tok, AppliedFunction) and
isinstance(nextTok, AppliedFunction)):
result.append((OP, '*'))
elif (isinstance(tok, AppliedFunction) and
nextTok[0] == OP and nextTok[1] == '('):
# Applied function followed by an open parenthesis
if tok.function[1] == "Function":
result[-1].function = (result[-1].function[0], 'Symbol')
result.append((OP, '*'))
elif (tok[0] == OP and tok[1] == ')' and
isinstance(nextTok, AppliedFunction)):
# Close parenthesis followed by an applied function
result.append((OP, '*'))
elif (tok[0] == OP and tok[1] == ')' and
nextTok[0] == NAME):
# Close parenthesis followed by an implicitly applied function
result.append((OP, '*'))
elif (tok[0] == nextTok[0] == OP
and tok[1] == ')' and nextTok[1] == '('):
# Close parenthesis followed by an open parenthesis
result.append((OP, '*'))
elif (isinstance(tok, AppliedFunction) and nextTok[0] == NAME):
# Applied function followed by implicitly applied function
result.append((OP, '*'))
elif (tok[0] == NAME and
not _token_callable(tok, local_dict, global_dict) and
nextTok[0] == OP and nextTok[1] == '('):
# Constant followed by parenthesis
result.append((OP, '*'))
elif (tok[0] == NAME and
not _token_callable(tok, local_dict, global_dict) and
nextTok[0] == NAME and
not _token_callable(nextTok, local_dict, global_dict)):
# Constant followed by constant
result.append((OP, '*'))
elif (tok[0] == NAME and
not _token_callable(tok, local_dict, global_dict) and
(isinstance(nextTok, AppliedFunction) or nextTok[0] == NAME)):
# Constant followed by (implicitly applied) function
result.append((OP, '*'))
if tokens:
result.append(tokens[-1])
return result
def _implicit_application(tokens, local_dict, global_dict):
"""Adds parentheses as needed after functions."""
result = []
appendParen = 0 # number of closing parentheses to add
skip = 0 # number of tokens to delay before adding a ')' (to
# capture **, ^, etc.)
exponentSkip = False # skipping tokens before inserting parentheses to
# work with function exponentiation
for tok, nextTok in zip(tokens, tokens[1:]):
result.append(tok)
if (tok[0] == NAME and nextTok[0] not in [OP, ENDMARKER, NEWLINE]):
if _token_callable(tok, local_dict, global_dict, nextTok):
result.append((OP, '('))
appendParen += 1
# name followed by exponent - function exponentiation
elif (tok[0] == NAME and nextTok[0] == OP and nextTok[1] == '**'):
if _token_callable(tok, local_dict, global_dict):
exponentSkip = True
elif exponentSkip:
# if the last token added was an applied function (i.e. the
# power of the function exponent) OR a multiplication (as
# implicit multiplication would have added an extraneous
# multiplication)
if (isinstance(tok, AppliedFunction)
or (tok[0] == OP and tok[1] == '*')):
# don't add anything if the next token is a multiplication
# or if there's already a parenthesis (if parenthesis, still
# stop skipping tokens)
if not (nextTok[0] == OP and nextTok[1] == '*'):
if not(nextTok[0] == OP and nextTok[1] == '('):
result.append((OP, '('))
appendParen += 1
exponentSkip = False
elif appendParen:
if nextTok[0] == OP and nextTok[1] in ('^', '**', '*'):
skip = 1
continue
if skip:
skip -= 1
continue
result.append((OP, ')'))
appendParen -= 1
if tokens:
result.append(tokens[-1])
if appendParen:
result.extend([(OP, ')')] * appendParen)
return result
def function_exponentiation(tokens, local_dict, global_dict):
"""Allows functions to be exponentiated, e.g. ``cos**2(x)``.
Examples
========
>>> from sympy.parsing.sympy_parser import (parse_expr,
... standard_transformations, function_exponentiation)
>>> transformations = standard_transformations + (function_exponentiation,)
>>> parse_expr('sin**4(x)', transformations=transformations)
sin(x)**4
"""
result = []
exponent = []
consuming_exponent = False
level = 0
for tok, nextTok in zip(tokens, tokens[1:]):
if tok[0] == NAME and nextTok[0] == OP and nextTok[1] == '**':
if _token_callable(tok, local_dict, global_dict):
consuming_exponent = True
elif consuming_exponent:
if tok[0] == NAME and tok[1] == 'Function':
tok = (NAME, 'Symbol')
exponent.append(tok)
# only want to stop after hitting )
if tok[0] == nextTok[0] == OP and tok[1] == ')' and nextTok[1] == '(':
consuming_exponent = False
# if implicit multiplication was used, we may have )*( instead
if tok[0] == nextTok[0] == OP and tok[1] == '*' and nextTok[1] == '(':
consuming_exponent = False
del exponent[-1]
continue
elif exponent and not consuming_exponent:
if tok[0] == OP:
if tok[1] == '(':
level += 1
elif tok[1] == ')':
level -= 1
if level == 0:
result.append(tok)
result.extend(exponent)
exponent = []
continue
result.append(tok)
if tokens:
result.append(tokens[-1])
if exponent:
result.extend(exponent)
return result
def split_symbols_custom(predicate):
"""Creates a transformation that splits symbol names.
``predicate`` should return True if the symbol name is to be split.
For instance, to retain the default behavior but avoid splitting certain
symbol names, a predicate like this would work:
>>> from sympy.parsing.sympy_parser import (parse_expr, _token_splittable,
... standard_transformations, implicit_multiplication,
... split_symbols_custom)
>>> def can_split(symbol):
... if symbol not in ('list', 'of', 'unsplittable', 'names'):
... return _token_splittable(symbol)
... return False
...
>>> transformation = split_symbols_custom(can_split)
>>> parse_expr('unsplittable', transformations=standard_transformations +
... (transformation, implicit_multiplication))
unsplittable
"""
def _split_symbols(tokens, local_dict, global_dict):
result = []
split = False
split_previous=False
for tok in tokens:
if split_previous:
# throw out closing parenthesis of Symbol that was split
split_previous=False
continue
split_previous=False
if tok[0] == NAME and tok[1] in ['Symbol', 'Function']:
split = True
elif split and tok[0] == NAME:
symbol = tok[1][1:-1]
if predicate(symbol):
tok_type = result[-2][1] # Symbol or Function
del result[-2:] # Get rid of the call to Symbol
i = 0
while i < len(symbol):
char = symbol[i]
if char in local_dict or char in global_dict:
result.append((NAME, "%s" % char))
elif char.isdigit():
char = [char]
for i in range(i + 1, len(symbol)):
if not symbol[i].isdigit():
i -= 1
break
char.append(symbol[i])
char = ''.join(char)
result.extend([(NAME, 'Number'), (OP, '('),
(NAME, "'%s'" % char), (OP, ')')])
else:
use = tok_type if i == len(symbol) else 'Symbol'
result.extend([(NAME, use), (OP, '('),
(NAME, "'%s'" % char), (OP, ')')])
i += 1
# Set split_previous=True so will skip
# the closing parenthesis of the original Symbol
split = False
split_previous = True
continue
else:
split = False
result.append(tok)
return result
return _split_symbols
#: Splits symbol names for implicit multiplication.
#:
#: Intended to let expressions like ``xyz`` be parsed as ``x*y*z``. Does not
#: split Greek character names, so ``theta`` will *not* become
#: ``t*h*e*t*a``. Generally this should be used with
#: ``implicit_multiplication``.
split_symbols = split_symbols_custom(_token_splittable)
def implicit_multiplication(result, local_dict, global_dict):
"""Makes the multiplication operator optional in most cases.
Use this before :func:`implicit_application`, otherwise expressions like
``sin 2x`` will be parsed as ``x * sin(2)`` rather than ``sin(2*x)``.
Examples
========
>>> from sympy.parsing.sympy_parser import (parse_expr,
... standard_transformations, implicit_multiplication)
>>> transformations = standard_transformations + (implicit_multiplication,)
>>> parse_expr('3 x y', transformations=transformations)
3*x*y
"""
# These are interdependent steps, so we don't expose them separately
for step in (_group_parentheses(implicit_multiplication),
_apply_functions,
_implicit_multiplication):
result = step(result, local_dict, global_dict)
result = _flatten(result)
return result
def implicit_application(result, local_dict, global_dict):
"""Makes parentheses optional in some cases for function calls.
Use this after :func:`implicit_multiplication`, otherwise expressions
like ``sin 2x`` will be parsed as ``x * sin(2)`` rather than
``sin(2*x)``.
Examples
========
>>> from sympy.parsing.sympy_parser import (parse_expr,
... standard_transformations, implicit_application)
>>> transformations = standard_transformations + (implicit_application,)
>>> parse_expr('cot z + csc z', transformations=transformations)
cot(z) + csc(z)
"""
for step in (_group_parentheses(implicit_application),
_apply_functions,
_implicit_application,):
result = step(result, local_dict, global_dict)
result = _flatten(result)
return result
def implicit_multiplication_application(result, local_dict, global_dict):
"""Allows a slightly relaxed syntax.
- Parentheses for single-argument method calls are optional.
- Multiplication is implicit.
- Symbol names can be split (i.e. spaces are not needed between
symbols).
- Functions can be exponentiated.
Examples
========
>>> from sympy.parsing.sympy_parser import (parse_expr,
... standard_transformations, implicit_multiplication_application)
>>> parse_expr("10sin**2 x**2 + 3xyz + tan theta",
... transformations=(standard_transformations +
... (implicit_multiplication_application,)))
3*x*y*z + 10*sin(x**2)**2 + tan(theta)
"""
for step in (split_symbols, implicit_multiplication,
implicit_application, function_exponentiation):
result = step(result, local_dict, global_dict)
return result
def auto_symbol(tokens, local_dict, global_dict):
"""Inserts calls to ``Symbol``/``Function`` for undefined variables."""
result = []
prevTok = (None, None)
tokens.append((None, None)) # so zip traverses all tokens
for tok, nextTok in zip(tokens, tokens[1:]):
tokNum, tokVal = tok
nextTokNum, nextTokVal = nextTok
if tokNum == NAME:
name = tokVal
if (name in ['True', 'False', 'None']
or iskeyword(name)
# Don't convert attribute access
or (prevTok[0] == OP and prevTok[1] == '.')
# Don't convert keyword arguments
or (prevTok[0] == OP and prevTok[1] in ('(', ',')
and nextTokNum == OP and nextTokVal == '=')
# the name has already been defined
or name in local_dict and local_dict[name] is not None):
result.append((NAME, name))
continue
elif name in local_dict:
local_dict.setdefault(None, set()).add(name)
if nextTokVal == '(':
local_dict[name] = Function(name)
else:
local_dict[name] = Symbol(name)
result.append((NAME, name))
continue
elif name in global_dict:
obj = global_dict[name]
if isinstance(obj, (AssumptionKeys, Basic, type)) or callable(obj):
result.append((NAME, name))
continue
result.extend([
(NAME, 'Symbol' if nextTokVal != '(' else 'Function'),
(OP, '('),
(NAME, repr(str(name))),
(OP, ')'),
])
else:
result.append((tokNum, tokVal))
prevTok = (tokNum, tokVal)
return result
def lambda_notation(tokens, local_dict, global_dict):
"""Substitutes "lambda" with its SymPy equivalent Lambda().
However, the conversion doesn't take place if only "lambda"
is passed because that is a syntax error.
"""
result = []
flag = False
toknum, tokval = tokens[0]
tokLen = len(tokens)
if toknum == NAME and tokval == 'lambda':
if tokLen == 2 or tokLen == 3 and tokens[1][0] == NEWLINE:
# In Python 3.6.7+, inputs without a newline get NEWLINE added to
# the tokens
result.extend(tokens)
elif tokLen > 2:
result.extend([
(NAME, 'Lambda'),
(OP, '('),
(OP, '('),
(OP, ')'),
(OP, ')'),
])
for tokNum, tokVal in tokens[1:]:
if tokNum == OP and tokVal == ':':
tokVal = ','
flag = True
if not flag and tokNum == OP and tokVal in ('*', '**'):
raise TokenError("Starred arguments in lambda not supported")
if flag:
result.insert(-1, (tokNum, tokVal))
else:
result.insert(-2, (tokNum, tokVal))
else:
result.extend(tokens)
return result
def factorial_notation(tokens, local_dict, global_dict):
"""Allows standard notation for factorial."""
result = []
nfactorial = 0
for toknum, tokval in tokens:
if toknum == ERRORTOKEN:
op = tokval
if op == '!':
nfactorial += 1
else:
nfactorial = 0
result.append((OP, op))
else:
if nfactorial == 1:
result = _add_factorial_tokens('factorial', result)
elif nfactorial == 2:
result = _add_factorial_tokens('factorial2', result)
elif nfactorial > 2:
raise TokenError
nfactorial = 0
result.append((toknum, tokval))
return result
def convert_xor(tokens, local_dict, global_dict):
"""Treats XOR, ``^``, as exponentiation, ``**``."""
result = []
for toknum, tokval in tokens:
if toknum == OP:
if tokval == '^':
result.append((OP, '**'))
else:
result.append((toknum, tokval))
else:
result.append((toknum, tokval))
return result
def repeated_decimals(tokens, local_dict, global_dict):
"""
Allows 0.2[1] notation to represent the repeated decimal 0.2111... (19/90)
Run this before auto_number.
"""
result = []
def is_digit(s):
return all(i in '0123456789_' for i in s)
# num will accumulate a running match of DECIMAL [ INTEGER ]
num = []
for toknum, tokval in tokens:
if toknum == NUMBER:
if (not num and '.' in tokval and 'e' not in tokval.lower() and
'j' not in tokval.lower()):
num.append((toknum, tokval))
elif is_digit(tokval) and len(num) == 2:
num.append((toknum, tokval))
elif is_digit(tokval) and len(num) == 3 and is_digit(num[-1][1]):
# Python 2 tokenizes 00123 as '00', '123'
# Python 3 tokenizes 01289 as '012', '89'
num.append((toknum, tokval))
else:
num = []
elif toknum == OP:
if tokval == '[' and len(num) == 1:
num.append((OP, tokval))
elif tokval == ']' and len(num) >= 3:
num.append((OP, tokval))
elif tokval == '.' and not num:
# handle .[1]
num.append((NUMBER, '0.'))
else:
num = []
else:
num = []
result.append((toknum, tokval))
if num and num[-1][1] == ']':
# pre.post[repetend] = a + b/c + d/e where a = pre, b/c = post,
# and d/e = repetend
result = result[:-len(num)]
pre, post = num[0][1].split('.')
repetend = num[2][1]
if len(num) == 5:
repetend += num[3][1]
pre = pre.replace('_', '')
post = post.replace('_', '')
repetend = repetend.replace('_', '')
zeros = '0'*len(post)
post, repetends = [w.lstrip('0') for w in [post, repetend]]
# (leading zeros are stripped above, or else they'd be interpreted as octal)
a = pre or '0'
b, c = post or '0', '1' + zeros
d, e = repetends, ('9'*len(repetend)) + zeros
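# e.g. for 0.2[1]: pre = '0', post = '2', repetend = '1', giving
#   a = '0', b = '2', c = '10', d = '1', e = '90',
#   and 0 + 2/10 + 1/90 = 19/90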
seq = [
(OP, '('),
(NAME, 'Integer'),
(OP, '('),
(NUMBER, a),
(OP, ')'),
(OP, '+'),
(NAME, 'Rational'),
(OP, '('),
(NUMBER, b),
(OP, ','),
(NUMBER, c),
(OP, ')'),
(OP, '+'),
(NAME, 'Rational'),
(OP, '('),
(NUMBER, d),
(OP, ','),
(NUMBER, e),
(OP, ')'),
(OP, ')'),
]
result.extend(seq)
num = []
return result
def auto_number(tokens, local_dict, global_dict):
"""
Converts numeric literals to use SymPy equivalents.
Complex numbers use ``I``, integer literals use ``Integer``, and float
literals use ``Float``.
"""
result = []
for toknum, tokval in tokens:
if toknum == NUMBER:
number = tokval
postfix = []
if number.endswith('j') or number.endswith('J'):
number = number[:-1]
postfix = [(OP, '*'), (NAME, 'I')]
if '.' in number or (('e' in number or 'E' in number) and
not (number.startswith('0x') or number.startswith('0X'))):
seq = [(NAME, 'Float'), (OP, '('),
(NUMBER, repr(str(number))), (OP, ')')]
else:
seq = [(NAME, 'Integer'), (OP, '('), (
NUMBER, number), (OP, ')')]
result.extend(seq + postfix)
else:
result.append((toknum, tokval))
return result
def rationalize(tokens, local_dict, global_dict):
"""Converts floats into ``Rational``. Run AFTER ``auto_number``."""
result = []
passed_float = False
for toknum, tokval in tokens:
if toknum == NAME:
if tokval == 'Float':
passed_float = True
tokval = 'Rational'
result.append((toknum, tokval))
elif passed_float == True and toknum == NUMBER:
passed_float = False
result.append((STRING, tokval))
else:
result.append((toknum, tokval))
return result
def _transform_equals_sign(tokens, local_dict, global_dict):
"""Transforms the equals sign ``=`` to instances of Eq.
This is a helper function for ``convert_equals_signs``.
Works with expressions containing one equals sign and no
nesting. Expressions like ``(1=2)=False`` will not work with this
and should be used with ``convert_equals_signs``.
Examples: 1=2 to Eq(1,2)
1*2=x to Eq(1*2, x)
This does not deal with function arguments yet.
"""
result = []
if (OP, "=") in tokens:
result.append((NAME, "Eq"))
result.append((OP, "("))
for index, token in enumerate(tokens):
if token == (OP, "="):
result.append((OP, ","))
continue
result.append(token)
result.append((OP, ")"))
else:
result = tokens
return result
def convert_equals_signs(result, local_dict, global_dict):
""" Transforms all the equals signs ``=`` to instances of Eq.
Parses the equals signs in the expression and replaces them with
appropriate Eq instances. Also works with nested equals signs.
Does not yet play well with function arguments.
For example, the expression ``(x=y)`` is ambiguous and can be interpreted
as x being an argument to a function and ``convert_equals_signs`` will not
work for this.
See also
========
convert_equality_operators
Examples
========
>>> from sympy.parsing.sympy_parser import (parse_expr,
... standard_transformations, convert_equals_signs)
>>> parse_expr("1*2=x", transformations=(
... standard_transformations + (convert_equals_signs,)))
Eq(2, x)
>>> parse_expr("(1*2=x)=False", transformations=(
... standard_transformations + (convert_equals_signs,)))
Eq(Eq(2, x), False)
"""
for step in (_group_parentheses(convert_equals_signs),
_apply_functions,
_transform_equals_sign):
result = step(result, local_dict, global_dict)
result = _flatten(result)
return result
#: Standard transformations for :func:`parse_expr`.
#: Inserts calls to :class:`~.Symbol`, :class:`~.Integer`, and other SymPy
#: datatypes and allows the use of standard factorial notation (e.g. ``x!``).
standard_transformations = (lambda_notation, auto_symbol, repeated_decimals, auto_number,
factorial_notation)
def stringify_expr(s, local_dict, global_dict, transformations):
"""
    Converts the string ``s`` to Python code, in ``local_dict``.
Generally, ``parse_expr`` should be used.
"""
tokens = []
input_code = StringIO(s.strip())
for toknum, tokval, _, _, _ in generate_tokens(input_code.readline):
tokens.append((toknum, tokval))
for transform in transformations:
tokens = transform(tokens, local_dict, global_dict)
return untokenize(tokens)
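# For example (a hedged sketch; the exact spacing comes from ``untokenize``):
#     stringify_expr("1/2", {}, global_dict, standard_transformations)
# returns Python source roughly of the form ``Integer (1 )/Integer (2 )``,
# which ``eval_expr`` below then evaluates to Rational(1, 2).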
def eval_expr(code, local_dict, global_dict):
"""
Evaluate Python code generated by ``stringify_expr``.
Generally, ``parse_expr`` should be used.
"""
expr = eval(
code, global_dict, local_dict) # take local objects in preference
return expr
def parse_expr(s, local_dict=None, transformations=standard_transformations,
global_dict=None, evaluate=True):
"""Converts the string ``s`` to a SymPy expression, in ``local_dict``
Parameters
==========
s : str
The string to parse.
local_dict : dict, optional
A dictionary of local variables to use when parsing.
global_dict : dict, optional
A dictionary of global variables. By default, this is initialized
with ``from sympy import *``; provide this parameter to override
this behavior (for instance, to parse ``"Q & S"``).
transformations : tuple or str, optional
A tuple of transformation functions used to modify the tokens of the
parsed expression before evaluation. The default transformations
convert numeric literals into their SymPy equivalents, convert
undefined variables into SymPy symbols, and allow the use of standard
mathematical factorial notation (e.g. ``x!``). Selection via
string is available (see below).
evaluate : bool, optional
When False, the order of the arguments will remain as they were in the
string and automatic simplification that would normally occur is
suppressed. (see examples)
Examples
========
>>> from sympy.parsing.sympy_parser import parse_expr
>>> parse_expr("1/2")
1/2
>>> type(_)
<class 'sympy.core.numbers.Half'>
>>> from sympy.parsing.sympy_parser import standard_transformations,\\
... implicit_multiplication_application
>>> transformations = (standard_transformations +
... (implicit_multiplication_application,))
>>> parse_expr("2x", transformations=transformations)
2*x
When evaluate=False, some automatic simplifications will not occur:
>>> parse_expr("2**3"), parse_expr("2**3", evaluate=False)
(8, 2**3)
In addition the order of the arguments will not be made canonical.
This feature allows one to tell exactly how the expression was entered:
>>> a = parse_expr('1 + x', evaluate=False)
>>> b = parse_expr('x + 1', evaluate=0)
>>> a == b
False
>>> a.args
(1, x)
>>> b.args
(x, 1)
Note, however, that when these expressions are printed they will
appear the same:
>>> assert str(a) == str(b)
As a convenience, transformations can be seen by printing ``transformations``:
>>> from sympy.parsing.sympy_parser import transformations
>>> print(transformations)
0: lambda_notation
1: auto_symbol
2: repeated_decimals
3: auto_number
4: factorial_notation
5: implicit_multiplication_application
6: convert_xor
7: implicit_application
8: implicit_multiplication
9: convert_equals_signs
10: function_exponentiation
11: rationalize
The ``T`` object provides a way to select these transformations:
>>> from sympy.parsing.sympy_parser import T
If you print it, you will see the same list as shown above.
>>> str(T) == str(transformations)
True
Standard slicing will return a tuple of transformations:
>>> T[:5] == standard_transformations
True
So ``T`` can be used to specify the parsing transformations:
>>> parse_expr("2x", transformations=T[:5])
Traceback (most recent call last):
...
SyntaxError: invalid syntax
>>> parse_expr("2x", transformations=T[:6])
2*x
>>> parse_expr('.3', transformations=T[3, 11])
3/10
>>> parse_expr('.3x', transformations=T[:])
3*x/10
As a further convenience, strings 'implicit' and 'all' can be used
to select 0-5 and all the transformations, respectively.
>>> parse_expr('.3x', transformations='all')
3*x/10
See Also
========
stringify_expr, eval_expr, standard_transformations,
implicit_multiplication_application
"""
if local_dict is None:
local_dict = {}
elif not isinstance(local_dict, dict):
raise TypeError('expecting local_dict to be a dict')
if global_dict is None:
global_dict = {}
exec('from sympy import *', global_dict)
elif not isinstance(global_dict, dict):
raise TypeError('expecting global_dict to be a dict')
transformations = transformations or ()
if type(transformations) is str:
if transformations == 'all':
transformations = T[:]
elif transformations == 'implicit':
transformations = T[:6]
else:
raise ValueError('unknown transformation group name')
if transformations:
if not iterable(transformations):
raise TypeError(
'`transformations` should be a list of functions.')
for _ in transformations:
if not callable(_):
raise TypeError(filldedent('''
expected a function in `transformations`,
not %s''' % func_name(_)))
if arity(_) != 3:
raise TypeError(filldedent('''
                    a transformation should be a function that
takes 3 arguments'''))
code = stringify_expr(s, local_dict, global_dict, transformations)
if not evaluate:
code = compile(evaluateFalse(code), '<string>', 'eval')
try:
rv = eval_expr(code, local_dict, global_dict)
# restore neutral definitions for names
for i in local_dict.pop(None, ()):
local_dict[i] = None
return rv
except Exception as e:
# restore neutral definitions for names
for i in local_dict.pop(None, ()):
local_dict[i] = None
raise e from ValueError(f"Error from parse_expr with transformed code: {code!r}")
def evaluateFalse(s):
"""
Replaces operators with the SymPy equivalent and sets evaluate=False.
"""
node = ast.parse(s)
node = EvaluateFalseTransformer().visit(node)
# node is a Module, we want an Expression
node = ast.Expression(node.body[0].value)
return ast.fix_missing_locations(node)
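# A brief illustration (added): ``evaluateFalse`` is what allows
# ``parse_expr(..., evaluate=False)`` to keep the expression unevaluated:
#     >>> from sympy.parsing.sympy_parser import parse_expr
#     >>> parse_expr("2**3", evaluate=False)
#     2**3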
class EvaluateFalseTransformer(ast.NodeTransformer):
operators = {
ast.Add: 'Add',
ast.Mult: 'Mul',
ast.Pow: 'Pow',
ast.Sub: 'Add',
ast.Div: 'Mul',
ast.BitOr: 'Or',
ast.BitAnd: 'And',
ast.BitXor: 'Not',
}
functions = (
'Abs', 'im', 're', 'sign', 'arg', 'conjugate',
'acos', 'acot', 'acsc', 'asec', 'asin', 'atan',
'acosh', 'acoth', 'acsch', 'asech', 'asinh', 'atanh',
'cos', 'cot', 'csc', 'sec', 'sin', 'tan',
'cosh', 'coth', 'csch', 'sech', 'sinh', 'tanh',
'exp', 'ln', 'log', 'sqrt', 'cbrt',
)
def flatten(self, args, func):
result = []
for arg in args:
if isinstance(arg, ast.Call):
arg_func = arg.func
if isinstance(arg_func, ast.Call):
arg_func = arg_func.func
if arg_func.id == func:
result.extend(self.flatten(arg.args, func))
else:
result.append(arg)
else:
result.append(arg)
return result
def visit_BinOp(self, node):
if node.op.__class__ in self.operators:
sympy_class = self.operators[node.op.__class__]
right = self.visit(node.right)
left = self.visit(node.left)
rev = False
if isinstance(node.op, ast.Sub):
right = ast.Call(
func=ast.Name(id='Mul', ctx=ast.Load()),
args=[ast.UnaryOp(op=ast.USub(), operand=ast.Num(1)), right],
keywords=[ast.keyword(arg='evaluate', value=ast.NameConstant(value=False, ctx=ast.Load()))],
starargs=None,
kwargs=None
)
elif isinstance(node.op, ast.Div):
if isinstance(node.left, ast.UnaryOp):
left, right = right, left
rev = True
left = ast.Call(
func=ast.Name(id='Pow', ctx=ast.Load()),
args=[left, ast.UnaryOp(op=ast.USub(), operand=ast.Num(1))],
keywords=[ast.keyword(arg='evaluate', value=ast.NameConstant(value=False, ctx=ast.Load()))],
starargs=None,
kwargs=None
)
else:
right = ast.Call(
func=ast.Name(id='Pow', ctx=ast.Load()),
args=[right, ast.UnaryOp(op=ast.USub(), operand=ast.Num(1))],
keywords=[ast.keyword(arg='evaluate', value=ast.NameConstant(value=False, ctx=ast.Load()))],
starargs=None,
kwargs=None
)
if rev: # undo reversal
left, right = right, left
new_node = ast.Call(
func=ast.Name(id=sympy_class, ctx=ast.Load()),
args=[left, right],
keywords=[ast.keyword(arg='evaluate', value=ast.NameConstant(value=False, ctx=ast.Load()))],
starargs=None,
kwargs=None
)
if sympy_class in ('Add', 'Mul'):
# Denest Add or Mul as appropriate
new_node.args = self.flatten(new_node.args, sympy_class)
return new_node
return node
def visit_Call(self, node):
new_node = self.generic_visit(node)
if isinstance(node.func, ast.Name) and node.func.id in self.functions:
new_node.keywords.append(ast.keyword(arg='evaluate', value=ast.NameConstant(value=False, ctx=ast.Load())))
return new_node
_transformation = { # items can be added but never re-ordered
0: lambda_notation,
1: auto_symbol,
2: repeated_decimals,
3: auto_number,
4: factorial_notation,
5: implicit_multiplication_application,
6: convert_xor,
7: implicit_application,
8: implicit_multiplication,
9: convert_equals_signs,
10: function_exponentiation,
11: rationalize}
transformations = '\n'.join('%s: %s' % (i, func_name(f)) for i, f in _transformation.items())
class _T():
"""class to retrieve transformations from a given slice
EXAMPLES
========
>>> from sympy.parsing.sympy_parser import T, standard_transformations
>>> assert T[:5] == standard_transformations
"""
def __init__(self):
self.N = len(_transformation)
def __str__(self):
return transformations
def __getitem__(self, t):
        if type(t) is not tuple:
t = (t,)
i = []
for ti in t:
if type(ti) is int:
i.append(range(self.N)[ti])
elif type(ti) is slice:
i.extend(list(range(*ti.indices(self.N))))
else:
raise TypeError('unexpected slice arg')
return tuple([_transformation[_] for _ in i])
T = _T()
|
b6d342b594fa77ea23d3af3a803450e1c105e5f2827442e6c9b5db0c5b6fbead | """
This module implements the functionality to take any Python expression as a
string and fix all numbers and other things before evaluating it,
thus
1/2
returns
Integer(1)/Integer(2)
We use the ast module for this. It is well documented at docs.python.org.
Some tips to understand how this works: use dump() to get a nice
representation of any node. Then write a string of what you want to get,
e.g. "Integer(1)", parse it, dump it and you'll see that you need to do
"Call(Name('Integer', Load()), [node], [], None, None)". You do not need
to bother with lineno and col_offset, just call fix_missing_locations()
before returning the node.
"""
from sympy.core.basic import Basic
from sympy.core.sympify import SympifyError
from ast import parse, NodeTransformer, Call, Name, Load, \
fix_missing_locations, Str, Tuple
class Transform(NodeTransformer):
def __init__(self, local_dict, global_dict):
NodeTransformer.__init__(self)
self.local_dict = local_dict
self.global_dict = global_dict
def visit_Num(self, node):
if isinstance(node.n, int):
return fix_missing_locations(Call(func=Name('Integer', Load()),
args=[node], keywords=[]))
elif isinstance(node.n, float):
return fix_missing_locations(Call(func=Name('Float', Load()),
args=[node], keywords=[]))
return node
def visit_Name(self, node):
if node.id in self.local_dict:
return node
elif node.id in self.global_dict:
name_obj = self.global_dict[node.id]
if isinstance(name_obj, (Basic, type)) or callable(name_obj):
return node
elif node.id in ['True', 'False']:
return node
return fix_missing_locations(Call(func=Name('Symbol', Load()),
args=[Str(node.id)], keywords=[]))
def visit_Lambda(self, node):
args = [self.visit(arg) for arg in node.args.args]
body = self.visit(node.body)
n = Call(func=Name('Lambda', Load()),
args=[Tuple(args, Load()), body], keywords=[])
return fix_missing_locations(n)
def parse_expr(s, local_dict):
"""
Converts the string "s" to a SymPy expression, in local_dict.
It converts all numbers to Integers before feeding it to Python and
automatically creates Symbols.
"""
global_dict = {}
exec('from sympy import *', global_dict)
try:
a = parse(s.strip(), mode="eval")
except SyntaxError:
raise SympifyError("Cannot parse %s." % repr(s))
a = Transform(local_dict, global_dict).visit(a)
e = compile(a, "<string>", "eval")
return eval(e, global_dict, local_dict)
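# A minimal usage sketch (added): undefined names become Symbols and integer
# literals become Integers, so division stays exact:
#     >>> parse_expr("x + 1/2", {})
#     x + 1/2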
|
9fa82efd0defcd23ad6299fa1ae9bb9f010b6a870188bcf3033c10b7de1ab0b1 | # -*- coding: utf-8 -*-
r"""
Wigner, Clebsch-Gordan, Racah, and Gaunt coefficients
Collection of functions for calculating Wigner 3j, 6j, 9j,
Clebsch-Gordan, Racah as well as Gaunt coefficients exactly, all
evaluating to a rational number times the square root of a rational
number [Rasch03]_.
Please see the description of the individual functions for further
details and examples.
References
==========
.. [Regge58] 'Symmetry Properties of Clebsch-Gordan Coefficients',
T. Regge, Nuovo Cimento, Volume 10, pp. 544 (1958)
.. [Regge59] 'Symmetry Properties of Racah Coefficients',
T. Regge, Nuovo Cimento, Volume 11, pp. 116 (1959)
.. [Edmonds74] A. R. Edmonds. Angular momentum in quantum mechanics.
Investigations in physics, 4.; Investigations in physics, no. 4.
Princeton, N.J., Princeton University Press, 1957.
.. [Rasch03] J. Rasch and A. C. H. Yu, 'Efficient Storage Scheme for
Pre-calculated Wigner 3j, 6j and Gaunt Coefficients', SIAM
J. Sci. Comput. Volume 25, Issue 4, pp. 1416-1428 (2003)
.. [Liberatodebrito82] 'FORTRAN program for the integral of three
spherical harmonics', A. Liberato de Brito,
Comput. Phys. Commun., Volume 25, pp. 81-85 (1982)
Credits and Copyright
=====================
This code was taken from Sage with the permission of all authors:
https://groups.google.com/forum/#!topic/sage-devel/M4NZdu-7O38
Authors
=======
- Jens Rasch (2009-03-24): initial version for Sage
- Jens Rasch (2009-05-31): updated to sage-4.0
- Oscar Gerardo Lazo Arjona (2017-06-18): added Wigner D matrices
Copyright (C) 2008 Jens Rasch <[email protected]>
"""
from sympy.concrete.summations import Sum
from sympy.core.add import Add
from sympy.core.function import Function
from sympy.core.numbers import (I, Integer, pi)
from sympy.core.singleton import S
from sympy.core.symbol import Dummy
from sympy.core.sympify import sympify
from sympy.functions.combinatorial.factorials import (binomial, factorial)
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import (cos, sin)
from sympy.functions.special.spherical_harmonics import Ynm
from sympy.matrices.dense import zeros
from sympy.matrices.immutable import ImmutableMatrix
# This list of precomputed factorials is needed to massively
# accelerate future calculations of the various coefficients
_Factlist = [1]
def _calc_factlist(nn):
r"""
Function calculates a list of precomputed factorials in order to
massively accelerate future calculations of the various
coefficients.
Parameters
==========
nn : integer
Highest factorial to be computed.
Returns
=======
list of integers :
The list of precomputed factorials.
Examples
========
Calculate list of factorials::
sage: from sage.functions.wigner import _calc_factlist
sage: _calc_factlist(10)
[1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880, 3628800]
"""
if nn >= len(_Factlist):
for ii in range(len(_Factlist), int(nn + 1)):
_Factlist.append(_Factlist[ii - 1] * ii)
return _Factlist[:int(nn) + 1]
def wigner_3j(j_1, j_2, j_3, m_1, m_2, m_3):
r"""
Calculate the Wigner 3j symbol `\operatorname{Wigner3j}(j_1,j_2,j_3,m_1,m_2,m_3)`.
Parameters
==========
j_1, j_2, j_3, m_1, m_2, m_3 :
Integer or half integer.
Returns
=======
Rational number times the square root of a rational number.
Examples
========
>>> from sympy.physics.wigner import wigner_3j
>>> wigner_3j(2, 6, 4, 0, 0, 0)
sqrt(715)/143
>>> wigner_3j(2, 6, 4, 0, 0, 1)
0
It is an error to have arguments that are not integer or half
integer values::
sage: wigner_3j(2.1, 6, 4, 0, 0, 0)
Traceback (most recent call last):
...
ValueError: j values must be integer or half integer
sage: wigner_3j(2, 6, 4, 1, 0, -1.1)
Traceback (most recent call last):
...
ValueError: m values must be integer or half integer
Notes
=====
The Wigner 3j symbol obeys the following symmetry rules:
- invariant under any permutation of the columns (with the
exception of a sign change where `J:=j_1+j_2+j_3`):
.. math::
\begin{aligned}
\operatorname{Wigner3j}(j_1,j_2,j_3,m_1,m_2,m_3)
&=\operatorname{Wigner3j}(j_3,j_1,j_2,m_3,m_1,m_2) \\
&=\operatorname{Wigner3j}(j_2,j_3,j_1,m_2,m_3,m_1) \\
&=(-1)^J \operatorname{Wigner3j}(j_3,j_2,j_1,m_3,m_2,m_1) \\
&=(-1)^J \operatorname{Wigner3j}(j_1,j_3,j_2,m_1,m_3,m_2) \\
&=(-1)^J \operatorname{Wigner3j}(j_2,j_1,j_3,m_2,m_1,m_3)
\end{aligned}
- invariant under space inflection, i.e.
.. math::
\operatorname{Wigner3j}(j_1,j_2,j_3,m_1,m_2,m_3)
=(-1)^J \operatorname{Wigner3j}(j_1,j_2,j_3,-m_1,-m_2,-m_3)
- symmetric with respect to the 72 additional symmetries based on
the work by [Regge58]_
- zero for `j_1`, `j_2`, `j_3` not fulfilling triangle relation
- zero for `m_1 + m_2 + m_3 \neq 0`
- zero for violating any one of the conditions
`j_1 \ge |m_1|`, `j_2 \ge |m_2|`, `j_3 \ge |m_3|`
Algorithm
=========
This function uses the algorithm of [Edmonds74]_ to calculate the
value of the 3j symbol exactly. Note that the formula contains
alternating sums over large factorials and is therefore unsuitable
for finite precision arithmetic and only useful for a computer
algebra system [Rasch03]_.
Authors
=======
- Jens Rasch (2009-03-24): initial version
"""
if int(j_1 * 2) != j_1 * 2 or int(j_2 * 2) != j_2 * 2 or \
int(j_3 * 2) != j_3 * 2:
raise ValueError("j values must be integer or half integer")
if int(m_1 * 2) != m_1 * 2 or int(m_2 * 2) != m_2 * 2 or \
int(m_3 * 2) != m_3 * 2:
raise ValueError("m values must be integer or half integer")
if m_1 + m_2 + m_3 != 0:
return S.Zero
prefid = Integer((-1) ** int(j_1 - j_2 - m_3))
m_3 = -m_3
a1 = j_1 + j_2 - j_3
if a1 < 0:
return S.Zero
a2 = j_1 - j_2 + j_3
if a2 < 0:
return S.Zero
a3 = -j_1 + j_2 + j_3
if a3 < 0:
return S.Zero
if (abs(m_1) > j_1) or (abs(m_2) > j_2) or (abs(m_3) > j_3):
return S.Zero
maxfact = max(j_1 + j_2 + j_3 + 1, j_1 + abs(m_1), j_2 + abs(m_2),
j_3 + abs(m_3))
_calc_factlist(int(maxfact))
argsqrt = Integer(_Factlist[int(j_1 + j_2 - j_3)] *
_Factlist[int(j_1 - j_2 + j_3)] *
_Factlist[int(-j_1 + j_2 + j_3)] *
_Factlist[int(j_1 - m_1)] *
_Factlist[int(j_1 + m_1)] *
_Factlist[int(j_2 - m_2)] *
_Factlist[int(j_2 + m_2)] *
_Factlist[int(j_3 - m_3)] *
_Factlist[int(j_3 + m_3)]) / \
_Factlist[int(j_1 + j_2 + j_3 + 1)]
ressqrt = sqrt(argsqrt)
if ressqrt.is_complex or ressqrt.is_infinite:
ressqrt = ressqrt.as_real_imag()[0]
imin = max(-j_3 + j_1 + m_2, -j_3 + j_2 - m_1, 0)
imax = min(j_2 + m_2, j_1 - m_1, j_1 + j_2 - j_3)
sumres = 0
for ii in range(int(imin), int(imax) + 1):
den = _Factlist[ii] * \
_Factlist[int(ii + j_3 - j_1 - m_2)] * \
_Factlist[int(j_2 + m_2 - ii)] * \
_Factlist[int(j_1 - ii - m_1)] * \
_Factlist[int(ii + j_3 - j_2 + m_1)] * \
_Factlist[int(j_1 + j_2 - j_3 - ii)]
sumres = sumres + Integer((-1) ** ii) / den
res = ressqrt * sumres * prefid
return res
def clebsch_gordan(j_1, j_2, j_3, m_1, m_2, m_3):
r"""
Calculates the Clebsch-Gordan coefficient.
`\left\langle j_1 m_1 \; j_2 m_2 | j_3 m_3 \right\rangle`.
The reference for this function is [Edmonds74]_.
Parameters
==========
j_1, j_2, j_3, m_1, m_2, m_3 :
Integer or half integer.
Returns
=======
Rational number times the square root of a rational number.
Examples
========
>>> from sympy import S
>>> from sympy.physics.wigner import clebsch_gordan
>>> clebsch_gordan(S(3)/2, S(1)/2, 2, S(3)/2, S(1)/2, 2)
1
>>> clebsch_gordan(S(3)/2, S(1)/2, 1, S(3)/2, -S(1)/2, 1)
sqrt(3)/2
>>> clebsch_gordan(S(3)/2, S(1)/2, 1, -S(1)/2, S(1)/2, 0)
-sqrt(2)/2
Notes
=====
The Clebsch-Gordan coefficient will be evaluated via its relation
to Wigner 3j symbols:
.. math::
\left\langle j_1 m_1 \; j_2 m_2 | j_3 m_3 \right\rangle
=(-1)^{j_1-j_2+m_3} \sqrt{2j_3+1}
\operatorname{Wigner3j}(j_1,j_2,j_3,m_1,m_2,-m_3)
See also the documentation on Wigner 3j symbols which exhibit much
higher symmetry relations than the Clebsch-Gordan coefficient.
Authors
=======
- Jens Rasch (2009-03-24): initial version
"""
res = (-1) ** sympify(j_1 - j_2 + m_3) * sqrt(2 * j_3 + 1) * \
wigner_3j(j_1, j_2, j_3, m_1, m_2, -m_3)
return res
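# A quick consistency check (a hedged illustration, not part of the original
# module) of the relation to ``wigner_3j`` stated in the docstring above; here
# (-1)**(j_1 - j_2 + m_3) = (-1)**3 and sqrt(2*j_3 + 1) = sqrt(5):
#     >>> from sympy import S, sqrt
#     >>> from sympy.physics.wigner import clebsch_gordan, wigner_3j
#     >>> lhs = clebsch_gordan(S(3)/2, S(1)/2, 2, S(3)/2, S(1)/2, 2)
#     >>> rhs = (-1)**3 * sqrt(5) * wigner_3j(S(3)/2, S(1)/2, 2, S(3)/2, S(1)/2, -2)
#     >>> lhs == rhs
#     True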
def _big_delta_coeff(aa, bb, cc, prec=None):
r"""
Calculates the Delta coefficient of the 3 angular momenta for
Racah symbols. Also checks that the differences are of integer
value.
Parameters
==========
aa :
First angular momentum, integer or half integer.
bb :
Second angular momentum, integer or half integer.
cc :
Third angular momentum, integer or half integer.
prec :
Precision of the ``sqrt()`` calculation.
Returns
=======
double : Value of the Delta coefficient.
Examples
========
sage: from sage.functions.wigner import _big_delta_coeff
sage: _big_delta_coeff(1,1,1)
1/2*sqrt(1/6)
"""
if int(aa + bb - cc) != (aa + bb - cc):
raise ValueError("j values must be integer or half integer and fulfill the triangle relation")
if int(aa + cc - bb) != (aa + cc - bb):
raise ValueError("j values must be integer or half integer and fulfill the triangle relation")
if int(bb + cc - aa) != (bb + cc - aa):
raise ValueError("j values must be integer or half integer and fulfill the triangle relation")
if (aa + bb - cc) < 0:
return S.Zero
if (aa + cc - bb) < 0:
return S.Zero
if (bb + cc - aa) < 0:
return S.Zero
maxfact = max(aa + bb - cc, aa + cc - bb, bb + cc - aa, aa + bb + cc + 1)
_calc_factlist(maxfact)
argsqrt = Integer(_Factlist[int(aa + bb - cc)] *
_Factlist[int(aa + cc - bb)] *
_Factlist[int(bb + cc - aa)]) / \
Integer(_Factlist[int(aa + bb + cc + 1)])
ressqrt = sqrt(argsqrt)
if prec:
ressqrt = ressqrt.evalf(prec).as_real_imag()[0]
return ressqrt
def racah(aa, bb, cc, dd, ee, ff, prec=None):
r"""
Calculate the Racah symbol `W(a,b,c,d;e,f)`.
Parameters
==========
a, ..., f :
Integer or half integer.
prec :
Precision, default: ``None``. Providing a precision can
drastically speed up the calculation.
Returns
=======
Rational number times the square root of a rational number
(if ``prec=None``), or real number if a precision is given.
Examples
========
>>> from sympy.physics.wigner import racah
>>> racah(3,3,3,3,3,3)
-1/14
Notes
=====
The Racah symbol is related to the Wigner 6j symbol:
.. math::
\operatorname{Wigner6j}(j_1,j_2,j_3,j_4,j_5,j_6)
=(-1)^{j_1+j_2+j_4+j_5} W(j_1,j_2,j_5,j_4,j_3,j_6)
Please see the 6j symbol for its much richer symmetries and for
additional properties.
Algorithm
=========
This function uses the algorithm of [Edmonds74]_ to calculate the
value of the 6j symbol exactly. Note that the formula contains
alternating sums over large factorials and is therefore unsuitable
for finite precision arithmetic and only useful for a computer
algebra system [Rasch03]_.
Authors
=======
- Jens Rasch (2009-03-24): initial version
"""
prefac = _big_delta_coeff(aa, bb, ee, prec) * \
_big_delta_coeff(cc, dd, ee, prec) * \
_big_delta_coeff(aa, cc, ff, prec) * \
_big_delta_coeff(bb, dd, ff, prec)
if prefac == 0:
return S.Zero
imin = max(aa + bb + ee, cc + dd + ee, aa + cc + ff, bb + dd + ff)
imax = min(aa + bb + cc + dd, aa + dd + ee + ff, bb + cc + ee + ff)
maxfact = max(imax + 1, aa + bb + cc + dd, aa + dd + ee + ff,
bb + cc + ee + ff)
_calc_factlist(maxfact)
sumres = 0
for kk in range(int(imin), int(imax) + 1):
den = _Factlist[int(kk - aa - bb - ee)] * \
_Factlist[int(kk - cc - dd - ee)] * \
_Factlist[int(kk - aa - cc - ff)] * \
_Factlist[int(kk - bb - dd - ff)] * \
_Factlist[int(aa + bb + cc + dd - kk)] * \
_Factlist[int(aa + dd + ee + ff - kk)] * \
_Factlist[int(bb + cc + ee + ff - kk)]
sumres = sumres + Integer((-1) ** kk * _Factlist[kk + 1]) / den
res = prefac * sumres * (-1) ** int(aa + bb + cc + dd)
return res
def wigner_6j(j_1, j_2, j_3, j_4, j_5, j_6, prec=None):
r"""
Calculate the Wigner 6j symbol `\operatorname{Wigner6j}(j_1,j_2,j_3,j_4,j_5,j_6)`.
Parameters
==========
j_1, ..., j_6 :
Integer or half integer.
prec :
Precision, default: ``None``. Providing a precision can
drastically speed up the calculation.
Returns
=======
Rational number times the square root of a rational number
(if ``prec=None``), or real number if a precision is given.
Examples
========
>>> from sympy.physics.wigner import wigner_6j
>>> wigner_6j(3,3,3,3,3,3)
-1/14
>>> wigner_6j(5,5,5,5,5,5)
1/52
It is an error to have arguments that are not integer or half
integer values or do not fulfill the triangle relation::
sage: wigner_6j(2.5,2.5,2.5,2.5,2.5,2.5)
Traceback (most recent call last):
...
ValueError: j values must be integer or half integer and fulfill the triangle relation
sage: wigner_6j(0.5,0.5,1.1,0.5,0.5,1.1)
Traceback (most recent call last):
...
ValueError: j values must be integer or half integer and fulfill the triangle relation
Notes
=====
The Wigner 6j symbol is related to the Racah symbol but exhibits
more symmetries as detailed below.
.. math::
\operatorname{Wigner6j}(j_1,j_2,j_3,j_4,j_5,j_6)
=(-1)^{j_1+j_2+j_4+j_5} W(j_1,j_2,j_5,j_4,j_3,j_6)
The Wigner 6j symbol obeys the following symmetry rules:
- Wigner 6j symbols are left invariant under any permutation of
the columns:
.. math::
\begin{aligned}
\operatorname{Wigner6j}(j_1,j_2,j_3,j_4,j_5,j_6)
&=\operatorname{Wigner6j}(j_3,j_1,j_2,j_6,j_4,j_5) \\
&=\operatorname{Wigner6j}(j_2,j_3,j_1,j_5,j_6,j_4) \\
&=\operatorname{Wigner6j}(j_3,j_2,j_1,j_6,j_5,j_4) \\
&=\operatorname{Wigner6j}(j_1,j_3,j_2,j_4,j_6,j_5) \\
&=\operatorname{Wigner6j}(j_2,j_1,j_3,j_5,j_4,j_6)
\end{aligned}
- They are invariant under the exchange of the upper and lower
arguments in each of any two columns, i.e.
.. math::
\operatorname{Wigner6j}(j_1,j_2,j_3,j_4,j_5,j_6)
=\operatorname{Wigner6j}(j_1,j_5,j_6,j_4,j_2,j_3)
=\operatorname{Wigner6j}(j_4,j_2,j_6,j_1,j_5,j_3)
=\operatorname{Wigner6j}(j_4,j_5,j_3,j_1,j_2,j_6)
- additional 6 symmetries [Regge59]_ giving rise to 144 symmetries
in total
- only non-zero if any triple of `j`'s fulfill a triangle relation
Algorithm
=========
This function uses the algorithm of [Edmonds74]_ to calculate the
value of the 6j symbol exactly. Note that the formula contains
alternating sums over large factorials and is therefore unsuitable
for finite precision arithmetic and only useful for a computer
algebra system [Rasch03]_.
"""
res = (-1) ** int(j_1 + j_2 + j_4 + j_5) * \
racah(j_1, j_2, j_5, j_4, j_3, j_6, prec)
return res
def wigner_9j(j_1, j_2, j_3, j_4, j_5, j_6, j_7, j_8, j_9, prec=None):
r"""
Calculate the Wigner 9j symbol
`\operatorname{Wigner9j}(j_1,j_2,j_3,j_4,j_5,j_6,j_7,j_8,j_9)`.
Parameters
==========
j_1, ..., j_9 :
Integer or half integer.
    prec :
        Precision, default: ``None``. Providing a precision can
        drastically speed up the calculation.
Returns
=======
Rational number times the square root of a rational number
(if ``prec=None``), or real number if a precision is given.
Examples
========
>>> from sympy.physics.wigner import wigner_9j
>>> wigner_9j(1,1,1, 1,1,1, 1,1,0, prec=64) # ==1/18
0.05555555...
>>> wigner_9j(1/2,1/2,0, 1/2,3/2,1, 0,1,1, prec=64) # ==1/6
0.1666666...
It is an error to have arguments that are not integer or half
integer values or do not fulfill the triangle relation::
sage: wigner_9j(0.5,0.5,0.5, 0.5,0.5,0.5, 0.5,0.5,0.5,prec=64)
Traceback (most recent call last):
...
ValueError: j values must be integer or half integer and fulfill the triangle relation
sage: wigner_9j(1,1,1, 0.5,1,1.5, 0.5,1,2.5,prec=64)
Traceback (most recent call last):
...
ValueError: j values must be integer or half integer and fulfill the triangle relation
Algorithm
=========
This function uses the algorithm of [Edmonds74]_ to calculate the
value of the 3j symbol exactly. Note that the formula contains
alternating sums over large factorials and is therefore unsuitable
for finite precision arithmetic and only useful for a computer
algebra system [Rasch03]_.
"""
imax = int(min(j_1 + j_9, j_2 + j_6, j_4 + j_8) * 2)
imin = imax % 2
sumres = 0
for kk in range(imin, int(imax) + 1, 2):
sumres = sumres + (kk + 1) * \
racah(j_1, j_2, j_9, j_6, j_3, kk / 2, prec) * \
racah(j_4, j_6, j_8, j_2, j_5, kk / 2, prec) * \
racah(j_1, j_4, j_9, j_8, j_7, kk / 2, prec)
return sumres
def gaunt(l_1, l_2, l_3, m_1, m_2, m_3, prec=None):
r"""
Calculate the Gaunt coefficient.
Explanation
===========
The Gaunt coefficient is defined as the integral over three
spherical harmonics:
.. math::
\begin{aligned}
\operatorname{Gaunt}(l_1,l_2,l_3,m_1,m_2,m_3)
&=\int Y_{l_1,m_1}(\Omega)
Y_{l_2,m_2}(\Omega) Y_{l_3,m_3}(\Omega) \,d\Omega \\
&=\sqrt{\frac{(2l_1+1)(2l_2+1)(2l_3+1)}{4\pi}}
\operatorname{Wigner3j}(l_1,l_2,l_3,0,0,0)
\operatorname{Wigner3j}(l_1,l_2,l_3,m_1,m_2,m_3)
\end{aligned}
Parameters
==========
l_1, l_2, l_3, m_1, m_2, m_3 :
Integer.
    prec :
        Precision, default: ``None``. Providing a precision can
        drastically speed up the calculation.
Returns
=======
Rational number times the square root of a rational number
(if ``prec=None``), or real number if a precision is given.
Examples
========
>>> from sympy.physics.wigner import gaunt
>>> gaunt(1,0,1,1,0,-1)
-1/(2*sqrt(pi))
>>> gaunt(1000,1000,1200,9,3,-12).n(64)
0.00689500421922113448...
It is an error to use non-integer values for `l` and `m`::
sage: gaunt(1.2,0,1.2,0,0,0)
Traceback (most recent call last):
...
ValueError: l values must be integer
sage: gaunt(1,0,1,1.1,0,-1.1)
Traceback (most recent call last):
...
ValueError: m values must be integer
Notes
=====
The Gaunt coefficient obeys the following symmetry rules:
- invariant under any permutation of the columns
.. math::
\begin{aligned}
Y(l_1,l_2,l_3,m_1,m_2,m_3)
&=Y(l_3,l_1,l_2,m_3,m_1,m_2) \\
&=Y(l_2,l_3,l_1,m_2,m_3,m_1) \\
&=Y(l_3,l_2,l_1,m_3,m_2,m_1) \\
&=Y(l_1,l_3,l_2,m_1,m_3,m_2) \\
&=Y(l_2,l_1,l_3,m_2,m_1,m_3)
\end{aligned}
- invariant under space inflection, i.e.
.. math::
Y(l_1,l_2,l_3,m_1,m_2,m_3)
=Y(l_1,l_2,l_3,-m_1,-m_2,-m_3)
- symmetric with respect to the 72 Regge symmetries as inherited
for the `3j` symbols [Regge58]_
- zero for `l_1`, `l_2`, `l_3` not fulfilling triangle relation
- zero for violating any one of the conditions: `l_1 \ge |m_1|`,
`l_2 \ge |m_2|`, `l_3 \ge |m_3|`
- non-zero only for an even sum of the `l_i`, i.e.
`L = l_1 + l_2 + l_3 = 2n` for `n` in `\mathbb{N}`
Algorithms
==========
This function uses the algorithm of [Liberatodebrito82]_ to
calculate the value of the Gaunt coefficient exactly. Note that
the formula contains alternating sums over large factorials and is
therefore unsuitable for finite precision arithmetic and only
useful for a computer algebra system [Rasch03]_.
Authors
=======
Jens Rasch (2009-03-24): initial version for Sage.
"""
if int(l_1) != l_1 or int(l_2) != l_2 or int(l_3) != l_3:
raise ValueError("l values must be integer")
if int(m_1) != m_1 or int(m_2) != m_2 or int(m_3) != m_3:
raise ValueError("m values must be integer")
sumL = l_1 + l_2 + l_3
bigL = sumL // 2
a1 = l_1 + l_2 - l_3
if a1 < 0:
return S.Zero
a2 = l_1 - l_2 + l_3
if a2 < 0:
return S.Zero
a3 = -l_1 + l_2 + l_3
if a3 < 0:
return S.Zero
if sumL % 2:
return S.Zero
if (m_1 + m_2 + m_3) != 0:
return S.Zero
if (abs(m_1) > l_1) or (abs(m_2) > l_2) or (abs(m_3) > l_3):
return S.Zero
imin = max(-l_3 + l_1 + m_2, -l_3 + l_2 - m_1, 0)
imax = min(l_2 + m_2, l_1 - m_1, l_1 + l_2 - l_3)
maxfact = max(l_1 + l_2 + l_3 + 1, imax + 1)
_calc_factlist(maxfact)
argsqrt = (2 * l_1 + 1) * (2 * l_2 + 1) * (2 * l_3 + 1) * \
_Factlist[l_1 - m_1] * _Factlist[l_1 + m_1] * _Factlist[l_2 - m_2] * \
_Factlist[l_2 + m_2] * _Factlist[l_3 - m_3] * _Factlist[l_3 + m_3] / \
(4*pi)
ressqrt = sqrt(argsqrt)
prefac = Integer(_Factlist[bigL] * _Factlist[l_2 - l_1 + l_3] *
_Factlist[l_1 - l_2 + l_3] * _Factlist[l_1 + l_2 - l_3])/ \
_Factlist[2 * bigL + 1]/ \
(_Factlist[bigL - l_1] *
_Factlist[bigL - l_2] * _Factlist[bigL - l_3])
sumres = 0
for ii in range(int(imin), int(imax) + 1):
den = _Factlist[ii] * _Factlist[ii + l_3 - l_1 - m_2] * \
_Factlist[l_2 + m_2 - ii] * _Factlist[l_1 - ii - m_1] * \
_Factlist[ii + l_3 - l_2 + m_1] * _Factlist[l_1 + l_2 - l_3 - ii]
sumres = sumres + Integer((-1) ** ii) / den
res = ressqrt * prefac * sumres * Integer((-1) ** (bigL + l_3 + m_1 - m_2))
if prec is not None:
res = res.n(prec)
return res
class Wigner3j(Function):
def doit(self, **hints):
if all(obj.is_number for obj in self.args):
return wigner_3j(*self.args)
else:
return self
def dot_rot_grad_Ynm(j, p, l, m, theta, phi):
r"""
Returns dot product of rotational gradients of spherical harmonics.
Explanation
===========
This function returns the right hand side of the following expression:
.. math ::
\vec{R}Y{_j^{p}} \cdot \vec{R}Y{_l^{m}} = (-1)^{m+p}
\sum\limits_{k=|l-j|}^{l+j}Y{_k^{m+p}} * \alpha_{l,m,j,p,k} *
\frac{1}{2} (k^2-j^2-l^2+k-j-l)
Arguments
=========
j, p, l, m .... indices in spherical harmonics (expressions or integers)
theta, phi .... angle arguments in spherical harmonics
Example
=======
>>> from sympy import symbols
>>> from sympy.physics.wigner import dot_rot_grad_Ynm
>>> theta, phi = symbols("theta phi")
>>> dot_rot_grad_Ynm(3, 2, 2, 0, theta, phi).doit()
3*sqrt(55)*Ynm(5, 2, theta, phi)/(11*sqrt(pi))
"""
j = sympify(j)
p = sympify(p)
l = sympify(l)
m = sympify(m)
theta = sympify(theta)
phi = sympify(phi)
k = Dummy("k")
def alpha(l,m,j,p,k):
return sqrt((2*l+1)*(2*j+1)*(2*k+1)/(4*pi)) * \
Wigner3j(j, l, k, S.Zero, S.Zero, S.Zero) * \
Wigner3j(j, l, k, p, m, -m-p)
return (S.NegativeOne)**(m+p) * Sum(Ynm(k, m+p, theta, phi) * alpha(l,m,j,p,k) / 2 \
*(k**2-j**2-l**2+k-j-l), (k, abs(l-j), l+j))
def wigner_d_small(J, beta):
"""Return the small Wigner d matrix for angular momentum J.
Explanation
===========
J : An integer, half-integer, or SymPy symbol for the total angular
momentum of the angular momentum space being rotated.
beta : A real number representing the Euler angle of rotation about
the so-called line of nodes. See [Edmonds74]_.
Returns
=======
    A matrix representing the corresponding Euler angle rotation (in the basis
of eigenvectors of `J_z`).
.. math ::
\\mathcal{d}_{\\beta} = \\exp\\big( \\frac{i\\beta}{\\hbar} J_y\\big)
The components are calculated using the general form [Edmonds74]_,
equation 4.1.15.
Examples
========
>>> from sympy import Integer, symbols, pi, pprint
>>> from sympy.physics.wigner import wigner_d_small
>>> half = 1/Integer(2)
>>> beta = symbols("beta", real=True)
>>> pprint(wigner_d_small(half, beta), use_unicode=True)
⎡ ⎛β⎞ ⎛β⎞⎤
⎢cos⎜─⎟ sin⎜─⎟⎥
⎢ ⎝2⎠ ⎝2⎠⎥
⎢ ⎥
⎢ ⎛β⎞ ⎛β⎞⎥
⎢-sin⎜─⎟ cos⎜─⎟⎥
⎣ ⎝2⎠ ⎝2⎠⎦
>>> pprint(wigner_d_small(2*half, beta), use_unicode=True)
⎡ 2⎛β⎞ ⎛β⎞ ⎛β⎞ 2⎛β⎞ ⎤
⎢ cos ⎜─⎟ √2⋅sin⎜─⎟⋅cos⎜─⎟ sin ⎜─⎟ ⎥
⎢ ⎝2⎠ ⎝2⎠ ⎝2⎠ ⎝2⎠ ⎥
⎢ ⎥
⎢ ⎛β⎞ ⎛β⎞ 2⎛β⎞ 2⎛β⎞ ⎛β⎞ ⎛β⎞⎥
⎢-√2⋅sin⎜─⎟⋅cos⎜─⎟ - sin ⎜─⎟ + cos ⎜─⎟ √2⋅sin⎜─⎟⋅cos⎜─⎟⎥
⎢ ⎝2⎠ ⎝2⎠ ⎝2⎠ ⎝2⎠ ⎝2⎠ ⎝2⎠⎥
⎢ ⎥
⎢ 2⎛β⎞ ⎛β⎞ ⎛β⎞ 2⎛β⎞ ⎥
⎢ sin ⎜─⎟ -√2⋅sin⎜─⎟⋅cos⎜─⎟ cos ⎜─⎟ ⎥
⎣ ⎝2⎠ ⎝2⎠ ⎝2⎠ ⎝2⎠ ⎦
From table 4 in [Edmonds74]_
>>> pprint(wigner_d_small(half, beta).subs({beta:pi/2}), use_unicode=True)
⎡ √2 √2⎤
⎢ ── ──⎥
⎢ 2 2 ⎥
⎢ ⎥
⎢-√2 √2⎥
⎢──── ──⎥
⎣ 2 2 ⎦
>>> pprint(wigner_d_small(2*half, beta).subs({beta:pi/2}),
... use_unicode=True)
⎡ √2 ⎤
⎢1/2 ── 1/2⎥
⎢ 2 ⎥
⎢ ⎥
⎢-√2 √2 ⎥
⎢──── 0 ── ⎥
⎢ 2 2 ⎥
⎢ ⎥
⎢ -√2 ⎥
⎢1/2 ──── 1/2⎥
⎣ 2 ⎦
>>> pprint(wigner_d_small(3*half, beta).subs({beta:pi/2}),
... use_unicode=True)
⎡ √2 √6 √6 √2⎤
⎢ ── ── ── ──⎥
⎢ 4 4 4 4 ⎥
⎢ ⎥
⎢-√6 -√2 √2 √6⎥
⎢──── ──── ── ──⎥
⎢ 4 4 4 4 ⎥
⎢ ⎥
⎢ √6 -√2 -√2 √6⎥
⎢ ── ──── ──── ──⎥
⎢ 4 4 4 4 ⎥
⎢ ⎥
⎢-√2 √6 -√6 √2⎥
⎢──── ── ──── ──⎥
⎣ 4 4 4 4 ⎦
>>> pprint(wigner_d_small(4*half, beta).subs({beta:pi/2}),
... use_unicode=True)
⎡ √6 ⎤
⎢1/4 1/2 ── 1/2 1/4⎥
⎢ 4 ⎥
⎢ ⎥
⎢-1/2 -1/2 0 1/2 1/2⎥
⎢ ⎥
⎢ √6 √6 ⎥
⎢ ── 0 -1/2 0 ── ⎥
⎢ 4 4 ⎥
⎢ ⎥
⎢-1/2 1/2 0 -1/2 1/2⎥
⎢ ⎥
⎢ √6 ⎥
⎢1/4 -1/2 ── -1/2 1/4⎥
⎣ 4 ⎦
"""
M = [J-i for i in range(2*J+1)]
d = zeros(2*J+1)
for i, Mi in enumerate(M):
for j, Mj in enumerate(M):
# We get the maximum and minimum value of sigma.
sigmamax = max([-Mi-Mj, J-Mj])
sigmamin = min([0, J-Mi])
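            # Added note: as written, this range may include values of s outside
            # the physically allowed window, but those extra terms vanish
            # because one of the binomial coefficients below evaluates to zero.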
dij = sqrt(factorial(J+Mi)*factorial(J-Mi) /
factorial(J+Mj)/factorial(J-Mj))
terms = [(-1)**(J-Mi-s) *
binomial(J+Mj, J-Mi-s) *
binomial(J-Mj, s) *
cos(beta/2)**(2*s+Mi+Mj) *
sin(beta/2)**(2*J-2*s-Mj-Mi)
for s in range(sigmamin, sigmamax+1)]
d[i, j] = dij*Add(*terms)
return ImmutableMatrix(d)
def wigner_d(J, alpha, beta, gamma):
"""Return the Wigner D matrix for angular momentum J.
Explanation
===========
J :
An integer, half-integer, or SymPy symbol for the total angular
momentum of the angular momentum space being rotated.
    alpha, beta, gamma - Real numbers representing the Euler angles of
        rotation about the so-called vertical, line of nodes, and
figure axes. See [Edmonds74]_.
Returns
=======
    A matrix representing the corresponding Euler angle rotation (in the basis
of eigenvectors of `J_z`).
.. math ::
\\mathcal{D}_{\\alpha \\beta \\gamma} =
\\exp\\big( \\frac{i\\alpha}{\\hbar} J_z\\big)
\\exp\\big( \\frac{i\\beta}{\\hbar} J_y\\big)
\\exp\\big( \\frac{i\\gamma}{\\hbar} J_z\\big)
The components are calculated using the general form [Edmonds74]_,
equation 4.1.12.
Examples
========
The simplest possible example:
>>> from sympy.physics.wigner import wigner_d
>>> from sympy import Integer, symbols, pprint
>>> half = 1/Integer(2)
>>> alpha, beta, gamma = symbols("alpha, beta, gamma", real=True)
>>> pprint(wigner_d(half, alpha, beta, gamma), use_unicode=True)
⎡ ⅈ⋅α ⅈ⋅γ ⅈ⋅α -ⅈ⋅γ ⎤
⎢ ─── ─── ─── ───── ⎥
⎢ 2 2 ⎛β⎞ 2 2 ⎛β⎞ ⎥
⎢ ℯ ⋅ℯ ⋅cos⎜─⎟ ℯ ⋅ℯ ⋅sin⎜─⎟ ⎥
⎢ ⎝2⎠ ⎝2⎠ ⎥
⎢ ⎥
⎢ -ⅈ⋅α ⅈ⋅γ -ⅈ⋅α -ⅈ⋅γ ⎥
⎢ ───── ─── ───── ───── ⎥
⎢ 2 2 ⎛β⎞ 2 2 ⎛β⎞⎥
⎢-ℯ ⋅ℯ ⋅sin⎜─⎟ ℯ ⋅ℯ ⋅cos⎜─⎟⎥
⎣ ⎝2⎠ ⎝2⎠⎦
"""
d = wigner_d_small(J, beta)
M = [J-i for i in range(2*J+1)]
D = [[exp(I*Mi*alpha)*d[i, j]*exp(I*Mj*gamma)
for j, Mj in enumerate(M)] for i, Mi in enumerate(M)]
return ImmutableMatrix(D)
|
5cf6347bf36b4ce3905a69ba9bb308c3c1053f206a5fb869ef2a5c58b0afc573 | """
This module implements Pauli algebra by subclassing Symbol. Only algebraic
properties of Pauli matrices are used (we do not use the Matrix class).
See the documentation to the class Pauli for examples.
References
==========
.. [1] https://en.wikipedia.org/wiki/Pauli_matrices
"""
from sympy.core.add import Add
from sympy.core.mul import Mul
from sympy.core.numbers import I
from sympy.core.power import Pow
from sympy.core.symbol import Symbol
from sympy.physics.quantum import TensorProduct
__all__ = ['evaluate_pauli_product']
def delta(i, j):
"""
Returns 1 if ``i == j``, else 0.
This is used in the multiplication of Pauli matrices.
Examples
========
>>> from sympy.physics.paulialgebra import delta
>>> delta(1, 1)
1
>>> delta(2, 3)
0
"""
if i == j:
return 1
else:
return 0
def epsilon(i, j, k):
"""
    Return 1 if ``i``,``j``,``k`` is equal to (1,2,3), (2,3,1), or (3,1,2);
-1 if ``i``,``j``,``k`` is equal to (1,3,2), (3,2,1), or (2,1,3);
else return 0.
This is used in the multiplication of Pauli matrices.
Examples
========
>>> from sympy.physics.paulialgebra import epsilon
>>> epsilon(1, 2, 3)
1
>>> epsilon(1, 3, 2)
-1
"""
if (i, j, k) in ((1, 2, 3), (2, 3, 1), (3, 1, 2)):
return 1
elif (i, j, k) in ((1, 3, 2), (3, 2, 1), (2, 1, 3)):
return -1
else:
return 0
class Pauli(Symbol):
"""
The class representing algebraic properties of Pauli matrices.
Explanation
===========
The symbol used to display the Pauli matrices can be changed with an
optional parameter ``label="sigma"``. Pauli matrices with different
``label`` attributes cannot multiply together.
    If a symbol or number has to be multiplied from the left with a Pauli
    matrix, use parentheses to separate the Pauli product from the symbolic
    multiplication (for example: ``2*I*(Pauli(3)*Pauli(2))``).
    Another option is to use the ``evaluate_pauli_product`` function to
    evaluate the product of Pauli matrices and other symbols (with
    commutative multiplication rules).
See Also
========
evaluate_pauli_product
Examples
========
>>> from sympy.physics.paulialgebra import Pauli
>>> Pauli(1)
sigma1
>>> Pauli(1)*Pauli(2)
I*sigma3
>>> Pauli(1)*Pauli(1)
1
>>> Pauli(3)**4
1
>>> Pauli(1)*Pauli(2)*Pauli(3)
I
>>> from sympy.physics.paulialgebra import Pauli
>>> Pauli(1, label="tau")
tau1
>>> Pauli(1)*Pauli(2, label="tau")
sigma1*tau2
>>> Pauli(1, label="tau")*Pauli(2, label="tau")
I*tau3
>>> from sympy import I
>>> I*(Pauli(2)*Pauli(3))
-sigma1
>>> from sympy.physics.paulialgebra import evaluate_pauli_product
>>> f = I*Pauli(2)*Pauli(3)
>>> f
I*sigma2*sigma3
>>> evaluate_pauli_product(f)
-sigma1
"""
__slots__ = ("i", "label")
def __new__(cls, i, label="sigma"):
        if i not in [1, 2, 3]:
raise IndexError("Invalid Pauli index")
obj = Symbol.__new__(cls, "%s%d" %(label,i), commutative=False, hermitian=True)
obj.i = i
obj.label = label
return obj
def __getnewargs_ex__(self):
return (self.i, self.label), {}
def _hashable_content(self):
return (self.i, self.label)
    # FIXME: does not work for -I*Pauli(2)*Pauli(3)
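    # The product below uses the Pauli algebra identity
    #   sigma_j * sigma_k = delta(j, k) + I*sum_l epsilon(j, k, l)*sigma_l
    # for matrices sharing the same label; mixed labels fall back to
    # Symbol.__mul__ and stay unevaluated.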
def __mul__(self, other):
if isinstance(other, Pauli):
j = self.i
k = other.i
jlab = self.label
klab = other.label
if jlab == klab:
return delta(j, k) \
+ I*epsilon(j, k, 1)*Pauli(1,jlab) \
+ I*epsilon(j, k, 2)*Pauli(2,jlab) \
+ I*epsilon(j, k, 3)*Pauli(3,jlab)
return super().__mul__(other)
def _eval_power(b, e):
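        # sigma_i**2 == 1, so for a positive integer exponent only its parity matters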
if e.is_Integer and e.is_positive:
return super().__pow__(int(e) % 2)
def evaluate_pauli_product(arg):
    '''Helper function to evaluate products of Pauli matrices
    with symbolic objects.
Parameters
==========
    arg: symbolic expression that contains Pauli matrices
Examples
========
>>> from sympy.physics.paulialgebra import Pauli, evaluate_pauli_product
>>> from sympy import I
>>> evaluate_pauli_product(I*Pauli(1)*Pauli(2))
-sigma3
>>> from sympy.abc import x
>>> evaluate_pauli_product(x**2*Pauli(2)*Pauli(1))
-I*x**2*sigma3
'''
start = arg
end = arg
if isinstance(arg, Pow) and isinstance(arg.args[0], Pauli):
if arg.args[1].is_odd:
return arg.args[0]
else:
return 1
if isinstance(arg, Add):
return Add(*[evaluate_pauli_product(part) for part in arg.args])
if isinstance(arg, TensorProduct):
return TensorProduct(*[evaluate_pauli_product(part) for part in arg.args])
elif not(isinstance(arg, Mul)):
return arg
while not start == end or start == arg and end == arg:
start = end
tmp = start.as_coeff_mul()
sigma_product = 1
com_product = 1
keeper = 1
for el in tmp[1]:
if isinstance(el, Pauli):
sigma_product *= el
elif not el.is_commutative:
if isinstance(el, Pow) and isinstance(el.args[0], Pauli):
if el.args[1].is_odd:
sigma_product *= el.args[0]
elif isinstance(el, TensorProduct):
keeper = keeper*sigma_product*\
TensorProduct(
*[evaluate_pauli_product(part) for part in el.args]
)
sigma_product = 1
else:
keeper = keeper*sigma_product*el
sigma_product = 1
else:
com_product *= el
end = tmp[0]*keeper*sigma_product*com_product
if end == arg: break
return end
|
b424fc31cd4095c668fec8dff387dd7654623f0193e965b67e2bbb63cbda691a | from sympy.core.numbers import Float
from sympy.core.singleton import S
from sympy.functions.combinatorial.factorials import factorial
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.special.polynomials import assoc_laguerre
from sympy.functions.special.spherical_harmonics import Ynm
def R_nl(n, l, r, Z=1):
"""
Returns the Hydrogen radial wavefunction R_{nl}.
Parameters
==========
n : integer
Principal Quantum Number which is
an integer with possible values as 1, 2, 3, 4,...
l : integer
``l`` is the Angular Momentum Quantum Number with
values ranging from 0 to ``n-1``.
r :
Radial coordinate.
Z :
Atomic number (1 for Hydrogen, 2 for Helium, ...)
Everything is in Hartree atomic units.
Examples
========
>>> from sympy.physics.hydrogen import R_nl
>>> from sympy.abc import r, Z
>>> R_nl(1, 0, r, Z)
2*sqrt(Z**3)*exp(-Z*r)
>>> R_nl(2, 0, r, Z)
sqrt(2)*(-Z*r + 2)*sqrt(Z**3)*exp(-Z*r/2)/4
>>> R_nl(2, 1, r, Z)
sqrt(6)*Z*r*sqrt(Z**3)*exp(-Z*r/2)/12
For Hydrogen atom, you can just use the default value of Z=1:
>>> R_nl(1, 0, r)
2*exp(-r)
>>> R_nl(2, 0, r)
sqrt(2)*(2 - r)*exp(-r/2)/4
>>> R_nl(3, 0, r)
2*sqrt(3)*(2*r**2/9 - 2*r + 3)*exp(-r/3)/27
For Silver atom, you would use Z=47:
>>> R_nl(1, 0, r, Z=47)
94*sqrt(47)*exp(-47*r)
>>> R_nl(2, 0, r, Z=47)
47*sqrt(94)*(2 - 47*r)*exp(-47*r/2)/4
>>> R_nl(3, 0, r, Z=47)
94*sqrt(141)*(4418*r**2/9 - 94*r + 3)*exp(-47*r/3)/27
The normalization of the radial wavefunction is:
>>> from sympy import integrate, oo
>>> integrate(R_nl(1, 0, r)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 0, r)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 1, r)**2 * r**2, (r, 0, oo))
1
It holds for any atomic number:
>>> integrate(R_nl(1, 0, r, Z=2)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 0, r, Z=3)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 1, r, Z=4)**2 * r**2, (r, 0, oo))
1
"""
# sympify arguments
n, l, r, Z = map(S, [n, l, r, Z])
# radial quantum number
n_r = n - l - 1
# rescaled "r"
a = 1/Z # Bohr radius
r0 = 2 * r / (n * a)
# normalization coefficient
C = sqrt((S(2)/(n*a))**3 * factorial(n_r) / (2*n*factorial(n + l)))
# This is an equivalent normalization coefficient, that can be found in
    # some books. Both coefficients seem to be equally fast:
# C = S(2)/n**2 * sqrt(1/a**3 * factorial(n_r) / (factorial(n+l)))
return C * r0**l * assoc_laguerre(n_r, 2*l + 1, r0).expand() * exp(-r0/2)
def Psi_nlm(n, l, m, r, phi, theta, Z=1):
"""
Returns the Hydrogen wave function psi_{nlm}. It's the product of
the radial wavefunction R_{nl} and the spherical harmonic Y_{l}^{m}.
Parameters
==========
n : integer
Principal Quantum Number which is
an integer with possible values as 1, 2, 3, 4,...
l : integer
``l`` is the Angular Momentum Quantum Number with
values ranging from 0 to ``n-1``.
m : integer
``m`` is the Magnetic Quantum Number with values
ranging from ``-l`` to ``l``.
r :
radial coordinate
phi :
azimuthal angle
theta :
polar angle
Z :
atomic number (1 for Hydrogen, 2 for Helium, ...)
Everything is in Hartree atomic units.
Examples
========
>>> from sympy.physics.hydrogen import Psi_nlm
>>> from sympy import Symbol
>>> r=Symbol("r", real=True, positive=True)
>>> phi=Symbol("phi", real=True)
>>> theta=Symbol("theta", real=True)
>>> Z=Symbol("Z", positive=True, integer=True, nonzero=True)
>>> Psi_nlm(1,0,0,r,phi,theta,Z)
Z**(3/2)*exp(-Z*r)/sqrt(pi)
>>> Psi_nlm(2,1,1,r,phi,theta,Z)
-Z**(5/2)*r*exp(I*phi)*exp(-Z*r/2)*sin(theta)/(8*sqrt(pi))
Integrating the absolute square of a hydrogen wavefunction psi_{nlm}
    over the whole space leads to 1.
The normalization of the hydrogen wavefunctions Psi_nlm is:
>>> from sympy import integrate, conjugate, pi, oo, sin
>>> wf=Psi_nlm(2,1,1,r,phi,theta,Z)
>>> abs_sqrd=wf*conjugate(wf)
>>> jacobi=r**2*sin(theta)
>>> integrate(abs_sqrd*jacobi, (r,0,oo), (phi,0,2*pi), (theta,0,pi))
1
"""
# sympify arguments
n, l, m, r, phi, theta, Z = map(S, [n, l, m, r, phi, theta, Z])
# check if values for n,l,m make physically sense
if n.is_integer and n < 1:
raise ValueError("'n' must be positive integer")
if l.is_integer and not (n > l):
raise ValueError("'n' must be greater than 'l'")
if m.is_integer and not (abs(m) <= l):
raise ValueError("|'m'| must be less or equal 'l'")
# return the hydrogen wave function
return R_nl(n, l, r, Z)*Ynm(l, m, theta, phi).expand(func=True)
def E_nl(n, Z=1):
"""
Returns the energy of the state (n, l) in Hartree atomic units.
The energy doesn't depend on "l".
Parameters
==========
n : integer
Principal Quantum Number which is
an integer with possible values as 1, 2, 3, 4,...
Z :
Atomic number (1 for Hydrogen, 2 for Helium, ...)
Examples
========
>>> from sympy.physics.hydrogen import E_nl
>>> from sympy.abc import n, Z
>>> E_nl(n, Z)
-Z**2/(2*n**2)
>>> E_nl(1)
-1/2
>>> E_nl(2)
-1/8
>>> E_nl(3)
-1/18
>>> E_nl(3, 47)
-2209/18
"""
n, Z = S(n), S(Z)
if n.is_integer and (n < 1):
raise ValueError("'n' must be positive integer")
return -Z**2/(2*n**2)
def E_nl_dirac(n, l, spin_up=True, Z=1, c=Float("137.035999037")):
"""
Returns the relativistic energy of the state (n, l, spin) in Hartree atomic
units.
The energy is calculated from the Dirac equation. The rest mass energy is
*not* included.
Parameters
==========
n : integer
Principal Quantum Number which is
an integer with possible values as 1, 2, 3, 4,...
l : integer
``l`` is the Angular Momentum Quantum Number with
values ranging from 0 to ``n-1``.
spin_up :
True if the electron spin is up (default), otherwise down
Z :
Atomic number (1 for Hydrogen, 2 for Helium, ...)
c :
Speed of light in atomic units. Default value is 137.035999037,
taken from http://arxiv.org/abs/1012.3627
Examples
========
>>> from sympy.physics.hydrogen import E_nl_dirac
>>> E_nl_dirac(1, 0)
-0.500006656595360
>>> E_nl_dirac(2, 0)
-0.125002080189006
>>> E_nl_dirac(2, 1)
-0.125000416028342
>>> E_nl_dirac(2, 1, False)
-0.125002080189006
>>> E_nl_dirac(3, 0)
-0.0555562951740285
>>> E_nl_dirac(3, 1)
-0.0555558020932949
>>> E_nl_dirac(3, 1, False)
-0.0555562951740285
>>> E_nl_dirac(3, 2)
-0.0555556377366884
>>> E_nl_dirac(3, 2, False)
-0.0555558020932949
"""
n, l, Z, c = map(S, [n, l, Z, c])
if not (l >= 0):
raise ValueError("'l' must be positive or zero")
if not (n > l):
raise ValueError("'n' must be greater than 'l'")
if (l == 0 and spin_up is False):
raise ValueError("Spin must be up for l==0.")
# skappa is sign*kappa, where sign contains the correct sign
if spin_up:
skappa = -l - 1
else:
skappa = -l
beta = sqrt(skappa**2 - Z**2/c**2)
return c**2/sqrt(1 + Z**2/(n + skappa + beta)**2/c**2) - c**2
|
d91d1368b8dba37f66cfffeb8b1de93ab2a07eb6c0dcbfd5a96432f55b760f08 | """Known matrices related to physics"""
from sympy.core.numbers import I
from sympy.matrices.dense import MutableDenseMatrix as Matrix
from sympy.utilities.decorator import deprecated
def msigma(i):
r"""Returns a Pauli matrix `\sigma_i` with ``i=1,2,3``.
References
==========
.. [1] https://en.wikipedia.org/wiki/Pauli_matrices
Examples
========
>>> from sympy.physics.matrices import msigma
>>> msigma(1)
Matrix([
[0, 1],
[1, 0]])
"""
if i == 1:
mat = (
(0, 1),
(1, 0)
)
elif i == 2:
mat = (
(0, -I),
(I, 0)
)
elif i == 3:
mat = (
(1, 0),
(0, -1)
)
else:
raise IndexError("Invalid Pauli index")
return Matrix(mat)
def pat_matrix(m, dx, dy, dz):
"""Returns the Parallel Axis Theorem matrix to translate the inertia
matrix a distance of `(dx, dy, dz)` for a body of mass m.
Examples
========
To translate a body having a mass of 2 units a distance of 1 unit along
the `x`-axis we get:
>>> from sympy.physics.matrices import pat_matrix
>>> pat_matrix(2, 1, 0, 0)
Matrix([
[0, 0, 0],
[0, 2, 0],
[0, 0, 2]])
"""
dxdy = -dx*dy
dydz = -dy*dz
dzdx = -dz*dx
dxdx = dx**2
dydy = dy**2
dzdz = dz**2
mat = ((dydy + dzdz, dxdy, dzdx),
(dxdy, dxdx + dzdz, dydz),
(dzdx, dydz, dydy + dxdx))
return m*Matrix(mat)
def mgamma(mu, lower=False):
r"""Returns a Dirac gamma matrix `\gamma^\mu` in the standard
(Dirac) representation.
Explanation
===========
    If you want `\gamma_\mu`, use ``mgamma(mu, True)``.
We use a convention:
`\gamma^5 = i \cdot \gamma^0 \cdot \gamma^1 \cdot \gamma^2 \cdot \gamma^3`
`\gamma_5 = i \cdot \gamma_0 \cdot \gamma_1 \cdot \gamma_2 \cdot \gamma_3 = - \gamma^5`
References
==========
.. [1] https://en.wikipedia.org/wiki/Gamma_matrices
Examples
========
>>> from sympy.physics.matrices import mgamma
>>> mgamma(1)
Matrix([
[ 0, 0, 0, 1],
[ 0, 0, 1, 0],
[ 0, -1, 0, 0],
[-1, 0, 0, 0]])
"""
    if mu not in (0, 1, 2, 3, 5):
raise IndexError("Invalid Dirac index")
if mu == 0:
mat = (
(1, 0, 0, 0),
(0, 1, 0, 0),
(0, 0, -1, 0),
(0, 0, 0, -1)
)
elif mu == 1:
mat = (
(0, 0, 0, 1),
(0, 0, 1, 0),
(0, -1, 0, 0),
(-1, 0, 0, 0)
)
elif mu == 2:
mat = (
(0, 0, 0, -I),
(0, 0, I, 0),
(0, I, 0, 0),
(-I, 0, 0, 0)
)
elif mu == 3:
mat = (
(0, 0, 1, 0),
(0, 0, 0, -1),
(-1, 0, 0, 0),
(0, 1, 0, 0)
)
elif mu == 5:
mat = (
(0, 0, 1, 0),
(0, 0, 0, 1),
(1, 0, 0, 0),
(0, 1, 0, 0)
)
m = Matrix(mat)
if lower:
if mu in (1, 2, 3, 5):
m = -m
return m
# Minkowski tensor using the (+, -, -, -) metric convention used in Quantum
# Field Theory
minkowski_tensor = Matrix( (
(1, 0, 0, 0),
(0, -1, 0, 0),
(0, 0, -1, 0),
(0, 0, 0, -1)
))
@deprecated(issue=20246, useinstead="DFT(n).as_mutable(), DFT(n), DFT(n).as_explicit()",
deprecated_since_version="1.9")
def mdft(n):
r"""
Deprecated. Use DFT from sympy.matrices.expressions.fourier instead.
To get identical behavior to ``mdft(n)``, use ``DFT(n).as_mutable()``.
"""
from sympy.matrices.expressions.fourier import DFT
return DFT(n).as_mutable()
|
7d2b5727d072d444e2d573f46513b2168c6ee5f58c163a4dae40030151e45a49 | """
Second quantization operators and states for bosons.
This follows the formulation of Fetter and Walecka, "Quantum Theory
of Many-Particle Systems."
"""
from collections import defaultdict
from sympy.core.add import Add
from sympy.core.basic import Basic
from sympy.core.cache import cacheit
from sympy.core.containers import Tuple
from sympy.core.expr import Expr
from sympy.core.function import Function
from sympy.core.mul import Mul
from sympy.core.numbers import I
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.core.sorting import default_sort_key
from sympy.core.symbol import Dummy, Symbol
from sympy.core.sympify import sympify
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.matrices.dense import zeros
from sympy.printing.str import StrPrinter
from sympy.utilities.iterables import has_dups
__all__ = [
'Dagger',
'KroneckerDelta',
'BosonicOperator',
'AnnihilateBoson',
'CreateBoson',
'AnnihilateFermion',
'CreateFermion',
'FockState',
'FockStateBra',
'FockStateKet',
'FockStateBosonKet',
'FockStateBosonBra',
'FockStateFermionKet',
'FockStateFermionBra',
'BBra',
'BKet',
'FBra',
'FKet',
'F',
'Fd',
'B',
'Bd',
'apply_operators',
'InnerProduct',
'BosonicBasis',
'VarBosonicBasis',
'FixedBosonicBasis',
'Commutator',
'matrix_rep',
'contraction',
'wicks',
'NO',
'evaluate_deltas',
'AntiSymmetricTensor',
'substitute_dummies',
'PermutationOperator',
'simplify_index_permutations',
]
class SecondQuantizationError(Exception):
pass
class AppliesOnlyToSymbolicIndex(SecondQuantizationError):
pass
class ContractionAppliesOnlyToFermions(SecondQuantizationError):
pass
class ViolationOfPauliPrinciple(SecondQuantizationError):
pass
class SubstitutionOfAmbigousOperatorFailed(SecondQuantizationError):
pass
class WicksTheoremDoesNotApply(SecondQuantizationError):
pass
class Dagger(Expr):
"""
Hermitian conjugate of creation/annihilation operators.
Examples
========
>>> from sympy import I
>>> from sympy.physics.secondquant import Dagger, B, Bd
>>> Dagger(2*I)
-2*I
>>> Dagger(B(0))
CreateBoson(0)
>>> Dagger(Bd(0))
AnnihilateBoson(0)
"""
def __new__(cls, arg):
arg = sympify(arg)
r = cls.eval(arg)
if isinstance(r, Basic):
return r
obj = Basic.__new__(cls, arg)
return obj
@classmethod
def eval(cls, arg):
"""
Evaluates the Dagger instance.
Examples
========
>>> from sympy import I
>>> from sympy.physics.secondquant import Dagger, B, Bd
>>> Dagger(2*I)
-2*I
>>> Dagger(B(0))
CreateBoson(0)
>>> Dagger(Bd(0))
AnnihilateBoson(0)
The eval() method is called automatically.
"""
dagger = getattr(arg, '_dagger_', None)
if dagger is not None:
return dagger()
if isinstance(arg, Basic):
if arg.is_Add:
return Add(*tuple(map(Dagger, arg.args)))
if arg.is_Mul:
return Mul(*tuple(map(Dagger, reversed(arg.args))))
if arg.is_Number:
return arg
if arg.is_Pow:
return Pow(Dagger(arg.args[0]), arg.args[1])
if arg == I:
return -arg
else:
return None
def _dagger_(self):
return self.args[0]
class TensorSymbol(Expr):
is_commutative = True
class AntiSymmetricTensor(TensorSymbol):
"""Stores upper and lower indices in separate Tuple's.
Each group of indices is assumed to be antisymmetric.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import AntiSymmetricTensor
>>> i, j = symbols('i j', below_fermi=True)
>>> a, b = symbols('a b', above_fermi=True)
>>> AntiSymmetricTensor('v', (a, i), (b, j))
AntiSymmetricTensor(v, (a, i), (b, j))
>>> AntiSymmetricTensor('v', (i, a), (b, j))
-AntiSymmetricTensor(v, (a, i), (b, j))
As you can see, the indices are automatically sorted to a canonical form.
"""
def __new__(cls, symbol, upper, lower):
try:
upper, signu = _sort_anticommuting_fermions(
upper, key=cls._sortkey)
lower, signl = _sort_anticommuting_fermions(
lower, key=cls._sortkey)
except ViolationOfPauliPrinciple:
return S.Zero
symbol = sympify(symbol)
upper = Tuple(*upper)
lower = Tuple(*lower)
if (signu + signl) % 2:
return -TensorSymbol.__new__(cls, symbol, upper, lower)
else:
return TensorSymbol.__new__(cls, symbol, upper, lower)
@classmethod
def _sortkey(cls, index):
"""Key for sorting of indices.
particle < hole < general
FIXME: This is a bottle-neck, can we do it faster?
"""
h = hash(index)
label = str(index)
if isinstance(index, Dummy):
if index.assumptions0.get('above_fermi'):
return (20, label, h)
elif index.assumptions0.get('below_fermi'):
return (21, label, h)
else:
return (22, label, h)
if index.assumptions0.get('above_fermi'):
return (10, label, h)
elif index.assumptions0.get('below_fermi'):
return (11, label, h)
else:
return (12, label, h)
def _latex(self, printer):
return "{%s^{%s}_{%s}}" % (
self.symbol,
"".join([ i.name for i in self.args[1]]),
"".join([ i.name for i in self.args[2]])
)
@property
def symbol(self):
"""
Returns the symbol of the tensor.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import AntiSymmetricTensor
>>> i, j = symbols('i,j', below_fermi=True)
>>> a, b = symbols('a,b', above_fermi=True)
>>> AntiSymmetricTensor('v', (a, i), (b, j))
AntiSymmetricTensor(v, (a, i), (b, j))
>>> AntiSymmetricTensor('v', (a, i), (b, j)).symbol
v
"""
return self.args[0]
@property
def upper(self):
"""
Returns the upper indices.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import AntiSymmetricTensor
>>> i, j = symbols('i,j', below_fermi=True)
>>> a, b = symbols('a,b', above_fermi=True)
>>> AntiSymmetricTensor('v', (a, i), (b, j))
AntiSymmetricTensor(v, (a, i), (b, j))
>>> AntiSymmetricTensor('v', (a, i), (b, j)).upper
(a, i)
"""
return self.args[1]
@property
def lower(self):
"""
Returns the lower indices.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import AntiSymmetricTensor
>>> i, j = symbols('i,j', below_fermi=True)
>>> a, b = symbols('a,b', above_fermi=True)
>>> AntiSymmetricTensor('v', (a, i), (b, j))
AntiSymmetricTensor(v, (a, i), (b, j))
>>> AntiSymmetricTensor('v', (a, i), (b, j)).lower
(b, j)
"""
return self.args[2]
def __str__(self):
return "%s(%s,%s)" % self.args
def doit(self, **kw_args):
"""
Returns self.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import AntiSymmetricTensor
>>> i, j = symbols('i,j', below_fermi=True)
>>> a, b = symbols('a,b', above_fermi=True)
>>> AntiSymmetricTensor('v', (a, i), (b, j)).doit()
AntiSymmetricTensor(v, (a, i), (b, j))
"""
return self
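# Illustrative sketch of the sign bookkeeping in AntiSymmetricTensor.__new__
# (assumes the symbols used in the docstring above):
#
#     from sympy import symbols
#     from sympy.physics.secondquant import AntiSymmetricTensor
#     a, b = symbols('a b', above_fermi=True)
#     i, j = symbols('i j', below_fermi=True)
#     AntiSymmetricTensor('v', (i, a), (b, j))  # one swap in the upper group
#                                               # -> overall minus sign
#     AntiSymmetricTensor('v', (a, a), (i, j))  # repeated index in a group
#                                               # -> 0 by the Pauli principle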
class SqOperator(Expr):
"""
Base class for Second Quantization operators.
"""
op_symbol = 'sq'
is_commutative = False
def __new__(cls, k):
obj = Basic.__new__(cls, sympify(k))
return obj
@property
def state(self):
"""
Returns the state index related to this operator.
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F, Fd, B, Bd
>>> p = Symbol('p')
>>> F(p).state
p
>>> Fd(p).state
p
>>> B(p).state
p
>>> Bd(p).state
p
"""
return self.args[0]
@property
def is_symbolic(self):
"""
Returns True if the state is a symbol (as opposed to a number).
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> p = Symbol('p')
>>> F(p).is_symbolic
True
>>> F(1).is_symbolic
False
"""
if self.state.is_Integer:
return False
else:
return True
def doit(self, **kw_args):
"""
FIXME: hack to prevent crash further up...
"""
return self
def __repr__(self):
return NotImplemented
def __str__(self):
return "%s(%r)" % (self.op_symbol, self.state)
def apply_operator(self, state):
"""
Applies this operator to the given state.
"""
raise NotImplementedError('implement apply_operator in a subclass')
class BosonicOperator(SqOperator):
pass
class Annihilator(SqOperator):
pass
class Creator(SqOperator):
pass
class AnnihilateBoson(BosonicOperator, Annihilator):
"""
Bosonic annihilation operator.
Examples
========
>>> from sympy.physics.secondquant import B
>>> from sympy.abc import x
>>> B(x)
AnnihilateBoson(x)
"""
op_symbol = 'b'
def _dagger_(self):
return CreateBoson(self.state)
def apply_operator(self, state):
"""
Apply this operator to ``state``. If the operator is not symbolic and
``state`` is a FockStateKet, the action is evaluated; otherwise the
unevaluated product of self and state is returned.
Examples
========
>>> from sympy.physics.secondquant import B, BKet
>>> from sympy.abc import x, y, n
>>> B(x).apply_operator(y)
y*AnnihilateBoson(x)
>>> B(0).apply_operator(BKet((n,)))
sqrt(n)*FockStateBosonKet((n - 1,))
"""
if not self.is_symbolic and isinstance(state, FockStateKet):
element = self.state
amp = sqrt(state[element])
return amp*state.down(element)
else:
return Mul(self, state)
def __repr__(self):
return "AnnihilateBoson(%s)" % self.state
def _latex(self, printer):
return "b_{%s}" % self.state.name
class CreateBoson(BosonicOperator, Creator):
"""
Bosonic creation operator.
"""
op_symbol = 'b+'
def _dagger_(self):
return AnnihilateBoson(self.state)
def apply_operator(self, state):
"""
Apply this operator to ``state``. If the operator is not symbolic and
``state`` is a FockStateKet, the action is evaluated; otherwise the
unevaluated product of self and state is returned.
Examples
========
>>> from sympy.physics.secondquant import B, Dagger, BKet
>>> from sympy.abc import x, y, n
>>> Dagger(B(x)).apply_operator(y)
y*CreateBoson(x)
>>> B(0).apply_operator(BKet((n,)))
sqrt(n)*FockStateBosonKet((n - 1,))
"""
if not self.is_symbolic and isinstance(state, FockStateKet):
element = self.state
amp = sqrt(state[element] + 1)
return amp*state.up(element)
else:
return Mul(self, state)
def __repr__(self):
return "CreateBoson(%s)" % self.state
def _latex(self, printer):
return "{b^\\dagger_{%s}}" % self.state.name
B = AnnihilateBoson
Bd = CreateBoson
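# Illustrative sketch: the bosonic ladder operators act on occupation-number
# kets with the usual sqrt(n) amplitudes through apply_operator defined above.
#
#     from sympy.physics.secondquant import B, Bd, BKet
#     Bd(0).apply_operator(BKet([2]))  # raises the occupation: sqrt(3)*|3>
#     B(0).apply_operator(BKet([2]))   # lowers the occupation: sqrt(2)*|1>
#     B(0).apply_operator(BKet([0]))   # annihilating the vacuum gives 0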
class FermionicOperator(SqOperator):
@property
def is_restricted(self):
"""
Is this FermionicOperator restricted with respect to fermi level?
Returns
=======
1 : restricted to orbits above fermi
0 : no restriction
-1 : restricted to orbits below fermi
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F, Fd
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_restricted
1
>>> Fd(a).is_restricted
1
>>> F(i).is_restricted
-1
>>> Fd(i).is_restricted
-1
>>> F(p).is_restricted
0
>>> Fd(p).is_restricted
0
"""
ass = self.args[0].assumptions0
if ass.get("below_fermi"):
return -1
if ass.get("above_fermi"):
return 1
return 0
@property
def is_above_fermi(self):
"""
Does the index of this FermionicOperator allow values above fermi?
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_above_fermi
True
>>> F(i).is_above_fermi
False
>>> F(p).is_above_fermi
True
Note
====
The same applies to creation operators Fd
"""
return not self.args[0].assumptions0.get("below_fermi")
@property
def is_below_fermi(self):
"""
Does the index of this FermionicOperator allow values below fermi?
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_below_fermi
False
>>> F(i).is_below_fermi
True
>>> F(p).is_below_fermi
True
The same applies to creation operators Fd
"""
return not self.args[0].assumptions0.get("above_fermi")
@property
def is_only_below_fermi(self):
"""
Is the index of this FermionicOperator restricted to values below fermi?
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_only_below_fermi
False
>>> F(i).is_only_below_fermi
True
>>> F(p).is_only_below_fermi
False
The same applies to creation operators Fd
"""
return self.is_below_fermi and not self.is_above_fermi
@property
def is_only_above_fermi(self):
"""
Is the index of this FermionicOperator restricted to values above fermi?
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_only_above_fermi
True
>>> F(i).is_only_above_fermi
False
>>> F(p).is_only_above_fermi
False
The same applies to creation operators Fd
"""
return self.is_above_fermi and not self.is_below_fermi
def _sortkey(self):
h = hash(self)
label = str(self.args[0])
if self.is_only_q_creator:
return 1, label, h
if self.is_only_q_annihilator:
return 4, label, h
if isinstance(self, Annihilator):
return 3, label, h
if isinstance(self, Creator):
return 2, label, h
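# Illustrative sketch of how the fermi-level properties above classify an
# operator: a restricted index makes F a pure quasi-annihilator (above fermi)
# or a pure quasi-creator of a hole (below fermi); a general index allows both.
#
#     from sympy import Symbol
#     from sympy.physics.secondquant import F
#     a = Symbol('a', above_fermi=True)
#     i = Symbol('i', below_fermi=True)
#     p = Symbol('p')
#     F(a).is_only_above_fermi                      # True
#     F(i).is_only_below_fermi                      # True
#     F(p).is_above_fermi and F(p).is_below_fermi   # True, ambiguous index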
class AnnihilateFermion(FermionicOperator, Annihilator):
"""
Fermionic annihilation operator.
"""
op_symbol = 'f'
def _dagger_(self):
return CreateFermion(self.state)
def apply_operator(self, state):
"""
Apply this operator to ``state``. If ``state`` is a FockStateFermionKet
(or a Mul whose leading non-commutative factor is one), the action is
evaluated; otherwise the unevaluated product of self and state is returned.
Examples
========
>>> from sympy.physics.secondquant import B, Dagger, BKet
>>> from sympy.abc import x, y, n
>>> Dagger(B(x)).apply_operator(y)
y*CreateBoson(x)
>>> B(0).apply_operator(BKet((n,)))
sqrt(n)*FockStateBosonKet((n - 1,))
"""
if isinstance(state, FockStateFermionKet):
element = self.state
return state.down(element)
elif isinstance(state, Mul):
c_part, nc_part = state.args_cnc()
if isinstance(nc_part[0], FockStateFermionKet):
element = self.state
return Mul(*(c_part + [nc_part[0].down(element)] + nc_part[1:]))
else:
return Mul(self, state)
else:
return Mul(self, state)
@property
def is_q_creator(self):
"""
Can we create a quasi-particle? (create hole or create particle)
If so, would that be above or below the fermi surface?
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_q_creator
0
>>> F(i).is_q_creator
-1
>>> F(p).is_q_creator
-1
"""
if self.is_below_fermi:
return -1
return 0
@property
def is_q_annihilator(self):
"""
Can we destroy a quasi-particle? (annihilate hole or annihilate particle)
If so, would that be above or below the fermi surface?
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=1)
>>> i = Symbol('i', below_fermi=1)
>>> p = Symbol('p')
>>> F(a).is_q_annihilator
1
>>> F(i).is_q_annihilator
0
>>> F(p).is_q_annihilator
1
"""
if self.is_above_fermi:
return 1
return 0
@property
def is_only_q_creator(self):
"""
Always create a quasi-particle? (create hole or create particle)
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_only_q_creator
False
>>> F(i).is_only_q_creator
True
>>> F(p).is_only_q_creator
False
"""
return self.is_only_below_fermi
@property
def is_only_q_annihilator(self):
"""
Always destroy a quasi-particle? (annihilate hole or annihilate particle)
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_only_q_annihilator
True
>>> F(i).is_only_q_annihilator
False
>>> F(p).is_only_q_annihilator
False
"""
return self.is_only_above_fermi
def __repr__(self):
return "AnnihilateFermion(%s)" % self.state
def _latex(self, printer):
return "a_{%s}" % self.state.name
class CreateFermion(FermionicOperator, Creator):
"""
Fermionic creation operator.
"""
op_symbol = 'f+'
def _dagger_(self):
return AnnihilateFermion(self.state)
def apply_operator(self, state):
"""
Apply this operator to ``state``. If ``state`` is a FockStateFermionKet
(or a Mul whose leading non-commutative factor is one), the action is
evaluated; otherwise the unevaluated product of self and state is returned.
Examples
========
>>> from sympy.physics.secondquant import B, Dagger, BKet
>>> from sympy.abc import x, y, n
>>> Dagger(B(x)).apply_operator(y)
y*CreateBoson(x)
>>> B(0).apply_operator(BKet((n,)))
sqrt(n)*FockStateBosonKet((n - 1,))
"""
if isinstance(state, FockStateFermionKet):
element = self.state
return state.up(element)
elif isinstance(state, Mul):
c_part, nc_part = state.args_cnc()
if isinstance(nc_part[0], FockStateFermionKet):
element = self.state
return Mul(*(c_part + [nc_part[0].up(element)] + nc_part[1:]))
return Mul(self, state)
@property
def is_q_creator(self):
"""
Can we create a quasi-particle? (create hole or create particle)
If so, would that be above or below the fermi surface?
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import Fd
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> Fd(a).is_q_creator
1
>>> Fd(i).is_q_creator
0
>>> Fd(p).is_q_creator
1
"""
if self.is_above_fermi:
return 1
return 0
@property
def is_q_annihilator(self):
"""
Can we destroy a quasi-particle? (annihilate hole or annihilate particle)
If so, would that be above or below the fermi surface?
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import Fd
>>> a = Symbol('a', above_fermi=1)
>>> i = Symbol('i', below_fermi=1)
>>> p = Symbol('p')
>>> Fd(a).is_q_annihilator
0
>>> Fd(i).is_q_annihilator
-1
>>> Fd(p).is_q_annihilator
-1
"""
if self.is_below_fermi:
return -1
return 0
@property
def is_only_q_creator(self):
"""
Always create a quasi-particle? (create hole or create particle)
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import Fd
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> Fd(a).is_only_q_creator
True
>>> Fd(i).is_only_q_creator
False
>>> Fd(p).is_only_q_creator
False
"""
return self.is_only_above_fermi
@property
def is_only_q_annihilator(self):
"""
Always destroy a quasi-particle? (annihilate hole or annihilate particle)
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import Fd
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> Fd(a).is_only_q_annihilator
False
>>> Fd(i).is_only_q_annihilator
True
>>> Fd(p).is_only_q_annihilator
False
"""
return self.is_only_below_fermi
def __repr__(self):
return "CreateFermion(%s)" % self.state
def _latex(self, printer):
return "{a^\\dagger_{%s}}" % self.state.name
Fd = CreateFermion
F = AnnihilateFermion
class FockState(Expr):
"""
Many particle Fock state with a sequence of occupation numbers.
Anywhere you can have a FockState, you can also have S.Zero.
All code must check for this!
Base class to represent FockStates.
"""
is_commutative = False
def __new__(cls, occupations):
"""
occupations is a list with two possible meanings:
- For bosons it is a list of occupation numbers.
Element i is the number of particles in state i.
- For fermions it is a list of occupied orbits.
Element 0 is the state that was occupied first, element i
is the i'th occupied state.
"""
occupations = list(map(sympify, occupations))
obj = Basic.__new__(cls, Tuple(*occupations))
return obj
def __getitem__(self, i):
i = int(i)
return self.args[0][i]
def __repr__(self):
return ("FockState(%r)") % (self.args)
def __str__(self):
return "%s%r%s" % (self.lbracket, self._labels(), self.rbracket)
def _labels(self):
return self.args[0]
def __len__(self):
return len(self.args[0])
class BosonState(FockState):
"""
Base class for FockStateBoson(Ket/Bra).
"""
def up(self, i):
"""
Performs the action of a creation operator.
Examples
========
>>> from sympy.physics.secondquant import BBra
>>> b = BBra([1, 2])
>>> b
FockStateBosonBra((1, 2))
>>> b.up(1)
FockStateBosonBra((1, 3))
"""
i = int(i)
new_occs = list(self.args[0])
new_occs[i] = new_occs[i] + S.One
return self.__class__(new_occs)
def down(self, i):
"""
Performs the action of an annihilation operator.
Examples
========
>>> from sympy.physics.secondquant import BBra
>>> b = BBra([1, 2])
>>> b
FockStateBosonBra((1, 2))
>>> b.down(1)
FockStateBosonBra((1, 1))
"""
i = int(i)
new_occs = list(self.args[0])
if new_occs[i] == S.Zero:
return S.Zero
else:
new_occs[i] = new_occs[i] - S.One
return self.__class__(new_occs)
class FermionState(FockState):
"""
Base class for FockStateFermion(Ket/Bra).
"""
fermi_level = 0
def __new__(cls, occupations, fermi_level=0):
occupations = list(map(sympify, occupations))
if len(occupations) > 1:
try:
(occupations, sign) = _sort_anticommuting_fermions(
occupations, key=hash)
except ViolationOfPauliPrinciple:
return S.Zero
else:
sign = 0
cls.fermi_level = fermi_level
if cls._count_holes(occupations) > fermi_level:
return S.Zero
if sign % 2:
return S.NegativeOne*FockState.__new__(cls, occupations)
else:
return FockState.__new__(cls, occupations)
def up(self, i):
"""
Performs the action of a creation operator.
Explanation
===========
If below fermi we try to remove a hole,
if above fermi we try to create a particle.
For a general index p we return ``KroneckerDelta(p, i)`` times the modified
state, where ``i`` is a new dummy symbol restricted to above or below fermi.
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import FKet
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> FKet([]).up(a)
FockStateFermionKet((a,))
A creator acting on vacuum below fermi vanishes
>>> FKet([]).up(i)
0
"""
present = i in self.args[0]
if self._only_above_fermi(i):
if present:
return S.Zero
else:
return self._add_orbit(i)
elif self._only_below_fermi(i):
if present:
return self._remove_orbit(i)
else:
return S.Zero
else:
if present:
hole = Dummy("i", below_fermi=True)
return KroneckerDelta(i, hole)*self._remove_orbit(i)
else:
particle = Dummy("a", above_fermi=True)
return KroneckerDelta(i, particle)*self._add_orbit(i)
def down(self, i):
"""
Performs the action of an annihilation operator.
Explanation
===========
If below fermi we try to create a hole,
If above fermi we try to remove a particle.
For a general index p we return ``KroneckerDelta(p, i)`` times the modified
state, where ``i`` is a new dummy symbol restricted to above or below fermi.
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import FKet
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
An annihilator acting on vacuum above fermi vanishes
>>> FKet([]).down(a)
0
Also below fermi, it vanishes, unless we specify a fermi level > 0
>>> FKet([]).down(i)
0
>>> FKet([],4).down(i)
FockStateFermionKet((i,))
"""
present = i in self.args[0]
if self._only_above_fermi(i):
if present:
return self._remove_orbit(i)
else:
return S.Zero
elif self._only_below_fermi(i):
if present:
return S.Zero
else:
return self._add_orbit(i)
else:
if present:
hole = Dummy("i", below_fermi=True)
return KroneckerDelta(i, hole)*self._add_orbit(i)
else:
particle = Dummy("a", above_fermi=True)
return KroneckerDelta(i, particle)*self._remove_orbit(i)
@classmethod
def _only_below_fermi(cls, i):
"""
Tests if given orbit is only below fermi surface.
If nothing can be concluded we return a conservative False.
"""
if i.is_number:
return i <= cls.fermi_level
if i.assumptions0.get('below_fermi'):
return True
return False
@classmethod
def _only_above_fermi(cls, i):
"""
Tests if given orbit is only above fermi surface.
If fermi level has not been set we return True.
If nothing can be concluded we return a conservative False.
"""
if i.is_number:
return i > cls.fermi_level
if i.assumptions0.get('above_fermi'):
return True
return not cls.fermi_level
def _remove_orbit(self, i):
"""
Removes particle/fills hole in orbit i. No input tests performed here.
"""
new_occs = list(self.args[0])
pos = new_occs.index(i)
del new_occs[pos]
if (pos) % 2:
return S.NegativeOne*self.__class__(new_occs, self.fermi_level)
else:
return self.__class__(new_occs, self.fermi_level)
def _add_orbit(self, i):
"""
Adds particle/creates hole in orbit i. No input tests performed here.
"""
return self.__class__((i,) + self.args[0], self.fermi_level)
@classmethod
def _count_holes(cls, list):
"""
Returns the number of identified hole states in list.
"""
return len([i for i in list if cls._only_below_fermi(i)])
def _negate_holes(self, list):
return tuple([-i if i <= self.fermi_level else i for i in list])
def __repr__(self):
if self.fermi_level:
return "FockStateKet(%r, fermi_level=%s)" % (self.args[0], self.fermi_level)
else:
return "FockStateKet(%r)" % (self.args[0],)
def _labels(self):
return self._negate_holes(self.args[0])
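# Illustrative sketch: fermionic states store the occupied orbits; with a
# nonzero fermi_level, labels at or below that level are treated as holes
# (and printed with negated labels by _labels()).
#
#     from sympy.physics.secondquant import FKet
#     FKet([1, 2])         # two particles in orbits 1 and 2
#     FKet([], 4)          # Fermi vacuum with fermi_level=4, no holes yet
#     FKet([], 4).down(3)  # annihilating orbit 3 below the fermi level
#                          # creates a hole state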
class FockStateKet(FockState):
"""
Representation of a ket.
"""
lbracket = '|'
rbracket = '>'
class FockStateBra(FockState):
"""
Representation of a bra.
"""
lbracket = '<'
rbracket = '|'
def __mul__(self, other):
if isinstance(other, FockStateKet):
return InnerProduct(self, other)
else:
return Expr.__mul__(self, other)
class FockStateBosonKet(BosonState, FockStateKet):
"""
Many particle Fock state with a sequence of occupation numbers.
Occupation numbers can be any integer >= 0.
Examples
========
>>> from sympy.physics.secondquant import BKet
>>> BKet([1, 2])
FockStateBosonKet((1, 2))
"""
def _dagger_(self):
return FockStateBosonBra(*self.args)
class FockStateBosonBra(BosonState, FockStateBra):
"""
Describes a collection of BosonBra particles.
Examples
========
>>> from sympy.physics.secondquant import BBra
>>> BBra([1, 2])
FockStateBosonBra((1, 2))
"""
def _dagger_(self):
return FockStateBosonKet(*self.args)
class FockStateFermionKet(FermionState, FockStateKet):
"""
Many-particle Fock state with a sequence of occupied orbits.
Explanation
===========
Each state can only have one particle, so we choose to store a list of
occupied orbits rather than a tuple with occupation numbers (zeros and ones).
States below the fermi level are holes, and are represented by negative labels
in the occupation list.
For symbolic state labels, the fermi_level caps the number of allowed hole
states.
Examples
========
>>> from sympy.physics.secondquant import FKet
>>> FKet([1, 2])
FockStateFermionKet((1, 2))
"""
def _dagger_(self):
return FockStateFermionBra(*self.args)
class FockStateFermionBra(FermionState, FockStateBra):
"""
See Also
========
FockStateFermionKet
Examples
========
>>> from sympy.physics.secondquant import FBra
>>> FBra([1, 2])
FockStateFermionBra((1, 2))
"""
def _dagger_(self):
return FockStateFermionKet(*self.args)
BBra = FockStateBosonBra
BKet = FockStateBosonKet
FBra = FockStateFermionBra
FKet = FockStateFermionKet
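# Illustrative sketch: the short aliases above pair with Dagger, which maps
# kets to bras and back through the _dagger_ hooks.
#
#     from sympy.physics.secondquant import Dagger, BKet, FKet
#     Dagger(BKet([1, 2]))  # -> FockStateBosonBra((1, 2))
#     Dagger(FKet([1, 2]))  # -> the corresponding fermionic bra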
def _apply_Mul(m):
"""
Take a Mul instance with operators and apply them to states.
Explanation
===========
This method applies all operators with integer state labels
to the actual states. For symbolic state labels, nothing is done.
When inner products of FockStates are encountered (like <a|b>),
they are converted to instances of InnerProduct.
This does not currently work on double inner products like
<a|b><c|d>.
If the argument is not a Mul, it is simply returned as is.
"""
if not isinstance(m, Mul):
return m
c_part, nc_part = m.args_cnc()
n_nc = len(nc_part)
if n_nc in (0, 1):
return m
else:
last = nc_part[-1]
next_to_last = nc_part[-2]
if isinstance(last, FockStateKet):
if isinstance(next_to_last, SqOperator):
if next_to_last.is_symbolic:
return m
else:
result = next_to_last.apply_operator(last)
if result == 0:
return S.Zero
else:
return _apply_Mul(Mul(*(c_part + nc_part[:-2] + [result])))
elif isinstance(next_to_last, Pow):
if isinstance(next_to_last.base, SqOperator) and \
next_to_last.exp.is_Integer:
if next_to_last.base.is_symbolic:
return m
else:
result = last
for i in range(next_to_last.exp):
result = next_to_last.base.apply_operator(result)
if result == 0:
break
if result == 0:
return S.Zero
else:
return _apply_Mul(Mul(*(c_part + nc_part[:-2] + [result])))
else:
return m
elif isinstance(next_to_last, FockStateBra):
result = InnerProduct(next_to_last, last)
if result == 0:
return S.Zero
else:
return _apply_Mul(Mul(*(c_part + nc_part[:-2] + [result])))
else:
return m
else:
return m
def apply_operators(e):
"""
Take a SymPy expression with operators and states and apply the operators.
Examples
========
>>> from sympy.physics.secondquant import apply_operators
>>> from sympy import sympify
>>> apply_operators(sympify(3)+4)
7
"""
e = e.expand()
muls = e.atoms(Mul)
subs_list = [(m, _apply_Mul(m)) for m in iter(muls)]
return e.subs(subs_list)
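# Illustrative sketch: apply_operators expands the expression and lets
# operators with numeric labels act on the kets to their right via _apply_Mul.
#
#     from sympy.physics.secondquant import apply_operators, Bd, BKet
#     apply_operators(Bd(0)*BKet([1]))  # -> sqrt(2)*FockStateBosonKet((2,))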
class InnerProduct(Basic):
"""
An unevaluated inner product between a bra and ket.
Explanation
===========
Currently this class just reduces things to a product of
Kronecker Deltas. In the future, we could introduce abstract
states like ``|a>`` and ``|b>``, and leave the inner product unevaluated as
``<a|b>``.
"""
is_commutative = True
def __new__(cls, bra, ket):
if not isinstance(bra, FockStateBra):
raise TypeError("must be a bra")
if not isinstance(ket, FockStateKet):
raise TypeError("must be a key")
return cls.eval(bra, ket)
@classmethod
def eval(cls, bra, ket):
result = S.One
for i, j in zip(bra.args[0], ket.args[0]):
result *= KroneckerDelta(i, j)
if result == 0:
break
return result
@property
def bra(self):
"""Returns the bra part of the state"""
return self.args[0]
@property
def ket(self):
"""Returns the ket part of the state"""
return self.args[1]
def __repr__(self):
sbra = repr(self.bra)
sket = repr(self.ket)
return "%s|%s" % (sbra[:-1], sket[1:])
def __str__(self):
return self.__repr__()
def matrix_rep(op, basis):
"""
Find the representation of an operator in a basis.
Examples
========
>>> from sympy.physics.secondquant import VarBosonicBasis, B, matrix_rep
>>> b = VarBosonicBasis(5)
>>> o = B(0)
>>> matrix_rep(o, b)
Matrix([
[0, 1, 0, 0, 0],
[0, 0, sqrt(2), 0, 0],
[0, 0, 0, sqrt(3), 0],
[0, 0, 0, 0, 2],
[0, 0, 0, 0, 0]])
"""
a = zeros(len(basis))
for i in range(len(basis)):
for j in range(len(basis)):
a[i, j] = apply_operators(Dagger(basis[i])*op*basis[j])
return a
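# Illustrative sketch: the number operator Bd(0)*B(0) in a variable-number
# basis should come out diagonal, with the occupation numbers on the diagonal.
#
#     from sympy.physics.secondquant import matrix_rep, VarBosonicBasis, B, Bd
#     b = VarBosonicBasis(4)
#     matrix_rep(Bd(0)*B(0), b)  # expected diag(0, 1, 2, 3)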
class BosonicBasis:
"""
Base class for a basis set of bosonic Fock states.
"""
pass
class VarBosonicBasis:
"""
A single state, variable particle number basis set.
Examples
========
>>> from sympy.physics.secondquant import VarBosonicBasis
>>> b = VarBosonicBasis(5)
>>> b
[FockState((0,)), FockState((1,)), FockState((2,)),
FockState((3,)), FockState((4,))]
"""
def __init__(self, n_max):
self.n_max = n_max
self._build_states()
def _build_states(self):
self.basis = []
for i in range(self.n_max):
self.basis.append(FockStateBosonKet([i]))
self.n_basis = len(self.basis)
def index(self, state):
"""
Returns the index of state in basis.
Examples
========
>>> from sympy.physics.secondquant import VarBosonicBasis
>>> b = VarBosonicBasis(3)
>>> state = b.state(1)
>>> b
[FockState((0,)), FockState((1,)), FockState((2,))]
>>> state
FockStateBosonKet((1,))
>>> b.index(state)
1
"""
return self.basis.index(state)
def state(self, i):
"""
Returns the state at index i of the basis.
Examples
========
>>> from sympy.physics.secondquant import VarBosonicBasis
>>> b = VarBosonicBasis(5)
>>> b.state(3)
FockStateBosonKet((3,))
"""
return self.basis[i]
def __getitem__(self, i):
return self.state(i)
def __len__(self):
return len(self.basis)
def __repr__(self):
return repr(self.basis)
class FixedBosonicBasis(BosonicBasis):
"""
Fixed particle number basis set.
Examples
========
>>> from sympy.physics.secondquant import FixedBosonicBasis
>>> b = FixedBosonicBasis(2, 2)
>>> state = b.state(1)
>>> b
[FockState((2, 0)), FockState((1, 1)), FockState((0, 2))]
>>> state
FockStateBosonKet((1, 1))
>>> b.index(state)
1
"""
def __init__(self, n_particles, n_levels):
self.n_particles = n_particles
self.n_levels = n_levels
self._build_particle_locations()
self._build_states()
def _build_particle_locations(self):
tup = ["i%i" % i for i in range(self.n_particles)]
first_loop = "for i0 in range(%i)" % self.n_levels
other_loops = ''
for cur, prev in zip(tup[1:], tup):
temp = "for %s in range(%s + 1) " % (cur, prev)
other_loops = other_loops + temp
tup_string = "(%s)" % ", ".join(tup)
list_comp = "[%s %s %s]" % (tup_string, first_loop, other_loops)
result = eval(list_comp)
if self.n_particles == 1:
result = [(item,) for item in result]
self.particle_locations = result
def _build_states(self):
self.basis = []
for tuple_of_indices in self.particle_locations:
occ_numbers = self.n_levels*[0]
for level in tuple_of_indices:
occ_numbers[level] += 1
self.basis.append(FockStateBosonKet(occ_numbers))
self.n_basis = len(self.basis)
def index(self, state):
"""Returns the index of state in basis.
Examples
========
>>> from sympy.physics.secondquant import FixedBosonicBasis
>>> b = FixedBosonicBasis(2, 3)
>>> b.index(b.state(3))
3
"""
return self.basis.index(state)
def state(self, i):
"""Returns the state that lies at index i of the basis
Examples
========
>>> from sympy.physics.secondquant import FixedBosonicBasis
>>> b = FixedBosonicBasis(2, 3)
>>> b.state(3)
FockStateBosonKet((1, 0, 1))
"""
return self.basis[i]
def __getitem__(self, i):
return self.state(i)
def __len__(self):
return len(self.basis)
def __repr__(self):
return repr(self.basis)
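# Illustrative sketch: for n particles in k levels the fixed-number basis has
# binomial(n + k - 1, n) states, e.g. 3 states for FixedBosonicBasis(2, 2) in
# the docstring above and 6 states for FixedBosonicBasis(2, 3).
#
#     from sympy.physics.secondquant import FixedBosonicBasis
#     len(FixedBosonicBasis(2, 3))  # 6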
class Commutator(Function):
"""
The Commutator: [A, B] = A*B - B*A
The arguments are put in canonical order according to their sort keys.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import Commutator
>>> A, B = symbols('A,B', commutative=False)
>>> Commutator(B, A)
-Commutator(A, B)
Evaluate the commutator with .doit()
>>> comm = Commutator(A,B); comm
Commutator(A, B)
>>> comm.doit()
A*B - B*A
For two second quantization operators the commutator is evaluated
immediately:
>>> from sympy.physics.secondquant import Fd, F
>>> a = symbols('a', above_fermi=True)
>>> i = symbols('i', below_fermi=True)
>>> p,q = symbols('p,q')
>>> Commutator(Fd(a),Fd(i))
2*NO(CreateFermion(a)*CreateFermion(i))
But for more complicated expressions, the evaluation is triggered by
a call to .doit()
>>> comm = Commutator(Fd(p)*Fd(q),F(i)); comm
Commutator(CreateFermion(p)*CreateFermion(q), AnnihilateFermion(i))
>>> comm.doit(wicks=True)
-KroneckerDelta(i, p)*CreateFermion(q) +
KroneckerDelta(i, q)*CreateFermion(p)
"""
is_commutative = False
@classmethod
def eval(cls, a, b):
"""
The Commutator [A,B] is in canonical form if A < B.
Examples
========
>>> from sympy.physics.secondquant import Commutator, F, Fd
>>> from sympy.abc import x
>>> c1 = Commutator(F(x), Fd(x))
>>> c2 = Commutator(Fd(x), F(x))
>>> Commutator.eval(c1, c2)
0
"""
if not (a and b):
return S.Zero
if a == b:
return S.Zero
if a.is_commutative or b.is_commutative:
return S.Zero
#
# [A+B,C] -> [A,C] + [B,C]
#
a = a.expand()
if isinstance(a, Add):
return Add(*[cls(term, b) for term in a.args])
b = b.expand()
if isinstance(b, Add):
return Add(*[cls(a, term) for term in b.args])
#
# [xA,yB] -> xy*[A,B]
#
ca, nca = a.args_cnc()
cb, ncb = b.args_cnc()
c_part = list(ca) + list(cb)
if c_part:
return Mul(Mul(*c_part), cls(Mul._from_args(nca), Mul._from_args(ncb)))
#
# single second quantization operators
#
if isinstance(a, BosonicOperator) and isinstance(b, BosonicOperator):
if isinstance(b, CreateBoson) and isinstance(a, AnnihilateBoson):
return KroneckerDelta(a.state, b.state)
if isinstance(a, CreateBoson) and isinstance(b, AnnihilateBoson):
return S.NegativeOne*KroneckerDelta(a.state, b.state)
else:
return S.Zero
if isinstance(a, FermionicOperator) and isinstance(b, FermionicOperator):
return wicks(a*b) - wicks(b*a)
#
# Canonical ordering of arguments
#
if a.sort_key() > b.sort_key():
return S.NegativeOne*cls(b, a)
def doit(self, **hints):
"""
Enables the computation of complex expressions.
Examples
========
>>> from sympy.physics.secondquant import Commutator, F, Fd
>>> from sympy import symbols
>>> i, j = symbols('i,j', below_fermi=True)
>>> a, b = symbols('a,b', above_fermi=True)
>>> c = Commutator(Fd(a)*F(i),Fd(b)*F(j))
>>> c.doit(wicks=True)
0
"""
a = self.args[0]
b = self.args[1]
if hints.get("wicks"):
a = a.doit(**hints)
b = b.doit(**hints)
try:
return wicks(a*b) - wicks(b*a)
except ContractionAppliesOnlyToFermions:
pass
except WicksTheoremDoesNotApply:
pass
return (a*b - b*a).doit(**hints)
def __repr__(self):
return "Commutator(%s,%s)" % (self.args[0], self.args[1])
def __str__(self):
return "[%s,%s]" % (self.args[0], self.args[1])
def _latex(self, printer):
return "\\left[%s,%s\\right]" % tuple([
printer._print(arg) for arg in self.args])
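# Illustrative sketch: for single bosonic operators Commutator.eval above
# reduces to the canonical commutation relation.
#
#     from sympy import symbols
#     from sympy.physics.secondquant import Commutator, B, Bd
#     p, q = symbols('p q')
#     Commutator(B(p), Bd(q))  # -> KroneckerDelta(p, q)
#     Commutator(Bd(q), B(p))  # -> -KroneckerDelta(p, q)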
class NO(Expr):
"""
This object is used to represent normal ordering brackets,
i.e. {abcd} sometimes written :abcd:
Explanation
===========
Applying the function NO(arg) to an argument means that all operators in
the argument will be assumed to anticommute, and have vanishing
contractions. This allows an immediate reordering to canonical form
upon object creation.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import NO, F, Fd
>>> p,q = symbols('p,q')
>>> NO(Fd(p)*F(q))
NO(CreateFermion(p)*AnnihilateFermion(q))
>>> NO(F(q)*Fd(p))
-NO(CreateFermion(p)*AnnihilateFermion(q))
Note
====
If you want to generate a normal ordered equivalent of an expression, you
should use the function wicks(). This class only indicates that all
operators inside the brackets anticommute, and have vanishing contractions.
Nothing more, nothing less.
"""
is_commutative = False
def __new__(cls, arg):
"""
Use anticommutation to get canonical form of operators.
Explanation
===========
Employ associativity of normal ordered product: {ab{cd}} = {abcd}
but note that {ab}{cd} /= {abcd}.
We also employ distributivity: {ab + cd} = {ab} + {cd}.
Canonical form also implies expand() {ab(c+d)} = {abc} + {abd}.
"""
# {ab + cd} = {ab} + {cd}
arg = sympify(arg)
arg = arg.expand()
if arg.is_Add:
return Add(*[ cls(term) for term in arg.args])
if arg.is_Mul:
# take coefficient outside of normal ordering brackets
c_part, seq = arg.args_cnc()
if c_part:
coeff = Mul(*c_part)
if not seq:
return coeff
else:
coeff = S.One
# {ab{cd}} = {abcd}
newseq = []
foundit = False
for fac in seq:
if isinstance(fac, NO):
newseq.extend(fac.args)
foundit = True
else:
newseq.append(fac)
if foundit:
return coeff*cls(Mul(*newseq))
# We assume that the user doesn't mix B and F operators
if isinstance(seq[0], BosonicOperator):
raise NotImplementedError
try:
newseq, sign = _sort_anticommuting_fermions(seq)
except ViolationOfPauliPrinciple:
return S.Zero
if sign % 2:
return (S.NegativeOne*coeff)*cls(Mul(*newseq))
elif sign:
return coeff*cls(Mul(*newseq))
else:
pass # since sign==0, no permutation was necessary
# if we couldn't do anything with Mul object, we just
# mark it as normal ordered
if coeff != S.One:
return coeff*cls(Mul(*newseq))
return Expr.__new__(cls, Mul(*newseq))
if isinstance(arg, NO):
return arg
# if object was not Mul or Add, normal ordering does not apply
return arg
@property
def has_q_creators(self):
"""
Return 0 if the leftmost argument of the first argument is not a
q_creator, else 1 if it is above fermi or -1 if it is below fermi.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import NO, F, Fd
>>> a = symbols('a', above_fermi=True)
>>> i = symbols('i', below_fermi=True)
>>> NO(Fd(a)*Fd(i)).has_q_creators
1
>>> NO(F(i)*F(a)).has_q_creators
-1
>>> NO(Fd(i)*F(a)).has_q_creators #doctest: +SKIP
0
"""
return self.args[0].args[0].is_q_creator
@property
def has_q_annihilators(self):
"""
Return 0 if the rightmost argument of the first argument is not a
q_annihilator, else 1 if it is above fermi or -1 if it is below fermi.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import NO, F, Fd
>>> a = symbols('a', above_fermi=True)
>>> i = symbols('i', below_fermi=True)
>>> NO(Fd(a)*Fd(i)).has_q_annihilators
-1
>>> NO(F(i)*F(a)).has_q_annihilators
1
>>> NO(Fd(a)*F(i)).has_q_annihilators
0
"""
return self.args[0].args[-1].is_q_annihilator
def doit(self, **kw_args):
"""
Either removes the brackets or enables complex computations
in its arguments.
Examples
========
>>> from sympy.physics.secondquant import NO, Fd, F
>>> from textwrap import fill
>>> from sympy import symbols, Dummy
>>> p,q = symbols('p,q', cls=Dummy)
>>> print(fill(str(NO(Fd(p)*F(q)).doit())))
KroneckerDelta(_a, _p)*KroneckerDelta(_a,
_q)*CreateFermion(_a)*AnnihilateFermion(_a) + KroneckerDelta(_a,
_p)*KroneckerDelta(_i, _q)*CreateFermion(_a)*AnnihilateFermion(_i) -
KroneckerDelta(_a, _q)*KroneckerDelta(_i,
_p)*AnnihilateFermion(_a)*CreateFermion(_i) - KroneckerDelta(_i,
_p)*KroneckerDelta(_i, _q)*AnnihilateFermion(_i)*CreateFermion(_i)
"""
if kw_args.get("remove_brackets", True):
return self._remove_brackets()
else:
return self.__new__(type(self), self.args[0].doit(**kw_args))
def _remove_brackets(self):
"""
Returns the sorted string without normal order brackets.
The returned string has the property that no nonzero
contractions exist.
"""
# check if any creator is also an annihilator
subslist = []
for i in self.iter_q_creators():
if self[i].is_q_annihilator:
assume = self[i].state.assumptions0
# only operators with a dummy index can be split in two terms
if isinstance(self[i].state, Dummy):
# create indices with fermi restriction
assume.pop("above_fermi", None)
assume["below_fermi"] = True
below = Dummy('i', **assume)
assume.pop("below_fermi", None)
assume["above_fermi"] = True
above = Dummy('a', **assume)
cls = type(self[i])
split = (
self[i].__new__(cls, below)
* KroneckerDelta(below, self[i].state)
+ self[i].__new__(cls, above)
* KroneckerDelta(above, self[i].state)
)
subslist.append((self[i], split))
else:
raise SubstitutionOfAmbigousOperatorFailed(self[i])
if subslist:
result = NO(self.subs(subslist))
if isinstance(result, Add):
return Add(*[term.doit() for term in result.args])
else:
return self.args[0]
def _expand_operators(self):
"""
Returns a sum of NO objects that contain no ambiguous q-operators.
Explanation
===========
If an index q has range both above and below fermi, the operator F(q)
is ambiguous in the sense that it can be both a q-creator and a q-annihilator.
If q is dummy, it is assumed to be a summation variable and this method
rewrites it into a sum of NO terms with unambiguous operators:
{Fd(p)*F(q)} = {Fd(a)*F(b)} + {Fd(a)*F(i)} + {Fd(j)*F(b)} -{F(i)*Fd(j)}
where a,b are above and i,j are below fermi level.
"""
return NO(self._remove_brackets())
def __getitem__(self, i):
if isinstance(i, slice):
indices = i.indices(len(self))
return [self.args[0].args[i] for i in range(*indices)]
else:
return self.args[0].args[i]
def __len__(self):
return len(self.args[0].args)
def iter_q_annihilators(self):
"""
Iterates over the annihilation operators.
Examples
========
>>> from sympy import symbols
>>> i, j = symbols('i j', below_fermi=True)
>>> a, b = symbols('a b', above_fermi=True)
>>> from sympy.physics.secondquant import NO, F, Fd
>>> no = NO(Fd(a)*F(i)*F(b)*Fd(j))
>>> no.iter_q_creators()
<generator object... at 0x...>
>>> list(no.iter_q_creators())
[0, 1]
>>> list(no.iter_q_annihilators())
[3, 2]
"""
ops = self.args[0].args
iter = range(len(ops) - 1, -1, -1)
for i in iter:
if ops[i].is_q_annihilator:
yield i
else:
break
def iter_q_creators(self):
"""
Iterates over the creation operators.
Examples
========
>>> from sympy import symbols
>>> i, j = symbols('i j', below_fermi=True)
>>> a, b = symbols('a b', above_fermi=True)
>>> from sympy.physics.secondquant import NO, F, Fd
>>> no = NO(Fd(a)*F(i)*F(b)*Fd(j))
>>> no.iter_q_creators()
<generator object... at 0x...>
>>> list(no.iter_q_creators())
[0, 1]
>>> list(no.iter_q_annihilators())
[3, 2]
"""
ops = self.args[0].args
iter = range(0, len(ops))
for i in iter:
if ops[i].is_q_creator:
yield i
else:
break
def get_subNO(self, i):
"""
Returns a NO() without FermionicOperator at index i.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import F, NO
>>> p, q, r = symbols('p,q,r')
>>> NO(F(p)*F(q)*F(r)).get_subNO(1)
NO(AnnihilateFermion(p)*AnnihilateFermion(r))
"""
arg0 = self.args[0] # it's a Mul by definition of how it's created
mul = arg0._new_rawargs(*(arg0.args[:i] + arg0.args[i + 1:]))
return NO(mul)
def _latex(self, printer):
return "\\left\\{%s\\right\\}" % printer._print(self.args[0])
def __repr__(self):
return "NO(%s)" % self.args[0]
def __str__(self):
return ":%s:" % self.args[0]
def contraction(a, b):
"""
Calculates contraction of Fermionic operators a and b.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import F, Fd, contraction
>>> p, q = symbols('p,q')
>>> a, b = symbols('a,b', above_fermi=True)
>>> i, j = symbols('i,j', below_fermi=True)
A contraction is non-zero only if a quasi-creator is to the right of a
quasi-annihilator:
>>> contraction(F(a),Fd(b))
KroneckerDelta(a, b)
>>> contraction(Fd(i),F(j))
KroneckerDelta(i, j)
For general indices a non-zero result restricts the indices to below/above
the fermi surface:
>>> contraction(Fd(p),F(q))
KroneckerDelta(_i, q)*KroneckerDelta(p, q)
>>> contraction(F(p),Fd(q))
KroneckerDelta(_a, q)*KroneckerDelta(p, q)
Two creators or two annihilators always vanish:
>>> contraction(F(p),F(q))
0
>>> contraction(Fd(p),Fd(q))
0
"""
if isinstance(b, FermionicOperator) and isinstance(a, FermionicOperator):
if isinstance(a, AnnihilateFermion) and isinstance(b, CreateFermion):
if b.state.assumptions0.get("below_fermi"):
return S.Zero
if a.state.assumptions0.get("below_fermi"):
return S.Zero
if b.state.assumptions0.get("above_fermi"):
return KroneckerDelta(a.state, b.state)
if a.state.assumptions0.get("above_fermi"):
return KroneckerDelta(a.state, b.state)
return (KroneckerDelta(a.state, b.state)*
KroneckerDelta(b.state, Dummy('a', above_fermi=True)))
if isinstance(b, AnnihilateFermion) and isinstance(a, CreateFermion):
if b.state.assumptions0.get("above_fermi"):
return S.Zero
if a.state.assumptions0.get("above_fermi"):
return S.Zero
if b.state.assumptions0.get("below_fermi"):
return KroneckerDelta(a.state, b.state)
if a.state.assumptions0.get("below_fermi"):
return KroneckerDelta(a.state, b.state)
return (KroneckerDelta(a.state, b.state)*
KroneckerDelta(b.state, Dummy('i', below_fermi=True)))
# vanish if 2xAnnihilator or 2xCreator
return S.Zero
else:
#not fermion operators
t = ( isinstance(i, FermionicOperator) for i in (a, b) )
raise ContractionAppliesOnlyToFermions(*t)
def _sqkey(sq_operator):
"""Generates key for canonical sorting of SQ operators."""
return sq_operator._sortkey()
def _sort_anticommuting_fermions(string1, key=_sqkey):
"""Sort fermionic operators to canonical order, assuming all pairs anticommute.
Explanation
===========
Uses a bidirectional bubble sort. Items in string1 are not referenced
so in principle they may be any comparable objects. The sorting depends on the
operators '>' and '=='.
If the Pauli principle is violated, an exception is raised.
Returns
=======
tuple (sorted_str, sign)
sorted_str: list containing the sorted operators
sign: int telling how many times the sign should be changed
(if sign==0 the string was already sorted)
"""
verified = False
sign = 0
rng = list(range(len(string1) - 1))
rev = list(range(len(string1) - 3, -1, -1))
keys = list(map(key, string1))
key_val = dict(list(zip(keys, string1)))
while not verified:
verified = True
for i in rng:
left = keys[i]
right = keys[i + 1]
if left == right:
raise ViolationOfPauliPrinciple([left, right])
if left > right:
verified = False
keys[i:i + 2] = [right, left]
sign = sign + 1
if verified:
break
for i in rev:
left = keys[i]
right = keys[i + 1]
if left == right:
raise ViolationOfPauliPrinciple([left, right])
if left > right:
verified = False
keys[i:i + 2] = [right, left]
sign = sign + 1
string1 = [ key_val[k] for k in keys ]
return (string1, sign)
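# Illustrative sketch: this helper is what NO() and AntiSymmetricTensor use
# for canonical ordering.  Each transposition of anticommuting factors
# increments the returned sign; identical sort keys raise
# ViolationOfPauliPrinciple.
#
#     from sympy import symbols
#     from sympy.physics.secondquant import F, Fd
#     i = symbols('i', below_fermi=True)
#     a = symbols('a', above_fermi=True)
#     _sort_anticommuting_fermions([F(i), Fd(a)])
#     # expected ([CreateFermion(a), AnnihilateFermion(i)], 1): one swap needed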
def evaluate_deltas(e):
"""
We evaluate KroneckerDelta symbols in the expression assuming Einstein summation.
Explanation
===========
If one index is repeated it is summed over and in effect substituted with
the other one. If both indices are repeated we substitute according to what
is the preferred index. This is determined by
KroneckerDelta.preferred_index and KroneckerDelta.killable_index.
In case there are no possible substitutions or if a substitution would
imply a loss of information, nothing is done.
In case an index appears in more than one KroneckerDelta, the resulting
substitution depends on the order of the factors. Since the ordering is platform
dependent, the literal expression resulting from this function may be hard to
predict.
Examples
========
We assume the following:
>>> from sympy import symbols, Function, Dummy, KroneckerDelta
>>> from sympy.physics.secondquant import evaluate_deltas
>>> i,j = symbols('i j', below_fermi=True, cls=Dummy)
>>> a,b = symbols('a b', above_fermi=True, cls=Dummy)
>>> p,q = symbols('p q', cls=Dummy)
>>> f = Function('f')
>>> t = Function('t')
The order of preference for these indices according to KroneckerDelta is
(a, b, i, j, p, q).
Trivial cases:
>>> evaluate_deltas(KroneckerDelta(i,j)*f(i)) # d_ij f(i) -> f(j)
f(_j)
>>> evaluate_deltas(KroneckerDelta(i,j)*f(j)) # d_ij f(j) -> f(i)
f(_i)
>>> evaluate_deltas(KroneckerDelta(i,p)*f(p)) # d_ip f(p) -> f(i)
f(_i)
>>> evaluate_deltas(KroneckerDelta(q,p)*f(p)) # d_qp f(p) -> f(q)
f(_q)
>>> evaluate_deltas(KroneckerDelta(q,p)*f(q)) # d_qp f(q) -> f(p)
f(_p)
More interesting cases:
>>> evaluate_deltas(KroneckerDelta(i,p)*t(a,i)*f(p,q))
f(_i, _q)*t(_a, _i)
>>> evaluate_deltas(KroneckerDelta(a,p)*t(a,i)*f(p,q))
f(_a, _q)*t(_a, _i)
>>> evaluate_deltas(KroneckerDelta(p,q)*f(p,q))
f(_p, _p)
Finally, here are some cases where nothing is done, because that would
imply a loss of information:
>>> evaluate_deltas(KroneckerDelta(i,p)*f(q))
f(_q)*KroneckerDelta(_i, _p)
>>> evaluate_deltas(KroneckerDelta(i,p)*f(i))
f(_i)*KroneckerDelta(_i, _p)
"""
# We treat Deltas only in mul objects
# for general function objects we don't evaluate KroneckerDeltas in arguments,
# but here we hard code exceptions to this rule
accepted_functions = (
Add,
)
if isinstance(e, accepted_functions):
return e.func(*[evaluate_deltas(arg) for arg in e.args])
elif isinstance(e, Mul):
# find all occurrences of delta function and count each index present in
# expression.
deltas = []
indices = {}
for i in e.args:
for s in i.free_symbols:
if s in indices:
indices[s] += 1
else:
indices[s] = 0 # geek counting simplifies logic below
if isinstance(i, KroneckerDelta):
deltas.append(i)
for d in deltas:
# If we do something, and there are more deltas, we should recurse
# to treat the resulting expression properly
if d.killable_index.is_Symbol and indices[d.killable_index]:
e = e.subs(d.killable_index, d.preferred_index)
if len(deltas) > 1:
return evaluate_deltas(e)
elif (d.preferred_index.is_Symbol and indices[d.preferred_index]
and d.indices_contain_equal_information):
e = e.subs(d.preferred_index, d.killable_index)
if len(deltas) > 1:
return evaluate_deltas(e)
else:
pass
return e
# nothing to do, maybe we hit a Symbol or a number
else:
return e
def substitute_dummies(expr, new_indices=False, pretty_indices={}):
"""
Collect terms by substitution of dummy variables.
Explanation
===========
This routine allows simplification of Add expressions containing terms
which differ only due to dummy variables.
The idea is to substitute all dummy variables consistently depending on
the structure of the term. For each term, we obtain a sequence of all
dummy variables, where the order is determined by the index range, what
factors the index belongs to and its position in each factor. See
_get_ordered_dummies() for more information about the sorting of dummies.
The index sequence is then substituted consistently in each term.
Examples
========
>>> from sympy import symbols, Function, Dummy
>>> from sympy.physics.secondquant import substitute_dummies
>>> a,b,c,d = symbols('a b c d', above_fermi=True, cls=Dummy)
>>> i,j = symbols('i j', below_fermi=True, cls=Dummy)
>>> f = Function('f')
>>> expr = f(a,b) + f(c,d); expr
f(_a, _b) + f(_c, _d)
Since a, b, c and d are equivalent summation indices, the expression can be
simplified to a single term (for which the dummy indices are still summed over)
>>> substitute_dummies(expr)
2*f(_a, _b)
Controlling output:
By default the dummy symbols that are already present in the expression
will be reused in a different permutation. However, if new_indices=True,
new dummies will be generated and inserted. The keyword 'pretty_indices'
can be used to control this generation of new symbols.
By default the new dummies will be generated in the form i_1, i_2, a_1,
etc. If you supply a dictionary with key:value pairs in the form:
{ index_group: string_of_letters }
The letters will be used as labels for the new dummy symbols. The
index_groups must be one of 'above', 'below' or 'general'.
>>> expr = f(a,b,i,j)
>>> my_dummies = { 'above':'st', 'below':'uv' }
>>> substitute_dummies(expr, new_indices=True, pretty_indices=my_dummies)
f(_s, _t, _u, _v)
If we run out of letters, or if there is no keyword for some index_group,
the default dummy generator will be used as a fallback:
>>> p,q = symbols('p q', cls=Dummy) # general indices
>>> expr = f(p,q)
>>> substitute_dummies(expr, new_indices=True, pretty_indices=my_dummies)
f(_p_0, _p_1)
"""
# setup the replacing dummies
if new_indices:
letters_above = pretty_indices.get('above', "")
letters_below = pretty_indices.get('below', "")
letters_general = pretty_indices.get('general', "")
len_above = len(letters_above)
len_below = len(letters_below)
len_general = len(letters_general)
def _i(number):
try:
return letters_below[number]
except IndexError:
return 'i_' + str(number - len_below)
def _a(number):
try:
return letters_above[number]
except IndexError:
return 'a_' + str(number - len_above)
def _p(number):
try:
return letters_general[number]
except IndexError:
return 'p_' + str(number - len_general)
aboves = []
belows = []
generals = []
dummies = expr.atoms(Dummy)
if not new_indices:
dummies = sorted(dummies, key=default_sort_key)
# generate lists with the dummies we will insert
a = i = p = 0
for d in dummies:
assum = d.assumptions0
if assum.get("above_fermi"):
if new_indices:
sym = _a(a)
a += 1
l1 = aboves
elif assum.get("below_fermi"):
if new_indices:
sym = _i(i)
i += 1
l1 = belows
else:
if new_indices:
sym = _p(p)
p += 1
l1 = generals
if new_indices:
l1.append(Dummy(sym, **assum))
else:
l1.append(d)
expr = expr.expand()
terms = Add.make_args(expr)
new_terms = []
for term in terms:
i = iter(belows)
a = iter(aboves)
p = iter(generals)
ordered = _get_ordered_dummies(term)
subsdict = {}
for d in ordered:
if d.assumptions0.get('below_fermi'):
subsdict[d] = next(i)
elif d.assumptions0.get('above_fermi'):
subsdict[d] = next(a)
else:
subsdict[d] = next(p)
subslist = []
final_subs = []
for k, v in subsdict.items():
if k == v:
continue
if v in subsdict:
# We check if the sequence of substitutions ends quickly. In
# that case, we can avoid temporary symbols if we ensure the
# correct substitution order.
if subsdict[v] in subsdict:
# (x, y) -> (y, x), we need a temporary variable
x = Dummy('x')
subslist.append((k, x))
final_subs.append((x, v))
else:
# (x, y) -> (y, a), x->y must be done last
# but before temporary variables are resolved
final_subs.insert(0, (k, v))
else:
subslist.append((k, v))
subslist.extend(final_subs)
new_terms.append(term.subs(subslist))
return Add(*new_terms)
class KeyPrinter(StrPrinter):
"""Printer for which only equal objects are equal in print"""
def _print_Dummy(self, expr):
return "(%s_%i)" % (expr.name, expr.dummy_index)
def __kprint(expr):
p = KeyPrinter()
return p.doprint(expr)
def _get_ordered_dummies(mul, verbose=False):
"""Returns all dummies in the mul sorted in canonical order.
Explanation
===========
The purpose of the canonical ordering is that dummies can be substituted
consistently across terms with the result that equivalent terms can be
simplified.
It is not possible to determine if two terms are equivalent based solely on
the dummy order. However, a consistent substitution guided by the ordered
dummies should lead to trivially (non-)equivalent terms, thereby revealing
the equivalence. This also means that if two terms have identical sequences of
dummies, the (non-)equivalence should already be apparent.
Strategy
--------
The canonical order is given by an arbitrary sorting rule. A sort key
is determined for each dummy as a tuple that depends on all factors where
the index is present. The dummies are thereby sorted according to the
contraction structure of the term, instead of sorting based solely on the
dummy symbol itself.
After all dummies in the term have been assigned a key, we check for identical
keys, i.e. unorderable dummies. If any are found, we call a specialized
method, _determine_ambiguous(), that will determine a unique order based
on recursive calls to _get_ordered_dummies().
Key description
---------------
A high level description of the sort key:
1. Range of the dummy index
2. Relation to external (non-dummy) indices
3. Position of the index in the first factor
4. Position of the index in the second factor
The sort key is a tuple with the following components:
1. A single character indicating the range of the dummy (above, below
or general.)
2. A list of strings with fully masked string representations of all
factors where the dummy is present. By masked, we mean that dummies
are represented by a symbol to indicate either below fermi, above or
general. No other information is displayed about the dummies at
this point. The list is sorted stringwise.
3. An integer number indicating the position of the index, in the first
factor as sorted in 2.
4. An integer number indicating the position of the index, in the second
factor as sorted in 2.
If a factor is either of type AntiSymmetricTensor or SqOperator, the index
position in items 3 and 4 is indicated as 'upper' or 'lower' only.
(Creation operators are considered upper and annihilation operators lower.)
If the masked factors are identical, the two factors cannot be ordered
unambiguously in item 2. In this case, items 3, 4 are left out. If several
indices are contracted between the unorderable factors, it will be handled by
_determine_ambiguous()
"""
# setup dicts to avoid repeated calculations in key()
args = Mul.make_args(mul)
fac_dum = { fac: fac.atoms(Dummy) for fac in args }
fac_repr = { fac: __kprint(fac) for fac in args }
all_dums = set().union(*fac_dum.values())
mask = {}
for d in all_dums:
if d.assumptions0.get('below_fermi'):
mask[d] = '0'
elif d.assumptions0.get('above_fermi'):
mask[d] = '1'
else:
mask[d] = '2'
dum_repr = {d: __kprint(d) for d in all_dums}
def _key(d):
dumstruct = [ fac for fac in fac_dum if d in fac_dum[fac] ]
other_dums = set().union(*[fac_dum[fac] for fac in dumstruct])
fac = dumstruct[-1]
if other_dums is fac_dum[fac]:
other_dums = fac_dum[fac].copy()
other_dums.remove(d)
masked_facs = [ fac_repr[fac] for fac in dumstruct ]
for d2 in other_dums:
masked_facs = [ fac.replace(dum_repr[d2], mask[d2])
for fac in masked_facs ]
all_masked = [ fac.replace(dum_repr[d], mask[d])
for fac in masked_facs ]
masked_facs = dict(list(zip(dumstruct, masked_facs)))
# dummies for which the ordering cannot be determined
if has_dups(all_masked):
all_masked.sort()
return mask[d], tuple(all_masked) # positions are ambiguous
# sort factors according to fully masked strings
keydict = dict(list(zip(dumstruct, all_masked)))
dumstruct.sort(key=lambda x: keydict[x])
all_masked.sort()
pos_val = []
for fac in dumstruct:
if isinstance(fac, AntiSymmetricTensor):
if d in fac.upper:
pos_val.append('u')
if d in fac.lower:
pos_val.append('l')
elif isinstance(fac, Creator):
pos_val.append('u')
elif isinstance(fac, Annihilator):
pos_val.append('l')
elif isinstance(fac, NO):
ops = [ op for op in fac if op.has(d) ]
for op in ops:
if isinstance(op, Creator):
pos_val.append('u')
else:
pos_val.append('l')
else:
# fallback to position in string representation
facpos = -1
while 1:
facpos = masked_facs[fac].find(dum_repr[d], facpos + 1)
if facpos == -1:
break
pos_val.append(facpos)
return (mask[d], tuple(all_masked), pos_val[0], pos_val[-1])
dumkey = dict(list(zip(all_dums, list(map(_key, all_dums)))))
result = sorted(all_dums, key=lambda x: dumkey[x])
if has_dups(iter(dumkey.values())):
# We have ambiguities
unordered = defaultdict(set)
for d, k in dumkey.items():
unordered[k].add(d)
for k in [ k for k in unordered if len(unordered[k]) < 2 ]:
del unordered[k]
unordered = [ unordered[k] for k in sorted(unordered) ]
result = _determine_ambiguous(mul, result, unordered)
return result
def _determine_ambiguous(term, ordered, ambiguous_groups):
# We encountered a term for which the dummy substitution is ambiguous.
# This happens for terms with 2 or more contractions between factors that
# cannot be uniquely ordered independent of summation indices. For
# example:
#
# Sum(p, q) v^{p, .}_{q, .}v^{q, .}_{p, .}
#
# Assuming that the indices represented by . are dummies with the
# same range, the factors cannot be ordered, and there is no
# way to determine a consistent ordering of p and q.
#
# The strategy employed here, is to relabel all unambiguous dummies with
# non-dummy symbols and call _get_ordered_dummies again. This procedure is
# applied to the entire term so there is a possibility that
# _determine_ambiguous() is called again from a deeper recursion level.
# break recursion if there are no ordered dummies
all_ambiguous = set()
for dummies in ambiguous_groups:
all_ambiguous |= dummies
all_ordered = set(ordered) - all_ambiguous
if not all_ordered:
# FIXME: If we arrive here, there are no ordered dummies. A method to
# handle this needs to be implemented. In order to return something
# useful nevertheless, we choose arbitrarily the first dummy and
# determine the rest from this one. This method is dependent on the
# actual dummy labels which violates an assumption for the
# canonicalization procedure. A better implementation is needed.
group = [ d for d in ordered if d in ambiguous_groups[0] ]
d = group[0]
all_ordered.add(d)
ambiguous_groups[0].remove(d)
stored_counter = _symbol_factory._counter
subslist = []
for d in [ d for d in ordered if d in all_ordered ]:
nondum = _symbol_factory._next()
subslist.append((d, nondum))
newterm = term.subs(subslist)
neworder = _get_ordered_dummies(newterm)
_symbol_factory._set_counter(stored_counter)
# update ordered list with new information
for group in ambiguous_groups:
ordered_group = [ d for d in neworder if d in group ]
ordered_group.reverse()
result = []
for d in ordered:
if d in group:
result.append(ordered_group.pop())
else:
result.append(d)
ordered = result
return ordered
class _SymbolFactory:
def __init__(self, label):
self._counterVar = 0
self._label = label
def _set_counter(self, value):
"""
Sets counter to value.
"""
self._counterVar = value
@property
def _counter(self):
"""
What counter is currently at.
"""
return self._counterVar
def _next(self):
"""
Generates the next symbols and increments counter by 1.
"""
s = Symbol("%s%i" % (self._label, self._counterVar))
self._counterVar += 1
return s
_symbol_factory = _SymbolFactory('_]"]_') # most certainly a unique label
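# Illustrative note (not part of the API): the factory simply appends a running
# counter to its label, so repeated calls produce distinct placeholder symbols:
#
#     >>> _symbol_factory._next()          # Symbol('_]"]_0')
#     >>> _symbol_factory._next()          # Symbol('_]"]_1')
#     >>> _symbol_factory._set_counter(0)  # rewind, as _determine_ambiguous() does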
@cacheit
def _get_contractions(string1, keep_only_fully_contracted=False):
"""
Returns Add-object with contracted terms.
Uses recursion to find all contractions. -- Internal helper function --
    Will find all nonzero contractions between the operators in ``string1``.
"""
# Should we store current level of contraction?
if keep_only_fully_contracted and string1:
result = []
else:
result = [NO(Mul(*string1))]
for i in range(len(string1) - 1):
for j in range(i + 1, len(string1)):
c = contraction(string1[i], string1[j])
if c:
sign = (j - i + 1) % 2
if sign:
coeff = S.NegativeOne*c
else:
coeff = c
#
# Call next level of recursion
# ============================
#
# We now need to find more contractions among operators
#
# oplist = string1[:i]+ string1[i+1:j] + string1[j+1:]
#
# To prevent overcounting, we don't allow contractions
# we have already encountered. i.e. contractions between
# string1[:i] <---> string1[i+1:j]
# and string1[:i] <---> string1[j+1:].
#
# This leaves the case:
oplist = string1[i + 1:j] + string1[j + 1:]
if oplist:
result.append(coeff*NO(
Mul(*string1[:i])*_get_contractions( oplist,
keep_only_fully_contracted=keep_only_fully_contracted)))
else:
result.append(coeff*NO( Mul(*string1[:i])))
if keep_only_fully_contracted:
break # next iteration over i leaves leftmost operator string1[0] uncontracted
return Add(*result)
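# Hedged sketch of how the recursion bottoms out (illustrative, not a doctest):
# for a string of two operators the result is the uncontracted normal-ordered
# term plus the single contraction, e.g.
#
#     >>> from sympy import symbols
#     >>> from sympy.physics.secondquant import F, Fd
#     >>> p, q = symbols('p q')
#     >>> _get_contractions((Fd(p), F(q)))
#     # roughly NO(Fd(p)*F(q)) + contraction(Fd(p), F(q))
#
# With keep_only_fully_contracted=True only the fully contracted part is kept.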
def wicks(e, **kw_args):
"""
    Returns the normal ordered equivalent of an expression using Wick's theorem.
Examples
========
>>> from sympy import symbols, Dummy
>>> from sympy.physics.secondquant import wicks, F, Fd
>>> p, q, r = symbols('p,q,r')
>>> wicks(Fd(p)*F(q))
KroneckerDelta(_i, q)*KroneckerDelta(p, q) + NO(CreateFermion(p)*AnnihilateFermion(q))
By default, the expression is expanded:
>>> wicks(F(p)*(F(q)+F(r)))
NO(AnnihilateFermion(p)*AnnihilateFermion(q)) + NO(AnnihilateFermion(p)*AnnihilateFermion(r))
With the keyword 'keep_only_fully_contracted=True', only fully contracted
terms are returned.
By request, the result can be simplified in the following order:
-- KroneckerDelta functions are evaluated
-- Dummy variables are substituted consistently across terms
>>> p, q, r = symbols('p q r', cls=Dummy)
>>> wicks(Fd(p)*(F(q)+F(r)), keep_only_fully_contracted=True)
KroneckerDelta(_i, _q)*KroneckerDelta(_p, _q) + KroneckerDelta(_i, _r)*KroneckerDelta(_p, _r)
"""
if not e:
return S.Zero
opts = {
'simplify_kronecker_deltas': False,
'expand': True,
'simplify_dummies': False,
'keep_only_fully_contracted': False
}
opts.update(kw_args)
# check if we are already normally ordered
if isinstance(e, NO):
if opts['keep_only_fully_contracted']:
return S.Zero
else:
return e
elif isinstance(e, FermionicOperator):
if opts['keep_only_fully_contracted']:
return S.Zero
else:
return e
# break up any NO-objects, and evaluate commutators
e = e.doit(wicks=True)
# make sure we have only one term to consider
e = e.expand()
if isinstance(e, Add):
if opts['simplify_dummies']:
return substitute_dummies(Add(*[ wicks(term, **kw_args) for term in e.args]))
else:
return Add(*[ wicks(term, **kw_args) for term in e.args])
# For Mul-objects we can actually do something
if isinstance(e, Mul):
# we don't want to mess around with commuting part of Mul
# so we factorize it out before starting recursion
c_part = []
string1 = []
for factor in e.args:
if factor.is_commutative:
c_part.append(factor)
else:
string1.append(factor)
n = len(string1)
# catch trivial cases
if n == 0:
result = e
elif n == 1:
if opts['keep_only_fully_contracted']:
return S.Zero
else:
result = e
else: # non-trivial
if isinstance(string1[0], BosonicOperator):
raise NotImplementedError
string1 = tuple(string1)
# recursion over higher order contractions
result = _get_contractions(string1,
keep_only_fully_contracted=opts['keep_only_fully_contracted'] )
result = Mul(*c_part)*result
if opts['expand']:
result = result.expand()
if opts['simplify_kronecker_deltas']:
result = evaluate_deltas(result)
return result
# there was nothing to do
return e
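# Hedged usage sketch for the remaining option flags (illustrative, not a doctest):
#
#     >>> from sympy import symbols
#     >>> from sympy.physics.secondquant import wicks, F, Fd
#     >>> p, q = symbols('p q')
#     >>> wicks(Fd(p)*F(q), simplify_kronecker_deltas=True)   # deltas evaluated via evaluate_deltas()
#     >>> wicks(Fd(p)*F(q), keep_only_fully_contracted=True)  # the NO(...) term is dropped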
class PermutationOperator(Expr):
"""
Represents the index permutation operator P(ij).
P(ij)*f(i)*g(j) = f(i)*g(j) - f(j)*g(i)
"""
is_commutative = True
def __new__(cls, i, j):
i, j = sorted(map(sympify, (i, j)), key=default_sort_key)
obj = Basic.__new__(cls, i, j)
return obj
def get_permuted(self, expr):
"""
Returns -expr with permuted indices.
Explanation
===========
>>> from sympy import symbols, Function
>>> from sympy.physics.secondquant import PermutationOperator
>>> p,q = symbols('p,q')
>>> f = Function('f')
>>> PermutationOperator(p,q).get_permuted(f(p,q))
-f(q, p)
"""
i = self.args[0]
j = self.args[1]
if expr.has(i) and expr.has(j):
tmp = Dummy()
expr = expr.subs(i, tmp)
expr = expr.subs(j, i)
expr = expr.subs(tmp, j)
return S.NegativeOne*expr
else:
return expr
def _latex(self, printer):
return "P(%s%s)" % self.args
def simplify_index_permutations(expr, permutation_operators):
"""
Performs simplification by introducing PermutationOperators where appropriate.
Explanation
===========
Schematically:
[abij] - [abji] - [baij] + [baji] -> P(ab)*P(ij)*[abij]
permutation_operators is a list of PermutationOperators to consider.
If permutation_operators=[P(ab),P(ij)] we will try to introduce the
permutation operators P(ij) and P(ab) in the expression. If there are other
possible simplifications, we ignore them.
>>> from sympy import symbols, Function
>>> from sympy.physics.secondquant import simplify_index_permutations
>>> from sympy.physics.secondquant import PermutationOperator
>>> p,q,r,s = symbols('p,q,r,s')
>>> f = Function('f')
>>> g = Function('g')
>>> expr = f(p)*g(q) - f(q)*g(p); expr
f(p)*g(q) - f(q)*g(p)
>>> simplify_index_permutations(expr,[PermutationOperator(p,q)])
f(p)*g(q)*PermutationOperator(p, q)
>>> PermutList = [PermutationOperator(p,q),PermutationOperator(r,s)]
>>> expr = f(p,r)*g(q,s) - f(q,r)*g(p,s) + f(q,s)*g(p,r) - f(p,s)*g(q,r)
>>> simplify_index_permutations(expr,PermutList)
f(p, r)*g(q, s)*PermutationOperator(p, q)*PermutationOperator(r, s)
"""
def _get_indices(expr, ind):
"""
Collects indices recursively in predictable order.
"""
result = []
for arg in expr.args:
if arg in ind:
result.append(arg)
else:
if arg.args:
result.extend(_get_indices(arg, ind))
return result
def _choose_one_to_keep(a, b, ind):
# we keep the one where indices in ind are in order ind[0] < ind[1]
return min(a, b, key=lambda x: default_sort_key(_get_indices(x, ind)))
expr = expr.expand()
if isinstance(expr, Add):
terms = set(expr.args)
for P in permutation_operators:
new_terms = set()
on_hold = set()
while terms:
term = terms.pop()
permuted = P.get_permuted(term)
if permuted in terms | on_hold:
try:
terms.remove(permuted)
except KeyError:
on_hold.remove(permuted)
keep = _choose_one_to_keep(term, permuted, P.args)
new_terms.add(P*keep)
else:
# Some terms must get a second chance because the permuted
# term may already have canonical dummy ordering. Then
# substitute_dummies() does nothing. However, the other
# term, if it exists, will be able to match with us.
permuted1 = permuted
permuted = substitute_dummies(permuted)
if permuted1 == permuted:
on_hold.add(term)
elif permuted in terms | on_hold:
try:
terms.remove(permuted)
except KeyError:
on_hold.remove(permuted)
keep = _choose_one_to_keep(term, permuted, P.args)
new_terms.add(P*keep)
else:
new_terms.add(term)
terms = new_terms | on_hold
return Add(*terms)
return expr
|
482306188704855a03eddc2163bfcd58bd622b0a3b39f80a7669b63a4ffd5e5d | from sympy.core.numbers import (I, pi)
from sympy.core.singleton import S
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.physics.quantum.constants import hbar
def wavefunction(n, x):
"""
    Returns the wavefunction for a particle on a ring.
Parameters
==========
n : The quantum number.
        Here ``n`` can be positive as well as negative, which can be used to
        describe the direction of motion of the particle.
x :
The angle.
Examples
========
>>> from sympy.physics.pring import wavefunction
>>> from sympy import Symbol, integrate, pi
>>> x=Symbol("x")
>>> wavefunction(1, x)
sqrt(2)*exp(I*x)/(2*sqrt(pi))
>>> wavefunction(2, x)
sqrt(2)*exp(2*I*x)/(2*sqrt(pi))
>>> wavefunction(3, x)
sqrt(2)*exp(3*I*x)/(2*sqrt(pi))
The normalization of the wavefunction is:
>>> integrate(wavefunction(2, x)*wavefunction(-2, x), (x, 0, 2*pi))
1
>>> integrate(wavefunction(4, x)*wavefunction(-4, x), (x, 0, 2*pi))
1
References
==========
.. [1] Atkins, Peter W.; Friedman, Ronald (2005). Molecular Quantum
Mechanics (4th ed.). Pages 71-73.
"""
# sympify arguments
n, x = S(n), S(x)
return exp(n * I * x) / sqrt(2 * pi)
def energy(n, m, r):
"""
Returns the energy of the state corresponding to quantum number ``n``.
    E = (n**2 * hbar**2) / (2 * m * r**2)
Parameters
==========
n :
The quantum number.
m :
Mass of the particle.
r :
Radius of circle.
Examples
========
>>> from sympy.physics.pring import energy
>>> from sympy import Symbol
>>> m=Symbol("m")
>>> r=Symbol("r")
>>> energy(1, m, r)
hbar**2/(2*m*r**2)
>>> energy(2, m, r)
2*hbar**2/(m*r**2)
>>> energy(-2, 2.0, 3.0)
0.111111111111111*hbar**2
References
==========
.. [1] Atkins, Peter W.; Friedman, Ronald (2005). Molecular Quantum
Mechanics (4th ed.). Pages 71-73.
"""
n, m, r = S(n), S(m), S(r)
if n.is_integer:
return (n**2 * hbar**2) / (2 * m * r**2)
else:
raise ValueError("'n' must be integer")
|
ae248c883213aade0b6419e367c46ade35256981b3ef4eb09f43618a09eea697 | from sympy.core.numbers import Number
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.core.sympify import sympify
from sympy.tensor.array.dense_ndim_array import MutableDenseNDimArray
from sympy.tensor.tensor import (Tensor, TensExpr, TensAdd, TensMul,
TensorIndex)
class PartialDerivative(TensExpr):
"""
Partial derivative for tensor expressions.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, TensorHead
>>> from sympy.tensor.toperators import PartialDerivative
>>> from sympy import symbols
>>> L = TensorIndexType("L")
>>> A = TensorHead("A", [L])
>>> i, j = symbols("i j")
>>> expr = PartialDerivative(A(i), A(j))
>>> expr
PartialDerivative(A(i), A(j))
The ``PartialDerivative`` object behaves like a tensorial expression:
>>> expr.get_indices()
[i, -j]
Indices can be contracted:
>>> expr = PartialDerivative(A(i), A(i))
>>> expr
PartialDerivative(A(L_0), A(L_0))
>>> expr.get_indices()
[L_0, -L_0]
"""
def __new__(cls, expr, *variables):
# Flatten:
if isinstance(expr, PartialDerivative):
variables = expr.variables + variables
expr = expr.expr
args, indices, free, dum = cls._contract_indices_for_derivative(
S(expr), variables)
obj = TensExpr.__new__(cls, *args)
obj._indices = indices
obj._free = free
obj._dum = dum
return obj
@property
def coeff(self):
return S.One
@property
def nocoeff(self):
return self
@classmethod
def _contract_indices_for_derivative(cls, expr, variables):
variables_opposite_valence = []
for i in variables:
if isinstance(i, Tensor):
i_free_indices = i.get_free_indices()
variables_opposite_valence.append(
i.xreplace({k: -k for k in i_free_indices}))
elif isinstance(i, Symbol):
variables_opposite_valence.append(i)
args, indices, free, dum = TensMul._tensMul_contract_indices(
[expr] + variables_opposite_valence, replace_indices=True)
for i in range(1, len(args)):
args_i = args[i]
if isinstance(args_i, Tensor):
i_indices = args[i].get_free_indices()
args[i] = args[i].xreplace({k: -k for k in i_indices})
return args, indices, free, dum
def doit(self):
args, indices, free, dum = self._contract_indices_for_derivative(self.expr, self.variables)
obj = self.func(*args)
obj._indices = indices
obj._free = free
obj._dum = dum
return obj
def _expand_partial_derivative(self):
args, indices, free, dum = self._contract_indices_for_derivative(self.expr, self.variables)
obj = self.func(*args)
obj._indices = indices
obj._free = free
obj._dum = dum
result = obj
if not args[0].free_symbols:
return S.Zero
elif isinstance(obj.expr, TensAdd):
# take care of sums of multi PDs
result = obj.expr.func(*[
self.func(a, *obj.variables)._expand_partial_derivative()
for a in result.expr.args])
elif isinstance(obj.expr, TensMul):
# take care of products of multi PDs
if len(obj.variables) == 1:
# derivative with respect to single variable
terms = []
mulargs = list(obj.expr.args)
for ind in range(len(mulargs)):
if not isinstance(sympify(mulargs[ind]), Number):
# a number coefficient is not considered for
# expansion of PartialDerivative
d = self.func(mulargs[ind], *obj.variables)._expand_partial_derivative()
terms.append(TensMul(*(mulargs[:ind]
+ [d]
+ mulargs[(ind + 1):])))
result = TensAdd.fromiter(terms)
else:
# derivative with respect to multiple variables
# decompose:
# partial(expr, (u, v))
# = partial(partial(expr, u).doit(), v).doit()
result = obj.expr # init with expr
for v in obj.variables:
result = self.func(result, v)._expand_partial_derivative()
# then throw PD on it
return result
def _perform_derivative(self):
result = self.expr
for v in self.variables:
if isinstance(result, TensExpr):
result = result._eval_partial_derivative(v)
else:
if v._diff_wrt:
result = result._eval_derivative(v)
else:
result = S.Zero
return result
def get_indices(self):
return self._indices
def get_free_indices(self):
free = sorted(self._free, key=lambda x: x[1])
return [i[0] for i in free]
def _replace_indices(self, repl):
expr = self.expr.xreplace(repl)
mirrored = {-k: -v for k, v in repl.items()}
variables = [i.xreplace(mirrored) for i in self.variables]
return self.func(expr, *variables)
@property
def expr(self):
return self.args[0]
@property
def variables(self):
return self.args[1:]
def _extract_data(self, replacement_dict):
from .array import derive_by_array, tensorcontraction
indices, array = self.expr._extract_data(replacement_dict)
for variable in self.variables:
var_indices, var_array = variable._extract_data(replacement_dict)
var_indices = [-i for i in var_indices]
coeff_array, var_array = zip(*[i.as_coeff_Mul() for i in var_array])
array = derive_by_array(array, var_array)
array = array.as_mutable() # type: MutableDenseNDimArray
varindex = var_indices[0] # type: TensorIndex
# Remove coefficients of base vector:
coeff_index = [0] + [slice(None) for i in range(len(indices))]
for i, coeff in enumerate(coeff_array):
coeff_index[0] = i
array[tuple(coeff_index)] /= coeff
if -varindex in indices:
pos = indices.index(-varindex)
array = tensorcontraction(array, (0, pos+1))
indices.pop(pos)
else:
indices.append(varindex)
return indices, array
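# Hedged sketch of the expansion above (illustrative, not a doctest): applied to
# a product, ``_expand_partial_derivative`` distributes the derivative with the
# product rule, skipping purely numeric coefficients.
#
#     >>> from sympy import symbols
#     >>> from sympy.tensor.tensor import TensorIndexType, TensorHead
#     >>> L = TensorIndexType("L")
#     >>> A, B = TensorHead("A", [L]), TensorHead("B", [L])
#     >>> i, j, k = symbols("i j k")
#     >>> PartialDerivative(A(i)*B(j), A(k))._expand_partial_derivative()
#     # expected to give A(i)*PartialDerivative(B(j), A(k))
#     #                + B(j)*PartialDerivative(A(i), A(k))  (up to argument ordering)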
|
124ad30bc01a2a80d55fb248a5095d51c64287236da2d235fdcd4832b3a6fcf3 | """
This module defines tensors with abstract index notation.
The abstract index notation was first formalized by Penrose.
Tensor indices are formal objects, with a tensor type; there is no
notion of index range, it is only possible to assign the dimension,
used to trace the Kronecker delta; the dimension can be a Symbol.
The Einstein summation convention is used.
The covariant indices are indicated with a minus sign in front of the index.
For instance the tensor ``t = p(a)*A(b,c)*q(-c)`` has the index ``c``
contracted.
A tensor expression ``t`` can be called; called with its
indices in sorted order it is equal to itself:
in the above example ``t(a, b) == t``;
one can call ``t`` with different indices; ``t(c, d) == p(c)*A(d,a)*q(-a)``.
The contracted indices are dummy indices, internally they have no name,
the indices being represented by a graph-like structure.
Tensors are put in canonical form using ``canon_bp``, which uses
the Butler-Portugal algorithm for canonicalization using the monoterm
symmetries of the tensors.
If there is a (anti)symmetric metric, the indices can be raised and
lowered when the tensor is put in canonical form.
"""
from typing import Any, Dict as tDict, List, Set as tSet, Tuple as tTuple
from functools import reduce
from abc import abstractmethod, ABCMeta
from collections import defaultdict
import operator
import itertools
from sympy.core.mul import prod
from sympy.core.numbers import (Integer, Rational)
from sympy.combinatorics import Permutation
from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, \
bsgs_direct_product, canonicalize, riemann_bsgs
from sympy.core import Basic, Expr, sympify, Add, Mul, S
from sympy.core.assumptions import ManagedProperties
from sympy.core.containers import Tuple, Dict
from sympy.core.sorting import default_sort_key
from sympy.core.symbol import Symbol, symbols
from sympy.core.sympify import CantSympify, _sympify
from sympy.core.operations import AssocOp
from sympy.external.gmpy import SYMPY_INTS
from sympy.matrices import eye
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.decorator import memoize_property, deprecated
import warnings
@deprecated(useinstead=".replace_with_arrays", issue=15276, deprecated_since_version="1.4")
def deprecate_data():
pass
@deprecated(useinstead=".substitute_indices()", issue=17515,
deprecated_since_version="1.5")
def deprecate_fun_eval():
pass
@deprecated(useinstead="tensor_heads()", issue=17108,
deprecated_since_version="1.5")
def deprecate_TensorType():
pass
class _IndexStructure(CantSympify):
"""
This class handles the indices (free and dummy ones). It contains the
algorithms to manage the dummy indices replacements and contractions of
free indices under multiplications of tensor expressions, as well as stuff
related to canonicalization sorting, getting the permutation of the
expression and so on. It also includes tools to get the ``TensorIndex``
objects corresponding to the given index structure.
"""
def __init__(self, free, dum, index_types, indices, canon_bp=False):
self.free = free
self.dum = dum
self.index_types = index_types
self.indices = indices
self._ext_rank = len(self.free) + 2*len(self.dum)
self.dum.sort(key=lambda x: x[0])
@staticmethod
def from_indices(*indices):
"""
Create a new ``_IndexStructure`` object from a list of ``indices``.
Explanation
===========
``indices`` ``TensorIndex`` objects, the indices. Contractions are
detected upon construction.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, _IndexStructure
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz)
>>> _IndexStructure.from_indices(m0, m1, -m1, m3)
_IndexStructure([(m0, 0), (m3, 3)], [(1, 2)], [Lorentz, Lorentz, Lorentz, Lorentz])
"""
free, dum = _IndexStructure._free_dum_from_indices(*indices)
index_types = [i.tensor_index_type for i in indices]
indices = _IndexStructure._replace_dummy_names(indices, free, dum)
return _IndexStructure(free, dum, index_types, indices)
@staticmethod
def from_components_free_dum(components, free, dum):
index_types = []
for component in components:
index_types.extend(component.index_types)
indices = _IndexStructure.generate_indices_from_free_dum_index_types(free, dum, index_types)
return _IndexStructure(free, dum, index_types, indices)
@staticmethod
def _free_dum_from_indices(*indices):
"""
Convert ``indices`` into ``free``, ``dum`` for single component tensor.
Explanation
===========
        ``free`` list of tuples ``(index, pos)``,
        where ``pos`` is the position of the index in
        the list of indices formed by the component tensors
        ``dum`` list of tuples ``(pos_contr, pos_cov)``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, \
_IndexStructure
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz)
>>> _IndexStructure._free_dum_from_indices(m0, m1, -m1, m3)
([(m0, 0), (m3, 3)], [(1, 2)])
"""
n = len(indices)
if n == 1:
return [(indices[0], 0)], []
# find the positions of the free indices and of the dummy indices
free = [True]*len(indices)
index_dict = {}
dum = []
for i, index in enumerate(indices):
name = index.name
typ = index.tensor_index_type
contr = index.is_up
if (name, typ) in index_dict:
# found a pair of dummy indices
is_contr, pos = index_dict[(name, typ)]
# check consistency and update free
if is_contr:
if contr:
raise ValueError('two equal contravariant indices in slots %d and %d' %(pos, i))
else:
free[pos] = False
free[i] = False
else:
if contr:
free[pos] = False
free[i] = False
else:
raise ValueError('two equal covariant indices in slots %d and %d' %(pos, i))
if contr:
dum.append((i, pos))
else:
dum.append((pos, i))
else:
index_dict[(name, typ)] = index.is_up, i
free = [(index, i) for i, index in enumerate(indices) if free[i]]
free.sort()
return free, dum
def get_indices(self):
"""
Get a list of indices, creating new tensor indices to complete dummy indices.
"""
return self.indices[:]
@staticmethod
def generate_indices_from_free_dum_index_types(free, dum, index_types):
indices = [None]*(len(free)+2*len(dum))
for idx, pos in free:
indices[pos] = idx
generate_dummy_name = _IndexStructure._get_generator_for_dummy_indices(free)
for pos1, pos2 in dum:
typ1 = index_types[pos1]
indname = generate_dummy_name(typ1)
indices[pos1] = TensorIndex(indname, typ1, True)
indices[pos2] = TensorIndex(indname, typ1, False)
return _IndexStructure._replace_dummy_names(indices, free, dum)
@staticmethod
def _get_generator_for_dummy_indices(free):
cdt = defaultdict(int)
# if the free indices have names with dummy_name, start with an
# index higher than those for the dummy indices
# to avoid name collisions
for indx, ipos in free:
if indx.name.split('_')[0] == indx.tensor_index_type.dummy_name:
cdt[indx.tensor_index_type] = max(cdt[indx.tensor_index_type], int(indx.name.split('_')[1]) + 1)
def dummy_name_gen(tensor_index_type):
nd = str(cdt[tensor_index_type])
cdt[tensor_index_type] += 1
return tensor_index_type.dummy_name + '_' + nd
return dummy_name_gen
@staticmethod
def _replace_dummy_names(indices, free, dum):
dum.sort(key=lambda x: x[0])
new_indices = [ind for ind in indices]
assert len(indices) == len(free) + 2*len(dum)
generate_dummy_name = _IndexStructure._get_generator_for_dummy_indices(free)
for ipos1, ipos2 in dum:
typ1 = new_indices[ipos1].tensor_index_type
indname = generate_dummy_name(typ1)
new_indices[ipos1] = TensorIndex(indname, typ1, True)
new_indices[ipos2] = TensorIndex(indname, typ1, False)
return new_indices
def get_free_indices(self): # type: () -> List[TensorIndex]
"""
Get a list of free indices.
"""
# get sorted indices according to their position:
free = sorted(self.free, key=lambda x: x[1])
return [i[0] for i in free]
def __str__(self):
return "_IndexStructure({}, {}, {})".format(self.free, self.dum, self.index_types)
def __repr__(self):
return self.__str__()
def _get_sorted_free_indices_for_canon(self):
sorted_free = self.free[:]
sorted_free.sort(key=lambda x: x[0])
return sorted_free
def _get_sorted_dum_indices_for_canon(self):
return sorted(self.dum, key=lambda x: x[0])
def _get_lexicographically_sorted_index_types(self):
permutation = self.indices_canon_args()[0]
index_types = [None]*self._ext_rank
for i, it in enumerate(self.index_types):
index_types[permutation(i)] = it
return index_types
def _get_lexicographically_sorted_indices(self):
permutation = self.indices_canon_args()[0]
indices = [None]*self._ext_rank
for i, it in enumerate(self.indices):
indices[permutation(i)] = it
return indices
def perm2tensor(self, g, is_canon_bp=False):
"""
Returns a ``_IndexStructure`` instance corresponding to the permutation ``g``.
Explanation
===========
``g`` permutation corresponding to the tensor in the representation
used in canonicalization
``is_canon_bp`` if True, then ``g`` is the permutation
corresponding to the canonical form of the tensor
"""
sorted_free = [i[0] for i in self._get_sorted_free_indices_for_canon()]
lex_index_types = self._get_lexicographically_sorted_index_types()
lex_indices = self._get_lexicographically_sorted_indices()
nfree = len(sorted_free)
rank = self._ext_rank
dum = [[None]*2 for i in range((rank - nfree)//2)]
free = []
index_types = [None]*rank
indices = [None]*rank
for i in range(rank):
gi = g[i]
index_types[i] = lex_index_types[gi]
indices[i] = lex_indices[gi]
if gi < nfree:
ind = sorted_free[gi]
assert index_types[i] == sorted_free[gi].tensor_index_type
free.append((ind, i))
else:
j = gi - nfree
idum, cov = divmod(j, 2)
if cov:
dum[idum][1] = i
else:
dum[idum][0] = i
dum = [tuple(x) for x in dum]
return _IndexStructure(free, dum, index_types, indices)
def indices_canon_args(self):
"""
        Returns ``(g, dummies, msym)``, the first arguments of ``canonicalize``
See ``canonicalize`` in ``tensor_can.py`` in combinatorics module.
"""
# to be called after sorted_components
from sympy.combinatorics.permutations import _af_new
n = self._ext_rank
g = [None]*n + [n, n+1]
# Converts the symmetry of the metric into msym from .canonicalize()
# method in the combinatorics module
def metric_symmetry_to_msym(metric):
if metric is None:
return None
sym = metric.symmetry
if sym == TensorSymmetry.fully_symmetric(2):
return 0
if sym == TensorSymmetry.fully_symmetric(-2):
return 1
return None
# ordered indices: first the free indices, ordered by types
# then the dummy indices, ordered by types and contravariant before
# covariant
# g[position in tensor] = position in ordered indices
for i, (indx, ipos) in enumerate(self._get_sorted_free_indices_for_canon()):
g[ipos] = i
pos = len(self.free)
j = len(self.free)
dummies = []
prev = None
a = []
msym = []
for ipos1, ipos2 in self._get_sorted_dum_indices_for_canon():
g[ipos1] = j
g[ipos2] = j + 1
j += 2
typ = self.index_types[ipos1]
if typ != prev:
if a:
dummies.append(a)
a = [pos, pos + 1]
prev = typ
msym.append(metric_symmetry_to_msym(typ.metric))
else:
a.extend([pos, pos + 1])
pos += 2
if a:
dummies.append(a)
return _af_new(g), dummies, msym
def components_canon_args(components):
numtyp = []
prev = None
for t in components:
if t == prev:
numtyp[-1][1] += 1
else:
prev = t
numtyp.append([prev, 1])
v = []
for h, n in numtyp:
if h.comm in (0, 1):
comm = h.comm
else:
comm = TensorManager.get_comm(h.comm, h.comm)
v.append((h.symmetry.base, h.symmetry.generators, n, comm))
return v
class _TensorDataLazyEvaluator(CantSympify):
"""
EXPERIMENTAL: do not rely on this class, it may change without deprecation
warnings in future versions of SymPy.
Explanation
===========
This object contains the logic to associate components data to a tensor
expression. Components data are set via the ``.data`` property of tensor
    expressions and are stored inside this class as a mapping between the tensor
expression and the ``ndarray``.
Computations are executed lazily: whereas the tensor expressions can have
contractions, tensor products, and additions, components data are not
computed until they are accessed by reading the ``.data`` property
associated to the tensor expression.
"""
_substitutions_dict = dict() # type: tDict[Any, Any]
_substitutions_dict_tensmul = dict() # type: tDict[Any, Any]
def __getitem__(self, key):
dat = self._get(key)
if dat is None:
return None
from .array import NDimArray
if not isinstance(dat, NDimArray):
return dat
if dat.rank() == 0:
return dat[()]
elif dat.rank() == 1 and len(dat) == 1:
return dat[0]
return dat
def _get(self, key):
"""
Retrieve ``data`` associated with ``key``.
Explanation
===========
This algorithm looks into ``self._substitutions_dict`` for all
``TensorHead`` in the ``TensExpr`` (or just ``TensorHead`` if key is a
TensorHead instance). It reconstructs the components data that the
tensor expression should have by performing on components data the
operations that correspond to the abstract tensor operations applied.
Metric tensor is handled in a different manner: it is pre-computed in
``self._substitutions_dict_tensmul``.
"""
if key in self._substitutions_dict:
return self._substitutions_dict[key]
if isinstance(key, TensorHead):
return None
if isinstance(key, Tensor):
            # special case to handle metrics. Metric tensors cannot be
            # constructed through contraction by the metric; the stored
            # components indicate whether they represent the matrix or its inverse.
signature = tuple([i.is_up for i in key.get_indices()])
srch = (key.component,) + signature
if srch in self._substitutions_dict_tensmul:
return self._substitutions_dict_tensmul[srch]
array_list = [self.data_from_tensor(key)]
return self.data_contract_dum(array_list, key.dum, key.ext_rank)
if isinstance(key, TensMul):
tensmul_args = key.args
if len(tensmul_args) == 1 and len(tensmul_args[0].components) == 1:
                # special case to handle metrics. Metric tensors cannot be
                # constructed through contraction by the metric; the stored
                # components indicate whether they represent the matrix or its inverse.
signature = tuple([i.is_up for i in tensmul_args[0].get_indices()])
srch = (tensmul_args[0].components[0],) + signature
if srch in self._substitutions_dict_tensmul:
return self._substitutions_dict_tensmul[srch]
#data_list = [self.data_from_tensor(i) for i in tensmul_args if isinstance(i, TensExpr)]
data_list = [self.data_from_tensor(i) if isinstance(i, Tensor) else i.data for i in tensmul_args if isinstance(i, TensExpr)]
coeff = prod([i for i in tensmul_args if not isinstance(i, TensExpr)])
if all(i is None for i in data_list):
return None
if any(i is None for i in data_list):
raise ValueError("Mixing tensors with associated components "\
"data with tensors without components data")
data_result = self.data_contract_dum(data_list, key.dum, key.ext_rank)
return coeff*data_result
if isinstance(key, TensAdd):
data_list = []
free_args_list = []
for arg in key.args:
if isinstance(arg, TensExpr):
data_list.append(arg.data)
free_args_list.append([x[0] for x in arg.free])
else:
data_list.append(arg)
free_args_list.append([])
if all(i is None for i in data_list):
return None
if any(i is None for i in data_list):
raise ValueError("Mixing tensors with associated components "\
"data with tensors without components data")
sum_list = []
from .array import permutedims
for data, free_args in zip(data_list, free_args_list):
if len(free_args) < 2:
sum_list.append(data)
else:
free_args_pos = {y: x for x, y in enumerate(free_args)}
axes = [free_args_pos[arg] for arg in key.free_args]
sum_list.append(permutedims(data, axes))
return reduce(lambda x, y: x+y, sum_list)
return None
@staticmethod
def data_contract_dum(ndarray_list, dum, ext_rank):
from .array import tensorproduct, tensorcontraction, MutableDenseNDimArray
arrays = list(map(MutableDenseNDimArray, ndarray_list))
prodarr = tensorproduct(*arrays)
return tensorcontraction(prodarr, *dum)
def data_tensorhead_from_tensmul(self, data, tensmul, tensorhead):
"""
This method is used when assigning components data to a ``TensMul``
object, it converts components data to a fully contravariant ndarray,
which is then stored according to the ``TensorHead`` key.
"""
if data is None:
return None
return self._correct_signature_from_indices(
data,
tensmul.get_indices(),
tensmul.free,
tensmul.dum,
True)
def data_from_tensor(self, tensor):
"""
This method corrects the components data to the right signature
(covariant/contravariant) using the metric associated with each
``TensorIndexType``.
"""
tensorhead = tensor.component
if tensorhead.data is None:
return None
return self._correct_signature_from_indices(
tensorhead.data,
tensor.get_indices(),
tensor.free,
tensor.dum)
def _assign_data_to_tensor_expr(self, key, data):
if isinstance(key, TensAdd):
raise ValueError('cannot assign data to TensAdd')
# here it is assumed that `key` is a `TensMul` instance.
if len(key.components) != 1:
raise ValueError('cannot assign data to TensMul with multiple components')
tensorhead = key.components[0]
newdata = self.data_tensorhead_from_tensmul(data, key, tensorhead)
return tensorhead, newdata
def _check_permutations_on_data(self, tens, data):
from .array import permutedims
from .array.arrayop import Flatten
if isinstance(tens, TensorHead):
rank = tens.rank
generators = tens.symmetry.generators
elif isinstance(tens, Tensor):
rank = tens.rank
generators = tens.components[0].symmetry.generators
elif isinstance(tens, TensorIndexType):
rank = tens.metric.rank
generators = tens.metric.symmetry.generators
# Every generator is a permutation, check that by permuting the array
# by that permutation, the array will be the same, except for a
# possible sign change if the permutation admits it.
for gener in generators:
sign_change = +1 if (gener(rank) == rank) else -1
data_swapped = data
last_data = data
permute_axes = list(map(gener, list(range(rank))))
# the order of a permutation is the number of times to get the
# identity by applying that permutation.
for i in range(gener.order()-1):
data_swapped = permutedims(data_swapped, permute_axes)
# if any value in the difference array is non-zero, raise an error:
if any(Flatten(last_data - sign_change*data_swapped)):
raise ValueError("Component data symmetry structure error")
last_data = data_swapped
def __setitem__(self, key, value):
"""
Set the components data of a tensor object/expression.
Explanation
===========
Components data are transformed to the all-contravariant form and stored
with the corresponding ``TensorHead`` object. If a ``TensorHead`` object
cannot be uniquely identified, it will raise an error.
"""
data = _TensorDataLazyEvaluator.parse_data(value)
self._check_permutations_on_data(key, data)
# TensorHead and TensorIndexType can be assigned data directly, while
# TensMul must first convert data to a fully contravariant form, and
# assign it to its corresponding TensorHead single component.
if not isinstance(key, (TensorHead, TensorIndexType)):
key, data = self._assign_data_to_tensor_expr(key, data)
if isinstance(key, TensorHead):
for dim, indextype in zip(data.shape, key.index_types):
if indextype.data is None:
raise ValueError("index type {} has no components data"\
" associated (needed to raise/lower index)".format(indextype))
if not indextype.dim.is_number:
continue
if dim != indextype.dim:
raise ValueError("wrong dimension of ndarray")
self._substitutions_dict[key] = data
def __delitem__(self, key):
del self._substitutions_dict[key]
def __contains__(self, key):
return key in self._substitutions_dict
def add_metric_data(self, metric, data):
"""
Assign data to the ``metric`` tensor. The metric tensor behaves in an
anomalous way when raising and lowering indices.
Explanation
===========
A fully covariant metric is the inverse transpose of the fully
        contravariant metric (inverse here means the matrix inverse). If the metric is
symmetric, the transpose is not necessary and mixed
covariant/contravariant metrics are Kronecker deltas.
"""
# hard assignment, data should not be added to `TensorHead` for metric:
# the problem with `TensorHead` is that the metric is anomalous, i.e.
# raising and lowering the index means considering the metric or its
# inverse, this is not the case for other tensors.
self._substitutions_dict_tensmul[metric, True, True] = data
inverse_transpose = self.inverse_transpose_matrix(data)
        # for a symmetric metric the transpose is the same as the original matrix;
        # the fully covariant metric tensor is the inverse transpose, so this
        # code can also handle non-symmetric metrics.
self._substitutions_dict_tensmul[metric, False, False] = inverse_transpose
# now mixed cases, these are identical to the unit matrix if the metric
# is symmetric.
m = data.tomatrix()
invt = inverse_transpose.tomatrix()
self._substitutions_dict_tensmul[metric, True, False] = m * invt
self._substitutions_dict_tensmul[metric, False, True] = invt * m
@staticmethod
def _flip_index_by_metric(data, metric, pos):
from .array import tensorproduct, tensorcontraction
mdim = metric.rank()
ddim = data.rank()
if pos == 0:
data = tensorcontraction(
tensorproduct(
metric,
data
),
(1, mdim+pos)
)
else:
data = tensorcontraction(
tensorproduct(
data,
metric
),
(pos, ddim)
)
return data
@staticmethod
def inverse_matrix(ndarray):
m = ndarray.tomatrix().inv()
return _TensorDataLazyEvaluator.parse_data(m)
@staticmethod
def inverse_transpose_matrix(ndarray):
m = ndarray.tomatrix().inv().T
return _TensorDataLazyEvaluator.parse_data(m)
@staticmethod
def _correct_signature_from_indices(data, indices, free, dum, inverse=False):
"""
Utility function to correct the values inside the components data
ndarray according to whether indices are covariant or contravariant.
It uses the metric matrix to lower values of covariant indices.
"""
# change the ndarray values according covariantness/contravariantness of the indices
# use the metric
for i, indx in enumerate(indices):
if not indx.is_up and not inverse:
data = _TensorDataLazyEvaluator._flip_index_by_metric(data, indx.tensor_index_type.data, i)
elif not indx.is_up and inverse:
data = _TensorDataLazyEvaluator._flip_index_by_metric(
data,
_TensorDataLazyEvaluator.inverse_matrix(indx.tensor_index_type.data),
i
)
return data
@staticmethod
def _sort_data_axes(old, new):
from .array import permutedims
new_data = old.data.copy()
old_free = [i[0] for i in old.free]
new_free = [i[0] for i in new.free]
for i in range(len(new_free)):
for j in range(i, len(old_free)):
if old_free[j] == new_free[i]:
old_free[i], old_free[j] = old_free[j], old_free[i]
new_data = permutedims(new_data, (i, j))
break
return new_data
@staticmethod
def add_rearrange_tensmul_parts(new_tensmul, old_tensmul):
def sorted_compo():
return _TensorDataLazyEvaluator._sort_data_axes(old_tensmul, new_tensmul)
_TensorDataLazyEvaluator._substitutions_dict[new_tensmul] = sorted_compo()
@staticmethod
def parse_data(data):
"""
Transform ``data`` to array. The parameter ``data`` may
contain data in various formats, e.g. nested lists, SymPy ``Matrix``,
and so on.
Examples
========
>>> from sympy.tensor.tensor import _TensorDataLazyEvaluator
>>> _TensorDataLazyEvaluator.parse_data([1, 3, -6, 12])
[1, 3, -6, 12]
>>> _TensorDataLazyEvaluator.parse_data([[1, 2], [4, 7]])
[[1, 2], [4, 7]]
"""
from .array import MutableDenseNDimArray
if not isinstance(data, MutableDenseNDimArray):
if len(data) == 2 and hasattr(data[0], '__call__'):
data = MutableDenseNDimArray(data[0], data[1])
else:
data = MutableDenseNDimArray(data)
return data
_tensor_data_substitution_dict = _TensorDataLazyEvaluator()
class _TensorManager:
"""
Class to manage tensor properties.
Notes
=====
Tensors belong to tensor commutation groups; each group has a label
``comm``; there are predefined labels:
``0`` tensors commuting with any other tensor
``1`` tensors anticommuting among themselves
    ``2`` tensors not commuting, except with those with ``comm=0``
Other groups can be defined using ``set_comm``; tensors in those
groups commute with those with ``comm=0``; by default they
do not commute with any other group.
"""
def __init__(self):
self._comm_init()
def _comm_init(self):
self._comm = [{} for i in range(3)]
for i in range(3):
self._comm[0][i] = 0
self._comm[i][0] = 0
self._comm[1][1] = 1
self._comm[2][1] = None
self._comm[1][2] = None
self._comm_symbols2i = {0:0, 1:1, 2:2}
self._comm_i2symbol = {0:0, 1:1, 2:2}
@property
def comm(self):
return self._comm
def comm_symbols2i(self, i):
"""
Get the commutation group number corresponding to ``i``.
``i`` can be a symbol or a number or a string.
If ``i`` is not already defined its commutation group number
is set.
"""
if i not in self._comm_symbols2i:
n = len(self._comm)
self._comm.append({})
self._comm[n][0] = 0
self._comm[0][n] = 0
self._comm_symbols2i[i] = n
self._comm_i2symbol[n] = i
return n
return self._comm_symbols2i[i]
def comm_i2symbol(self, i):
"""
Returns the symbol corresponding to the commutation group number.
"""
return self._comm_i2symbol[i]
def set_comm(self, i, j, c):
"""
Set the commutation parameter ``c`` for commutation groups ``i, j``.
Parameters
==========
i, j : symbols representing commutation groups
c : group commutation number
Notes
=====
``i, j`` can be symbols, strings or numbers,
apart from ``0, 1`` and ``2`` which are reserved respectively
for commuting, anticommuting tensors and tensors not commuting
        with any other group except the commuting tensors.
For the remaining cases, use this method to set the commutation rules;
by default ``c=None``.
The group commutation number ``c`` is assigned in correspondence
to the group commutation symbols; it can be
0 commuting
1 anticommuting
None no commutation property
Examples
========
``G`` and ``GH`` do not commute with themselves and commute with
each other; A is commuting.
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, TensorManager, TensorSymmetry
>>> Lorentz = TensorIndexType('Lorentz')
>>> i0,i1,i2,i3,i4 = tensor_indices('i0:5', Lorentz)
>>> A = TensorHead('A', [Lorentz])
>>> G = TensorHead('G', [Lorentz], TensorSymmetry.no_symmetry(1), 'Gcomm')
>>> GH = TensorHead('GH', [Lorentz], TensorSymmetry.no_symmetry(1), 'GHcomm')
>>> TensorManager.set_comm('Gcomm', 'GHcomm', 0)
>>> (GH(i1)*G(i0)).canon_bp()
G(i0)*GH(i1)
>>> (G(i1)*G(i0)).canon_bp()
G(i1)*G(i0)
>>> (G(i1)*A(i0)).canon_bp()
A(i0)*G(i1)
"""
if c not in (0, 1, None):
raise ValueError('`c` can assume only the values 0, 1 or None')
if i not in self._comm_symbols2i:
n = len(self._comm)
self._comm.append({})
self._comm[n][0] = 0
self._comm[0][n] = 0
self._comm_symbols2i[i] = n
self._comm_i2symbol[n] = i
if j not in self._comm_symbols2i:
n = len(self._comm)
self._comm.append({})
self._comm[0][n] = 0
self._comm[n][0] = 0
self._comm_symbols2i[j] = n
self._comm_i2symbol[n] = j
ni = self._comm_symbols2i[i]
nj = self._comm_symbols2i[j]
self._comm[ni][nj] = c
self._comm[nj][ni] = c
def set_comms(self, *args):
"""
Set the commutation group numbers ``c`` for symbols ``i, j``.
Parameters
==========
args : sequence of ``(i, j, c)``
"""
for i, j, c in args:
self.set_comm(i, j, c)
def get_comm(self, i, j):
"""
Return the commutation parameter for commutation group numbers ``i, j``
see ``_TensorManager.set_comm``
"""
return self._comm[i].get(j, 0 if i == 0 or j == 0 else None)
def clear(self):
"""
Clear the TensorManager.
"""
self._comm_init()
TensorManager = _TensorManager()
class TensorIndexType(Basic):
"""
A TensorIndexType is characterized by its name and its metric.
Parameters
==========
name : name of the tensor type
dummy_name : name of the head of dummy indices
dim : dimension, it can be a symbol or an integer or ``None``
eps_dim : dimension of the epsilon tensor
    metric_symmetry : integer that denotes metric symmetry or ``None`` for no metric
metric_name : string with the name of the metric tensor
Attributes
==========
``metric`` : the metric tensor
``delta`` : ``Kronecker delta``
``epsilon`` : the ``Levi-Civita epsilon`` tensor
``data`` : (deprecated) a property to add ``ndarray`` values, to work in a specified basis.
Notes
=====
The possible values of the ``metric_symmetry`` parameter are:
``1`` : metric tensor is fully symmetric
``0`` : metric tensor possesses no index symmetry
``-1`` : metric tensor is fully antisymmetric
    ``None``: there is no metric tensor (the ``metric`` attribute is ``None``)
The metric is assumed to be symmetric by default. It can also be set
to a custom tensor by the ``.set_metric()`` method.
If there is a metric the metric is used to raise and lower indices.
In the case of non-symmetric metric, the following raising and
lowering conventions will be adopted:
``psi(a) = g(a, b)*psi(-b); chi(-a) = chi(b)*g(-b, -a)``
From these it is easy to find:
``g(-a, b) = delta(-a, b)``
where ``delta(-a, b) = delta(b, -a)`` is the ``Kronecker delta``
(see ``TensorIndex`` for the conventions on indices).
For antisymmetric metrics there is also the following equality:
``g(a, -b) = -delta(a, -b)``
If there is no metric it is not possible to raise or lower indices;
e.g. the index of the defining representation of ``SU(N)``
is 'covariant' and the conjugate representation is
'contravariant'; for ``N > 2`` they are linearly independent.
``eps_dim`` is by default equal to ``dim``, if the latter is an integer;
else it can be assigned (for use in naive dimensional regularization);
if ``eps_dim`` is not an integer ``epsilon`` is ``None``.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> Lorentz.metric
metric(Lorentz,Lorentz)
"""
def __new__(cls, name, dummy_name=None, dim=None, eps_dim=None,
metric_symmetry=1, metric_name='metric', **kwargs):
if 'dummy_fmt' in kwargs:
SymPyDeprecationWarning(useinstead="dummy_name",
feature="dummy_fmt", issue=17517,
deprecated_since_version="1.5").warn()
dummy_name = kwargs.get('dummy_fmt')
if isinstance(name, str):
name = Symbol(name)
if dummy_name is None:
dummy_name = str(name)[0]
if isinstance(dummy_name, str):
dummy_name = Symbol(dummy_name)
if dim is None:
dim = Symbol("dim_" + dummy_name.name)
else:
dim = sympify(dim)
if eps_dim is None:
eps_dim = dim
else:
eps_dim = sympify(eps_dim)
metric_symmetry = sympify(metric_symmetry)
if isinstance(metric_name, str):
metric_name = Symbol(metric_name)
if 'metric' in kwargs:
SymPyDeprecationWarning(useinstead="metric_symmetry or .set_metric()",
feature="metric argument", issue=17517,
deprecated_since_version="1.5").warn()
metric = kwargs.get('metric')
if metric is not None:
if metric in (True, False, 0, 1):
metric_name = 'metric'
#metric_antisym = metric
else:
metric_name = metric.name
#metric_antisym = metric.antisym
if metric:
metric_symmetry = -1
else:
metric_symmetry = 1
obj = Basic.__new__(cls, name, dummy_name, dim, eps_dim,
metric_symmetry, metric_name)
obj._autogenerated = []
return obj
@property
def name(self):
return self.args[0].name
@property
def dummy_name(self):
return self.args[1].name
@property
def dim(self):
return self.args[2]
@property
def eps_dim(self):
return self.args[3]
@memoize_property
def metric(self):
metric_symmetry = self.args[4]
metric_name = self.args[5]
if metric_symmetry is None:
return None
if metric_symmetry == 0:
symmetry = TensorSymmetry.no_symmetry(2)
elif metric_symmetry == 1:
symmetry = TensorSymmetry.fully_symmetric(2)
elif metric_symmetry == -1:
symmetry = TensorSymmetry.fully_symmetric(-2)
return TensorHead(metric_name, [self]*2, symmetry)
@memoize_property
def delta(self):
return TensorHead('KD', [self]*2, TensorSymmetry.fully_symmetric(2))
@memoize_property
def epsilon(self):
if not isinstance(self.eps_dim, (SYMPY_INTS, Integer)):
return None
symmetry = TensorSymmetry.fully_symmetric(-self.eps_dim)
return TensorHead('Eps', [self]*self.eps_dim, symmetry)
def set_metric(self, tensor):
self._metric = tensor
def __lt__(self, other):
return self.name < other.name
def __str__(self):
return self.name
__repr__ = __str__
# Everything below this line is deprecated
@property
def data(self):
deprecate_data()
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
deprecate_data()
# This assignment is a bit controversial, should metric components be assigned
# to the metric only or also to the TensorIndexType object? The advantage here
# is the ability to assign a 1D array and transform it to a 2D diagonal array.
from .array import MutableDenseNDimArray
data = _TensorDataLazyEvaluator.parse_data(data)
if data.rank() > 2:
raise ValueError("data have to be of rank 1 (diagonal metric) or 2.")
if data.rank() == 1:
if self.dim.is_number:
nda_dim = data.shape[0]
if nda_dim != self.dim:
raise ValueError("Dimension mismatch")
dim = data.shape[0]
newndarray = MutableDenseNDimArray.zeros(dim, dim)
for i, val in enumerate(data):
newndarray[i, i] = val
data = newndarray
dim1, dim2 = data.shape
if dim1 != dim2:
raise ValueError("Non-square matrix tensor.")
if self.dim.is_number:
if self.dim != dim1:
raise ValueError("Dimension mismatch")
_tensor_data_substitution_dict[self] = data
_tensor_data_substitution_dict.add_metric_data(self.metric, data)
delta = self.get_kronecker_delta()
i1 = TensorIndex('i1', self)
i2 = TensorIndex('i2', self)
delta(i1, -i2).data = _TensorDataLazyEvaluator.parse_data(eye(dim1))
@data.deleter
def data(self):
deprecate_data()
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
if self.metric in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self.metric]
@deprecated(useinstead=".delta", issue=17517,
deprecated_since_version="1.5")
def get_kronecker_delta(self):
sym2 = TensorSymmetry(get_symmetric_group_sgs(2))
delta = TensorHead('KD', [self]*2, sym2)
return delta
@deprecated(useinstead=".delta", issue=17517,
deprecated_since_version="1.5")
def get_epsilon(self):
        if not isinstance(self.eps_dim, (SYMPY_INTS, Integer)):
            return None
        sym = TensorSymmetry(get_symmetric_group_sgs(self.eps_dim, 1))
        epsilon = TensorHead('Eps', [self]*self.eps_dim, sym)
return epsilon
def _components_data_full_destroy(self):
"""
EXPERIMENTAL: do not rely on this API method.
This destroys components data associated to the ``TensorIndexType``, if
any, specifically:
* metric tensor data
* Kronecker tensor data
"""
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
def delete_tensmul_data(key):
if key in _tensor_data_substitution_dict._substitutions_dict_tensmul:
del _tensor_data_substitution_dict._substitutions_dict_tensmul[key]
# delete metric data:
delete_tensmul_data((self.metric, True, True))
delete_tensmul_data((self.metric, True, False))
delete_tensmul_data((self.metric, False, True))
delete_tensmul_data((self.metric, False, False))
# delete delta tensor data:
delta = self.get_kronecker_delta()
if delta in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[delta]
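# Hedged usage sketch of the ``metric``, ``delta`` and ``epsilon`` properties
# above (illustrative, not a doctest; the index type name is hypothetical):
#
#     >>> Minkowski = TensorIndexType('Minkowski', dummy_name='m', dim=4)
#     >>> Minkowski.metric    # metric(Minkowski,Minkowski)
#     >>> Minkowski.delta     # KD(Minkowski,Minkowski)
#     >>> Minkowski.epsilon   # Eps(Minkowski,Minkowski,Minkowski,Minkowski), since eps_dim == 4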
class TensorIndex(Basic):
"""
Represents a tensor index
Parameters
==========
name : name of the index, or ``True`` if you want it to be automatically assigned
tensor_index_type : ``TensorIndexType`` of the index
is_up : flag for contravariant index (is_up=True by default)
Attributes
==========
``name``
``tensor_index_type``
``is_up``
Notes
=====
Tensor indices are contracted with the Einstein summation convention.
An index can be in contravariant or in covariant form; in the latter
case it is represented prepending a ``-`` to the index name. Adding
``-`` to a covariant (is_up=False) index makes it contravariant.
Dummy indices have a name with head given by
    ``tensor_index_type.dummy_name`` with underscore and a number.
Similar to ``symbols`` multiple contravariant indices can be created
at once using ``tensor_indices(s, typ)``, where ``s`` is a string
of names.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, TensorIndex, TensorHead, tensor_indices
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> mu = TensorIndex('mu', Lorentz, is_up=False)
>>> nu, rho = tensor_indices('nu, rho', Lorentz)
>>> A = TensorHead('A', [Lorentz, Lorentz])
>>> A(mu, nu)
A(-mu, nu)
>>> A(-mu, -rho)
A(mu, -rho)
>>> A(mu, -mu)
A(-L_0, L_0)
"""
def __new__(cls, name, tensor_index_type, is_up=True):
if isinstance(name, str):
name_symbol = Symbol(name)
elif isinstance(name, Symbol):
name_symbol = name
elif name is True:
name = "_i{}".format(len(tensor_index_type._autogenerated))
name_symbol = Symbol(name)
tensor_index_type._autogenerated.append(name_symbol)
else:
raise ValueError("invalid name")
is_up = sympify(is_up)
return Basic.__new__(cls, name_symbol, tensor_index_type, is_up)
@property
def name(self):
return self.args[0].name
@property
def tensor_index_type(self):
return self.args[1]
@property
def is_up(self):
return self.args[2]
def _print(self):
s = self.name
if not self.is_up:
s = '-%s' % s
return s
def __lt__(self, other):
return ((self.tensor_index_type, self.name) <
(other.tensor_index_type, other.name))
def __neg__(self):
t1 = TensorIndex(self.name, self.tensor_index_type,
(not self.is_up))
return t1
def tensor_indices(s, typ):
"""
Returns list of tensor indices given their names and their types.
Parameters
==========
s : string of comma separated names of indices
typ : ``TensorIndexType`` of the indices
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> a, b, c, d = tensor_indices('a,b,c,d', Lorentz)
"""
if isinstance(s, str):
a = [x.name for x in symbols(s, seq=True)]
else:
raise ValueError('expecting a string')
tilist = [TensorIndex(i, typ) for i in a]
if len(tilist) == 1:
return tilist[0]
return tilist
class TensorSymmetry(Basic):
"""
Monoterm symmetry of a tensor (i.e. any symmetric or anti-symmetric
index permutation). For the relevant terminology see ``tensor_can.py``
section of the combinatorics module.
Parameters
==========
bsgs : tuple ``(base, sgs)`` BSGS of the symmetry of the tensor
Attributes
==========
``base`` : base of the BSGS
``generators`` : generators of the BSGS
``rank`` : rank of the tensor
Notes
=====
A tensor can have an arbitrary monoterm symmetry provided by its BSGS.
Multiterm symmetries, like the cyclic symmetry of the Riemann tensor
(i.e., Bianchi identity), are not covered. See combinatorics module for
information on how to generate BSGS for a general index permutation group.
Simple symmetries can be generated using built-in methods.
See Also
========
sympy.combinatorics.tensor_can.get_symmetric_group_sgs
Examples
========
Define a symmetric tensor of rank 2
>>> from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, get_symmetric_group_sgs, TensorHead
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> sym = TensorSymmetry(get_symmetric_group_sgs(2))
>>> T = TensorHead('T', [Lorentz]*2, sym)
Note, that the same can also be done using built-in TensorSymmetry methods
>>> sym2 = TensorSymmetry.fully_symmetric(2)
>>> sym == sym2
True
"""
def __new__(cls, *args, **kw_args):
if len(args) == 1:
base, generators = args[0]
elif len(args) == 2:
base, generators = args
else:
raise TypeError("bsgs required, either two separate parameters or one tuple")
if not isinstance(base, Tuple):
base = Tuple(*base)
if not isinstance(generators, Tuple):
generators = Tuple(*generators)
return Basic.__new__(cls, base, generators, **kw_args)
@property
def base(self):
return self.args[0]
@property
def generators(self):
return self.args[1]
@property
def rank(self):
return self.generators[0].size - 2
@classmethod
def fully_symmetric(cls, rank):
"""
Returns a fully symmetric (antisymmetric if ``rank``<0)
TensorSymmetry object for ``abs(rank)`` indices.
"""
if rank > 0:
bsgs = get_symmetric_group_sgs(rank, False)
elif rank < 0:
bsgs = get_symmetric_group_sgs(-rank, True)
elif rank == 0:
bsgs = ([], [Permutation(1)])
return TensorSymmetry(bsgs)
@classmethod
def direct_product(cls, *args):
"""
        Returns a TensorSymmetry object that is a direct product of
fully (anti-)symmetric index permutation groups.
Notes
=====
Some examples for different values of ``(*args)``:
``(1)`` vector, equivalent to ``TensorSymmetry.fully_symmetric(1)``
``(2)`` tensor with 2 symmetric indices, equivalent to ``.fully_symmetric(2)``
``(-2)`` tensor with 2 antisymmetric indices, equivalent to ``.fully_symmetric(-2)``
        ``(2, -2)`` tensor with the first 2 indices symmetric and the last 2 antisymmetric
``(1, 1, 1)`` tensor with 3 indices without any symmetry
"""
base, sgs = [], [Permutation(1)]
for arg in args:
if arg > 0:
bsgs2 = get_symmetric_group_sgs(arg, False)
elif arg < 0:
bsgs2 = get_symmetric_group_sgs(-arg, True)
else:
continue
base, sgs = bsgs_direct_product(base, sgs, *bsgs2)
return TensorSymmetry(base, sgs)
@classmethod
def riemann(cls):
"""
        Returns the monoterm symmetry of the Riemann tensor
"""
return TensorSymmetry(riemann_bsgs)
@classmethod
def no_symmetry(cls, rank):
"""
TensorSymmetry object for ``rank`` indices with no symmetry
"""
return TensorSymmetry([], [Permutation(rank+1)])
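# Hedged sketch of the built-in constructors above (illustrative, not a doctest):
#
#     >>> from sympy.tensor.tensor import TensorIndexType, TensorHead, TensorSymmetry
#     >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
#     >>> sym = TensorSymmetry.direct_product(2, -2)  # 2 symmetric slots, then 2 antisymmetric slots
#     >>> T = TensorHead('T', [Lorentz]*4, sym)
#     >>> TensorSymmetry.riemann()         # monoterm symmetries of the Riemann tensor
#     >>> TensorSymmetry.no_symmetry(3)    # three slots without any symmetry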
@deprecated(useinstead="TensorSymmetry class constructor and methods", issue=17108,
deprecated_since_version="1.5")
def tensorsymmetry(*args):
"""
Returns a ``TensorSymmetry`` object. This method is deprecated, use
``TensorSymmetry.direct_product()`` or ``.riemann()`` instead.
Explanation
===========
One can represent a tensor with any monoterm slot symmetry group
using a BSGS.
``args`` can be a BSGS
``args[0]`` base
``args[1]`` sgs
Usually tensors are in (direct products of) representations
of the symmetric group;
``args`` can be a list of lists representing the shapes of Young tableaux
Notes
=====
For instance:
``[[1]]`` vector
``[[1]*n]`` symmetric tensor of rank ``n``
``[[n]]`` antisymmetric tensor of rank ``n``
``[[2, 2]]`` monoterm slot symmetry of the Riemann tensor
``[[1],[1]]`` vector*vector
    ``[[2],[1],[1]]`` (antisymmetric tensor)*vector*vector
Notice that with the shape ``[2, 2]`` we associate only the monoterm
symmetries of the Riemann tensor; this is an abuse of notation,
since the shape ``[2, 2]`` corresponds usually to the irreducible
representation characterized by the monoterm symmetries and by the
cyclic symmetry.
"""
from sympy.combinatorics import Permutation
def tableau2bsgs(a):
if len(a) == 1:
# antisymmetric tensor of rank n
n = a[0]
bsgs = get_symmetric_group_sgs(n, 1)
else:
if all(x == 1 for x in a):
# symmetric tensor of rank n
n = len(a)
bsgs = get_symmetric_group_sgs(n)
elif a == [2, 2]:
bsgs = riemann_bsgs
else:
raise NotImplementedError
return bsgs
if not args:
return TensorSymmetry(Tuple(), Tuple(Permutation(1)))
if len(args) == 2 and isinstance(args[1][0], Permutation):
return TensorSymmetry(args)
base, sgs = tableau2bsgs(args[0])
for a in args[1:]:
basex, sgsx = tableau2bsgs(a)
base, sgs = bsgs_direct_product(base, sgs, basex, sgsx)
return TensorSymmetry(Tuple(base, sgs))
class TensorType(Basic):
"""
Class of tensor types. Deprecated, use tensor_heads() instead.
Parameters
==========
index_types : list of ``TensorIndexType`` of the tensor indices
symmetry : ``TensorSymmetry`` of the tensor
Attributes
==========
``index_types``
``symmetry``
``types`` : list of ``TensorIndexType`` without repetitions
"""
is_commutative = False
def __new__(cls, index_types, symmetry, **kw_args):
deprecate_TensorType()
assert symmetry.rank == len(index_types)
obj = Basic.__new__(cls, Tuple(*index_types), symmetry, **kw_args)
return obj
@property
def index_types(self):
return self.args[0]
@property
def symmetry(self):
return self.args[1]
@property
def types(self):
return sorted(set(self.index_types), key=lambda x: x.name)
def __str__(self):
return 'TensorType(%s)' % ([str(x) for x in self.index_types])
def __call__(self, s, comm=0):
"""
Return a TensorHead object or a list of TensorHead objects.
Parameters
==========
s : name or string of names.
comm : Commutation group.
see ``_TensorManager.set_comm``
"""
if isinstance(s, str):
names = [x.name for x in symbols(s, seq=True)]
else:
raise ValueError('expecting a string')
if len(names) == 1:
return TensorHead(names[0], self.index_types, self.symmetry, comm)
else:
return [TensorHead(name, self.index_types, self.symmetry, comm) for name in names]
@deprecated(useinstead="TensorHead class constructor or tensor_heads()",
issue=17108, deprecated_since_version="1.5")
def tensorhead(name, typ, sym=None, comm=0):
"""
Function generating tensorhead(s). This method is deprecated,
use TensorHead constructor or tensor_heads() instead.
Parameters
==========
name : name or sequence of names (as in ``symbols``)
typ : index types
sym : same as ``*args`` in ``tensorsymmetry``
comm : commutation group number
see ``_TensorManager.set_comm``
"""
if sym is None:
sym = [[1] for i in range(len(typ))]
sym = tensorsymmetry(*sym)
return TensorHead(name, typ, sym, comm)
class TensorHead(Basic):
"""
Tensor head of the tensor.
Parameters
==========
name : name of the tensor
index_types : list of TensorIndexType
symmetry : TensorSymmetry of the tensor
comm : commutation group number
Attributes
==========
``name``
``index_types``
``rank`` : total number of indices
``symmetry``
``comm`` : commutation group
Notes
=====
Similar to ``symbols``, multiple ``TensorHead`` objects can be created using
the ``tensor_heads(s, index_types, symmetry=None, comm=0)`` function, where
``s`` is a string of names and ``symmetry`` is the monoterm tensor symmetry
(see ``TensorSymmetry``).
A ``TensorHead`` belongs to a commutation group, identified by a symbol or a
number ``comm`` (see ``_TensorManager.set_comm``);
tensors in a commutation group have the same commutation properties;
by default ``comm`` is ``0``, the group of the commuting tensors.
Examples
========
Define a fully antisymmetric tensor of rank 2:
>>> from sympy.tensor.tensor import TensorIndexType, TensorHead, TensorSymmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> asym2 = TensorSymmetry.fully_symmetric(-2)
>>> A = TensorHead('A', [Lorentz, Lorentz], asym2)
Examples with ndarray values: the components data assigned to the
``TensorHead`` object are assumed to be in a fully contravariant
representation. If it is necessary to assign components data which
represent the values of a tensor that is not fully contravariant, see the
other examples.
>>> from sympy.tensor.tensor import tensor_indices
>>> from sympy import diag
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> i0, i1 = tensor_indices('i0:2', Lorentz)
Specify a replacement dictionary to keep track of the arrays to use for
replacements in the tensorial expression. The ``TensorIndexType`` is
associated to the metric used for contractions (in fully covariant form):
>>> repl = {Lorentz: diag(1, -1, -1, -1)}
Let's see some examples of working with components with the electromagnetic
tensor:
>>> from sympy import symbols
>>> Ex, Ey, Ez, Bx, By, Bz = symbols('E_x E_y E_z B_x B_y B_z')
>>> c = symbols('c', positive=True)
Let's define `F`, an antisymmetric tensor:
>>> F = TensorHead('F', [Lorentz, Lorentz], asym2)
Let's update the dictionary to contain the matrix to use in the
replacements:
>>> repl.update({F(-i0, -i1): [
... [0, Ex/c, Ey/c, Ez/c],
... [-Ex/c, 0, -Bz, By],
... [-Ey/c, Bz, 0, -Bx],
... [-Ez/c, -By, Bx, 0]]})
Now it is possible to retrieve the contravariant form of the Electromagnetic
tensor:
>>> F(i0, i1).replace_with_arrays(repl, [i0, i1])
[[0, -E_x/c, -E_y/c, -E_z/c], [E_x/c, 0, -B_z, B_y], [E_y/c, B_z, 0, -B_x], [E_z/c, -B_y, B_x, 0]]
and the mixed contravariant-covariant form:
>>> F(i0, -i1).replace_with_arrays(repl, [i0, -i1])
[[0, E_x/c, E_y/c, E_z/c], [E_x/c, 0, B_z, -B_y], [E_y/c, -B_z, 0, B_x], [E_z/c, B_y, -B_x, 0]]
Energy-momentum of a particle may be represented as:
>>> from sympy import symbols
>>> P = TensorHead('P', [Lorentz], TensorSymmetry.no_symmetry(1))
>>> E, px, py, pz = symbols('E p_x p_y p_z', positive=True)
>>> repl.update({P(i0): [E, px, py, pz]})
The contravariant and covariant components are, respectively:
>>> P(i0).replace_with_arrays(repl, [i0])
[E, p_x, p_y, p_z]
>>> P(-i0).replace_with_arrays(repl, [-i0])
[E, -p_x, -p_y, -p_z]
The contraction of a 1-index tensor by itself:
>>> expr = P(i0)*P(-i0)
>>> expr.replace_with_arrays(repl, [])
E**2 - p_x**2 - p_y**2 - p_z**2
"""
is_commutative = False
def __new__(cls, name, index_types, symmetry=None, comm=0):
if isinstance(name, str):
name_symbol = Symbol(name)
elif isinstance(name, Symbol):
name_symbol = name
else:
raise ValueError("invalid name")
if symmetry is None:
symmetry = TensorSymmetry.no_symmetry(len(index_types))
else:
assert symmetry.rank == len(index_types)
obj = Basic.__new__(cls, name_symbol, Tuple(*index_types), symmetry)
obj.comm = TensorManager.comm_symbols2i(comm)
return obj
@property
def name(self):
return self.args[0].name
@property
def index_types(self):
return list(self.args[1])
@property
def symmetry(self):
return self.args[2]
@property
def rank(self):
return len(self.index_types)
def __lt__(self, other):
return (self.name, self.index_types) < (other.name, other.index_types)
def commutes_with(self, other):
"""
Returns ``0`` if ``self`` and ``other`` commute, ``1`` if they anticommute.
Returns ``None`` if ``self`` and ``other`` neither commute nor anticommute.
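A minimal sketch (both heads use the default commutation group ``0``, so they commute):
>>> from sympy.tensor.tensor import TensorIndexType, TensorHead, TensorSymmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> A = TensorHead('A', [Lorentz], TensorSymmetry.no_symmetry(1))
>>> B = TensorHead('B', [Lorentz], TensorSymmetry.no_symmetry(1))
>>> A.commutes_with(B)
0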
"""
r = TensorManager.get_comm(self.comm, other.comm)
return r
def _print(self):
return '%s(%s)' %(self.name, ','.join([str(x) for x in self.index_types]))
def __call__(self, *indices, **kw_args):
"""
Returns a tensor with indices.
Explanation
===========
There is a special behavior for indices denoted by ``True``: they are
considered auto-matrix indices, their slots are automatically filled, and
they give the tensor the behavior of a matrix or vector upon
multiplication with another tensor containing auto-matrix indices of the
same ``TensorIndexType``. This means the indices get summed over in the
same way as in matrix multiplication. For matrix behavior, define two
auto-matrix indices; for vector behavior, define just one.
Indices can also be strings, in which case the attribute
``index_types`` is used to convert them to proper ``TensorIndex``.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorSymmetry, TensorHead
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> a, b = tensor_indices('a,b', Lorentz)
>>> A = TensorHead('A', [Lorentz]*2, TensorSymmetry.no_symmetry(2))
>>> t = A(a, -b)
>>> t
A(a, -b)
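Indices can also be passed as strings, as mentioned above (here ``'-b'`` denotes a covariant index):
>>> A('a', '-b')
A(a, -b)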
"""
updated_indices = []
for idx, typ in zip(indices, self.index_types):
if isinstance(idx, str):
idx = idx.strip().replace(" ", "")
if idx.startswith('-'):
updated_indices.append(TensorIndex(idx[1:], typ,
is_up=False))
else:
updated_indices.append(TensorIndex(idx, typ))
else:
updated_indices.append(idx)
updated_indices += indices[len(updated_indices):]
tensor = Tensor(self, updated_indices, **kw_args)
return tensor.doit()
# Everything below this line is deprecated
def __pow__(self, other):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=SymPyDeprecationWarning)
if self.data is None:
raise ValueError("No power on abstract tensors.")
deprecate_data()
from .array import tensorproduct, tensorcontraction
metrics = [_.data for _ in self.index_types]
marray = self.data
marraydim = marray.rank()
for metric in metrics:
marray = tensorproduct(marray, metric, marray)
marray = tensorcontraction(marray, (0, marraydim), (marraydim+1, marraydim+2))
return marray ** (other * S.Half)
@property
def data(self):
deprecate_data()
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
deprecate_data()
_tensor_data_substitution_dict[self] = data
@data.deleter
def data(self):
deprecate_data()
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
def __iter__(self):
deprecate_data()
return self.data.__iter__()
def _components_data_full_destroy(self):
"""
EXPERIMENTAL: do not rely on this API method.
Destroy the components data associated with the ``TensorHead`` object;
this checks for attached components data and destroys them too.
"""
# do not garbage collect Kronecker tensor (it should be done by
# ``TensorIndexType`` garbage collection)
deprecate_data()
if self.name == "KD":
return
# the data attached to a tensor must be deleted only by the TensorHead
# destructor. If the TensorHead is deleted, it means that there are no
# more instances of that tensor anywhere.
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
def tensor_heads(s, index_types, symmetry=None, comm=0):
"""
Returns a sequence of ``TensorHead`` objects from a string ``s`` of names
(a single ``TensorHead`` if only one name is given).
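A minimal sketch of typical usage (the names are illustrative):
>>> from sympy.tensor.tensor import TensorIndexType, tensor_heads
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> p, q = tensor_heads('p,q', [Lorentz])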
"""
if isinstance(s, str):
names = [x.name for x in symbols(s, seq=True)]
else:
raise ValueError('expecting a string')
thlist = [TensorHead(name, index_types, symmetry, comm) for name in names]
if len(thlist) == 1:
return thlist[0]
return thlist
class _TensorMetaclass(ManagedProperties, ABCMeta):
pass
class TensExpr(Expr, metaclass=_TensorMetaclass):
"""
Abstract base class for tensor expressions
Notes
=====
A tensor expression is an expression formed by tensors;
currently the sums of tensors are distributed.
A ``TensExpr`` can be a ``TensAdd`` or a ``TensMul``.
``TensMul`` objects are formed by products of component tensors,
and include a coefficient, which is a SymPy expression.
In the internal representation contracted indices are represented
by ``(ipos1, ipos2, icomp1, icomp2)``, where ``icomp1`` is the position
of the component tensor with contravariant index, ``ipos1`` is the
slot which the index occupies in that component tensor.
Contracted indices are therefore nameless in the internal representation.
"""
_op_priority = 12.0
is_commutative = False
def __neg__(self):
return self*S.NegativeOne
def __abs__(self):
raise NotImplementedError
def __add__(self, other):
return TensAdd(self, other).doit()
def __radd__(self, other):
return TensAdd(other, self).doit()
def __sub__(self, other):
return TensAdd(self, -other).doit()
def __rsub__(self, other):
return TensAdd(other, -self).doit()
def __mul__(self, other):
"""
Multiply two tensors using Einstein summation convention.
Explanation
===========
If the two tensors have an index in common, one contravariant
and the other covariant, in their product the indices are summed
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensor_heads('p,q', [Lorentz])
>>> t1 = p(m0)
>>> t2 = q(-m0)
>>> t1*t2
p(L_0)*q(-L_0)
"""
return TensMul(self, other).doit()
def __rmul__(self, other):
return TensMul(other, self).doit()
def __truediv__(self, other):
other = _sympify(other)
if isinstance(other, TensExpr):
raise ValueError('cannot divide by a tensor')
return TensMul(self, S.One/other).doit()
def __rtruediv__(self, other):
raise ValueError('cannot divide by a tensor')
def __pow__(self, other):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=SymPyDeprecationWarning)
if self.data is None:
raise ValueError("No power without ndarray data.")
deprecate_data()
from .array import tensorproduct, tensorcontraction
free = self.free
marray = self.data
mdim = marray.rank()
for metric in free:
marray = tensorcontraction(
tensorproduct(
marray,
metric[0].tensor_index_type.data,
marray),
(0, mdim), (mdim+1, mdim+2)
)
return marray ** (other * S.Half)
def __rpow__(self, other):
raise NotImplementedError
@property
@abstractmethod
def nocoeff(self):
raise NotImplementedError("abstract method")
@property
@abstractmethod
def coeff(self):
raise NotImplementedError("abstract method")
@abstractmethod
def get_indices(self):
raise NotImplementedError("abstract method")
@abstractmethod
def get_free_indices(self): # type: () -> List[TensorIndex]
raise NotImplementedError("abstract method")
@abstractmethod
def _replace_indices(self, repl): # type: (tDict[TensorIndex, TensorIndex]) -> TensExpr
raise NotImplementedError("abstract method")
def fun_eval(self, *index_tuples):
deprecate_fun_eval()
return self.substitute_indices(*index_tuples)
def get_matrix(self):
"""
DEPRECATED: do not use.
Returns ndarray components data as a matrix, if components data are
available and ndarray dimension does not exceed 2.
"""
from sympy.matrices.dense import Matrix
deprecate_data()
if 0 < self.rank <= 2:
rows = self.data.shape[0]
columns = self.data.shape[1] if self.rank == 2 else 1
if self.rank == 2:
mat_list = []
for i in range(rows):
mat_list.append([])
for j in range(columns):
mat_list[i].append(self[i, j])
else:
mat_list = [None] * rows
for i in range(rows):
mat_list[i] = self[i]
return Matrix(mat_list)
else:
raise NotImplementedError(
"missing multidimensional reduction to matrix.")
@staticmethod
def _get_indices_permutation(indices1, indices2):
return [indices1.index(i) for i in indices2]
def expand(self, **hints):
return _expand(self, **hints).doit()
def _expand(self, **kwargs):
return self
def _get_free_indices_set(self):
indset = set()
for arg in self.args:
if isinstance(arg, TensExpr):
indset.update(arg._get_free_indices_set())
return indset
def _get_dummy_indices_set(self):
indset = set()
for arg in self.args:
if isinstance(arg, TensExpr):
indset.update(arg._get_dummy_indices_set())
return indset
def _get_indices_set(self):
indset = set()
for arg in self.args:
if isinstance(arg, TensExpr):
indset.update(arg._get_indices_set())
return indset
@property
def _iterate_dummy_indices(self):
dummy_set = self._get_dummy_indices_set()
def recursor(expr, pos):
if isinstance(expr, TensorIndex):
if expr in dummy_set:
yield (expr, pos)
elif isinstance(expr, (Tuple, TensExpr)):
for p, arg in enumerate(expr.args):
yield from recursor(arg, pos+(p,))
return recursor(self, ())
@property
def _iterate_free_indices(self):
free_set = self._get_free_indices_set()
def recursor(expr, pos):
if isinstance(expr, TensorIndex):
if expr in free_set:
yield (expr, pos)
elif isinstance(expr, (Tuple, TensExpr)):
for p, arg in enumerate(expr.args):
yield from recursor(arg, pos+(p,))
return recursor(self, ())
@property
def _iterate_indices(self):
def recursor(expr, pos):
if isinstance(expr, TensorIndex):
yield (expr, pos)
elif isinstance(expr, (Tuple, TensExpr)):
for p, arg in enumerate(expr.args):
yield from recursor(arg, pos+(p,))
return recursor(self, ())
@staticmethod
def _contract_and_permute_with_metric(metric, array, pos, dim):
# TODO: add possibility of metric after (spinors)
from .array import tensorcontraction, tensorproduct, permutedims
array = tensorcontraction(tensorproduct(metric, array), (1, 2+pos))
permu = list(range(dim))
permu[0], permu[pos] = permu[pos], permu[0]
return permutedims(array, permu)
@staticmethod
def _match_indices_with_other_tensor(array, free_ind1, free_ind2, replacement_dict):
from .array import permutedims
index_types1 = [i.tensor_index_type for i in free_ind1]
# Check if variance of indices needs to be fixed:
pos2up = []
pos2down = []
free2remaining = free_ind2[:]
for pos1, index1 in enumerate(free_ind1):
if index1 in free2remaining:
pos2 = free2remaining.index(index1)
free2remaining[pos2] = None
continue
if -index1 in free2remaining:
pos2 = free2remaining.index(-index1)
free2remaining[pos2] = None
free_ind2[pos2] = index1
if index1.is_up:
pos2up.append(pos2)
else:
pos2down.append(pos2)
else:
index2 = free2remaining[pos1]
if index2 is None:
raise ValueError("incompatible indices: %s and %s" % (free_ind1, free_ind2))
free2remaining[pos1] = None
free_ind2[pos1] = index1
if index1.is_up ^ index2.is_up:
if index1.is_up:
pos2up.append(pos1)
else:
pos2down.append(pos1)
if len(set(free_ind1) & set(free_ind2)) < len(free_ind1):
raise ValueError("incompatible indices: %s and %s" % (free_ind1, free_ind2))
# Raise indices:
for pos in pos2up:
index_type_pos = index_types1[pos] # type: TensorIndexType
if index_type_pos not in replacement_dict:
raise ValueError("No metric provided to lower index")
metric = replacement_dict[index_type_pos]
metric_inverse = _TensorDataLazyEvaluator.inverse_matrix(metric)
array = TensExpr._contract_and_permute_with_metric(metric_inverse, array, pos, len(free_ind1))
# Lower indices:
for pos in pos2down:
index_type_pos = index_types1[pos] # type: TensorIndexType
if index_type_pos not in replacement_dict:
raise ValueError("No metric provided to lower index")
metric = replacement_dict[index_type_pos]
array = TensExpr._contract_and_permute_with_metric(metric, array, pos, len(free_ind1))
if free_ind1:
permutation = TensExpr._get_indices_permutation(free_ind2, free_ind1)
array = permutedims(array, permutation)
if hasattr(array, "rank") and array.rank() == 0:
array = array[()]
return free_ind2, array
def replace_with_arrays(self, replacement_dict, indices=None):
"""
Replace the tensorial expressions with arrays. The final array will
correspond to the N-dimensional array with indices arranged according
to ``indices``.
Parameters
==========
replacement_dict
dictionary containing the replacement rules for tensors.
indices
the index order with respect to which the array is read. The
original index order will be used if no value is passed.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices
>>> from sympy.tensor.tensor import TensorHead
>>> from sympy import symbols, diag
>>> L = TensorIndexType("L")
>>> i, j = tensor_indices("i j", L)
>>> A = TensorHead("A", [L])
>>> A(i).replace_with_arrays({A(i): [1, 2]}, [i])
[1, 2]
Since ``indices`` is optional, we can also call ``replace_with_arrays`` in
this way if no specific index order is needed:
>>> A(i).replace_with_arrays({A(i): [1, 2]})
[1, 2]
>>> expr = A(i)*A(j)
>>> expr.replace_with_arrays({A(i): [1, 2]})
[[1, 2], [2, 4]]
For contractions, specify the metric of the ``TensorIndexType``, which
in this case is ``L``, in its covariant form:
>>> expr = A(i)*A(-i)
>>> expr.replace_with_arrays({A(i): [1, 2], L: diag(1, -1)})
-3
Symmetrization of an array:
>>> H = TensorHead("H", [L, L])
>>> a, b, c, d = symbols("a b c d")
>>> expr = H(i, j)/2 + H(j, i)/2
>>> expr.replace_with_arrays({H(i, j): [[a, b], [c, d]]})
[[a, b/2 + c/2], [b/2 + c/2, d]]
Anti-symmetrization of an array:
>>> expr = H(i, j)/2 - H(j, i)/2
>>> repl = {H(i, j): [[a, b], [c, d]]}
>>> expr.replace_with_arrays(repl)
[[0, b/2 - c/2], [-b/2 + c/2, 0]]
The same expression can be read as the transpose by inverting ``i`` and
``j``:
>>> expr.replace_with_arrays(repl, [j, i])
[[0, -b/2 + c/2], [b/2 - c/2, 0]]
"""
from .array import Array
indices = indices or []
replacement_dict = {tensor: Array(array) for tensor, array in replacement_dict.items()}
# Check dimensions of replaced arrays:
for tensor, array in replacement_dict.items():
if isinstance(tensor, TensorIndexType):
expected_shape = [tensor.dim for i in range(2)]
else:
expected_shape = [index_type.dim for index_type in tensor.index_types]
if len(expected_shape) != array.rank() or (not all(dim1 == dim2 if
dim1.is_number else True for dim1, dim2 in zip(expected_shape,
array.shape))):
raise ValueError("shapes for tensor %s expected to be %s, "\
"replacement array shape is %s" % (tensor, expected_shape,
array.shape))
ret_indices, array = self._extract_data(replacement_dict)
last_indices, array = self._match_indices_with_other_tensor(array, indices, ret_indices, replacement_dict)
return array
def _check_add_Sum(self, expr, index_symbols):
from sympy.concrete.summations import Sum
indices = self.get_indices()
dum = self.dum
sum_indices = [ (index_symbols[i], 0,
indices[i].tensor_index_type.dim-1) for i, j in dum]
if sum_indices:
expr = Sum(expr, *sum_indices)
return expr
def _expand_partial_derivative(self):
# simply delegate the _expand_partial_derivative() to
# its arguments to expand a possibly found PartialDerivative
return self.func(*[
a._expand_partial_derivative()
if isinstance(a, TensExpr) else a
for a in self.args])
class TensAdd(TensExpr, AssocOp):
"""
Sum of tensors.
Parameters
==========
free_args : list of the free indices
Attributes
==========
``args`` : tuple of addends
``rank`` : rank of the tensor
``free_args`` : list of the free indices in sorted order
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_heads, tensor_indices
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> a, b = tensor_indices('a,b', Lorentz)
>>> p, q = tensor_heads('p,q', [Lorentz])
>>> t = p(a) + q(a); t
p(a) + q(a)
Examples with components data added to the tensor expression:
>>> from sympy import symbols, diag
>>> x, y, z, t = symbols("x y z t")
>>> repl = {}
>>> repl[Lorentz] = diag(1, -1, -1, -1)
>>> repl[p(a)] = [1, 2, 3, 4]
>>> repl[q(a)] = [x, y, z, t]
The following is the element-wise sum of the two arrays assigned above:
>>> expr = p(a) + q(a)
>>> expr.replace_with_arrays(repl, [a])
[x + 1, y + 2, z + 3, t + 4]
"""
def __new__(cls, *args, **kw_args):
args = [_sympify(x) for x in args if x]
args = TensAdd._tensAdd_flatten(args)
args.sort(key=default_sort_key)
if not args:
return S.Zero
if len(args) == 1:
return args[0]
return Basic.__new__(cls, *args, **kw_args)
@property
def coeff(self):
return S.One
@property
def nocoeff(self):
return self
def get_free_indices(self): # type: () -> List[TensorIndex]
return self.free_indices
def _replace_indices(self, repl): # type: (tDict[TensorIndex, TensorIndex]) -> TensExpr
newargs = [arg._replace_indices(repl) if isinstance(arg, TensExpr) else arg for arg in self.args]
return self.func(*newargs)
@memoize_property
def rank(self):
if isinstance(self.args[0], TensExpr):
return self.args[0].rank
else:
return 0
@memoize_property
def free_args(self):
if isinstance(self.args[0], TensExpr):
return self.args[0].free_args
else:
return []
@memoize_property
def free_indices(self):
if isinstance(self.args[0], TensExpr):
return self.args[0].get_free_indices()
else:
return set()
def doit(self, **kwargs):
deep = kwargs.get('deep', True)
if deep:
args = [arg.doit(**kwargs) for arg in self.args]
else:
args = self.args
if not args:
return S.Zero
if len(args) == 1 and not isinstance(args[0], TensExpr):
return args[0]
# now check that all addends have the same indices:
TensAdd._tensAdd_check(args)
# if TensAdd has only 1 element in its `args`:
if len(args) == 1: # and isinstance(args[0], TensMul):
return args[0]
# Remove zeros:
args = [x for x in args if x]
# if there are no more args (i.e. have cancelled out),
# just return zero:
if not args:
return S.Zero
if len(args) == 1:
return args[0]
# Collect terms appearing more than once, differing by their coefficients:
args = TensAdd._tensAdd_collect_terms(args)
# collect canonicalized terms
def sort_key(t):
if not isinstance(t, TensExpr):
return [], [], []
if hasattr(t, "_index_structure") and hasattr(t, "components"):
x = get_index_structure(t)
return t.components, x.free, x.dum
return [], [], []
args.sort(key=sort_key)
if not args:
return S.Zero
# if there is only one component tensor, return it
if len(args) == 1:
return args[0]
obj = self.func(*args)
return obj
@staticmethod
def _tensAdd_flatten(args):
# flatten TensAdd, coerce terms which are not tensors to tensors
a = []
for x in args:
if isinstance(x, (Add, TensAdd)):
a.extend(list(x.args))
else:
a.append(x)
args = [x for x in a if x.coeff]
return args
@staticmethod
def _tensAdd_check(args):
# check that all addends have the same free indices
def get_indices_set(x): # type: (Expr) -> tSet[TensorIndex]
if isinstance(x, TensExpr):
return set(x.get_free_indices())
return set()
indices0 = get_indices_set(args[0]) # type: tSet[TensorIndex]
list_indices = [get_indices_set(arg) for arg in args[1:]] # type: List[tSet[TensorIndex]]
if not all(x == indices0 for x in list_indices):
raise ValueError('all tensors must have the same indices')
@staticmethod
def _tensAdd_collect_terms(args):
# collect TensMul terms differing at most by their coefficient
terms_dict = defaultdict(list)
scalars = S.Zero
if isinstance(args[0], TensExpr):
free_indices = set(args[0].get_free_indices())
else:
free_indices = set()
for arg in args:
if not isinstance(arg, TensExpr):
if free_indices != set():
raise ValueError("wrong valence")
scalars += arg
continue
if free_indices != set(arg.get_free_indices()):
raise ValueError("wrong valence")
# TODO: what is the part which is not a coeff?
# needs an implementation similar to .as_coeff_Mul()
terms_dict[arg.nocoeff].append(arg.coeff)
new_args = [TensMul(Add(*coeff), t).doit() for t, coeff in terms_dict.items() if Add(*coeff) != 0]
if isinstance(scalars, Add):
new_args = list(scalars.args) + new_args
elif scalars != 0:
new_args = [scalars] + new_args
return new_args
def get_indices(self):
indices = []
for arg in self.args:
indices.extend([i for i in get_indices(arg) if i not in indices])
return indices
def _expand(self, **hints):
return TensAdd(*[_expand(i, **hints) for i in self.args])
def __call__(self, *indices):
deprecate_fun_eval()
free_args = self.free_args
indices = list(indices)
if [x.tensor_index_type for x in indices] != [x.tensor_index_type for x in free_args]:
raise ValueError('incompatible types')
if indices == free_args:
return self
index_tuples = list(zip(free_args, indices))
a = [x.func(*x.substitute_indices(*index_tuples).args) for x in self.args]
res = TensAdd(*a).doit()
return res
def canon_bp(self):
"""
Canonicalize using the Butler-Portugal algorithm for canonicalization
under monoterm symmetries.
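A minimal sketch (``A`` is antisymmetric, so the symmetrized sum cancels):
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, TensorSymmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1 = tensor_indices('m0,m1', Lorentz)
>>> A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2))
>>> (A(m0, m1) + A(m1, m0)).canon_bp()
0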
"""
expr = self.expand()
args = [canon_bp(x) for x in expr.args]
res = TensAdd(*args).doit()
return res
def equals(self, other):
other = _sympify(other)
if isinstance(other, TensMul) and other.coeff == 0:
return all(x.coeff == 0 for x in self.args)
if isinstance(other, TensExpr):
if self.rank != other.rank:
return False
if isinstance(other, TensAdd):
if set(self.args) != set(other.args):
return False
else:
return True
t = self - other
if not isinstance(t, TensExpr):
return t == 0
else:
if isinstance(t, TensMul):
return t.coeff == 0
else:
return all(x.coeff == 0 for x in t.args)
def __getitem__(self, item):
deprecate_data()
return self.data[item]
def contract_delta(self, delta):
args = [x.contract_delta(delta) for x in self.args]
t = TensAdd(*args).doit()
return canon_bp(t)
def contract_metric(self, g):
"""
Raise or lower indices with the metric ``g``.
Parameters
==========
g : metric
Notes
=====
see the ``TensorIndexType`` docstring for the contraction conventions
"""
args = [contract_metric(x, g) for x in self.args]
t = TensAdd(*args).doit()
return canon_bp(t)
def substitute_indices(self, *index_tuples):
new_args = []
for arg in self.args:
if isinstance(arg, TensExpr):
arg = arg.substitute_indices(*index_tuples)
new_args.append(arg)
return TensAdd(*new_args).doit()
def _print(self):
a = []
args = self.args
for x in args:
a.append(str(x))
s = ' + '.join(a)
s = s.replace('+ -', '- ')
return s
def _extract_data(self, replacement_dict):
from sympy.tensor.array import Array, permutedims
args_indices, arrays = zip(*[
arg._extract_data(replacement_dict) if
isinstance(arg, TensExpr) else ([], arg) for arg in self.args
])
arrays = [Array(i) for i in arrays]
ref_indices = args_indices[0]
for i in range(1, len(args_indices)):
indices = args_indices[i]
array = arrays[i]
permutation = TensMul._get_indices_permutation(indices, ref_indices)
arrays[i] = permutedims(array, permutation)
return ref_indices, sum(arrays, Array.zeros(*array.shape))
@property
def data(self):
deprecate_data()
return _tensor_data_substitution_dict[self.expand()]
@data.setter
def data(self, data):
deprecate_data()
_tensor_data_substitution_dict[self] = data
@data.deleter
def data(self):
deprecate_data()
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
def __iter__(self):
deprecate_data()
if not self.data:
raise ValueError("No iteration on abstract tensors")
return self.data.flatten().__iter__()
def _eval_rewrite_as_Indexed(self, *args):
return Add.fromiter(args)
def _eval_partial_derivative(self, s):
# Evaluation like Add
list_addends = []
for a in self.args:
if isinstance(a, TensExpr):
list_addends.append(a._eval_partial_derivative(s))
# do not call diff if s is not a symbol
elif s._diff_wrt:
list_addends.append(a._eval_derivative(s))
return self.func(*list_addends)
class Tensor(TensExpr):
"""
Base tensor class, i.e. this represents a tensor, the single unit to be
put into an expression.
Explanation
===========
This object is usually created from a ``TensorHead``, by attaching indices
to it. Indices preceded by a minus sign are considered covariant,
otherwise contravariant.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead
>>> Lorentz = TensorIndexType("Lorentz", dummy_name="L")
>>> mu, nu = tensor_indices('mu nu', Lorentz)
>>> A = TensorHead("A", [Lorentz, Lorentz])
>>> A(mu, -nu)
A(mu, -nu)
>>> A(mu, -mu)
A(L_0, -L_0)
It is also possible to use symbols instead of indices (appropriate indices
are then generated automatically).
>>> from sympy import Symbol
>>> x = Symbol('x')
>>> A(x, mu)
A(x, mu)
>>> A(x, -x)
A(L_0, -L_0)
"""
is_commutative = False
_index_structure = None # type: _IndexStructure
args: tTuple[TensorHead, Tuple]
def __new__(cls, tensor_head, indices, *, is_canon_bp=False, **kw_args):
indices = cls._parse_indices(tensor_head, indices)
obj = Basic.__new__(cls, tensor_head, Tuple(*indices), **kw_args)
obj._index_structure = _IndexStructure.from_indices(*indices)
obj._free = obj._index_structure.free[:]
obj._dum = obj._index_structure.dum[:]
obj._ext_rank = obj._index_structure._ext_rank
obj._coeff = S.One
obj._nocoeff = obj
obj._component = tensor_head
obj._components = [tensor_head]
if tensor_head.rank != len(indices):
raise ValueError("wrong number of indices")
obj.is_canon_bp = is_canon_bp
obj._index_map = Tensor._build_index_map(indices, obj._index_structure)
return obj
@property
def free(self):
return self._free
@property
def dum(self):
return self._dum
@property
def ext_rank(self):
return self._ext_rank
@property
def coeff(self):
return self._coeff
@property
def nocoeff(self):
return self._nocoeff
@property
def component(self):
return self._component
@property
def components(self):
return self._components
@property
def head(self):
return self.args[0]
@property
def indices(self):
return self.args[1]
@property
def free_indices(self):
return set(self._index_structure.get_free_indices())
@property
def index_types(self):
return self.head.index_types
@property
def rank(self):
return len(self.free_indices)
@staticmethod
def _build_index_map(indices, index_structure):
index_map = {}
for idx in indices:
index_map[idx] = (indices.index(idx),)
return index_map
def doit(self, **kwargs):
args, indices, free, dum = TensMul._tensMul_contract_indices([self])
return args[0]
@staticmethod
def _parse_indices(tensor_head, indices):
if not isinstance(indices, (tuple, list, Tuple)):
raise TypeError("indices should be an array, got %s" % type(indices))
indices = list(indices)
for i, index in enumerate(indices):
if isinstance(index, Symbol):
indices[i] = TensorIndex(index, tensor_head.index_types[i], True)
elif isinstance(index, Mul):
c, e = index.as_coeff_Mul()
if c == -1 and isinstance(e, Symbol):
indices[i] = TensorIndex(e, tensor_head.index_types[i], False)
else:
raise ValueError("index not understood: %s" % index)
elif not isinstance(index, TensorIndex):
raise TypeError("wrong type for index: %s is %s" % (index, type(index)))
return indices
def _set_new_index_structure(self, im, is_canon_bp=False):
indices = im.get_indices()
return self._set_indices(*indices, is_canon_bp=is_canon_bp)
def _set_indices(self, *indices, is_canon_bp=False, **kw_args):
if len(indices) != self.ext_rank:
raise ValueError("indices length mismatch")
return self.func(self.args[0], indices, is_canon_bp=is_canon_bp).doit()
def _get_free_indices_set(self):
return {i[0] for i in self._index_structure.free}
def _get_dummy_indices_set(self):
dummy_pos = set(itertools.chain(*self._index_structure.dum))
return {idx for i, idx in enumerate(self.args[1]) if i in dummy_pos}
def _get_indices_set(self):
return set(self.args[1].args)
@property
def free_in_args(self):
return [(ind, pos, 0) for ind, pos in self.free]
@property
def dum_in_args(self):
return [(p1, p2, 0, 0) for p1, p2 in self.dum]
@property
def free_args(self):
return sorted([x[0] for x in self.free])
def commutes_with(self, other):
"""
Returns ``0`` if ``self`` and ``other`` commute, ``1`` if they anticommute,
and ``None`` if they neither commute nor anticommute.
"""
if not isinstance(other, TensExpr):
return 0
elif isinstance(other, Tensor):
return self.component.commutes_with(other.component)
return NotImplementedError
def perm2tensor(self, g, is_canon_bp=False):
"""
Returns the tensor corresponding to the permutation ``g``.
For further details, see the method in ``TIDS`` with the same name.
"""
return perm2tensor(self, g, is_canon_bp)
def canon_bp(self):
if self.is_canon_bp:
return self
expr = self.expand()
g, dummies, msym = expr._index_structure.indices_canon_args()
v = components_canon_args([expr.component])
can = canonicalize(g, dummies, msym, *v)
if can == 0:
return S.Zero
tensor = self.perm2tensor(can, True)
return tensor
def split(self):
return [self]
def _expand(self, **kwargs):
return self
def sorted_components(self):
return self
def get_indices(self): # type: () -> List[TensorIndex]
"""
Get a list of indices, corresponding to those of the tensor.
"""
return list(self.args[1])
def get_free_indices(self): # type: () -> List[TensorIndex]
"""
Get a list of free indices, corresponding to those of the tensor.
"""
return self._index_structure.get_free_indices()
def _replace_indices(self, repl): # type: (tDict[TensorIndex, TensorIndex]) -> Tensor
# TODO: this could be optimized by only swapping the indices
# instead of visiting the whole expression tree:
return self.xreplace(repl)
def as_base_exp(self):
return self, S.One
def substitute_indices(self, *index_tuples):
"""
Return a tensor with free indices substituted according to ``index_tuples``.
``index_tuples`` list of tuples ``(old_index, new_index)``.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads, TensorSymmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz)
>>> A, B = tensor_heads('A,B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
>>> t = A(i, k)*B(-k, -j); t
A(i, L_0)*B(-L_0, -j)
>>> t.substitute_indices((i, k),(-j, l))
A(k, L_0)*B(-L_0, l)
"""
indices = []
for index in self.indices:
for ind_old, ind_new in index_tuples:
if (index.name == ind_old.name and index.tensor_index_type ==
ind_old.tensor_index_type):
if index.is_up == ind_old.is_up:
indices.append(ind_new)
else:
indices.append(-ind_new)
break
else:
indices.append(index)
return self.head(*indices)
def __call__(self, *indices):
deprecate_fun_eval()
free_args = self.free_args
indices = list(indices)
if [x.tensor_index_type for x in indices] != [x.tensor_index_type for x in free_args]:
raise ValueError('incompatible types')
if indices == free_args:
return self
t = self.substitute_indices(*list(zip(free_args, indices)))
# object is rebuilt in order to make sure that all contracted indices
# get recognized as dummies, but only if there are contracted indices.
if len({i if i.is_up else -i for i in indices}) != len(indices):
return t.func(*t.args)
return t
# TODO: put this into TensExpr?
def __iter__(self):
deprecate_data()
return self.data.__iter__()
# TODO: put this into TensExpr?
def __getitem__(self, item):
deprecate_data()
return self.data[item]
def _extract_data(self, replacement_dict):
from .array import Array
for k, v in replacement_dict.items():
if isinstance(k, Tensor) and k.args[0] == self.args[0]:
other = k
array = v
break
else:
raise ValueError("%s not found in %s" % (self, replacement_dict))
# TODO: inefficient, this should be done at root level only:
replacement_dict = {k: Array(v) for k, v in replacement_dict.items()}
array = Array(array)
dum1 = self.dum
dum2 = other.dum
if len(dum2) > 0:
for pair in dum2:
# allow `dum2` if the contained values are also in `dum1`.
if pair not in dum1:
raise NotImplementedError("%s with contractions is not implemented" % other)
# Remove elements in `dum2` from `dum1`:
dum1 = [pair for pair in dum1 if pair not in dum2]
if len(dum1) > 0:
indices1 = self.get_indices()
indices2 = other.get_indices()
repl = {}
for p1, p2 in dum1:
repl[indices2[p2]] = -indices2[p1]
for pos in (p1, p2):
if indices1[pos].is_up ^ indices2[pos].is_up:
metric = replacement_dict[indices1[pos].tensor_index_type]
if indices1[pos].is_up:
metric = _TensorDataLazyEvaluator.inverse_matrix(metric)
array = self._contract_and_permute_with_metric(metric, array, pos, len(indices2))
other = other.xreplace(repl).doit()
array = _TensorDataLazyEvaluator.data_contract_dum([array], dum1, len(indices2))
free_ind1 = self.get_free_indices()
free_ind2 = other.get_free_indices()
return self._match_indices_with_other_tensor(array, free_ind1, free_ind2, replacement_dict)
@property
def data(self):
deprecate_data()
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
deprecate_data()
# TODO: check data compatibility with properties of tensor.
_tensor_data_substitution_dict[self] = data
@data.deleter
def data(self):
deprecate_data()
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
if self.metric in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self.metric]
def _print(self):
indices = [str(ind) for ind in self.indices]
component = self.component
if component.rank > 0:
return ('%s(%s)' % (component.name, ', '.join(indices)))
else:
return ('%s' % component.name)
def equals(self, other):
if other == 0:
return self.coeff == 0
other = _sympify(other)
if not isinstance(other, TensExpr):
assert not self.components
return S.One == other
def _get_compar_comp(self):
t = self.canon_bp()
r = (t.coeff, tuple(t.components), \
tuple(sorted(t.free)), tuple(sorted(t.dum)))
return r
return _get_compar_comp(self) == _get_compar_comp(other)
def contract_metric(self, g):
# if metric is not the same, ignore this step:
if self.component != g:
return self
# in case there are free indices, do not perform anything:
if len(self.free) != 0:
return self
#antisym = g.index_types[0].metric_antisym
if g.symmetry == TensorSymmetry.fully_symmetric(-2):
antisym = 1
elif g.symmetry == TensorSymmetry.fully_symmetric(2):
antisym = 0
elif g.symmetry == TensorSymmetry.no_symmetry(2):
antisym = None
else:
raise NotImplementedError
sign = S.One
typ = g.index_types[0]
if not antisym:
# g(i, -i)
sign = sign*typ.dim
else:
# g(i, -i)
sign = sign*typ.dim
dp0, dp1 = self.dum[0]
if dp0 < dp1:
# g(i, -i) = -D with antisymmetric metric
sign = -sign
return sign
def contract_delta(self, metric):
return self.contract_metric(metric)
def _eval_rewrite_as_Indexed(self, tens, indices):
from sympy.tensor.indexed import Indexed
# TODO: replace .args[0] with .name:
index_symbols = [i.args[0] for i in self.get_indices()]
expr = Indexed(tens.args[0], *index_symbols)
return self._check_add_Sum(expr, index_symbols)
def _eval_partial_derivative(self, s): # type: (Tensor) -> Expr
if not isinstance(s, Tensor):
return S.Zero
else:
# @a_i/@a_k = delta_i^k
# @a_i/@a^k = g_ij delta^j_k
# @a^i/@a^k = delta^i_k
# @a^i/@a_k = g^ij delta_j^k
# TODO: if there is no metric present, the derivative should be zero?
if self.head != s.head:
return S.Zero
# if heads are the same, provide delta and/or metric products
# for every free index pair in the appropriate tensor
# assumed that the free indices are in proper order
# A contravariant index in the derivative becomes covariant
# after performing the derivative and vice versa
kronecker_delta_list = [1]
# this does not guarantee a correct index order
for (count, (iself, iother)) in enumerate(zip(self.get_free_indices(), s.get_free_indices())):
if iself.tensor_index_type != iother.tensor_index_type:
raise ValueError("index types not compatible")
else:
tensor_index_type = iself.tensor_index_type
tensor_metric = tensor_index_type.metric
dummy = TensorIndex("d_" + str(count), tensor_index_type,
is_up=iself.is_up)
if iself.is_up == iother.is_up:
kroneckerdelta = tensor_index_type.delta(iself, -iother)
else:
kroneckerdelta = (
TensMul(tensor_metric(iself, dummy),
tensor_index_type.delta(-dummy, -iother))
)
kronecker_delta_list.append(kroneckerdelta)
return TensMul.fromiter(kronecker_delta_list).doit()
# doit necessary to rename dummy indices accordingly
class TensMul(TensExpr, AssocOp):
"""
Product of tensors.
Parameters
==========
coeff : SymPy coefficient of the tensor
args
Attributes
==========
``components`` : list of ``TensorHead`` of the component tensors
``types`` : list of nonrepeated ``TensorIndexType``
``free`` : list of ``(ind, ipos, icomp)``, see Notes
``dum`` : list of ``(ipos1, ipos2, icomp1, icomp2)``, see Notes
``ext_rank`` : rank of the tensor counting the dummy indices
``rank`` : rank of the tensor
``coeff`` : SymPy coefficient of the tensor
``free_args`` : list of the free indices in sorted order
``is_canon_bp`` : ``True`` if the tensor is in canonical form
Notes
=====
``args[0]`` list of ``TensorHead`` of the component tensors.
``args[1]`` list of ``(ind, ipos, icomp)``
where ``ind`` is a free index, ``ipos`` is the slot position
of ``ind`` in the ``icomp``-th component tensor.
``args[2]`` list of tuples representing dummy indices.
``(ipos1, ipos2, icomp1, icomp2)`` indicates that the contravariant
dummy index is the ``ipos1``-th slot position in the ``icomp1``-th
component tensor; the corresponding covariant index is
in the ``ipos2`` slot position in the ``icomp2``-th component tensor.
"""
identity = S.One
_index_structure = None # type: _IndexStructure
def __new__(cls, *args, **kw_args):
is_canon_bp = kw_args.get('is_canon_bp', False)
args = list(map(_sympify, args))
# Flatten:
args = [i for arg in args for i in (arg.args if isinstance(arg, (TensMul, Mul)) else [arg])]
args, indices, free, dum = TensMul._tensMul_contract_indices(args, replace_indices=False)
# Data for indices:
index_types = [i.tensor_index_type for i in indices]
index_structure = _IndexStructure(free, dum, index_types, indices, canon_bp=is_canon_bp)
obj = TensExpr.__new__(cls, *args)
obj._indices = indices
obj._index_types = index_types[:]
obj._index_structure = index_structure
obj._free = index_structure.free[:]
obj._dum = index_structure.dum[:]
obj._free_indices = {x[0] for x in obj.free}
obj._rank = len(obj.free)
obj._ext_rank = len(obj._index_structure.free) + 2*len(obj._index_structure.dum)
obj._coeff = S.One
obj._is_canon_bp = is_canon_bp
return obj
index_types = property(lambda self: self._index_types)
free = property(lambda self: self._free)
dum = property(lambda self: self._dum)
free_indices = property(lambda self: self._free_indices)
rank = property(lambda self: self._rank)
ext_rank = property(lambda self: self._ext_rank)
@staticmethod
def _indices_to_free_dum(args_indices):
free2pos1 = {}
free2pos2 = {}
dummy_data = []
indices = []
# Notation for positions (to better understand the code):
# `pos1`: position in the `args`.
# `pos2`: position in the indices.
# Example:
# A(i, j)*B(k, m, n)*C(p)
# `pos1` of `n` is 1 because it's in `B` (second `args` of TensMul).
# `pos2` of `n` is 4 because it's the fifth overall index.
# Counter for the index position wrt the whole expression:
pos2 = 0
for pos1, arg_indices in enumerate(args_indices):
for index_pos, index in enumerate(arg_indices):
if not isinstance(index, TensorIndex):
raise TypeError("expected TensorIndex")
if -index in free2pos1:
# Dummy index detected:
other_pos1 = free2pos1.pop(-index)
other_pos2 = free2pos2.pop(-index)
if index.is_up:
dummy_data.append((index, pos1, other_pos1, pos2, other_pos2))
else:
dummy_data.append((-index, other_pos1, pos1, other_pos2, pos2))
indices.append(index)
elif index in free2pos1:
raise ValueError("Repeated index: %s" % index)
else:
free2pos1[index] = pos1
free2pos2[index] = pos2
indices.append(index)
pos2 += 1
free = [(i, p) for (i, p) in free2pos2.items()]
free_names = [i.name for i in free2pos2.keys()]
dummy_data.sort(key=lambda x: x[3])
return indices, free, free_names, dummy_data
@staticmethod
def _dummy_data_to_dum(dummy_data):
return [(p2a, p2b) for (i, p1a, p1b, p2a, p2b) in dummy_data]
@staticmethod
def _tensMul_contract_indices(args, replace_indices=True):
replacements = [{} for _ in args]
#_index_order = all(_has_index_order(arg) for arg in args)
args_indices = [get_indices(arg) for arg in args]
indices, free, free_names, dummy_data = TensMul._indices_to_free_dum(args_indices)
cdt = defaultdict(int)
def dummy_name_gen(tensor_index_type):
nd = str(cdt[tensor_index_type])
cdt[tensor_index_type] += 1
return tensor_index_type.dummy_name + '_' + nd
if replace_indices:
for old_index, pos1cov, pos1contra, pos2cov, pos2contra in dummy_data:
index_type = old_index.tensor_index_type
while True:
dummy_name = dummy_name_gen(index_type)
if dummy_name not in free_names:
break
dummy = TensorIndex(dummy_name, index_type, True)
replacements[pos1cov][old_index] = dummy
replacements[pos1contra][-old_index] = -dummy
indices[pos2cov] = dummy
indices[pos2contra] = -dummy
args = [
arg._replace_indices(repl) if isinstance(arg, TensExpr) else arg
for arg, repl in zip(args, replacements)]
dum = TensMul._dummy_data_to_dum(dummy_data)
return args, indices, free, dum
@staticmethod
def _get_components_from_args(args):
"""
Get a list of ``Tensor`` objects having the same ``TIDS`` if multiplied
by one another.
"""
components = []
for arg in args:
if not isinstance(arg, TensExpr):
continue
if isinstance(arg, TensAdd):
continue
components.extend(arg.components)
return components
@staticmethod
def _rebuild_tensors_list(args, index_structure):
indices = index_structure.get_indices()
#tensors = [None for i in components] # pre-allocate list
ind_pos = 0
for i, arg in enumerate(args):
if not isinstance(arg, TensExpr):
continue
prev_pos = ind_pos
ind_pos += arg.ext_rank
args[i] = Tensor(arg.component, indices[prev_pos:ind_pos])
def doit(self, **kwargs):
is_canon_bp = self._is_canon_bp
deep = kwargs.get('deep', True)
if deep:
args = [arg.doit(**kwargs) for arg in self.args]
else:
args = self.args
args = [arg for arg in args if arg != self.identity]
# Extract non-tensor coefficients:
coeff = reduce(lambda a, b: a*b, [arg for arg in args if not isinstance(arg, TensExpr)], S.One)
args = [arg for arg in args if isinstance(arg, TensExpr)]
if len(args) == 0:
return coeff
if coeff != self.identity:
args = [coeff] + args
if coeff == 0:
return S.Zero
if len(args) == 1:
return args[0]
args, indices, free, dum = TensMul._tensMul_contract_indices(args)
# Data for indices:
index_types = [i.tensor_index_type for i in indices]
index_structure = _IndexStructure(free, dum, index_types, indices, canon_bp=is_canon_bp)
obj = self.func(*args)
obj._index_types = index_types
obj._index_structure = index_structure
obj._ext_rank = len(obj._index_structure.free) + 2*len(obj._index_structure.dum)
obj._coeff = coeff
obj._is_canon_bp = is_canon_bp
return obj
# TODO: this method should be private
# TODO: should this method be renamed _from_components_free_dum ?
@staticmethod
def from_data(coeff, components, free, dum, **kw_args):
return TensMul(coeff, *TensMul._get_tensors_from_components_free_dum(components, free, dum), **kw_args).doit()
@staticmethod
def _get_tensors_from_components_free_dum(components, free, dum):
"""
Get a list of ``Tensor`` objects by distributing ``free`` and ``dum`` indices on the ``components``.
"""
index_structure = _IndexStructure.from_components_free_dum(components, free, dum)
indices = index_structure.get_indices()
tensors = [None for i in components] # pre-allocate list
# distribute indices on components to build a list of tensors:
ind_pos = 0
for i, component in enumerate(components):
prev_pos = ind_pos
ind_pos += component.rank
tensors[i] = Tensor(component, indices[prev_pos:ind_pos])
return tensors
def _get_free_indices_set(self):
return {i[0] for i in self.free}
def _get_dummy_indices_set(self):
dummy_pos = set(itertools.chain(*self.dum))
return {idx for i, idx in enumerate(self._index_structure.get_indices()) if i in dummy_pos}
def _get_position_offset_for_indices(self):
arg_offset = [None for i in range(self.ext_rank)]
counter = 0
for i, arg in enumerate(self.args):
if not isinstance(arg, TensExpr):
continue
for j in range(arg.ext_rank):
arg_offset[j + counter] = counter
counter += arg.ext_rank
return arg_offset
@property
def free_args(self):
return sorted([x[0] for x in self.free])
@property
def components(self):
return self._get_components_from_args(self.args)
@property
def free_in_args(self):
arg_offset = self._get_position_offset_for_indices()
argpos = self._get_indices_to_args_pos()
return [(ind, pos-arg_offset[pos], argpos[pos]) for (ind, pos) in self.free]
@property
def coeff(self):
# return Mul.fromiter([c for c in self.args if not isinstance(c, TensExpr)])
return self._coeff
@property
def nocoeff(self):
return self.func(*[t for t in self.args if isinstance(t, TensExpr)]).doit()
@property
def dum_in_args(self):
arg_offset = self._get_position_offset_for_indices()
argpos = self._get_indices_to_args_pos()
return [(p1-arg_offset[p1], p2-arg_offset[p2], argpos[p1], argpos[p2]) for p1, p2 in self.dum]
def equals(self, other):
if other == 0:
return self.coeff == 0
other = _sympify(other)
if not isinstance(other, TensExpr):
assert not self.components
return self.coeff == other
return self.canon_bp() == other.canon_bp()
def get_indices(self):
"""
Returns the list of indices of the tensor.
Explanation
===========
The indices are listed in the order in which they appear in the
component tensors.
The dummy indices are given a name which does not collide with
the names of the free indices.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensor_heads('p,q', [Lorentz])
>>> t = p(m1)*g(m0,m2)
>>> t.get_indices()
[m1, m0, m2]
>>> t2 = p(m1)*g(-m1, m2)
>>> t2.get_indices()
[L_0, -L_0, m2]
"""
return self._indices
def get_free_indices(self): # type: () -> List[TensorIndex]
"""
Returns the list of free indices of the tensor.
Explanation
===========
The indices are listed in the order in which they appear in the
component tensors.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensor_heads('p,q', [Lorentz])
>>> t = p(m1)*g(m0,m2)
>>> t.get_free_indices()
[m1, m0, m2]
>>> t2 = p(m1)*g(-m1, m2)
>>> t2.get_free_indices()
[m2]
"""
return self._index_structure.get_free_indices()
def _replace_indices(self, repl): # type: (tDict[TensorIndex, TensorIndex]) -> TensExpr
return self.func(*[arg._replace_indices(repl) if isinstance(arg, TensExpr) else arg for arg in self.args])
def split(self):
"""
Returns a list of tensors, whose product is ``self``.
Explanation
===========
Dummy indices contracted among different tensor components
become free indices with the same name as the one used to
represent the dummy indices.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads, TensorSymmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> a, b, c, d = tensor_indices('a,b,c,d', Lorentz)
>>> A, B = tensor_heads('A,B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
>>> t = A(a,b)*B(-b,c)
>>> t
A(a, L_0)*B(-L_0, c)
>>> t.split()
[A(a, L_0), B(-L_0, c)]
"""
if self.args == ():
return [self]
splitp = []
res = 1
for arg in self.args:
if isinstance(arg, Tensor):
splitp.append(res*arg)
res = 1
else:
res *= arg
return splitp
def _expand(self, **hints):
# TODO: temporary solution, in the future this should be linked to
# `Expr.expand`.
args = [_expand(arg, **hints) for arg in self.args]
args1 = [arg.args if isinstance(arg, (Add, TensAdd)) else (arg,) for arg in args]
return TensAdd(*[
TensMul(*i) for i in itertools.product(*args1)]
)
def __neg__(self):
return TensMul(S.NegativeOne, self, is_canon_bp=self._is_canon_bp).doit()
def __getitem__(self, item):
deprecate_data()
return self.data[item]
def _get_args_for_traditional_printer(self):
args = list(self.args)
if (self.coeff < 0) == True:
# expressions like "-A(a)"
sign = "-"
if self.coeff == S.NegativeOne:
args = args[1:]
else:
args[0] = -args[0]
else:
sign = ""
return sign, args
def _sort_args_for_sorted_components(self):
"""
Returns the ``args`` sorted according to the components commutation
properties.
Explanation
===========
The sorting is done taking into account the commutation group
of the component tensors.
"""
cv = [arg for arg in self.args if isinstance(arg, TensExpr)]
sign = 1
n = len(cv) - 1
for i in range(n):
for j in range(n, i, -1):
c = cv[j-1].commutes_with(cv[j])
# if `c` is `None`, it neither commutes nor anticommutes, skip:
if c not in (0, 1):
continue
typ1 = sorted(set(cv[j-1].component.index_types), key=lambda x: x.name)
typ2 = sorted(set(cv[j].component.index_types), key=lambda x: x.name)
if (typ1, cv[j-1].component.name) > (typ2, cv[j].component.name):
cv[j-1], cv[j] = cv[j], cv[j-1]
# if `c` is 1, they anticommute, so change the sign:
if c:
sign = -sign
coeff = sign * self.coeff
if coeff != 1:
return [coeff] + cv
return cv
def sorted_components(self):
"""
Returns a tensor product with sorted components.
"""
return TensMul(*self._sort_args_for_sorted_components()).doit()
def perm2tensor(self, g, is_canon_bp=False):
"""
Returns the tensor corresponding to the permutation ``g``
For further details, see the method in ``TIDS`` with the same name.
"""
return perm2tensor(self, g, is_canon_bp=is_canon_bp)
def canon_bp(self):
"""
Canonicalize using the Butler-Portugal algorithm for canonicalization
under monoterm symmetries.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, TensorSymmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2))
>>> t = A(m0,-m1)*A(m1,-m0)
>>> t.canon_bp()
-A(L_0, L_1)*A(-L_0, -L_1)
>>> t = A(m0,-m1)*A(m1,-m2)*A(m2,-m0)
>>> t.canon_bp()
0
"""
if self._is_canon_bp:
return self
expr = self.expand()
if isinstance(expr, TensAdd):
return expr.canon_bp()
if not expr.components:
return expr
t = expr.sorted_components()
g, dummies, msym = t._index_structure.indices_canon_args()
v = components_canon_args(t.components)
can = canonicalize(g, dummies, msym, *v)
if can == 0:
return S.Zero
tmul = t.perm2tensor(can, True)
return tmul
def contract_delta(self, delta):
t = self.contract_metric(delta)
return t
def _get_indices_to_args_pos(self):
"""
Get a dict mapping the index position to TensMul's argument number.
"""
pos_map = dict()
pos_counter = 0
for arg_i, arg in enumerate(self.args):
if not isinstance(arg, TensExpr):
continue
assert isinstance(arg, Tensor)
for i in range(arg.ext_rank):
pos_map[pos_counter] = arg_i
pos_counter += 1
return pos_map
def contract_metric(self, g):
"""
Raise or lower indices with the metric ``g``.
Parameters
==========
g : metric
Notes
=====
See the ``TensorIndexType`` docstring for the contraction conventions.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensor_heads('p,q', [Lorentz])
>>> t = p(m0)*q(m1)*g(-m0, -m1)
>>> t.canon_bp()
metric(L_0, L_1)*p(-L_0)*q(-L_1)
>>> t.contract_metric(g).canon_bp()
p(L_0)*q(-L_0)
"""
expr = self.expand()
if self != expr:
expr = expr.canon_bp()
return expr.contract_metric(g)
pos_map = self._get_indices_to_args_pos()
args = list(self.args)
#antisym = g.index_types[0].metric_antisym
if g.symmetry == TensorSymmetry.fully_symmetric(-2):
antisym = 1
elif g.symmetry == TensorSymmetry.fully_symmetric(2):
antisym = 0
elif g.symmetry == TensorSymmetry.no_symmetry(2):
antisym = None
else:
raise NotImplementedError
# list of positions of the metric ``g`` inside ``args``
gpos = [i for i, x in enumerate(self.args) if isinstance(x, Tensor) and x.component == g]
if not gpos:
return self
# Sign is either 1 or -1, to correct the sign after metric contraction
# (for spinor indices).
sign = 1
dum = self.dum[:]
free = self.free[:]
elim = set()
for gposx in gpos:
if gposx in elim:
continue
free1 = [x for x in free if pos_map[x[1]] == gposx]
dum1 = [x for x in dum if pos_map[x[0]] == gposx or pos_map[x[1]] == gposx]
if not dum1:
continue
elim.add(gposx)
# subs with the multiplication neutral element, that is, remove it:
args[gposx] = 1
if len(dum1) == 2:
if not antisym:
dum10, dum11 = dum1
if pos_map[dum10[1]] == gposx:
# the index with pos p0 is contravariant
p0 = dum10[0]
else:
# the index with pos p0 is covariant
p0 = dum10[1]
if pos_map[dum11[1]] == gposx:
# the index with pos p1 is contravariant
p1 = dum11[0]
else:
# the index with pos p1 is covariant
p1 = dum11[1]
dum.append((p0, p1))
else:
dum10, dum11 = dum1
# change the sign to bring the indices of the metric to contravariant
# form; change the sign if dum10 has the metric index in position 0
if pos_map[dum10[1]] == gposx:
# the index with pos p0 is contravariant
p0 = dum10[0]
if dum10[1] == 1:
sign = -sign
else:
# the index with pos p0 is covariant
p0 = dum10[1]
if dum10[0] == 0:
sign = -sign
if pos_map[dum11[1]] == gposx:
# the index with pos p1 is contravariant
p1 = dum11[0]
sign = -sign
else:
# the index with pos p1 is covariant
p1 = dum11[1]
dum.append((p0, p1))
elif len(dum1) == 1:
if not antisym:
dp0, dp1 = dum1[0]
if pos_map[dp0] == pos_map[dp1]:
# g(i, -i)
typ = g.index_types[0]
sign = sign*typ.dim
else:
# g(i0, i1)*p(-i1)
if pos_map[dp0] == gposx:
p1 = dp1
else:
p1 = dp0
ind, p = free1[0]
free.append((ind, p1))
else:
dp0, dp1 = dum1[0]
if pos_map[dp0] == pos_map[dp1]:
# g(i, -i)
typ = g.index_types[0]
sign = sign*typ.dim
if dp0 < dp1:
# g(i, -i) = -D with antisymmetric metric
sign = -sign
else:
# g(i0, i1)*p(-i1)
if pos_map[dp0] == gposx:
p1 = dp1
if dp0 == 0:
sign = -sign
else:
p1 = dp0
ind, p = free1[0]
free.append((ind, p1))
dum = [x for x in dum if x not in dum1]
free = [x for x in free if x not in free1]
# shift positions:
shift = 0
shifts = [0]*len(args)
for i in range(len(args)):
if i in elim:
shift += 2
continue
shifts[i] = shift
free = [(ind, p - shifts[pos_map[p]]) for (ind, p) in free if pos_map[p] not in elim]
dum = [(p0 - shifts[pos_map[p0]], p1 - shifts[pos_map[p1]]) for i, (p0, p1) in enumerate(dum) if pos_map[p0] not in elim and pos_map[p1] not in elim]
res = sign*TensMul(*args).doit()
if not isinstance(res, TensExpr):
return res
im = _IndexStructure.from_components_free_dum(res.components, free, dum)
return res._set_new_index_structure(im)
def _set_new_index_structure(self, im, is_canon_bp=False):
indices = im.get_indices()
return self._set_indices(*indices, is_canon_bp=is_canon_bp)
def _set_indices(self, *indices, is_canon_bp=False, **kw_args):
if len(indices) != self.ext_rank:
raise ValueError("indices length mismatch")
args = list(self.args)[:]
pos = 0
for i, arg in enumerate(args):
if not isinstance(arg, TensExpr):
continue
assert isinstance(arg, Tensor)
ext_rank = arg.ext_rank
args[i] = arg._set_indices(*indices[pos:pos+ext_rank])
pos += ext_rank
return TensMul(*args, is_canon_bp=is_canon_bp).doit()
@staticmethod
def _index_replacement_for_contract_metric(args, free, dum):
for arg in args:
if not isinstance(arg, TensExpr):
continue
assert isinstance(arg, Tensor)
def substitute_indices(self, *index_tuples):
new_args = []
for arg in self.args:
if isinstance(arg, TensExpr):
arg = arg.substitute_indices(*index_tuples)
new_args.append(arg)
return TensMul(*new_args).doit()
def __call__(self, *indices):
deprecate_fun_eval()
free_args = self.free_args
indices = list(indices)
if [x.tensor_index_type for x in indices] != [x.tensor_index_type for x in free_args]:
raise ValueError('incompatible types')
if indices == free_args:
return self
t = self.substitute_indices(*list(zip(free_args, indices)))
# object is rebuilt in order to make sure that all contracted indices
# get recognized as dummies, but only if there are contracted indices.
if len({i if i.is_up else -i for i in indices}) != len(indices):
return t.func(*t.args)
return t
def _extract_data(self, replacement_dict):
args_indices, arrays = zip(*[arg._extract_data(replacement_dict) for arg in self.args if isinstance(arg, TensExpr)])
coeff = reduce(operator.mul, [a for a in self.args if not isinstance(a, TensExpr)], S.One)
indices, free, free_names, dummy_data = TensMul._indices_to_free_dum(args_indices)
dum = TensMul._dummy_data_to_dum(dummy_data)
ext_rank = self.ext_rank
free.sort(key=lambda x: x[1])
free_indices = [i[0] for i in free]
return free_indices, coeff*_TensorDataLazyEvaluator.data_contract_dum(arrays, dum, ext_rank)
@property
def data(self):
deprecate_data()
dat = _tensor_data_substitution_dict[self.expand()]
return dat
@data.setter
def data(self, data):
deprecate_data()
raise ValueError("Not possible to set component data to a tensor expression")
@data.deleter
def data(self):
deprecate_data()
raise ValueError("Not possible to delete component data to a tensor expression")
def __iter__(self):
deprecate_data()
if self.data is None:
raise ValueError("No iteration on abstract tensors")
return self.data.__iter__()
def _eval_rewrite_as_Indexed(self, *args):
from sympy.concrete.summations import Sum
index_symbols = [i.args[0] for i in self.get_indices()]
args = [arg.args[0] if isinstance(arg, Sum) else arg for arg in args]
expr = Mul.fromiter(args)
return self._check_add_Sum(expr, index_symbols)
def _eval_partial_derivative(self, s):
# Evaluation like Mul
terms = []
for i, arg in enumerate(self.args):
# checking whether some tensor instance is differentiated
# or some other thing is necessary, but ugly
if isinstance(arg, TensExpr):
d = arg._eval_partial_derivative(s)
else:
# do not call diff if s is not a symbol
if s._diff_wrt:
d = arg._eval_derivative(s)
else:
d = S.Zero
if d:
terms.append(TensMul.fromiter(self.args[:i] + (d,) + self.args[i + 1:]))
return TensAdd.fromiter(terms)
class TensorElement(TensExpr):
"""
Tensor with evaluated components.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, TensorHead, TensorSymmetry
>>> from sympy import symbols
>>> L = TensorIndexType("L")
>>> i, j, k = symbols("i j k")
>>> A = TensorHead("A", [L, L], TensorSymmetry.fully_symmetric(2))
>>> A(i, j).get_free_indices()
[i, j]
If we want to set component ``i`` to a specific value, use the
``TensorElement`` class:
>>> from sympy.tensor.tensor import TensorElement
>>> te = TensorElement(A(i, j), {i: 2})
As index ``i`` has been accessed (``{i: 2}`` is the evaluation of its 3rd
element), the free indices will only contain ``j``:
>>> te.get_free_indices()
[j]
"""
def __new__(cls, expr, index_map):
if not isinstance(expr, Tensor):
# remap
if not isinstance(expr, TensExpr):
raise TypeError("%s is not a tensor expression" % expr)
return expr.func(*[TensorElement(arg, index_map) for arg in expr.args])
expr_free_indices = expr.get_free_indices()
name_translation = {i.args[0]: i for i in expr_free_indices}
index_map = {name_translation.get(index, index): value for index, value in index_map.items()}
index_map = {index: value for index, value in index_map.items() if index in expr_free_indices}
if len(index_map) == 0:
return expr
free_indices = [i for i in expr_free_indices if i not in index_map.keys()]
index_map = Dict(index_map)
obj = TensExpr.__new__(cls, expr, index_map)
obj._free_indices = free_indices
return obj
@property
def free(self):
return [(index, i) for i, index in enumerate(self.get_free_indices())]
@property
def dum(self):
# TODO: inherit dummies from expr
return []
@property
def expr(self):
return self._args[0]
@property
def index_map(self):
return self._args[1]
@property
def coeff(self):
return S.One
@property
def nocoeff(self):
return self
def get_free_indices(self):
return self._free_indices
def _replace_indices(self, repl): # type: (tDict[TensorIndex, TensorIndex]) -> TensExpr
# TODO: can be improved:
return self.xreplace(repl)
def get_indices(self):
return self.get_free_indices()
def _extract_data(self, replacement_dict):
ret_indices, array = self.expr._extract_data(replacement_dict)
index_map = self.index_map
slice_tuple = tuple(index_map.get(i, slice(None)) for i in ret_indices)
ret_indices = [i for i in ret_indices if i not in index_map]
array = array.__getitem__(slice_tuple)
return ret_indices, array
def canon_bp(p):
"""
Butler-Portugal canonicalization. See ``tensor_can.py`` from the
combinatorics module for the details.
"""
if isinstance(p, TensExpr):
return p.canon_bp()
return p
def tensor_mul(*a):
"""
product of tensors
"""
if not a:
return TensMul.from_data(S.One, [], [], [])
t = a[0]
for tx in a[1:]:
t = t*tx
return t
def riemann_cyclic_replace(t_r):
"""
Replace the Riemann tensor with an equivalent expression
``R(m,n,p,q) -> 2/3*R(m,n,p,q) - 1/3*R(m,q,n,p) + 1/3*R(m,p,n,q)``
"""
free = sorted(t_r.free, key=lambda x: x[1])
m, n, p, q = [x[0] for x in free]
t0 = t_r*Rational(2, 3)
t1 = -t_r.substitute_indices((m,m),(n,q),(p,n),(q,p))*Rational(1, 3)
t2 = t_r.substitute_indices((m,m),(n,p),(p,n),(q,q))*Rational(1, 3)
t3 = t0 + t1 + t2
return t3
def riemann_cyclic(t2):
"""
Replace each Riemann tensor with an equivalent expression
satisfying the cyclic identity.
This trick is discussed in the reference guide to Cadabra.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, riemann_cyclic, TensorSymmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz)
>>> R = TensorHead('R', [Lorentz]*4, TensorSymmetry.riemann())
>>> t = R(i,j,k,l)*(R(-i,-j,-k,-l) - 2*R(-i,-k,-j,-l))
>>> riemann_cyclic(t)
0
"""
t2 = t2.expand()
if isinstance(t2, (TensMul, Tensor)):
args = [t2]
else:
args = t2.args
a1 = [x.split() for x in args]
a2 = [[riemann_cyclic_replace(tx) for tx in y] for y in a1]
a3 = [tensor_mul(*v) for v in a2]
t3 = TensAdd(*a3).doit()
if not t3:
return t3
else:
return canon_bp(t3)
def get_lines(ex, index_type):
"""
Returns ``(lines, traces, rest)`` for an index type,
where ``lines`` is the list of lists of positions of a matrix line,
``traces`` is the list of lists of traced matrix lines,
``rest`` is the rest of the elements of the tensor.
"""
def _join_lines(a):
i = 0
while i < len(a):
x = a[i]
xend = x[-1]
xstart = x[0]
hit = True
while hit:
hit = False
for j in range(i + 1, len(a)):
if j >= len(a):
break
if a[j][0] == xend:
hit = True
x.extend(a[j][1:])
xend = x[-1]
a.pop(j)
continue
if a[j][0] == xstart:
hit = True
a[i] = list(reversed(a[j][1:])) + x  # materialize the iterator before concatenating
x = a[i]
xstart = a[i][0]
a.pop(j)
continue
if a[j][-1] == xend:
hit = True
x.extend(reversed(a[j][:-1]))
xend = x[-1]
a.pop(j)
continue
if a[j][-1] == xstart:
hit = True
a[i] = a[j][:-1] + x
x = a[i]
xstart = x[0]
a.pop(j)
continue
i += 1
return a
arguments = ex.args
dt = {}
for c in ex.args:
if not isinstance(c, TensExpr):
continue
if c in dt:
continue
index_types = c.index_types
a = []
for i in range(len(index_types)):
if index_types[i] is index_type:
a.append(i)
if len(a) > 2:
raise ValueError('at most two indices of type %s allowed' % index_type)
if len(a) == 2:
dt[c] = a
#dum = ex.dum
lines = []
traces = []
traces1 = []
#indices_to_args_pos = ex._get_indices_to_args_pos()
# TODO: add a dum_to_components_map ?
for p0, p1, c0, c1 in ex.dum_in_args:
if arguments[c0] not in dt:
continue
if c0 == c1:
traces.append([c0])
continue
ta0 = dt[arguments[c0]]
ta1 = dt[arguments[c1]]
if p0 not in ta0:
continue
if ta0.index(p0) == ta1.index(p1):
# case gamma(i,s0,-s1) in c0, gamma(j,-s0,s2) in c1;
# to deal with this case one could add to the position
# a flag for transposition;
# one could write [(c0, False), (c1, True)]
raise NotImplementedError
# if p0 == ta0[1] then G in pos c0 is mult on the right by G in c1
# if p0 == ta0[0] then G in pos c1 is mult on the right by G in c0
ta0 = dt[arguments[c0]]
b0, b1 = (c0, c1) if p0 == ta0[1] else (c1, c0)
lines1 = lines[:]
for line in lines:
if line[-1] == b0:
if line[0] == b1:
n = line.index(min(line))
traces1.append(line)
traces.append(line[n:] + line[:n])
else:
line.append(b1)
break
elif line[0] == b1:
line.insert(0, b0)
break
else:
lines1.append([b0, b1])
lines = [x for x in lines1 if x not in traces1]
lines = _join_lines(lines)
rest = []
for line in lines:
for y in line:
rest.append(y)
for line in traces:
for y in line:
rest.append(y)
rest = [x for x in range(len(arguments)) if x not in rest]
return lines, traces, rest
def get_free_indices(t):
if not isinstance(t, TensExpr):
return ()
return t.get_free_indices()
def get_indices(t):
if not isinstance(t, TensExpr):
return ()
return t.get_indices()
def get_index_structure(t):
if isinstance(t, TensExpr):
return t._index_structure
return _IndexStructure([], [], [], [])
def get_coeff(t):
if isinstance(t, Tensor):
return S.One
if isinstance(t, TensMul):
return t.coeff
if isinstance(t, TensExpr):
raise ValueError("no coefficient associated to this tensor expression")
return t
def contract_metric(t, g):
if isinstance(t, TensExpr):
return t.contract_metric(g)
return t
def perm2tensor(t, g, is_canon_bp=False):
"""
Returns the tensor corresponding to the permutation ``g``
For further details, see the method in ``TIDS`` with the same name.
"""
if not isinstance(t, TensExpr):
return t
elif isinstance(t, (Tensor, TensMul)):
nim = get_index_structure(t).perm2tensor(g, is_canon_bp=is_canon_bp)
res = t._set_new_index_structure(nim, is_canon_bp=is_canon_bp)
if g[-1] != len(g) - 1:
return -res
return res
raise NotImplementedError()
def substitute_indices(t, *index_tuples):
if not isinstance(t, TensExpr):
return t
return t.substitute_indices(*index_tuples)
def _expand(expr, **kwargs):
if isinstance(expr, TensExpr):
return expr._expand(**kwargs)
else:
return expr.expand(**kwargs)
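# A minimal usage sketch for the module-level helpers above; it mirrors the
# doctest in ``TensMul.contract_metric`` and is wrapped in a function so that
# importing the module does not execute it.
def _contract_metric_example():
    Lorentz = TensorIndexType('Lorentz', dummy_name='L')
    m0, m1 = tensor_indices('m0,m1', Lorentz)
    g = Lorentz.metric
    p, q = tensor_heads('p,q', [Lorentz])
    t = p(m0)*q(m1)*g(-m0, -m1)
    # contract_metric removes the metric by raising/lowering the contracted
    # indices; canon_bp then puts the product in canonical form, giving
    # p(L_0)*q(-L_0) as in the docstring example above.
    return contract_metric(t, g).canon_bp()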
|
bdd29b85838a9f297d2aa45fac9bd4e387ed782705fc3ffcd83d76fe8f5cfbd5 | from collections.abc import Iterable
from functools import singledispatch
from sympy.core.expr import Expr
from sympy.core.mul import Mul
from sympy.core.singleton import S
from sympy.core.sympify import sympify
from sympy.core.parameters import global_parameters
class TensorProduct(Expr):
"""
Generic class for tensor products.
"""
is_number = False
def __new__(cls, *args, **kwargs):
from sympy.tensor.array import NDimArray, tensorproduct, Array
from sympy.matrices.expressions.matexpr import MatrixExpr
from sympy.matrices.matrices import MatrixBase
from sympy.strategies import flatten
args = [sympify(arg) for arg in args]
evaluate = kwargs.get("evaluate", global_parameters.evaluate)
if not evaluate:
obj = Expr.__new__(cls, *args)
return obj
arrays = []
other = []
scalar = S.One
for arg in args:
if isinstance(arg, (Iterable, MatrixBase, NDimArray)):
arrays.append(Array(arg))
elif isinstance(arg, (MatrixExpr,)):
other.append(arg)
else:
scalar *= arg
coeff = scalar*tensorproduct(*arrays)
if len(other) == 0:
return coeff
if coeff != 1:
newargs = [coeff] + other
else:
newargs = other
obj = Expr.__new__(cls, *newargs, **kwargs)
return flatten(obj)
def rank(self):
return len(self.shape)
def _get_args_shapes(self):
from sympy.tensor.array import Array
return [i.shape if hasattr(i, "shape") else Array(i).shape for i in self.args]
@property
def shape(self):
shape_list = self._get_args_shapes()
return sum(shape_list, ())
def __getitem__(self, index):
index = iter(index)
return Mul.fromiter(
arg.__getitem__(tuple(next(index) for i in shp))
for arg, shp in zip(self.args, self._get_args_shapes())
)
@singledispatch
def shape(expr):
"""
Return the shape of *expr* as a tuple. *expr* should represent a
suitable object such as a matrix or an array.
Parameters
==========
expr : SymPy object having ``MatrixKind`` or ``ArrayKind``.
Raises
======
NoShapeError : Raised when object with wrong kind is passed.
Examples
========
This function returns the shape of any object representing matrix or array.
>>> from sympy import shape, Array, Matrix, Integral
>>> from sympy.abc import x
>>> A = Array([1, 2])
>>> shape(A)
(2,)
>>> shape(Integral(A, x))
(2,)
>>> M = Matrix([1, 2])
>>> shape(M)
(2, 1)
>>> shape(Integral(M, x))
(2, 1)
You can support a new type by dispatching.
>>> from sympy import Expr
>>> class NewExpr(Expr):
... pass
>>> @shape.register(NewExpr)
... def _(expr):
... return shape(expr.args[0])
>>> shape(NewExpr(M))
(2, 1)
If an unsuitable expression is passed, ``NoShapeError()`` will be raised.
>>> shape(Integral(x, x))
Traceback (most recent call last):
...
sympy.tensor.functions.NoShapeError: shape() called on non-array object: Integral(x, x)
Notes
=====
Array-like classes (such as ``Matrix`` or ``NDimArray``) have a ``shape``
property which returns their shape, but it cannot be used for non-array
classes containing array. This function returns the shape of any
registered object representing array.
"""
if hasattr(expr, "shape"):
return expr.shape
raise NoShapeError(
"%s does not have shape, or its type is not registered to shape()." % expr)
class NoShapeError(Exception):
"""
Raised when ``shape()`` is called on non-array object.
This error can be imported from ``sympy.tensor.functions``.
Examples
========
>>> from sympy import shape
>>> from sympy.abc import x
>>> shape(x)
Traceback (most recent call last):
...
sympy.tensor.functions.NoShapeError: shape() called on non-array object: x
"""
pass
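# Illustrative sketch (not part of the public API): a TensorProduct whose
# factors are all explicit arrays collapses to an Array, and the dispatched
# shape() function above reports the combined dimensions of the factors.
def _tensor_product_shape_example():
    from sympy.tensor.array import Array
    a = Array([1, 2])            # shape (2,)
    b = Array([[1, 0], [0, 1]])  # shape (2, 2)
    tp = TensorProduct(a, b)     # evaluates to an Array of shape (2, 2, 2)
    return shape(tp)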
|
560784baa03cb7c462cddd943554eea60faa69d0768c9793094e87438646c45a | r"""Module that defines indexed objects
The classes ``IndexedBase``, ``Indexed``, and ``Idx`` represent a
matrix element ``M[i, j]`` as in the following diagram::
   1) The Indexed class represents the entire indexed object.
              |
           ___|___
          '       '
           M[i, j]
          /   \__\______
          |             |
          |             |
          |     2) The Idx class represents indices; each Idx can
          |        optionally contain information about its range.
          |
    3) IndexedBase represents the 'stem' of an indexed object, here `M`.
       The stem used by itself is usually taken to represent the entire
       array.
There can be any number of indices on an Indexed object. No
transformation properties are implemented in these Base objects, but
implicit contraction of repeated indices is supported.
Note that the support for complicated (i.e. non-atomic) integer
expressions as indices is limited. (This should be improved in
future releases.)
Examples
========
To express the above matrix element example you would write:
>>> from sympy import symbols, IndexedBase, Idx
>>> M = IndexedBase('M')
>>> i, j = symbols('i j', cls=Idx)
>>> M[i, j]
M[i, j]
Repeated indices in a product implies a summation, so to express a
matrix-vector product in terms of Indexed objects:
>>> x = IndexedBase('x')
>>> M[i, j]*x[j]
M[i, j]*x[j]
If the indexed objects will be converted to component based arrays, e.g.
with the code printers or the autowrap framework, you also need to provide
(symbolic or numerical) dimensions. This can be done by passing an
optional shape parameter to IndexedBase upon construction:
>>> dim1, dim2 = symbols('dim1 dim2', integer=True)
>>> A = IndexedBase('A', shape=(dim1, 2*dim1, dim2))
>>> A.shape
(dim1, 2*dim1, dim2)
>>> A[i, j, 3].shape
(dim1, 2*dim1, dim2)
If an IndexedBase object has no shape information, it is assumed that the
array is as large as the ranges of its indices:
>>> n, m = symbols('n m', integer=True)
>>> i = Idx('i', m)
>>> j = Idx('j', n)
>>> M[i, j].shape
(m, n)
>>> M[i, j].ranges
[(0, m - 1), (0, n - 1)]
The above can be compared with the following:
>>> A[i, 2, j].shape
(dim1, 2*dim1, dim2)
>>> A[i, 2, j].ranges
[(0, m - 1), None, (0, n - 1)]
To analyze the structure of indexed expressions, you can use the methods
get_indices() and get_contraction_structure():
>>> from sympy.tensor import get_indices, get_contraction_structure
>>> get_indices(A[i, j, j])
({i}, {})
>>> get_contraction_structure(A[i, j, j])
{(j,): {A[i, j, j]}}
See the appropriate docstrings for a detailed explanation of the output.
"""
# TODO: (some ideas for improvement)
#
# o test and guarantee numpy compatibility
# - implement full support for broadcasting
# - strided arrays
#
# o more functions to analyze indexed expressions
# - identify standard constructs, e.g matrix-vector product in a subexpression
#
# o functions to generate component based arrays (numpy and sympy.Matrix)
# - generate a single array directly from Indexed
# - convert simple sub-expressions
#
# o sophisticated indexing (possibly in subclasses to preserve simplicity)
# - Idx with range smaller than dimension of Indexed
# - Idx with stepsize != 1
# - Idx with step determined by function call
from collections.abc import Iterable
from sympy.core.numbers import Number
from sympy.core.assumptions import StdFactKB
from sympy.core import Expr, Tuple, sympify, S
from sympy.core.symbol import _filter_assumptions, Symbol
from sympy.core.logic import fuzzy_bool, fuzzy_not
from sympy.core.sympify import _sympify
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.multipledispatch import dispatch
from sympy.utilities.iterables import is_sequence, NotIterable
from sympy.utilities.misc import filldedent
class IndexException(Exception):
pass
class Indexed(Expr):
"""Represents a mathematical object with indices.
>>> from sympy import Indexed, IndexedBase, Idx, symbols
>>> i, j = symbols('i j', cls=Idx)
>>> Indexed('A', i, j)
A[i, j]
It is recommended that ``Indexed`` objects be created by indexing ``IndexedBase``:
``IndexedBase('A')[i, j]`` instead of ``Indexed(IndexedBase('A'), i, j)``.
>>> A = IndexedBase('A')
>>> a_ij = A[i, j] # Prefer this,
>>> b_ij = Indexed(A, i, j) # over this.
>>> a_ij == b_ij
True
"""
is_commutative = True
is_Indexed = True
is_symbol = True
is_Atom = True
def __new__(cls, base, *args, **kw_args):
from sympy.tensor.array.ndim_array import NDimArray
from sympy.matrices.matrices import MatrixBase
if not args:
raise IndexException("Indexed needs at least one index.")
if isinstance(base, (str, Symbol)):
base = IndexedBase(base)
elif not hasattr(base, '__getitem__') and not isinstance(base, IndexedBase):
raise TypeError(filldedent("""
The base can only be replaced with a string, Symbol,
IndexedBase or an object with a method for getting
items (i.e. an object with a `__getitem__` method).
"""))
args = list(map(sympify, args))
if isinstance(base, (NDimArray, Iterable, Tuple, MatrixBase)) and all(i.is_number for i in args):
if len(args) == 1:
return base[args[0]]
else:
return base[args]
obj = Expr.__new__(cls, base, *args, **kw_args)
try:
IndexedBase._set_assumptions(obj, base.assumptions0)
except AttributeError:
IndexedBase._set_assumptions(obj, {})
return obj
def _hashable_content(self):
return super()._hashable_content() + tuple(sorted(self.assumptions0.items()))
@property
def name(self):
return str(self)
@property
def _diff_wrt(self):
"""Allow derivatives with respect to an ``Indexed`` object."""
return True
def _eval_derivative(self, wrt):
from sympy.tensor.array.ndim_array import NDimArray
if isinstance(wrt, Indexed) and wrt.base == self.base:
if len(self.indices) != len(wrt.indices):
msg = "Different # of indices: d({!s})/d({!s})".format(self,
wrt)
raise IndexException(msg)
result = S.One
for index1, index2 in zip(self.indices, wrt.indices):
result *= KroneckerDelta(index1, index2)
return result
elif isinstance(self.base, NDimArray):
from sympy.tensor.array import derive_by_array
return Indexed(derive_by_array(self.base, wrt), *self.args[1:])
else:
if Tuple(self.indices).has(wrt):
return S.NaN
return S.Zero
@property
def assumptions0(self):
return {k: v for k, v in self._assumptions.items() if v is not None}
@property
def base(self):
"""Returns the ``IndexedBase`` of the ``Indexed`` object.
Examples
========
>>> from sympy import Indexed, IndexedBase, Idx, symbols
>>> i, j = symbols('i j', cls=Idx)
>>> Indexed('A', i, j).base
A
>>> B = IndexedBase('B')
>>> B == B[i, j].base
True
"""
return self.args[0]
@property
def indices(self):
"""
Returns the indices of the ``Indexed`` object.
Examples
========
>>> from sympy import Indexed, Idx, symbols
>>> i, j = symbols('i j', cls=Idx)
>>> Indexed('A', i, j).indices
(i, j)
"""
return self.args[1:]
@property
def rank(self):
"""
Returns the rank of the ``Indexed`` object.
Examples
========
>>> from sympy import Indexed, Idx, symbols
>>> i, j, k, l, m = symbols('i:m', cls=Idx)
>>> Indexed('A', i, j).rank
2
>>> q = Indexed('A', i, j, k, l, m)
>>> q.rank
5
>>> q.rank == len(q.indices)
True
"""
return len(self.args) - 1
@property
def shape(self):
"""Returns a list with dimensions of each index.
Dimensions are a property of the array, not of the indices. Still, if
the ``IndexedBase`` does not define a shape attribute, it is assumed
that the ranges of the indices correspond to the shape of the array.
>>> from sympy import IndexedBase, Idx, symbols
>>> n, m = symbols('n m', integer=True)
>>> i = Idx('i', m)
>>> j = Idx('j', m)
>>> A = IndexedBase('A', shape=(n, n))
>>> B = IndexedBase('B')
>>> A[i, j].shape
(n, n)
>>> B[i, j].shape
(m, m)
"""
if self.base.shape:
return self.base.shape
sizes = []
for i in self.indices:
upper = getattr(i, 'upper', None)
lower = getattr(i, 'lower', None)
if None in (upper, lower):
raise IndexException(filldedent("""
Range is not defined for all indices in: %s""" % self))
try:
size = upper - lower + 1
except TypeError:
raise IndexException(filldedent("""
Shape cannot be inferred from Idx with
undefined range: %s""" % self))
sizes.append(size)
return Tuple(*sizes)
@property
def ranges(self):
"""Returns a list of tuples with lower and upper range of each index.
If an index does not define the data members upper and lower, the
corresponding slot in the list contains ``None`` instead of a tuple.
Examples
========
>>> from sympy import Indexed, Idx, symbols
>>> Indexed('A', Idx('i', 2), Idx('j', 4), Idx('k', 8)).ranges
[(0, 1), (0, 3), (0, 7)]
>>> Indexed('A', Idx('i', 3), Idx('j', 3), Idx('k', 3)).ranges
[(0, 2), (0, 2), (0, 2)]
>>> x, y, z = symbols('x y z', integer=True)
>>> Indexed('A', x, y, z).ranges
[None, None, None]
"""
ranges = []
for i in self.indices:
sentinel = object()
upper = getattr(i, 'upper', sentinel)
lower = getattr(i, 'lower', sentinel)
if sentinel not in (upper, lower):
ranges.append(Tuple(lower, upper))
else:
ranges.append(None)
return ranges
def _sympystr(self, p):
indices = list(map(p.doprint, self.indices))
return "%s[%s]" % (p.doprint(self.base), ", ".join(indices))
@property
def free_symbols(self):
base_free_symbols = self.base.free_symbols
indices_free_symbols = {
fs for i in self.indices for fs in i.free_symbols}
if base_free_symbols:
return {self} | base_free_symbols | indices_free_symbols
else:
return indices_free_symbols
@property
def expr_free_symbols(self):
from sympy.utilities.exceptions import SymPyDeprecationWarning
SymPyDeprecationWarning(feature="expr_free_symbols method",
issue=21494,
deprecated_since_version="1.9").warn()
return {self}
class IndexedBase(Expr, NotIterable):
"""Represent the base or stem of an indexed object
The IndexedBase class represents an array that contains elements. The main purpose
of this class is to allow the convenient creation of objects of the Indexed
class. The __getitem__ method of IndexedBase returns an instance of
Indexed. Alone, without indices, the IndexedBase class can be used as a
notation for e.g. matrix equations, resembling what you could do with the
Symbol class. But, the IndexedBase class adds functionality that is not
available for Symbol instances:
- An IndexedBase object can optionally store shape information. This can
be used to check array conformance and conditions for numpy
broadcasting. (TODO)
- An IndexedBase object implements syntactic sugar that allows easy symbolic
representation of array operations, using implicit summation of
repeated indices.
- The IndexedBase object symbolizes a mathematical structure equivalent
to arrays, and is recognized as such for code generation and automatic
compilation and wrapping.
>>> from sympy.tensor import IndexedBase, Idx
>>> from sympy import symbols
>>> A = IndexedBase('A'); A
A
>>> type(A)
<class 'sympy.tensor.indexed.IndexedBase'>
When an IndexedBase object receives indices, it returns an array with named
axes, represented by an Indexed object:
>>> i, j = symbols('i j', integer=True)
>>> A[i, j, 2]
A[i, j, 2]
>>> type(A[i, j, 2])
<class 'sympy.tensor.indexed.Indexed'>
The IndexedBase constructor takes an optional shape argument. If given,
it overrides any shape information in the indices. (But not the index
ranges!)
>>> m, n, o, p = symbols('m n o p', integer=True)
>>> i = Idx('i', m)
>>> j = Idx('j', n)
>>> A[i, j].shape
(m, n)
>>> B = IndexedBase('B', shape=(o, p))
>>> B[i, j].shape
(o, p)
Assumptions can be specified with keyword arguments the same way as for Symbol:
>>> A_real = IndexedBase('A', real=True)
>>> A_real.is_real
True
>>> A != A_real
True
Assumptions can also be inherited if a Symbol is used to initialize the IndexedBase:
>>> I = symbols('I', integer=True)
>>> C_inherit = IndexedBase(I)
>>> C_explicit = IndexedBase('I', integer=True)
>>> C_inherit == C_explicit
True
"""
is_commutative = True
is_symbol = True
is_Atom = True
@staticmethod
def _set_assumptions(obj, assumptions):
"""Set assumptions on obj, making sure to apply consistent values."""
tmp_asm_copy = assumptions.copy()
is_commutative = fuzzy_bool(assumptions.get('commutative', True))
assumptions['commutative'] = is_commutative
obj._assumptions = StdFactKB(assumptions)
obj._assumptions._generator = tmp_asm_copy # Issue #8873
def __new__(cls, label, shape=None, *, offset=S.Zero, strides=None, **kw_args):
from sympy.matrices.matrices import MatrixBase
from sympy.tensor.array.ndim_array import NDimArray
assumptions, kw_args = _filter_assumptions(kw_args)
if isinstance(label, str):
label = Symbol(label, **assumptions)
elif isinstance(label, Symbol):
assumptions = label._merge(assumptions)
elif isinstance(label, (MatrixBase, NDimArray)):
return label
elif isinstance(label, Iterable):
return _sympify(label)
else:
label = _sympify(label)
if is_sequence(shape):
shape = Tuple(*shape)
elif shape is not None:
shape = Tuple(shape)
if shape is not None:
obj = Expr.__new__(cls, label, shape)
else:
obj = Expr.__new__(cls, label)
obj._shape = shape
obj._offset = offset
obj._strides = strides
obj._name = str(label)
IndexedBase._set_assumptions(obj, assumptions)
return obj
@property
def name(self):
return self._name
def _hashable_content(self):
return super()._hashable_content() + tuple(sorted(self.assumptions0.items()))
@property
def assumptions0(self):
return {k: v for k, v in self._assumptions.items() if v is not None}
def __getitem__(self, indices, **kw_args):
if is_sequence(indices):
# Special case needed because M[*my_tuple] is a syntax error.
if self.shape and len(self.shape) != len(indices):
raise IndexException("Rank mismatch.")
return Indexed(self, *indices, **kw_args)
else:
if self.shape and len(self.shape) != 1:
raise IndexException("Rank mismatch.")
return Indexed(self, indices, **kw_args)
@property
def shape(self):
"""Returns the shape of the ``IndexedBase`` object.
Examples
========
>>> from sympy import IndexedBase, Idx
>>> from sympy.abc import x, y
>>> IndexedBase('A', shape=(x, y)).shape
(x, y)
Note: If the shape of the ``IndexedBase`` is specified, it will override
any shape information given by the indices.
>>> A = IndexedBase('A', shape=(x, y))
>>> B = IndexedBase('B')
>>> i = Idx('i', 2)
>>> j = Idx('j', 1)
>>> A[i, j].shape
(x, y)
>>> B[i, j].shape
(2, 1)
"""
return self._shape
@property
def strides(self):
"""Returns the strided scheme for the ``IndexedBase`` object.
Normally this is a tuple denoting the number of
steps to take in the respective dimension when traversing
an array. For code generation purposes strides='C' and
strides='F' can also be used.
strides='C' means that the code printer unrolls
in row-major order, while 'F' means it unrolls in column-major
order.
"""
return self._strides
@property
def offset(self):
"""Returns the offset for the ``IndexedBase`` object.
This is the value added to the resulting index when the
2D Indexed object is unrolled to a 1D form. Used in code
generation.
Examples
========
>>> from sympy.printing import ccode
>>> from sympy.tensor import IndexedBase, Idx
>>> from sympy import symbols
>>> l, m, n, o = symbols('l m n o', integer=True)
>>> A = IndexedBase('A', strides=(l, m, n), offset=o)
>>> i, j, k = map(Idx, 'ijk')
>>> ccode(A[i, j, k])
'A[l*i + m*j + n*k + o]'
"""
return self._offset
@property
def label(self):
"""Returns the label of the ``IndexedBase`` object.
Examples
========
>>> from sympy import IndexedBase
>>> from sympy.abc import x, y
>>> IndexedBase('A', shape=(x, y)).label
A
"""
return self.args[0]
def _sympystr(self, p):
return p.doprint(self.label)
class Idx(Expr):
"""Represents an integer index as an ``Integer`` or integer expression.
There are a number of ways to create an ``Idx`` object. The constructor
takes two arguments:
``label``
An integer or a symbol that labels the index.
``range``
Optionally you can specify a range as either
* ``Symbol`` or integer: This is interpreted as a dimension. Lower and
upper bounds are set to ``0`` and ``range - 1``, respectively.
* ``tuple``: The two elements are interpreted as the lower and upper
bounds of the range, respectively.
Note: bounds of the range are assumed to be either integer or infinite (oo
and -oo are allowed to specify an unbounded range). If ``n`` is given as a
bound, then ``n.is_integer`` must not return false.
For convenience, if the label is given as a string it is automatically
converted to an integer symbol. (Note: this conversion is not done for
range or dimension arguments.)
Examples
========
>>> from sympy import Idx, symbols, oo
>>> n, i, L, U = symbols('n i L U', integer=True)
If a string is given for the label an integer ``Symbol`` is created and the
bounds are both ``None``:
>>> idx = Idx('qwerty'); idx
qwerty
>>> idx.lower, idx.upper
(None, None)
Both upper and lower bounds can be specified:
>>> idx = Idx(i, (L, U)); idx
i
>>> idx.lower, idx.upper
(L, U)
When only a single bound is given it is interpreted as the dimension
and the lower bound defaults to 0:
>>> idx = Idx(i, n); idx.lower, idx.upper
(0, n - 1)
>>> idx = Idx(i, 4); idx.lower, idx.upper
(0, 3)
>>> idx = Idx(i, oo); idx.lower, idx.upper
(0, oo)
"""
is_integer = True
is_finite = True
is_real = True
is_symbol = True
is_Atom = True
_diff_wrt = True
def __new__(cls, label, range=None, **kw_args):
if isinstance(label, str):
label = Symbol(label, integer=True)
label, range = list(map(sympify, (label, range)))
if label.is_Number:
if not label.is_integer:
raise TypeError("Index is not an integer number.")
return label
if not label.is_integer:
raise TypeError("Idx object requires an integer label.")
elif is_sequence(range):
if len(range) != 2:
raise ValueError(filldedent("""
Idx range tuple must have length 2, but got %s""" % len(range)))
for bound in range:
if (bound.is_integer is False and bound is not S.Infinity
and bound is not S.NegativeInfinity):
raise TypeError("Idx object requires integer bounds.")
args = label, Tuple(*range)
elif isinstance(range, Expr):
if range is not S.Infinity and fuzzy_not(range.is_integer):
raise TypeError("Idx object requires an integer dimension.")
args = label, Tuple(0, range - 1)
elif range:
raise TypeError(filldedent("""
The range must be an ordered iterable or
integer SymPy expression."""))
else:
args = label,
obj = Expr.__new__(cls, *args, **kw_args)
obj._assumptions["finite"] = True
obj._assumptions["real"] = True
return obj
@property
def label(self):
"""Returns the label (Integer or integer expression) of the Idx object.
Examples
========
>>> from sympy import Idx, Symbol
>>> x = Symbol('x', integer=True)
>>> Idx(x).label
x
>>> j = Symbol('j', integer=True)
>>> Idx(j).label
j
>>> Idx(j + 1).label
j + 1
"""
return self.args[0]
@property
def lower(self):
"""Returns the lower bound of the ``Idx``.
Examples
========
>>> from sympy import Idx
>>> Idx('j', 2).lower
0
>>> Idx('j', 5).lower
0
>>> Idx('j').lower is None
True
"""
try:
return self.args[1][0]
except IndexError:
return
@property
def upper(self):
"""Returns the upper bound of the ``Idx``.
Examples
========
>>> from sympy import Idx
>>> Idx('j', 2).upper
1
>>> Idx('j', 5).upper
4
>>> Idx('j').upper is None
True
"""
try:
return self.args[1][1]
except IndexError:
return
def _sympystr(self, p):
return p.doprint(self.label)
@property
def name(self):
return self.label.name if self.label.is_Symbol else str(self.label)
@property
def free_symbols(self):
return {self}
@dispatch(Idx, Idx)
def _eval_is_ge(lhs, rhs): # noqa:F811
other_upper = rhs if rhs.upper is None else rhs.upper
other_lower = rhs if rhs.lower is None else rhs.lower
if lhs.lower is not None and (lhs.lower >= other_upper) == True:
return True
if lhs.upper is not None and (lhs.upper < other_lower) == True:
return False
return None
@dispatch(Idx, Number) # type:ignore
def _eval_is_ge(lhs, rhs): # noqa:F811
other_upper = rhs
other_lower = rhs
if lhs.lower is not None and (lhs.lower >= other_upper) == True:
return True
if lhs.upper is not None and (lhs.upper < other_lower) == True:
return False
return None
@dispatch(Number, Idx) # type:ignore
def _eval_is_ge(lhs, rhs): # noqa:F811
other_upper = lhs
other_lower = lhs
if rhs.upper is not None and (rhs.upper <= other_lower) == True:
return True
if rhs.lower is not None and (rhs.lower > other_upper) == True:
return False
return None
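# Illustrative sketch of the dispatched comparisons above: an Idx with known
# bounds can be compared against plain numbers, and inequalities that are
# decided by the bounds are expected to resolve to the Boolean singletons.
def _idx_comparison_example():
    i = Idx('i', (3, 5))         # lower bound 3, upper bound 5
    return (i >= 2), (i >= 7)    # expected: (True, False)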
|
03455d39a5425c2374dd648d9e63a446239ccafe3e2b2a6511e8882ad76ed197 | from typing import Dict as tDict, Any
import inspect
from .dispatcher import Dispatcher, MethodDispatcher, ambiguity_warn
# XXX: This parameter to dispatch isn't documented and isn't used anywhere in
# sympy. Maybe it should just be removed.
global_namespace = dict() # type: tDict[str, Any]
def dispatch(*types, namespace=global_namespace, on_ambiguity=ambiguity_warn):
""" Dispatch function on the types of the inputs
Supports dispatch on all non-keyword arguments.
Collects implementations based on the function name. Ignores namespaces.
If ambiguous type signatures occur, a warning is raised when the function is
defined, suggesting the additional method needed to break the ambiguity.
Examples
--------
>>> from sympy.multipledispatch import dispatch
>>> @dispatch(int)
... def f(x):
... return x + 1
>>> @dispatch(float)
... def f(x): # noqa: F811
... return x - 1
>>> f(3)
4
>>> f(3.0)
2.0
Specify an isolated namespace with the namespace keyword argument
>>> my_namespace = dict()
>>> @dispatch(int, namespace=my_namespace)
... def foo(x):
... return x + 1
Dispatch on instance methods within classes
>>> class MyClass(object):
... @dispatch(list)
... def __init__(self, data):
... self.data = data
... @dispatch(int)
... def __init__(self, datum): # noqa: F811
... self.data = [datum]
"""
types = tuple(types)
def _(func):
name = func.__name__
if ismethod(func):
dispatcher = inspect.currentframe().f_back.f_locals.get(
name,
MethodDispatcher(name))
else:
if name not in namespace:
namespace[name] = Dispatcher(name)
dispatcher = namespace[name]
dispatcher.add(types, func, on_ambiguity=on_ambiguity)
return dispatcher
return _
def ismethod(func):
""" Is func a method?
Note that this has to work as the method is defined but before the class is
defined. At this stage methods look like functions.
"""
signature = inspect.signature(func)
return signature.parameters.get('self', None) is not None
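# Caveat worth noting: ismethod() keys off a parameter literally named
# ``self``, so a plain function whose first argument happens to be called
# ``self`` will also be routed to a MethodDispatcher.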
|
0c221eda913ae3429ec74811e05a7d8c48fb894811a34b57f10b13bf9a80965e | from typing import Set as tSet
from warnings import warn
import inspect
from .conflict import ordering, ambiguities, super_signature, AmbiguityWarning
from .utils import expand_tuples
import itertools as itl
class MDNotImplementedError(NotImplementedError):
""" A NotImplementedError for multiple dispatch """
### Functions for on_ambiguity
def ambiguity_warn(dispatcher, ambiguities):
""" Raise warning when ambiguity is detected
Parameters
----------
dispatcher : Dispatcher
The dispatcher on which the ambiguity was detected
ambiguities : set
Set of type signature pairs that are ambiguous within this dispatcher
See Also:
Dispatcher.add
warning_text
"""
warn(warning_text(dispatcher.name, ambiguities), AmbiguityWarning)
class RaiseNotImplementedError:
"""Raise ``NotImplementedError`` when called."""
def __init__(self, dispatcher):
self.dispatcher = dispatcher
def __call__(self, *args, **kwargs):
types = tuple(type(a) for a in args)
raise NotImplementedError(
"Ambiguous signature for %s: <%s>" % (
self.dispatcher.name, str_signature(types)
))
def ambiguity_register_error_ignore_dup(dispatcher, ambiguities):
"""
If the super signature of the ambiguous types consists of duplicate types, ignore it.
Otherwise, register an instance of ``RaiseNotImplementedError`` for the ambiguous types.
Parameters
----------
dispatcher : Dispatcher
The dispatcher on which the ambiguity was detected
ambiguities : set
Set of type signature pairs that are ambiguous within this dispatcher
See Also:
Dispatcher.add
ambiguity_warn
"""
for amb in ambiguities:
signature = tuple(super_signature(amb))
if len(set(signature)) == 1:
continue
dispatcher.add(
signature, RaiseNotImplementedError(dispatcher),
on_ambiguity=ambiguity_register_error_ignore_dup
)
###
_unresolved_dispatchers = set() # type: tSet[Dispatcher]
_resolve = [True]
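# halt_ordering()/restart_ordering() let callers batch many Dispatcher.add()
# calls: while _resolve[0] is False the (comparatively expensive) reorder step
# is skipped and the affected dispatchers are queued in
# _unresolved_dispatchers, then replayed when ordering is restarted.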
def halt_ordering():
_resolve[0] = False
def restart_ordering(on_ambiguity=ambiguity_warn):
_resolve[0] = True
while _unresolved_dispatchers:
dispatcher = _unresolved_dispatchers.pop()
dispatcher.reorder(on_ambiguity=on_ambiguity)
class Dispatcher:
""" Dispatch methods based on type signature
Use ``dispatch`` to add implementations
Examples
--------
>>> from sympy.multipledispatch import dispatch
>>> @dispatch(int)
... def f(x):
... return x + 1
>>> @dispatch(float)
... def f(x): # noqa: F811
... return x - 1
>>> f(3)
4
>>> f(3.0)
2.0
"""
__slots__ = '__name__', 'name', 'funcs', 'ordering', '_cache', 'doc'
def __init__(self, name, doc=None):
self.name = self.__name__ = name
self.funcs = dict()
self._cache = dict()
self.ordering = []
self.doc = doc
def register(self, *types, **kwargs):
""" Register dispatcher with new implementation
>>> from sympy.multipledispatch.dispatcher import Dispatcher
>>> f = Dispatcher('f')
>>> @f.register(int)
... def inc(x):
... return x + 1
>>> @f.register(float)
... def dec(x):
... return x - 1
>>> @f.register(list)
... @f.register(tuple)
... def reverse(x):
... return x[::-1]
>>> f(1)
2
>>> f(1.0)
0.0
>>> f([1, 2, 3])
[3, 2, 1]
"""
def _(func):
self.add(types, func, **kwargs)
return func
return _
@classmethod
def get_func_params(cls, func):
if hasattr(inspect, "signature"):
sig = inspect.signature(func)
return sig.parameters.values()
@classmethod
def get_func_annotations(cls, func):
""" Get annotations of function positional parameters
"""
params = cls.get_func_params(func)
if params:
Parameter = inspect.Parameter
params = (param for param in params
if param.kind in
(Parameter.POSITIONAL_ONLY,
Parameter.POSITIONAL_OR_KEYWORD))
annotations = tuple(
param.annotation
for param in params)
if not any(ann is Parameter.empty for ann in annotations):
return annotations
def add(self, signature, func, on_ambiguity=ambiguity_warn):
""" Add new types/method pair to dispatcher
>>> from sympy.multipledispatch import Dispatcher
>>> D = Dispatcher('add')
>>> D.add((int, int), lambda x, y: x + y)
>>> D.add((float, float), lambda x, y: x + y)
>>> D(1, 2)
3
>>> D(1, 2.0)
Traceback (most recent call last):
...
NotImplementedError: Could not find signature for add: <int, float>
When ``add`` detects an ambiguity it calls the ``on_ambiguity`` callback
with a dispatcher/itself, and a set of ambiguous type signature pairs
as inputs. See ``ambiguity_warn`` for an example.
"""
# Handle annotations
if not signature:
annotations = self.get_func_annotations(func)
if annotations:
signature = annotations
# Handle union types
if any(isinstance(typ, tuple) for typ in signature):
for typs in expand_tuples(signature):
self.add(typs, func, on_ambiguity)
return
for typ in signature:
if not isinstance(typ, type):
str_sig = ', '.join(c.__name__ if isinstance(c, type)
else str(c) for c in signature)
raise TypeError("Tried to dispatch on non-type: %s\n"
"In signature: <%s>\n"
"In function: %s" %
(typ, str_sig, self.name))
self.funcs[signature] = func
self.reorder(on_ambiguity=on_ambiguity)
self._cache.clear()
def reorder(self, on_ambiguity=ambiguity_warn):
if _resolve[0]:
self.ordering = ordering(self.funcs)
amb = ambiguities(self.funcs)
if amb:
on_ambiguity(self, amb)
else:
_unresolved_dispatchers.add(self)
def __call__(self, *args, **kwargs):
types = tuple([type(arg) for arg in args])
try:
func = self._cache[types]
except KeyError:
func = self.dispatch(*types)
if not func:
raise NotImplementedError(
'Could not find signature for %s: <%s>' %
(self.name, str_signature(types)))
self._cache[types] = func
try:
return func(*args, **kwargs)
except MDNotImplementedError:
funcs = self.dispatch_iter(*types)
next(funcs) # burn first
for func in funcs:
try:
return func(*args, **kwargs)
except MDNotImplementedError:
pass
raise NotImplementedError("Matching functions for "
"%s: <%s> found, but none completed successfully"
% (self.name, str_signature(types)))
def __str__(self):
return "<dispatched %s>" % self.name
__repr__ = __str__
def dispatch(self, *types):
""" Deterimine appropriate implementation for this type signature
This method is internal. Users should call this object as a function.
Implementation resolution occurs within the ``__call__`` method.
>>> from sympy.multipledispatch import dispatch
>>> @dispatch(int)
... def inc(x):
... return x + 1
>>> implementation = inc.dispatch(int)
>>> implementation(3)
4
>>> print(inc.dispatch(float))
None
See Also:
``sympy.multipledispatch.conflict`` - module to determine resolution order
"""
if types in self.funcs:
return self.funcs[types]
try:
return next(self.dispatch_iter(*types))
except StopIteration:
return None
def dispatch_iter(self, *types):
n = len(types)
for signature in self.ordering:
if len(signature) == n and all(map(issubclass, types, signature)):
result = self.funcs[signature]
yield result
def resolve(self, types):
""" Deterimine appropriate implementation for this type signature
.. deprecated:: 0.4.4
Use ``dispatch(*types)`` instead
"""
warn("resolve() is deprecated, use dispatch(*types)",
DeprecationWarning)
return self.dispatch(*types)
def __getstate__(self):
return {'name': self.name,
'funcs': self.funcs}
def __setstate__(self, d):
self.name = d['name']
self.funcs = d['funcs']
self.ordering = ordering(self.funcs)
self._cache = dict()
@property
def __doc__(self):
docs = ["Multiply dispatched method: %s" % self.name]
if self.doc:
docs.append(self.doc)
other = []
for sig in self.ordering[::-1]:
func = self.funcs[sig]
if func.__doc__:
s = 'Inputs: <%s>\n' % str_signature(sig)
s += '-' * len(s) + '\n'
s += func.__doc__.strip()
docs.append(s)
else:
other.append(str_signature(sig))
if other:
docs.append('Other signatures:\n ' + '\n '.join(other))
return '\n\n'.join(docs)
def _help(self, *args):
return self.dispatch(*map(type, args)).__doc__
def help(self, *args, **kwargs):
""" Print docstring for the function corresponding to inputs """
print(self._help(*args))
def _source(self, *args):
func = self.dispatch(*map(type, args))
if not func:
raise TypeError("No function found")
return source(func)
def source(self, *args, **kwargs):
""" Print source code for the function corresponding to inputs """
print(self._source(*args))
def source(func):
s = 'File: %s\n\n' % inspect.getsourcefile(func)
s = s + inspect.getsource(func)
return s
class MethodDispatcher(Dispatcher):
""" Dispatch methods based on type signature
See Also:
Dispatcher
"""
@classmethod
def get_func_params(cls, func):
if hasattr(inspect, "signature"):
sig = inspect.signature(func)
return itl.islice(sig.parameters.values(), 1, None)
def __get__(self, instance, owner):
self.obj = instance
self.cls = owner
return self
def __call__(self, *args, **kwargs):
types = tuple([type(arg) for arg in args])
func = self.dispatch(*types)
if not func:
raise NotImplementedError('Could not find signature for %s: <%s>' %
(self.name, str_signature(types)))
return func(self.obj, *args, **kwargs)
def str_signature(sig):
""" String representation of type signature
>>> from sympy.multipledispatch.dispatcher import str_signature
>>> str_signature((int, float))
'int, float'
"""
return ', '.join(cls.__name__ for cls in sig)
def warning_text(name, amb):
""" The text for ambiguity warnings """
text = "\nAmbiguities exist in dispatched function %s\n\n" % (name)
text += "The following signatures may result in ambiguous behavior:\n"
for pair in amb:
text += "\t" + \
', '.join('[' + str_signature(s) + ']' for s in pair) + "\n"
text += "\n\nConsider making the following additions:\n\n"
text += '\n\n'.join(['@dispatch(' + str_signature(super_signature(s))
+ ')\ndef %s(...)' % name for s in amb])
return text
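# Minimal sketch of the MDNotImplementedError fall-through handled in
# Dispatcher.__call__: a more specific implementation may raise
# MDNotImplementedError to defer to the next matching signature in the
# ordering. The names below are illustrative only.
def _md_fallthrough_example():
    f = Dispatcher('f')
    f.add((int,), lambda x: x + 1)
    def bool_rule(x):
        if not x:
            raise MDNotImplementedError  # defer False to the int rule
        return x * 10
    f.add((bool,), bool_rule)            # bool is a subclass of int
    return f(True), f(False)             # expected: (10, 1)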
|
92cb6c7a6482782aadefaff4c8c9748ef2c50bd1452d87f9a5765092c7ecd1b7 | """
Boolean algebra module for SymPy
"""
from collections import defaultdict
from itertools import chain, combinations, product
from sympy.core.add import Add
from sympy.core.basic import Basic
from sympy.core.cache import cacheit
from sympy.core.decorators import sympify_method_args, sympify_return
from sympy.core.function import Application, Derivative
from sympy.core.kind import NumberKind
from sympy.core.numbers import Number
from sympy.core.operations import LatticeOp
from sympy.core.singleton import Singleton, S
from sympy.core.sorting import ordered
from sympy.core.sympify import converter, _sympify, sympify
from sympy.core.kind import BooleanKind
from sympy.utilities.iterables import sift, ibin
from sympy.utilities.misc import filldedent
def as_Boolean(e):
"""Like ``bool``, return the Boolean value of an expression, e,
which can be any instance of :py:class:`~.Boolean` or ``bool``.
Examples
========
>>> from sympy import true, false, nan
>>> from sympy.logic.boolalg import as_Boolean
>>> from sympy.abc import x
>>> as_Boolean(0) is false
True
>>> as_Boolean(1) is true
True
>>> as_Boolean(x)
x
>>> as_Boolean(2)
Traceback (most recent call last):
...
TypeError: expecting bool or Boolean, not `2`.
>>> as_Boolean(nan)
Traceback (most recent call last):
...
TypeError: expecting bool or Boolean, not `nan`.
"""
from sympy.core.symbol import Symbol
if e == True:
return S.true
if e == False:
return S.false
if isinstance(e, Symbol):
z = e.is_zero
if z is None:
return e
return S.false if z else S.true
if isinstance(e, Boolean):
return e
raise TypeError('expecting bool or Boolean, not `%s`.' % e)
@sympify_method_args
class Boolean(Basic):
"""A Boolean object is an object for which logic operations make sense."""
__slots__ = ()
kind = BooleanKind
@sympify_return([('other', 'Boolean')], NotImplemented)
def __and__(self, other):
return And(self, other)
__rand__ = __and__
@sympify_return([('other', 'Boolean')], NotImplemented)
def __or__(self, other):
return Or(self, other)
__ror__ = __or__
def __invert__(self):
"""Overloading for ~"""
return Not(self)
@sympify_return([('other', 'Boolean')], NotImplemented)
def __rshift__(self, other):
return Implies(self, other)
@sympify_return([('other', 'Boolean')], NotImplemented)
def __lshift__(self, other):
return Implies(other, self)
__rrshift__ = __lshift__
__rlshift__ = __rshift__
@sympify_return([('other', 'Boolean')], NotImplemented)
def __xor__(self, other):
return Xor(self, other)
__rxor__ = __xor__
def equals(self, other):
"""
Returns ``True`` if the given formulas have the same truth table.
For two formulas to be equal they must have the same literals.
Examples
========
>>> from sympy.abc import A, B, C
>>> from sympy.logic.boolalg import And, Or, Not
>>> (A >> B).equals(~B >> ~A)
True
>>> Not(And(A, B, C)).equals(And(Not(A), Not(B), Not(C)))
False
>>> Not(And(A, Not(A))).equals(Or(B, Not(B)))
False
"""
from sympy.logic.inference import satisfiable
from sympy.core.relational import Relational
if self.has(Relational) or other.has(Relational):
raise NotImplementedError('handling of relationals')
return self.atoms() == other.atoms() and \
not satisfiable(Not(Equivalent(self, other)))
def to_nnf(self, simplify=True):
# override where necessary
return self
def as_set(self):
"""
Rewrites Boolean expression in terms of real sets.
Examples
========
>>> from sympy import Symbol, Eq, Or, And
>>> x = Symbol('x', real=True)
>>> Eq(x, 0).as_set()
{0}
>>> (x > 0).as_set()
Interval.open(0, oo)
>>> And(-2 < x, x < 2).as_set()
Interval.open(-2, 2)
>>> Or(x < -2, 2 < x).as_set()
Union(Interval.open(-oo, -2), Interval.open(2, oo))
"""
from sympy.calculus.util import periodicity
from sympy.core.relational import Relational
free = self.free_symbols
if len(free) == 1:
x = free.pop()
if x.kind is NumberKind:
reps = {}
for r in self.atoms(Relational):
if periodicity(r, x) not in (0, None):
s = r._eval_as_set()
if s in (S.EmptySet, S.UniversalSet, S.Reals):
reps[r] = s.as_relational(x)
continue
raise NotImplementedError(filldedent('''
as_set is not implemented for relationals
with periodic solutions
'''))
new = self.subs(reps)
if new.func != self.func:
return new.as_set() # restart with new obj
else:
return new._eval_as_set()
return self._eval_as_set()
else:
raise NotImplementedError("Sorry, as_set has not yet been"
" implemented for multivariate"
" expressions")
@property
def binary_symbols(self):
from sympy.core.relational import Eq, Ne
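# Gather, recursively, the symbols that occur where a two-valued Boolean is
# expected (as Boolean args, bare Symbols, or inside Eq/Ne relations).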
return set().union(*[i.binary_symbols for i in self.args
if i.is_Boolean or i.is_Symbol
or isinstance(i, (Eq, Ne))])
def _eval_refine(self, assumptions):
from sympy.assumptions import ask
ret = ask(self, assumptions)
if ret is True:
return true
elif ret is False:
return false
return None
class BooleanAtom(Boolean):
"""
Base class of :py:class:`~.BooleanTrue` and :py:class:`~.BooleanFalse`.
"""
is_Boolean = True
is_Atom = True
_op_priority = 11 # higher than Expr
def simplify(self, *a, **kw):
return self
def expand(self, *a, **kw):
return self
@property
def canonical(self):
return self
def _noop(self, other=None):
raise TypeError('BooleanAtom not allowed in this context.')
__add__ = _noop
__radd__ = _noop
__sub__ = _noop
__rsub__ = _noop
__mul__ = _noop
__rmul__ = _noop
__pow__ = _noop
__rpow__ = _noop
__truediv__ = _noop
__rtruediv__ = _noop
__mod__ = _noop
__rmod__ = _noop
_eval_power = _noop
# /// drop when Py2 is no longer supported
def __lt__(self, other):
raise TypeError(filldedent('''
A Boolean argument can only be used in
Eq and Ne; all other relationals expect
real expressions.
'''))
__le__ = __lt__
__gt__ = __lt__
__ge__ = __lt__
# \\\
class BooleanTrue(BooleanAtom, metaclass=Singleton):
"""
SymPy version of ``True``, a singleton that can be accessed via ``S.true``.
This is the SymPy version of ``True``, for use in the logic module. The
primary advantage of using ``true`` instead of ``True`` is that shorthand Boolean
operations like ``~`` and ``>>`` will work as expected on this class, whereas with
True they act bitwise on 1. Functions in the logic module will return this
class when they evaluate to true.
Notes
=====
There is liable to be some confusion as to when ``True`` should
be used and when ``S.true`` should be used in various contexts
throughout SymPy. An important thing to remember is that
``sympify(True)`` returns ``S.true``. This means that for the most
part, you can just use ``True`` and it will automatically be converted
to ``S.true`` when necessary, similar to how you can generally use 1
instead of ``S.One``.
The rule of thumb is:
"If the boolean in question can be replaced by an arbitrary symbolic
``Boolean``, like ``Or(x, y)`` or ``x > 1``, use ``S.true``.
Otherwise, use ``True``"
In other words, use ``S.true`` only on those contexts where the
boolean is being used as a symbolic representation of truth.
For example, if the object ends up in the ``.args`` of any expression,
then it must necessarily be ``S.true`` instead of ``True``, as
elements of ``.args`` must be ``Basic``. On the other hand,
``==`` is not a symbolic operation in SymPy, since it always returns
``True`` or ``False``, and does so in terms of structural equality
rather than mathematical, so it should return ``True``. The assumptions
system should use ``True`` and ``False``. Aside from not satisfying
the above rule of thumb, the assumptions system uses a three-valued logic
(``True``, ``False``, ``None``), whereas ``S.true`` and ``S.false``
represent a two-valued logic. When in doubt, use ``True``.
"``S.true == True is True``."
While "``S.true is True``" is ``False``, "``S.true == True``"
is ``True``, so if there is any doubt over whether a function or
expression will return ``S.true`` or ``True``, just use ``==``
instead of ``is`` to do the comparison, and it will work in either
case. Finally, for boolean flags, it's better to just use ``if x``
instead of ``if x is True``. To quote PEP 8:
Don't compare boolean values to ``True`` or ``False``
using ``==``.
* Yes: ``if greeting:``
* No: ``if greeting == True:``
* Worse: ``if greeting is True:``
Examples
========
>>> from sympy import sympify, true, false, Or
>>> sympify(True)
True
>>> _ is True, _ is true
(False, True)
>>> Or(true, false)
True
>>> _ is true
True
Python operators give a boolean result for true but a
bitwise result for True
>>> ~true, ~True
(False, -2)
>>> true >> true, True >> True
(True, 0)
See Also
========
sympy.logic.boolalg.BooleanFalse
"""
def __bool__(self):
return True
def __hash__(self):
return hash(True)
@property
def negated(self):
return S.false
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> from sympy import true
>>> true.as_set()
UniversalSet
"""
return S.UniversalSet
class BooleanFalse(BooleanAtom, metaclass=Singleton):
"""
SymPy version of ``False``, a singleton that can be accessed via ``S.false``.
This is the SymPy version of ``False``, for use in the logic module. The
primary advantage of using ``false`` instead of ``False`` is that shorthand
Boolean operations like ``~`` and ``>>`` will work as expected on this class,
whereas with ``False`` they act bitwise on 0. Functions in the logic module
will return this class when they evaluate to false.
Notes
=====
See the notes section in :py:class:`sympy.logic.boolalg.BooleanTrue`
Examples
========
>>> from sympy import sympify, true, false, Or
>>> sympify(False)
False
>>> _ is False, _ is false
(False, True)
>>> Or(true, false)
True
>>> _ is true
True
Python operators give a boolean result for false but a
bitwise result for False
>>> ~false, ~False
(True, -1)
>>> false >> false, False >> False
(True, 0)
See Also
========
sympy.logic.boolalg.BooleanTrue
"""
def __bool__(self):
return False
def __hash__(self):
return hash(False)
@property
def negated(self):
return S.true
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> from sympy import false
>>> false.as_set()
EmptySet
"""
return S.EmptySet
true = BooleanTrue()
false = BooleanFalse()
# We want S.true and S.false to work, rather than S.BooleanTrue and
# S.BooleanFalse, but making the class and instance names the same causes some
# major issues (like the inability to import the class directly from this
# file).
S.true = true
S.false = false
converter[bool] = lambda x: S.true if x else S.false
class BooleanFunction(Application, Boolean):
"""Boolean function is a function that lives in a boolean space
It is used as base class for :py:class:`~.And`, :py:class:`~.Or`,
:py:class:`~.Not`, etc.
"""
is_Boolean = True
def _eval_simplify(self, **kwargs):
rv = simplify_univariate(self)
if not isinstance(rv, BooleanFunction):
return rv.simplify(**kwargs)
rv = rv.func(*[a.simplify(**kwargs) for a in rv.args])
return simplify_logic(rv)
def simplify(self, **kwargs):
from sympy.simplify.simplify import simplify
return simplify(self, **kwargs)
def __lt__(self, other):
raise TypeError(filldedent('''
A Boolean argument can only be used in
Eq and Ne; all other relationals expect
real expressions.
'''))
__le__ = __lt__
__ge__ = __lt__
__gt__ = __lt__
@classmethod
def binary_check_and_simplify(self, *args):
from sympy.core.relational import Relational, Eq, Ne
args = [as_Boolean(i) for i in args]
bin_syms = set().union(*[i.binary_symbols for i in args])
rel = set().union(*[i.atoms(Relational) for i in args])
reps = {}
for x in bin_syms:
for r in rel:
if x in bin_syms and x in r.free_symbols:
if isinstance(r, (Eq, Ne)):
if not (
S.true in r.args or
S.false in r.args):
reps[r] = S.false
else:
raise TypeError(filldedent('''
Incompatible use of binary symbol `%s` as a
real variable in `%s`
''' % (x, r)))
return [i.subs(reps) for i in args]
def to_nnf(self, simplify=True):
return self._to_nnf(*self.args, simplify=simplify)
def to_anf(self, deep=True):
return self._to_anf(*self.args, deep=deep)
@classmethod
def _to_nnf(cls, *args, **kwargs):
simplify = kwargs.get('simplify', True)
argset = set()
for arg in args:
if not is_literal(arg):
arg = arg.to_nnf(simplify)
if simplify:
if isinstance(arg, cls):
arg = arg.args
else:
arg = (arg,)
for a in arg:
if Not(a) in argset:
return cls.zero
argset.add(a)
else:
argset.add(arg)
return cls(*argset)
@classmethod
def _to_anf(cls, *args, **kwargs):
deep = kwargs.get('deep', True)
argset = set()
for arg in args:
if deep:
if not is_literal(arg) or isinstance(arg, Not):
arg = arg.to_anf(deep=deep)
argset.add(arg)
else:
argset.add(arg)
return cls(*argset, remove_true=False)
# the diff method below is copied from Expr class
def diff(self, *symbols, **assumptions):
assumptions.setdefault("evaluate", True)
return Derivative(self, *symbols, **assumptions)
def _eval_derivative(self, x):
if x in self.binary_symbols:
from sympy.core.relational import Eq
from sympy.functions.elementary.piecewise import Piecewise
return Piecewise(
(0, Eq(self.subs(x, 0), self.subs(x, 1))),
(1, True))
elif x in self.free_symbols:
# not implemented, see https://www.encyclopediaofmath.org/
# index.php/Boolean_differential_calculus
pass
else:
return S.Zero
def _apply_patternbased_simplification(self, rv, patterns, measure,
dominatingvalue,
replacementvalue=None):
"""
Replace patterns of Relational
Parameters
==========
rv : Expr
Boolean expression
patterns : tuple
Tuple of tuples, with (pattern to simplify, simplified pattern).
measure : function
Simplification measure.
dominatingvalue : Boolean or ``None``
The dominating value for the function under consideration.
For example, for :py:class:`~.And` ``S.false`` is dominating.
As soon as one expression is ``S.false`` in :py:class:`~.And`,
the whole expression is ``S.false``.
replacementvalue : Boolean or ``None``, optional
The resulting value for the whole expression if one argument
evaluates to ``dominatingvalue``.
For example, for :py:class:`~.Nand` ``S.false`` is dominating, but
in this case the resulting value is ``S.true``. Default is ``None``.
If ``replacementvalue`` is ``None`` and ``dominatingvalue`` is not
``None``, ``replacementvalue = dominatingvalue``.
"""
from sympy.core.relational import Relational, _canonical
from sympy.functions.elementary.miscellaneous import Min, Max
if replacementvalue is None and dominatingvalue is not None:
replacementvalue = dominatingvalue
# Use replacement patterns for Relationals
changed = True
Rel, nonRel = sift(rv.args, lambda i: isinstance(i, Relational),
binary=True)
if len(Rel) <= 1:
return rv
Rel, nonRealRel = sift(Rel, lambda i: not any(s.is_real is False
for s in i.free_symbols),
binary=True)
Rel = [i.canonical for i in Rel]
while changed and len(Rel) >= 2:
changed = False
# Sort based on ordered
Rel = list(ordered(Rel))
# Create a list of possible replacements
results = []
# Try all combinations
for ((i, pi), (j, pj)) in combinations(enumerate(Rel), 2):
for pattern, simp in patterns:
res = []
# use SymPy matching
oldexpr = rv.func(pi, pj)
tmpres = oldexpr.match(pattern)
if tmpres:
res.append((tmpres, oldexpr))
# Try reversing first relational
# This and the rest should not be required with a better
# canonical
oldexpr = rv.func(pi.reversed, pj)
tmpres = oldexpr.match(pattern)
if tmpres:
res.append((tmpres, oldexpr))
# Try reversing second relational
oldexpr = rv.func(pi, pj.reversed)
tmpres = oldexpr.match(pattern)
if tmpres:
res.append((tmpres, oldexpr))
# Try reversing both relationals
oldexpr = rv.func(pi.reversed, pj.reversed)
tmpres = oldexpr.match(pattern)
if tmpres:
res.append((tmpres, oldexpr))
if res:
for tmpres, oldexpr in res:
# we have a matching, compute replacement
np = simp.subs(tmpres)
if np == dominatingvalue:
# if dominatingvalue, the whole expression
# will be replacementvalue
return replacementvalue
# add replacement
if not isinstance(np, ITE) and not np.has(Min, Max):
# We only want to use ITE and Min/Max
# replacements if they simplify away
costsaving = measure(oldexpr) - measure(np)
if costsaving > 0:
results.append((costsaving, (i, j, np)))
if results:
# Sort results based on complexity
results = list(reversed(sorted(results,
key=lambda pair: pair[0])))
# Replace the one providing most simplification
replacement = results[0][1]
i, j, newrel = replacement
# Remove the old relationals
del Rel[j]
del Rel[i]
if dominatingvalue is None or newrel != ~dominatingvalue:
# Insert the new one (no need to insert a value that will
# not affect the result)
Rel.append(newrel)
# We did change something so try again
changed = True
rv = rv.func(*([_canonical(i) for i in ordered(Rel)]
+ nonRel + nonRealRel))
return rv
class And(LatticeOp, BooleanFunction):
"""
Logical AND function.
It evaluates its arguments in order, returning false immediately
when an argument is false and true if they are all true.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.logic.boolalg import And
>>> x & y
x & y
Notes
=====
The ``&`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise
and. Hence, ``And(a, b)`` and ``a & b`` will return different things if
``a`` and ``b`` are integers.
>>> And(x, y).subs(x, 1)
y
"""
zero = false
identity = true
nargs = None
@classmethod
def _new_args_filter(cls, args):
args = BooleanFunction.binary_check_and_simplify(*args)
args = LatticeOp._new_args_filter(args, And)
newargs = []
rel = set()
for x in ordered(args):
if x.is_Relational:
c = x.canonical
if c in rel:
continue
elif c.negated.canonical in rel:
return [S.false]
else:
rel.add(c)
newargs.append(x)
return newargs
def _eval_subs(self, old, new):
args = []
bad = None
for i in self.args:
try:
i = i.subs(old, new)
except TypeError:
# store TypeError
if bad is None:
bad = i
continue
if i == False:
return S.false
elif i != True:
args.append(i)
if bad is not None:
# let it raise
bad.subs(old, new)
# If old is And, replace the parts of the arguments with new if all
# are there
if isinstance(old, And):
old_set = set(old.args)
if old_set.issubset(args):
args = set(args) - old_set
args.add(new)
return self.func(*args)
def _eval_simplify(self, **kwargs):
from sympy.core.relational import Equality, Relational
from sympy.solvers.solveset import linear_coeffs
# standard simplify
rv = super()._eval_simplify(**kwargs)
if not isinstance(rv, And):
return rv
# simplify args that are equalities involving
# symbols so x == 0 & x == y -> x==0 & y == 0
Rel, nonRel = sift(rv.args, lambda i: isinstance(i, Relational),
binary=True)
if not Rel:
return rv
eqs, other = sift(Rel, lambda i: isinstance(i, Equality), binary=True)
measure = kwargs['measure']
if eqs:
ratio = kwargs['ratio']
reps = {}
sifted = {}
# group by length of free symbols
sifted = sift(ordered([
(i.free_symbols, i) for i in eqs]),
lambda x: len(x[0]))
eqs = []
nonlineqs = []
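# Handle equalities with a single free symbol first: each one either
# defines a substitution x -> value (recorded in reps) or, if it cannot
# cheaply be solved for its symbol, is kept for the later passes below.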
while 1 in sifted:
for free, e in sifted.pop(1):
x = free.pop()
if (e.lhs != x or x in e.rhs.free_symbols) and x not in reps:
try:
m, b = linear_coeffs(
e.rewrite(Add, evaluate=False), x)
enew = e.func(x, -b/m)
if measure(enew) <= ratio*measure(e):
e = enew
else:
eqs.append(e)
continue
except ValueError:
pass
if x in reps:
eqs.append(e.subs(x, reps[x]))
elif e.lhs == x and x not in e.rhs.free_symbols:
reps[x] = e.rhs
eqs.append(e)
else:
# x is not yet identified, but may be later
nonlineqs.append(e)
resifted = defaultdict(list)
for k in sifted:
for f, e in sifted[k]:
e = e.xreplace(reps)
f = e.free_symbols
resifted[len(f)].append((f, e))
sifted = resifted
for k in sifted:
eqs.extend([e for f, e in sifted[k]])
nonlineqs = [ei.subs(reps) for ei in nonlineqs]
other = [ei.subs(reps) for ei in other]
rv = rv.func(*([i.canonical for i in (eqs + nonlineqs + other)] + nonRel))
patterns = simplify_patterns_and()
return self._apply_patternbased_simplification(rv, patterns,
measure, False)
def _eval_as_set(self):
from sympy.sets.sets import Intersection
return Intersection(*[arg.as_set() for arg in self.args])
def _eval_rewrite_as_Nor(self, *args, **kwargs):
return Nor(*[Not(arg) for arg in self.args])
def to_anf(self, deep=True):
if deep:
result = And._to_anf(*self.args, deep=deep)
return distribute_xor_over_and(result)
return self
class Or(LatticeOp, BooleanFunction):
"""
Logical OR function.
It evaluates its arguments in order, returning true immediately
when an argument is true, and false if they are all false.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.logic.boolalg import Or
>>> x | y
x | y
Notes
=====
The ``|`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise
or. Hence, ``Or(a, b)`` and ``a | b`` will return different things if
``a`` and ``b`` are integers.
>>> Or(x, y).subs(x, 0)
y
"""
zero = true
identity = false
@classmethod
def _new_args_filter(cls, args):
newargs = []
rel = []
args = BooleanFunction.binary_check_and_simplify(*args)
for x in args:
if x.is_Relational:
c = x.canonical
if c in rel:
continue
nc = c.negated.canonical
if any(r == nc for r in rel):
return [S.true]
rel.append(c)
newargs.append(x)
return LatticeOp._new_args_filter(newargs, Or)
def _eval_subs(self, old, new):
args = []
bad = None
for i in self.args:
try:
i = i.subs(old, new)
except TypeError:
# store TypeError
if bad is None:
bad = i
continue
if i == True:
return S.true
elif i != False:
args.append(i)
if bad is not None:
# let it raise
bad.subs(old, new)
# If old is Or, replace the parts of the arguments with new if all
# are there
if isinstance(old, Or):
old_set = set(old.args)
if old_set.issubset(args):
args = set(args) - old_set
args.add(new)
return self.func(*args)
def _eval_as_set(self):
from sympy.sets.sets import Union
return Union(*[arg.as_set() for arg in self.args])
def _eval_rewrite_as_Nand(self, *args, **kwargs):
return Nand(*[Not(arg) for arg in self.args])
def _eval_simplify(self, **kwargs):
from sympy.core.relational import Le, Ge, Eq
lege = self.atoms(Le, Ge)
if lege:
reps = {i: self.func(
Eq(i.lhs, i.rhs), i.strict) for i in lege}
return self.xreplace(reps)._eval_simplify(**kwargs)
# standard simplify
rv = super()._eval_simplify(**kwargs)
if not isinstance(rv, Or):
return rv
patterns = simplify_patterns_or()
return self._apply_patternbased_simplification(rv, patterns,
kwargs['measure'], S.true)
def to_anf(self, deep=True):
args = range(1, len(self.args) + 1)
args = (combinations(self.args, j) for j in args)
args = chain.from_iterable(args) # powerset
args = (And(*arg) for arg in args)
args = map(lambda x: to_anf(x, deep=deep) if deep else x, args)
return Xor(*list(args), remove_true=False)
class Not(BooleanFunction):
"""
Logical Not function (negation)
Returns ``true`` if the statement is ``false`` or ``False``.
Returns ``false`` if the statement is ``true`` or ``True``.
Examples
========
>>> from sympy.logic.boolalg import Not, And, Or
>>> from sympy.abc import x, A, B
>>> Not(True)
False
>>> Not(False)
True
>>> Not(And(True, False))
True
>>> Not(Or(True, False))
False
>>> Not(And(And(True, x), Or(x, False)))
~x
>>> ~x
~x
>>> Not(And(Or(A, B), Or(~A, ~B)))
~((A | B) & (~A | ~B))
Notes
=====
- The ``~`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise
not. In particular, ``~a`` and ``Not(a)`` will be different if ``a`` is
an integer. Furthermore, since bools in Python subclass from ``int``,
``~True`` is the same as ``~1`` which is ``-2``, which has a boolean
value of True. To avoid this issue, use the SymPy boolean types
``true`` and ``false``.
>>> from sympy import true
>>> ~True
-2
>>> ~true
False
"""
is_Not = True
@classmethod
def eval(cls, arg):
if isinstance(arg, Number) or arg in (True, False):
return false if arg else true
if arg.is_Not:
return arg.args[0]
# Simplify Relational objects.
if arg.is_Relational:
return arg.negated
def _eval_as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> from sympy import Not, Symbol
>>> x = Symbol('x')
>>> Not(x > 0).as_set()
Interval(-oo, 0)
"""
return self.args[0].as_set().complement(S.Reals)
def to_nnf(self, simplify=True):
if is_literal(self):
return self
expr = self.args[0]
func, args = expr.func, expr.args
if func == And:
return Or._to_nnf(*[~arg for arg in args], simplify=simplify)
if func == Or:
return And._to_nnf(*[~arg for arg in args], simplify=simplify)
if func == Implies:
a, b = args
return And._to_nnf(a, ~b, simplify=simplify)
if func == Equivalent:
return And._to_nnf(Or(*args), Or(*[~arg for arg in args]),
simplify=simplify)
if func == Xor:
result = []
for i in range(1, len(args)+1, 2):
for neg in combinations(args, i):
clause = [~s if s in neg else s for s in args]
result.append(Or(*clause))
return And._to_nnf(*result, simplify=simplify)
if func == ITE:
a, b, c = args
return And._to_nnf(Or(a, ~c), Or(~a, ~b), simplify=simplify)
raise ValueError("Illegal operator %s in expression" % func)
def to_anf(self, deep=True):
return Xor._to_anf(true, self.args[0], deep=deep)
class Xor(BooleanFunction):
"""
Logical XOR (exclusive OR) function.
Returns True if an odd number of the arguments are True and the rest are
False.
Returns False if an even number of the arguments are True and the rest are
False.
Examples
========
>>> from sympy.logic.boolalg import Xor
>>> from sympy import symbols
>>> x, y = symbols('x y')
>>> Xor(True, False)
True
>>> Xor(True, True)
False
>>> Xor(True, False, True, True, False)
True
>>> Xor(True, False, True, False)
False
>>> x ^ y
x ^ y
Notes
=====
The ``^`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise xor. In
particular, ``a ^ b`` and ``Xor(a, b)`` will be different if ``a`` and
``b`` are integers.
>>> Xor(x, y).subs(y, 0)
x
"""
def __new__(cls, *args, remove_true=True, **kwargs):
argset = set()
obj = super().__new__(cls, *args, **kwargs)
for arg in obj._args:
if isinstance(arg, Number) or arg in (True, False):
if arg:
arg = true
else:
continue
if isinstance(arg, Xor):
for a in arg.args:
argset.remove(a) if a in argset else argset.add(a)
elif arg in argset:
argset.remove(arg)
else:
argset.add(arg)
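# Relationals that are complements of each other (e.g. x < 1 and x >= 1)
# XOR to true, while a pair equal up to canonical form XORs to false; each
# matched pair is removed and `true` is toggled once iff the number of
# complementary pairs is odd.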
rel = [(r, r.canonical, r.negated.canonical)
for r in argset if r.is_Relational]
odd = False  # is the number of complementary pairs odd? start 0 -> False
remove = []
for i, (r, c, nc) in enumerate(rel):
for j in range(i + 1, len(rel)):
rj, cj = rel[j][:2]
if cj == nc:
odd = not odd
break
elif cj == c:
break
else:
continue
remove.append((r, rj))
if odd:
argset.remove(true) if true in argset else argset.add(true)
for a, b in remove:
argset.remove(a)
argset.remove(b)
if len(argset) == 0:
return false
elif len(argset) == 1:
return argset.pop()
elif True in argset and remove_true:
argset.remove(True)
return Not(Xor(*argset))
else:
obj._args = tuple(ordered(argset))
obj._argset = frozenset(argset)
return obj
# XXX: This should be cached on the object rather than using cacheit
# Maybe it can be computed in __new__?
@property # type: ignore
@cacheit
def args(self):
return tuple(ordered(self._argset))
def to_nnf(self, simplify=True):
args = []
for i in range(0, len(self.args)+1, 2):
for neg in combinations(self.args, i):
clause = [~s if s in neg else s for s in self.args]
args.append(Or(*clause))
return And._to_nnf(*args, simplify=simplify)
def _eval_rewrite_as_Or(self, *args, **kwargs):
a = self.args
return Or(*[_convert_to_varsSOP(x, self.args)
for x in _get_odd_parity_terms(len(a))])
def _eval_rewrite_as_And(self, *args, **kwargs):
a = self.args
return And(*[_convert_to_varsPOS(x, self.args)
for x in _get_even_parity_terms(len(a))])
def _eval_simplify(self, **kwargs):
# as standard simplify uses simplify_logic which writes things as
# And and Or, we only simplify the partial expressions before using
# patterns
rv = self.func(*[a.simplify(**kwargs) for a in self.args])
if not isinstance(rv, Xor): # This shouldn't really happen here
return rv
patterns = simplify_patterns_xor()
return self._apply_patternbased_simplification(rv, patterns,
kwargs['measure'], None)
def _eval_subs(self, old, new):
# If old is Xor, replace the parts of the arguments with new if all
# are there
if isinstance(old, Xor):
old_set = set(old.args)
if old_set.issubset(self.args):
args = set(self.args) - old_set
args.add(new)
return self.func(*args)
class Nand(BooleanFunction):
"""
Logical NAND function.
It evaluates its arguments in order, giving True immediately if any
of them are False, and False if they are all True.
Returns True if any of the arguments are False
Returns False if all arguments are True
Examples
========
>>> from sympy.logic.boolalg import Nand
>>> from sympy import symbols
>>> x, y = symbols('x y')
>>> Nand(False, True)
True
>>> Nand(True, True)
False
>>> Nand(x, y)
~(x & y)
"""
@classmethod
def eval(cls, *args):
return Not(And(*args))
class Nor(BooleanFunction):
"""
Logical NOR function.
It evaluates its arguments in order, giving False immediately if any
of them are True, and True if they are all False.
Returns False if any argument is True
Returns True if all arguments are False
Examples
========
>>> from sympy.logic.boolalg import Nor
>>> from sympy import symbols
>>> x, y = symbols('x y')
>>> Nor(True, False)
False
>>> Nor(True, True)
False
>>> Nor(False, True)
False
>>> Nor(False, False)
True
>>> Nor(x, y)
~(x | y)
"""
@classmethod
def eval(cls, *args):
return Not(Or(*args))
class Xnor(BooleanFunction):
"""
Logical XNOR function.
Returns False if an odd number of the arguments are True and the rest are
False.
Returns True if an even number of the arguments are True and the rest are
False.
Examples
========
>>> from sympy.logic.boolalg import Xnor
>>> from sympy import symbols
>>> x, y = symbols('x y')
>>> Xnor(True, False)
False
>>> Xnor(True, True)
True
>>> Xnor(True, False, True, True, False)
False
>>> Xnor(True, False, True, False)
True
"""
@classmethod
def eval(cls, *args):
return Not(Xor(*args))
class Implies(BooleanFunction):
r"""
Logical implication.
``A`` implies ``B`` is equivalent to "if ``A`` then ``B``". Mathematically, it is
written as `A \Rightarrow B` and is equivalent to `\neg A \vee B` or ``~A | B``.
Accepts two Boolean arguments: ``A`` and ``B``.
Returns False if A is True and B is False
Returns True otherwise.
Examples
========
>>> from sympy.logic.boolalg import Implies
>>> from sympy import symbols
>>> x, y = symbols('x y')
>>> Implies(True, False)
False
>>> Implies(False, False)
True
>>> Implies(True, True)
True
>>> Implies(False, True)
True
>>> x >> y
Implies(x, y)
>>> y << x
Implies(x, y)
Notes
=====
The ``>>`` and ``<<`` operators are provided as a convenience, but note
that their use here is different from their normal use in Python, which is
bit shifts. Hence, ``Implies(a, b)`` and ``a >> b`` will return different
things if ``a`` and ``b`` are integers. In particular, since Python
considers ``True`` and ``False`` to be integers, ``True >> True`` will be
the same as ``1 >> 1``, i.e., 0, which has a truth value of False. To
avoid this issue, use the SymPy objects ``true`` and ``false``.
>>> from sympy import true, false
>>> True >> False
1
>>> true >> false
False
"""
@classmethod
def eval(cls, *args):
try:
newargs = []
for x in args:
if isinstance(x, Number) or x in (0, 1):
newargs.append(bool(x))
else:
newargs.append(x)
A, B = newargs
except ValueError:
raise ValueError(
"%d operand(s) used for an Implies "
"(pairs are required): %s" % (len(args), str(args)))
if A in (True, False) or B in (True, False):
return Or(Not(A), B)
elif A == B:
return S.true
elif A.is_Relational and B.is_Relational:
if A.canonical == B.canonical:
return S.true
if A.negated.canonical == B.canonical:
return B
else:
return Basic.__new__(cls, *args)
def to_nnf(self, simplify=True):
a, b = self.args
return Or._to_nnf(~a, b, simplify=simplify)
def to_anf(self, deep=True):
a, b = self.args
return Xor._to_anf(true, a, And(a, b), deep=deep)
class Equivalent(BooleanFunction):
"""
Equivalence relation.
``Equivalent(A, B)`` is True iff A and B are both True or both False.
Returns True if all of the arguments are logically equivalent.
Returns False otherwise.
For two arguments, this is equivalent to :py:class:`~.Xnor`.
Examples
========
>>> from sympy.logic.boolalg import Equivalent, And
>>> from sympy.abc import x
>>> Equivalent(False, False, False)
True
>>> Equivalent(True, False, False)
False
>>> Equivalent(x, And(x, True))
True
"""
def __new__(cls, *args, **options):
from sympy.core.relational import Relational
args = [_sympify(arg) for arg in args]
argset = set(args)
for x in args:
if isinstance(x, Number) or x in [True, False]: # Includes 0, 1
argset.discard(x)
argset.add(bool(x))
rel = []
for r in argset:
if isinstance(r, Relational):
rel.append((r, r.canonical, r.negated.canonical))
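# A pair of mutually negated relationals makes the arguments
# inequivalent (the result is false); a pair that is identical up to
# canonical form is redundant and collapses to a single True.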
remove = []
for i, (r, c, nc) in enumerate(rel):
for j in range(i + 1, len(rel)):
rj, cj = rel[j][:2]
if cj == nc:
return false
elif cj == c:
remove.append((r, rj))
break
for a, b in remove:
argset.remove(a)
argset.remove(b)
argset.add(True)
if len(argset) <= 1:
return true
if True in argset:
argset.discard(True)
return And(*argset)
if False in argset:
argset.discard(False)
return And(*[~arg for arg in argset])
_args = frozenset(argset)
obj = super().__new__(cls, _args)
obj._argset = _args
return obj
# XXX: This should be cached on the object rather than using cacheit
# Maybe it can be computed in __new__?
@property # type: ignore
@cacheit
def args(self):
return tuple(ordered(self._argset))
def to_nnf(self, simplify=True):
args = []
for a, b in zip(self.args, self.args[1:]):
args.append(Or(~a, b))
args.append(Or(~self.args[-1], self.args[0]))
return And._to_nnf(*args, simplify=simplify)
def to_anf(self, deep=True):
a = And(*self.args)
b = And(*[to_anf(Not(arg), deep=False) for arg in self.args])
b = distribute_xor_over_and(b)
return Xor._to_anf(a, b, deep=deep)
class ITE(BooleanFunction):
"""
If-then-else clause.
``ITE(A, B, C)`` evaluates and returns the result of B if A is true
else it returns the result of C. All args must be Booleans.
Examples
========
>>> from sympy.logic.boolalg import ITE, And, Xor, Or
>>> from sympy.abc import x, y, z
>>> ITE(True, False, True)
False
>>> ITE(Or(True, False), And(True, True), Xor(True, True))
True
>>> ITE(x, y, z)
ITE(x, y, z)
>>> ITE(True, x, y)
x
>>> ITE(False, x, y)
y
>>> ITE(x, y, y)
y
Trying to use non-Boolean args will generate a TypeError:
>>> ITE(True, [], ())
Traceback (most recent call last):
...
TypeError: expecting bool, Boolean or ITE, not `[]`
"""
def __new__(cls, *args, **kwargs):
from sympy.core.relational import Eq, Ne
if len(args) != 3:
raise ValueError('expecting exactly 3 args')
a, b, c = args
# check use of binary symbols
if isinstance(a, (Eq, Ne)):
# in this context, we can evaluate the Eq/Ne
# if one arg is a binary symbol and the other
# is true/false
b, c = map(as_Boolean, (b, c))
bin_syms = set().union(*[i.binary_symbols for i in (b, c)])
if len(set(a.args) - bin_syms) == 1:
# one arg is a binary symbol
_a = a
if a.lhs is S.true:
a = a.rhs
elif a.rhs is S.true:
a = a.lhs
elif a.lhs is S.false:
a = ~a.rhs
elif a.rhs is S.false:
a = ~a.lhs
else:
# binary can only equal True or False
a = S.false
if isinstance(_a, Ne):
a = ~a
else:
a, b, c = BooleanFunction.binary_check_and_simplify(
a, b, c)
rv = None
if kwargs.get('evaluate', True):
rv = cls.eval(a, b, c)
if rv is None:
rv = BooleanFunction.__new__(cls, a, b, c, evaluate=False)
return rv
@classmethod
def eval(cls, *args):
from sympy.core.relational import Eq, Ne
# do the args give a singular result?
a, b, c = args
if isinstance(a, (Ne, Eq)):
_a = a
if S.true in a.args:
a = a.lhs if a.rhs is S.true else a.rhs
elif S.false in a.args:
a = ~a.lhs if a.rhs is S.false else ~a.rhs
else:
_a = None
if _a is not None and isinstance(_a, Ne):
a = ~a
if a is S.true:
return b
if a is S.false:
return c
if b == c:
return b
else:
# or maybe the results allow the answer to be expressed
# in terms of the condition
if b is S.true and c is S.false:
return a
if b is S.false and c is S.true:
return Not(a)
if [a, b, c] != args:
return cls(a, b, c, evaluate=False)
def to_nnf(self, simplify=True):
a, b, c = self.args
return And._to_nnf(Or(~a, b), Or(a, c), simplify=simplify)
def _eval_as_set(self):
return self.to_nnf().as_set()
def _eval_rewrite_as_Piecewise(self, *args, **kwargs):
from sympy.functions import Piecewise
return Piecewise((args[1], args[0]), (args[2], True))
class Exclusive(BooleanFunction):
"""
True if only one or no argument is true.
``Exclusive(A, B, C)`` is equivalent to ``~(A & B) & ~(A & C) & ~(B & C)``.
For two arguments, this is equivalent to :py:class:`~.Nand`.
Examples
========
>>> from sympy.logic.boolalg import Exclusive
>>> Exclusive(False, False, False)
True
>>> Exclusive(False, True, False)
True
>>> Exclusive(False, True, True)
False
"""
@classmethod
def eval(cls, *args):
and_args = []
for a, b in combinations(args, 2):
and_args.append(Not(And(a, b)))
return And(*and_args)
# end class definitions. Some useful methods
def conjuncts(expr):
"""Return a list of the conjuncts in ``expr``.
Examples
========
>>> from sympy.logic.boolalg import conjuncts
>>> from sympy.abc import A, B
>>> conjuncts(A & B)
frozenset({A, B})
>>> conjuncts(A | B)
frozenset({A | B})
"""
return And.make_args(expr)
def disjuncts(expr):
"""Return a list of the disjuncts in ``expr``.
Examples
========
>>> from sympy.logic.boolalg import disjuncts
>>> from sympy.abc import A, B
>>> disjuncts(A | B)
frozenset({A, B})
>>> disjuncts(A & B)
frozenset({A & B})
"""
return Or.make_args(expr)
def distribute_and_over_or(expr):
"""
Given a sentence ``expr`` consisting of conjunctions and disjunctions
of literals, return an equivalent sentence in CNF.
Examples
========
>>> from sympy.logic.boolalg import distribute_and_over_or, And, Or, Not
>>> from sympy.abc import A, B, C
>>> distribute_and_over_or(Or(A, And(Not(B), Not(C))))
(A | ~B) & (A | ~C)
"""
return _distribute((expr, And, Or))
def distribute_or_over_and(expr):
"""
Given a sentence ``expr`` consisting of conjunctions and disjunctions
of literals, return an equivalent sentence in DNF.
Note that the output is NOT simplified.
Examples
========
>>> from sympy.logic.boolalg import distribute_or_over_and, And, Or, Not
>>> from sympy.abc import A, B, C
>>> distribute_or_over_and(And(Or(Not(A), B), C))
(B & C) | (C & ~A)
"""
return _distribute((expr, Or, And))
def distribute_xor_over_and(expr):
"""
Given a sentence ``expr`` consisting of conjunction and
exclusive disjunctions of literals, return an
equivalent exclusive disjunction.
Note that the output is NOT simplified.
Examples
========
>>> from sympy.logic.boolalg import distribute_xor_over_and, And, Xor, Not
>>> from sympy.abc import A, B, C
>>> distribute_xor_over_and(And(Xor(Not(A), B), C))
(B & C) ^ (C & ~A)
"""
return _distribute((expr, Xor, And))
def _distribute(info):
"""
Distributes ``info[1]`` over ``info[2]`` with respect to ``info[0]``.
"""
if isinstance(info[0], info[2]):
for arg in info[0].args:
if isinstance(arg, info[1]):
conj = arg
break
else:
return info[0]
rest = info[2](*[a for a in info[0].args if a is not conj])
return info[1](*list(map(_distribute,
[(info[2](c, rest), info[1], info[2])
for c in conj.args])), remove_true=False)
elif isinstance(info[0], info[1]):
return info[1](*list(map(_distribute,
[(x, info[1], info[2])
for x in info[0].args])),
remove_true=False)
else:
return info[0]
def to_anf(expr, deep=True):
r"""
Converts expr to Algebraic Normal Form (ANF).
ANF is a canonical normal form, which means that two
equivalent formulas will convert to the same ANF.
A logical expression is in ANF if it has the form
.. math:: 1 \oplus a \oplus b \oplus ab \oplus abc
i.e. it can be:
- purely true,
- purely false,
- conjunction of variables,
- exclusive disjunction.
The exclusive disjunction can only contain true, variables
or conjunction of variables. No negations are permitted.
If ``deep`` is ``False``, arguments of the boolean
expression are considered variables, i.e. only the
top-level expression is converted to ANF.
Examples
========
>>> from sympy.logic.boolalg import And, Or, Not, Implies, Equivalent
>>> from sympy.logic.boolalg import to_anf
>>> from sympy.abc import A, B, C
>>> to_anf(Not(A))
A ^ True
>>> to_anf(And(Or(A, B), Not(C)))
A ^ B ^ (A & B) ^ (A & C) ^ (B & C) ^ (A & B & C)
>>> to_anf(Implies(Not(A), Equivalent(B, C)), deep=False)
True ^ ~A ^ (~A & (Equivalent(B, C)))
"""
expr = sympify(expr)
if is_anf(expr):
return expr
return expr.to_anf(deep=deep)
def to_nnf(expr, simplify=True):
"""
Converts ``expr`` to Negation Normal Form (NNF).
A logical expression is in NNF if it
contains only And, Or and Not, and Not is applied only to literals.
If ``simplify`` is ``True``, the result contains no redundant clauses.
Examples
========
>>> from sympy.abc import A, B, C, D
>>> from sympy.logic.boolalg import Not, Equivalent, to_nnf
>>> to_nnf(Not((~A & ~B) | (C & D)))
(A | B) & (~C | ~D)
>>> to_nnf(Equivalent(A >> B, B >> A))
(A | ~B | (A & ~B)) & (B | ~A | (B & ~A))
"""
if is_nnf(expr, simplify):
return expr
return expr.to_nnf(simplify)
def to_cnf(expr, simplify=False, force=False):
"""
Convert a propositional logical sentence ``expr`` to conjunctive normal
form: ``((A | ~B | ...) & (B | C | ...) & ...)``.
If ``simplify`` is ``True``, ``expr`` is evaluated to its simplest CNF
form using the Quine-McCluskey algorithm; this may take a long
time if there are more than 8 variables and requires that the
``force`` flag be set to ``True`` (default is ``False``).
Examples
========
>>> from sympy.logic.boolalg import to_cnf
>>> from sympy.abc import A, B, D
>>> to_cnf(~(A | B) | D)
(D | ~A) & (D | ~B)
>>> to_cnf((A | B) & (A | ~A), True)
A | B
"""
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
if simplify:
if not force and len(_find_predicates(expr)) > 8:
raise ValueError(filldedent('''
To simplify a logical expression with more
than 8 variables may take a long time and requires
the use of `force=True`.'''))
return simplify_logic(expr, 'cnf', True, force=force)
# Don't convert unless we have to
if is_cnf(expr):
return expr
expr = eliminate_implications(expr)
res = distribute_and_over_or(expr)
return res
def to_dnf(expr, simplify=False, force=False):
"""
Convert a propositional logical sentence ``expr`` to disjunctive normal
form: ``((A & ~B & ...) | (B & C & ...) | ...)``.
If ``simplify`` is ``True``, ``expr`` is evaluated to its simplest DNF form using
the Quine-McCluskey algorithm; this may take a long
time if there are more than 8 variables and requires that the
``force`` flag be set to ``True`` (default is ``False``).
Examples
========
>>> from sympy.logic.boolalg import to_dnf
>>> from sympy.abc import A, B, C
>>> to_dnf(B & (A | C))
(A & B) | (B & C)
>>> to_dnf((A & B) | (A & ~B) | (B & C) | (~B & C), True)
A | C
"""
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
if simplify:
if not force and len(_find_predicates(expr)) > 8:
raise ValueError(filldedent('''
To simplify a logical expression with more
than 8 variables may take a long time and requires
the use of `force=True`.'''))
return simplify_logic(expr, 'dnf', True, force=force)
# Don't convert unless we have to
if is_dnf(expr):
return expr
expr = eliminate_implications(expr)
return distribute_or_over_and(expr)
def is_anf(expr):
r"""
Checks if ``expr`` is in Algebraic Normal Form (ANF).
A logical expression is in ANF if it has the form
.. math:: 1 \oplus a \oplus b \oplus ab \oplus abc
i.e. it is purely true, purely false, conjunction of
variables or exclusive disjunction. The exclusive
disjunction can only contain true, variables or
conjunction of variables. No negations are permitted.
Examples
========
>>> from sympy.logic.boolalg import And, Not, Xor, true, is_anf
>>> from sympy.abc import A, B, C
>>> is_anf(true)
True
>>> is_anf(A)
True
>>> is_anf(And(A, B, C))
True
>>> is_anf(Xor(A, Not(B)))
False
"""
expr = sympify(expr)
if is_literal(expr) and not isinstance(expr, Not):
return True
if isinstance(expr, And):
for arg in expr.args:
if not arg.is_Symbol:
return False
return True
elif isinstance(expr, Xor):
for arg in expr.args:
if isinstance(arg, And):
for a in arg.args:
if not a.is_Symbol:
return False
elif is_literal(arg):
if isinstance(arg, Not):
return False
else:
return False
return True
else:
return False
def is_nnf(expr, simplified=True):
"""
Checks if ``expr`` is in Negation Normal Form (NNF).
A logical expression is in NNF if it
contains only And, Or and Not, and Not is applied only to literals.
If ``simplified`` is ``True``, checks if result contains no redundant clauses.
Examples
========
>>> from sympy.abc import A, B, C
>>> from sympy.logic.boolalg import Not, is_nnf
>>> is_nnf(A & B | ~C)
True
>>> is_nnf((A | ~A) & (B | C))
False
>>> is_nnf((A | ~A) & (B | C), False)
True
>>> is_nnf(Not(A & B) | C)
False
>>> is_nnf((A >> B) & (B >> A))
False
"""
expr = sympify(expr)
if is_literal(expr):
return True
stack = [expr]
while stack:
expr = stack.pop()
if expr.func in (And, Or):
if simplified:
args = expr.args
for arg in args:
if Not(arg) in args:
return False
stack.extend(expr.args)
elif not is_literal(expr):
return False
return True
def is_cnf(expr):
"""
Test whether or not an expression is in conjunctive normal form.
Examples
========
>>> from sympy.logic.boolalg import is_cnf
>>> from sympy.abc import A, B, C
>>> is_cnf(A | B | C)
True
>>> is_cnf(A & B & C)
True
>>> is_cnf((A & B) | C)
False
"""
return _is_form(expr, And, Or)
def is_dnf(expr):
"""
Test whether or not an expression is in disjunctive normal form.
Examples
========
>>> from sympy.logic.boolalg import is_dnf
>>> from sympy.abc import A, B, C
>>> is_dnf(A | B | C)
True
>>> is_dnf(A & B & C)
True
>>> is_dnf((A & B) | C)
True
>>> is_dnf(A & (B | C))
False
"""
return _is_form(expr, Or, And)
def _is_form(expr, function1, function2):
"""
Test whether or not an expression is of the required form.
"""
expr = sympify(expr)
vals = function1.make_args(expr) if isinstance(expr, function1) else [expr]
for lit in vals:
if isinstance(lit, function2):
vals2 = function2.make_args(lit) if isinstance(lit, function2) else [lit]
for l in vals2:
if is_literal(l) is False:
return False
elif is_literal(lit) is False:
return False
return True
def eliminate_implications(expr):
"""
Change ``Implies`` and ``Equivalent`` into ``And``, ``Or``, and ``Not``.
That is, return an expression that is equivalent to ``expr``, but has only
``&``, ``|``, and ``~`` as logical
operators.
Examples
========
>>> from sympy.logic.boolalg import Implies, Equivalent, \
eliminate_implications
>>> from sympy.abc import A, B, C
>>> eliminate_implications(Implies(A, B))
B | ~A
>>> eliminate_implications(Equivalent(A, B))
(A | ~B) & (B | ~A)
>>> eliminate_implications(Equivalent(A, B, C))
(A | ~C) & (B | ~A) & (C | ~B)
"""
return to_nnf(expr, simplify=False)
def is_literal(expr):
"""
Returns True if expr is a literal, else False.
Examples
========
>>> from sympy import Or, Q
>>> from sympy.abc import A, B
>>> from sympy.logic.boolalg import is_literal
>>> is_literal(A)
True
>>> is_literal(~A)
True
>>> is_literal(Q.zero(A))
True
>>> is_literal(A + B)
True
>>> is_literal(Or(A, B))
False
"""
from sympy.assumptions import AppliedPredicate
if isinstance(expr, Not):
return is_literal(expr.args[0])
elif expr in (True, False) or isinstance(expr, AppliedPredicate) or expr.is_Atom:
return True
elif not isinstance(expr, BooleanFunction) and all(
(isinstance(expr, AppliedPredicate) or a.is_Atom) for a in expr.args):
return True
return False
def to_int_repr(clauses, symbols):
"""
Takes clauses in CNF format and puts them into an integer representation.
Examples
========
>>> from sympy.logic.boolalg import to_int_repr
>>> from sympy.abc import x, y
>>> to_int_repr([x | y, y], [x, y]) == [{1, 2}, {2}]
True
"""
# Convert the symbol list into a dict
symbols = dict(list(zip(symbols, list(range(1, len(symbols) + 1)))))
def append_symbol(arg, symbols):
if isinstance(arg, Not):
return -symbols[arg.args[0]]
else:
return symbols[arg]
return [{append_symbol(arg, symbols) for arg in Or.make_args(c)}
for c in clauses]
def term_to_integer(term):
"""
Return an integer corresponding to the base-2 digits given by ``term``.
Parameters
==========
term : a string or list of ones and zeros
Examples
========
>>> from sympy.logic.boolalg import term_to_integer
>>> term_to_integer([1, 0, 0])
4
>>> term_to_integer('100')
4
"""
return int(''.join(list(map(str, list(term)))), 2)
integer_to_term = ibin # XXX could delete?
def truth_table(expr, variables, input=True):
"""
Return a generator of all possible configurations of the input variables,
and the result of the boolean expression for those values.
Parameters
==========
expr : Boolean expression
variables : list of variables
input : bool (default ``True``)
Indicates whether to return the input combinations.
Examples
========
>>> from sympy.logic.boolalg import truth_table
>>> from sympy.abc import x,y
>>> table = truth_table(x >> y, [x, y])
>>> for t in table:
... print('{0} -> {1}'.format(*t))
[0, 0] -> True
[0, 1] -> True
[1, 0] -> False
[1, 1] -> True
>>> table = truth_table(x | y, [x, y])
>>> list(table)
[([0, 0], False), ([0, 1], True), ([1, 0], True), ([1, 1], True)]
If ``input`` is ``False``, ``truth_table`` returns only a list of truth values.
In this case, the corresponding input values of variables can be
deduced from the index of a given output.
>>> from sympy.utilities.iterables import ibin
>>> vars = [y, x]
>>> values = truth_table(x >> y, vars, input=False)
>>> values = list(values)
>>> values
[True, False, True, True]
>>> for i, value in enumerate(values):
... print('{0} -> {1}'.format(list(zip(
... vars, ibin(i, len(vars)))), value))
[(y, 0), (x, 0)] -> True
[(y, 0), (x, 1)] -> False
[(y, 1), (x, 0)] -> True
[(y, 1), (x, 1)] -> True
"""
variables = [sympify(v) for v in variables]
expr = sympify(expr)
if not isinstance(expr, BooleanFunction) and not is_literal(expr):
return
table = product((0, 1), repeat=len(variables))
for term in table:
term = list(term)
value = expr.xreplace(dict(zip(variables, term)))
if input:
yield term, value
else:
yield value
def _check_pair(minterm1, minterm2):
"""
Checks if a pair of minterms differs by only one bit. If yes, returns
index, else returns `-1`.
"""
# Early termination seems to be faster than list comprehension,
# at least for large examples.
index = -1
for x, i in enumerate(minterm1): # zip(minterm1, minterm2) is slower
if i != minterm2[x]:
if index == -1:
index = x
else:
return -1
return index
def _convert_to_varsSOP(minterm, variables):
"""
Converts a term in the expansion of a function from binary to its
variable form (for SOP).
"""
temp = [variables[n] if val == 1 else Not(variables[n])
for n, val in enumerate(minterm) if val != 3]
return And(*temp)
def _convert_to_varsPOS(maxterm, variables):
"""
Converts a term in the expansion of a function from binary to its
variable form (for POS).
"""
temp = [variables[n] if val == 0 else Not(variables[n])
for n, val in enumerate(maxterm) if val != 3]
return Or(*temp)
def _convert_to_varsANF(term, variables):
"""
Converts a term in the expansion of a function from binary to its
variable form (for ANF).
Parameters
==========
term : list of 1's and 0's (complementation pattern)
variables : list of variables
"""
temp = [variables[n] for n, t in enumerate(term) if t == 1]
if not temp:
return true
return And(*temp)
def _get_odd_parity_terms(n):
"""
Returns a list of lists, with all possible combinations of n zeros and ones
with an odd number of ones.
"""
return [e for e in [ibin(i, n) for i in range(2**n)] if sum(e) % 2 == 1]
def _get_even_parity_terms(n):
"""
Returns a list of lists, with all possible combinations of n zeros and ones
with an even number of ones.
"""
return [e for e in [ibin(i, n) for i in range(2**n)] if sum(e) % 2 == 0]
def _simplified_pairs(terms):
"""
Reduces a set of minterms, if possible, to a simplified set of minterms
with one less variable in the terms using QM method.
"""
if not terms:
return []
simplified_terms = []
todo = list(range(len(terms)))
# Count number of ones as _check_pair can only potentially match if there
# is at most a difference of a single one
termdict = defaultdict(list)
for n, term in enumerate(terms):
ones = sum([1 for t in term if t == 1])
termdict[ones].append(n)
variables = len(terms[0])
for k in range(variables):
for i in termdict[k]:
for j in termdict[k+1]:
index = _check_pair(terms[i], terms[j])
if index != -1:
# Mark terms handled
todo[i] = todo[j] = None
# Copy old term
newterm = terms[i][:]
# Set differing position to don't care
newterm[index] = 3
# Add if not already there
if newterm not in simplified_terms:
simplified_terms.append(newterm)
if simplified_terms:
# Further simplifications only among the new terms
simplified_terms = _simplified_pairs(simplified_terms)
# Add remaining, non-simplified, terms
simplified_terms.extend([terms[i] for i in todo if i is not None])
return simplified_terms
def _rem_redundancy(l1, terms):
"""
After the truth table has been sufficiently simplified, use the prime
implicant table method to recognize and eliminate redundant pairs,
and return the essential arguments.
"""
if not terms:
return []
nterms = len(terms)
nl1 = len(l1)
# Create dominating matrix
dommatrix = [[0]*nl1 for n in range(nterms)]
colcount = [0]*nl1
rowcount = [0]*nterms
for primei, prime in enumerate(l1):
for termi, term in enumerate(terms):
# Check prime implicant covering term
if all(t == 3 or t == mt for t, mt in zip(prime, term)):
dommatrix[termi][primei] = 1
colcount[primei] += 1
rowcount[termi] += 1
# Keep track if anything changed
anythingchanged = True
# Then, go again
while anythingchanged:
anythingchanged = False
for rowi in range(nterms):
# Still non-dominated?
if rowcount[rowi]:
row = dommatrix[rowi]
for row2i in range(nterms):
# Still non-dominated?
if rowi != row2i and rowcount[rowi] and (rowcount[rowi] <= rowcount[row2i]):
row2 = dommatrix[row2i]
if all(row2[n] >= row[n] for n in range(nl1)):
# row2 dominating row, remove row2
rowcount[row2i] = 0
anythingchanged = True
for primei, prime in enumerate(row2):
if prime:
# Make corresponding entry 0
dommatrix[row2i][primei] = 0
colcount[primei] -= 1
colcache = dict()
for coli in range(nl1):
# Still non-dominated?
if colcount[coli]:
if coli in colcache:
col = colcache[coli]
else:
col = [dommatrix[i][coli] for i in range(nterms)]
colcache[coli] = col
for col2i in range(nl1):
# Still non-dominated?
if coli != col2i and colcount[col2i] and (colcount[coli] >= colcount[col2i]):
if col2i in colcache:
col2 = colcache[col2i]
else:
col2 = [dommatrix[i][col2i] for i in range(nterms)]
colcache[col2i] = col2
if all(col[n] >= col2[n] for n in range(nterms)):
# col dominating col2, remove col2
colcount[col2i] = 0
anythingchanged = True
for termi, term in enumerate(col2):
if term and dommatrix[termi][col2i]:
# Make corresponding entry 0
dommatrix[termi][col2i] = 0
rowcount[termi] -= 1
if not anythingchanged:
# Heuristically select the prime implicant covering most terms
maxterms = 0
bestcolidx = -1
for coli in range(nl1):
s = colcount[coli]
if s > maxterms:
bestcolidx = coli
maxterms = s
# In case we found a prime implicant covering at least two terms
if bestcolidx != -1 and maxterms > 1:
for primei, prime in enumerate(l1):
if primei != bestcolidx:
for termi, term in enumerate(colcache[bestcolidx]):
if term and dommatrix[termi][primei]:
# Make corresponding entry 0
dommatrix[termi][primei] = 0
anythingchanged = True
rowcount[termi] -= 1
colcount[primei] -= 1
return [l1[i] for i in range(nl1) if colcount[i]]
def _input_to_binlist(inputlist, variables):
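# Normalize a heterogeneous list of terms (integers, dicts or explicit
# bit lists) into lists of 0/1 values, one bit per variable; dicts that
# specify only some of the variables are expanded over every combination
# of the unspecified ones.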
binlist = []
bits = len(variables)
for val in inputlist:
if isinstance(val, int):
binlist.append(ibin(val, bits))
elif isinstance(val, dict):
nonspecvars = list(variables)
for key in val.keys():
nonspecvars.remove(key)
for t in product((0, 1), repeat=len(nonspecvars)):
d = dict(zip(nonspecvars, t))
d.update(val)
binlist.append([d[v] for v in variables])
elif isinstance(val, (list, tuple)):
if len(val) != bits:
raise ValueError("Each term must contain {bits} bits as there are"
"\n{bits} variables (or be an integer)."
"".format(bits=bits))
binlist.append(list(val))
else:
raise TypeError("A term list can only contain lists,"
" ints or dicts.")
return binlist
def SOPform(variables, minterms, dontcares=None):
"""
The SOPform function uses simplified_pairs and a redundant group-
eliminating algorithm to convert the list of all input combinations that
generate '1' (the minterms) into the smallest Sum of Products form.
The variables must be given as the first argument.
Return a logical Or function (i.e., the "sum of products" or "SOP"
form) that gives the desired outcome. If there are inputs that can
be ignored, pass them as a list, too.
The result will be one of the (perhaps many) functions that satisfy
the conditions.
Examples
========
>>> from sympy.logic import SOPform
>>> from sympy import symbols
>>> w, x, y, z = symbols('w x y z')
>>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1],
... [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1]]
>>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]]
>>> SOPform([w, x, y, z], minterms, dontcares)
(y & z) | (~w & ~x)
The terms can also be represented as integers:
>>> minterms = [1, 3, 7, 11, 15]
>>> dontcares = [0, 2, 5]
>>> SOPform([w, x, y, z], minterms, dontcares)
(y & z) | (~w & ~x)
They can also be specified using dicts, which do not have to be fully
specified:
>>> minterms = [{w: 0, x: 1}, {y: 1, z: 1, x: 0}]
>>> SOPform([w, x, y, z], minterms)
(x & ~w) | (y & z & ~x)
Or a combination:
>>> minterms = [4, 7, 11, [1, 1, 1, 1]]
>>> dontcares = [{w : 0, x : 0, y: 0}, 5]
>>> SOPform([w, x, y, z], minterms, dontcares)
(w & y & z) | (~w & ~y) | (x & z & ~w)
References
==========
.. [1] https://en.wikipedia.org/wiki/Quine-McCluskey_algorithm
"""
if minterms == []:
return false
variables = tuple(map(sympify, variables))
minterms = _input_to_binlist(minterms, variables)
dontcares = _input_to_binlist((dontcares or []), variables)
for d in dontcares:
if d in minterms:
raise ValueError('%s in minterms is also in dontcares' % d)
new = _simplified_pairs(minterms + dontcares)
essential = _rem_redundancy(new, minterms)
return Or(*[_convert_to_varsSOP(x, variables) for x in essential])
def POSform(variables, minterms, dontcares=None):
"""
The POSform function uses simplified_pairs and a redundant-group
eliminating algorithm to convert the list of all input combinations
that generate '1' (the minterms) into the smallest Product of Sums form.
The variables must be given as the first argument.
Return a logical And function (i.e., the "product of sums" or "POS"
form) that gives the desired outcome. If there are inputs that can
be ignored, pass them as a list, too.
The result will be one of the (perhaps many) functions that satisfy
the conditions.
Examples
========
>>> from sympy.logic import POSform
>>> from sympy import symbols
>>> w, x, y, z = symbols('w x y z')
>>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1],
... [1, 0, 1, 1], [1, 1, 1, 1]]
>>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]]
>>> POSform([w, x, y, z], minterms, dontcares)
z & (y | ~w)
The terms can also be represented as integers:
>>> minterms = [1, 3, 7, 11, 15]
>>> dontcares = [0, 2, 5]
>>> POSform([w, x, y, z], minterms, dontcares)
z & (y | ~w)
They can also be specified using dicts, which do not have to be fully
specified:
>>> minterms = [{w: 0, x: 1}, {y: 1, z: 1, x: 0}]
>>> POSform([w, x, y, z], minterms)
(x | y) & (x | z) & (~w | ~x)
Or a combination:
>>> minterms = [4, 7, 11, [1, 1, 1, 1]]
>>> dontcares = [{w : 0, x : 0, y: 0}, 5]
>>> POSform([w, x, y, z], minterms, dontcares)
(w | x) & (y | ~w) & (z | ~y)
References
==========
.. [1] https://en.wikipedia.org/wiki/Quine-McCluskey_algorithm
"""
if minterms == []:
return false
variables = tuple(map(sympify, variables))
minterms = _input_to_binlist(minterms, variables)
dontcares = _input_to_binlist((dontcares or []), variables)
for d in dontcares:
if d in minterms:
raise ValueError('%s in minterms is also in dontcares' % d)
maxterms = []
for t in product((0, 1), repeat=len(variables)):
t = list(t)
if (t not in minterms) and (t not in dontcares):
maxterms.append(t)
new = _simplified_pairs(maxterms + dontcares)
essential = _rem_redundancy(new, maxterms)
return And(*[_convert_to_varsPOS(x, variables) for x in essential])
def ANFform(variables, truthvalues):
"""
The ANFform function converts the list of truth values to
Algebraic Normal Form (ANF).
The variables must be given as the first argument.
Return True, False, logical And function (i.e., the
"Zhegalkin monomial") or logical Xor function (i.e.,
the "Zhegalkin polynomial"). When True and False
are represented by 1 and 0, respectively, then
And is multiplication and Xor is addition.
Formally a "Zhegalkin monomial" is the product (logical
And) of a finite set of distinct variables, including
the empty set whose product is denoted 1 (True).
A "Zhegalkin polynomial" is the sum (logical Xor) of a
set of Zhegalkin monomials, with the empty set denoted
by 0 (False).
Parameters
==========
variables : list of variables
truthvalues : list of 1's and 0's (result column of truth table)
Examples
========
>>> from sympy.logic.boolalg import ANFform
>>> from sympy.abc import x, y
>>> ANFform([x], [1, 0])
x ^ True
>>> ANFform([x, y], [0, 1, 1, 1])
x ^ y ^ (x & y)
References
==========
.. [1] https://en.wikipedia.org/wiki/Zhegalkin_polynomial
"""
n_vars = len(variables)
n_values = len(truthvalues)
if n_values != 2 ** n_vars:
raise ValueError("The number of truth values must be equal to 2^%d, "
"got %d" % (n_vars, n_values))
variables = tuple(map(sympify, variables))
coeffs = anf_coeffs(truthvalues)
terms = []
for i, t in enumerate(product((0, 1), repeat=n_vars)):
if coeffs[i] == 1:
terms.append(t)
return Xor(*[_convert_to_varsANF(x, variables) for x in terms],
remove_true=False)
def anf_coeffs(truthvalues):
"""
Convert a list of truth values of some boolean expression
to the list of coefficients of the polynomial mod 2 (exclusive
disjunction) representing the boolean expression in ANF
(i.e., the "Zhegalkin polynomial").
There are `2^n` possible Zhegalkin monomials in `n` variables, since
each monomial is fully specified by the presence or absence of
each variable.
We can enumerate all the monomials. For example, a boolean
function with four variables ``(a, b, c, d)`` can contain
up to `2^4 = 16` monomials. The 13-th monomial is the
product ``a & b & d``, because 13 in binary is 1, 1, 0, 1.
A given monomial's presence or absence in a polynomial corresponds
to that monomial's coefficient being 1 or 0 respectively.
Examples
========
>>> from sympy.logic.boolalg import anf_coeffs, bool_monomial, Xor
>>> from sympy.abc import a, b, c
>>> truthvalues = [0, 1, 1, 0, 0, 1, 0, 1]
>>> coeffs = anf_coeffs(truthvalues)
>>> coeffs
[0, 1, 1, 0, 0, 0, 1, 0]
>>> polynomial = Xor(*[
... bool_monomial(k, [a, b, c])
... for k, coeff in enumerate(coeffs) if coeff == 1
... ])
>>> polynomial
b ^ c ^ (a & b)
"""
s = '{:b}'.format(len(truthvalues))
n = len(s) - 1
if len(truthvalues) != 2**n:
raise ValueError("The number of truth values must be a power of two, "
"got %d" % len(truthvalues))
coeffs = [[v] for v in truthvalues]
for i in range(n):
tmp = []
for j in range(2 ** (n-i-1)):
tmp.append(coeffs[2*j] +
list(map(lambda x, y: x^y, coeffs[2*j], coeffs[2*j+1])))
coeffs = tmp
return coeffs[0]
def bool_minterm(k, variables):
"""
Return the k-th minterm.
Minterms are numbered by a binary encoding of the complementation
pattern of the variables. This convention assigns the value 1 to
the direct form and 0 to the complemented form.
Parameters
==========
k : int or list of 1's and 0's (complementation pattern)
variables : list of variables
Examples
========
>>> from sympy.logic.boolalg import bool_minterm
>>> from sympy.abc import x, y, z
>>> bool_minterm([1, 0, 1], [x, y, z])
x & z & ~y
>>> bool_minterm(6, [x, y, z])
x & y & ~z
References
==========
.. [1] https://en.wikipedia.org/wiki/Canonical_normal_form#Indexing_minterms
"""
if isinstance(k, int):
k = ibin(k, len(variables))
variables = tuple(map(sympify, variables))
return _convert_to_varsSOP(k, variables)
def bool_maxterm(k, variables):
"""
Return the k-th maxterm.
Each maxterm is assigned an index based on the opposite
conventional binary encoding used for minterms. The maxterm
convention assigns the value 0 to the direct form and 1 to
the complemented form.
Parameters
==========
k : int or list of 1's and 0's (complementation pattern)
variables : list of variables
Examples
========
>>> from sympy.logic.boolalg import bool_maxterm
>>> from sympy.abc import x, y, z
>>> bool_maxterm([1, 0, 1], [x, y, z])
y | ~x | ~z
>>> bool_maxterm(6, [x, y, z])
z | ~x | ~y
References
==========
.. [1] https://en.wikipedia.org/wiki/Canonical_normal_form#Indexing_maxterms
"""
if isinstance(k, int):
k = ibin(k, len(variables))
variables = tuple(map(sympify, variables))
return _convert_to_varsPOS(k, variables)
def bool_monomial(k, variables):
"""
Return the k-th monomial.
Monomials are numbered by a binary encoding of the presence and
absences of the variables. This convention assigns the value
1 to the presence of variable and 0 to the absence of variable.
Each boolean function can be uniquely represented by a
Zhegalkin Polynomial (Algebraic Normal Form). The Zhegalkin
Polynomial of the boolean function with `n` variables can contain
    up to `2^n` monomials. We can enumerate all the monomials.
    Each monomial is fully specified by the presence or absence
    of each variable.
    For example, a boolean function with four variables ``(a, b, c, d)``
can contain up to `2^4 = 16` monomials. The 13-th monomial is the
product ``a & b & d``, because 13 in binary is 1, 1, 0, 1.
Parameters
==========
k : int or list of 1's and 0's
variables : list of variables
Examples
========
>>> from sympy.logic.boolalg import bool_monomial
>>> from sympy.abc import x, y, z
>>> bool_monomial([1, 0, 1], [x, y, z])
x & z
>>> bool_monomial(6, [x, y, z])
x & y
"""
if isinstance(k, int):
k = ibin(k, len(variables))
variables = tuple(map(sympify, variables))
return _convert_to_varsANF(k, variables)
def _find_predicates(expr):
"""Helper to find logical predicates in BooleanFunctions.
A logical predicate is defined here as anything within a BooleanFunction
that is not a BooleanFunction itself.
"""
if not isinstance(expr, BooleanFunction):
return {expr}
return set().union(*(map(_find_predicates, expr.args)))
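# Illustrative sketch (comments only, not part of the public API): the helper
# collects the non-BooleanFunction leaves of a boolean tree, e.g. symbols and
# relationals, so for instance
#
#     >>> from sympy.abc import x, y, z
#     >>> _find_predicates(Or(And(x, y), z)) == {x, y, z}
#     True
#     >>> _find_predicates(And(x > 1, y)) == {x > 1, y}
#     True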
def simplify_logic(expr, form=None, deep=True, force=False):
"""
This function simplifies a boolean function to its simplified version
in SOP or POS form. The return type is an Or or And object in SymPy.
Parameters
==========
expr : Boolean expression
form : string (``'cnf'`` or ``'dnf'``) or ``None`` (default).
If ``'cnf'`` or ``'dnf'``, the simplest expression in the corresponding
normal form is returned; if ``None``, the answer is returned
according to the form with fewest args (in CNF by default).
deep : bool (default ``True``)
Indicates whether to recursively simplify any
non-boolean functions contained within the input.
force : bool (default ``False``)
As the simplifications require exponential time in the number
of variables, there is by default a limit on expressions with
8 variables. When the expression has more than 8 variables
only symbolical simplification (controlled by ``deep``) is
made. By setting ``force`` to ``True``, this limit is removed. Be
aware that this can lead to very long simplification times.
Examples
========
>>> from sympy.logic import simplify_logic
>>> from sympy.abc import x, y, z
>>> from sympy import S
>>> b = (~x & ~y & ~z) | ( ~x & ~y & z)
>>> simplify_logic(b)
~x & ~y
>>> S(b)
(z & ~x & ~y) | (~x & ~y & ~z)
>>> simplify_logic(_)
~x & ~y
"""
if form not in (None, 'cnf', 'dnf'):
raise ValueError("form can be cnf or dnf only")
expr = sympify(expr)
# check for quick exit if form is given: right form and all args are
# literal and do not involve Not
if form:
form_ok = False
if form == 'cnf':
form_ok = is_cnf(expr)
elif form == 'dnf':
form_ok = is_dnf(expr)
if form_ok and all(is_literal(a)
for a in expr.args):
return expr
if deep:
variables = _find_predicates(expr)
from sympy.simplify.simplify import simplify
s = tuple(map(simplify, variables))
expr = expr.xreplace(dict(zip(variables, s)))
if not isinstance(expr, BooleanFunction):
return expr
# get variables in case not deep or after doing
# deep simplification since they may have changed
variables = _find_predicates(expr)
if not force and len(variables) > 8:
return expr
# group into constants and variable values
c, v = sift(ordered(variables), lambda x: x in (True, False), binary=True)
variables = c + v
truthtable = []
# standardize constants to be 1 or 0 in keeping with truthtable
c = [1 if i == True else 0 for i in c]
for t in product((0, 1), repeat=len(v)):
if expr.xreplace(dict(zip(v, t))) == True:
truthtable.append(c + list(t))
big = len(truthtable) >= (2 ** (len(variables) - 1))
if form == 'dnf' or form is None and big:
return SOPform(variables, truthtable)
return POSform(variables, truthtable)
def _finger(eq):
"""
Assign a 5-item fingerprint to each symbol in the equation:
[
# of times it appeared as a Symbol;
# of times it appeared as a Not(symbol);
# of times it appeared as a Symbol in an And or Or;
# of times it appeared as a Not(Symbol) in an And or Or;
a sorted tuple of tuples, (i, j, k), where i is the number of arguments
in an And or Or with which it appeared as a Symbol, and j is
the number of arguments that were Not(Symbol); k is the number
of times that (i, j) was seen.
]
Examples
========
>>> from sympy.logic.boolalg import _finger as finger
>>> from sympy import And, Or, Not, Xor, to_cnf, symbols
>>> from sympy.abc import a, b, x, y
>>> eq = Or(And(Not(y), a), And(Not(y), b), And(x, y))
>>> dict(finger(eq))
{(0, 0, 1, 0, ((2, 0, 1),)): [x],
(0, 0, 1, 0, ((2, 1, 1),)): [a, b],
(0, 0, 1, 2, ((2, 0, 1),)): [y]}
>>> dict(finger(x & ~y))
{(0, 1, 0, 0, ()): [y], (1, 0, 0, 0, ()): [x]}
In the following, the (5, 2, 6) means that there were 6 Or
functions in which a symbol appeared as itself amongst 5 arguments in
which there were also 2 negated symbols, e.g. ``(a0 | a1 | a2 | ~a3 | ~a4)``
is counted once for a0, a1 and a2.
>>> dict(finger(to_cnf(Xor(*symbols('a:5')))))
{(0, 0, 8, 8, ((5, 0, 1), (5, 2, 6), (5, 4, 1))): [a0, a1, a2, a3, a4]}
The equation must not have more than one level of nesting:
>>> dict(finger(And(Or(x, y), y)))
{(0, 0, 1, 0, ((2, 0, 1),)): [x], (1, 0, 1, 0, ((2, 0, 1),)): [y]}
>>> dict(finger(And(Or(x, And(a, x)), y)))
Traceback (most recent call last):
...
NotImplementedError: unexpected level of nesting
    In the first example above, y and x have unique fingerprints, but
    a and b do not.
"""
f = eq.free_symbols
d = dict(list(zip(f, [[0]*4 + [defaultdict(int)] for fi in f])))
for a in eq.args:
if a.is_Symbol:
d[a][0] += 1
elif a.is_Not:
d[a.args[0]][1] += 1
else:
o = len(a.args), sum(isinstance(ai, Not) for ai in a.args)
for ai in a.args:
if ai.is_Symbol:
d[ai][2] += 1
d[ai][-1][o] += 1
elif ai.is_Not:
d[ai.args[0]][3] += 1
else:
raise NotImplementedError('unexpected level of nesting')
inv = defaultdict(list)
for k, v in ordered(iter(d.items())):
v[-1] = tuple(sorted([i + (j,) for i, j in v[-1].items()]))
inv[tuple(v)].append(k)
return inv
def bool_map(bool1, bool2):
"""
Return the simplified version of *bool1*, and the mapping of variables
that makes the two expressions *bool1* and *bool2* represent the same
logical behaviour for some correspondence between the variables
of each.
    If more than one mapping of this sort exists, one of them
    is returned.
For example, ``And(x, y)`` is logically equivalent to ``And(a, b)`` for
the mapping ``{x: a, y: b}`` or ``{x: b, y: a}``.
If no such mapping exists, return ``False``.
Examples
========
>>> from sympy import SOPform, bool_map, Or, And, Not, Xor
>>> from sympy.abc import w, x, y, z, a, b, c, d
>>> function1 = SOPform([x, z, y],[[1, 0, 1], [0, 0, 1]])
>>> function2 = SOPform([a, b, c],[[1, 0, 1], [1, 0, 0]])
>>> bool_map(function1, function2)
(y & ~z, {y: a, z: b})
The results are not necessarily unique, but they are canonical. Here,
``(w, z)`` could be ``(a, d)`` or ``(d, a)``:
>>> eq = Or(And(Not(y), w), And(Not(y), z), And(x, y))
>>> eq2 = Or(And(Not(c), a), And(Not(c), d), And(b, c))
>>> bool_map(eq, eq2)
((x & y) | (w & ~y) | (z & ~y), {w: a, x: b, y: c, z: d})
>>> eq = And(Xor(a, b), c, And(c,d))
>>> bool_map(eq, eq.subs(c, x))
(c & d & (a | b) & (~a | ~b), {a: a, b: b, c: d, d: x})
"""
def match(function1, function2):
"""Return the mapping that equates variables between two
simplified boolean expressions if possible.
By "simplified" we mean that a function has been denested
and is either an And (or an Or) whose arguments are either
symbols (x), negated symbols (Not(x)), or Or (or an And) whose
arguments are only symbols or negated symbols. For example,
``And(x, Not(y), Or(w, Not(z)))``.
Basic.match is not robust enough (see issue 4835) so this is
a workaround that is valid for simplified boolean expressions
"""
# do some quick checks
if function1.__class__ != function2.__class__:
return None # maybe simplification makes them the same?
if len(function1.args) != len(function2.args):
return None # maybe simplification makes them the same?
if function1.is_Symbol:
return {function1: function2}
# get the fingerprint dictionaries
f1 = _finger(function1)
f2 = _finger(function2)
# more quick checks
if len(f1) != len(f2):
return False
# assemble the match dictionary if possible
matchdict = {}
for k in f1.keys():
if k not in f2:
return False
if len(f1[k]) != len(f2[k]):
return False
for i, x in enumerate(f1[k]):
matchdict[x] = f2[k][i]
return matchdict
a = simplify_logic(bool1)
b = simplify_logic(bool2)
m = match(a, b)
if m:
return a, m
return m
def simplify_patterns_and():
from sympy.functions.elementary.miscellaneous import Min, Max
from sympy.core import Wild
from sympy.core.relational import Eq, Ne, Ge, Gt, Le, Lt
a = Wild('a')
b = Wild('b')
c = Wild('c')
# With a better canonical fewer results are required
_matchers_and = ((And(Eq(a, b), Ge(a, b)), Eq(a, b)),
(And(Eq(a, b), Gt(a, b)), S.false),
(And(Eq(a, b), Le(a, b)), Eq(a, b)),
(And(Eq(a, b), Lt(a, b)), S.false),
(And(Ge(a, b), Gt(a, b)), Gt(a, b)),
(And(Ge(a, b), Le(a, b)), Eq(a, b)),
(And(Ge(a, b), Lt(a, b)), S.false),
(And(Ge(a, b), Ne(a, b)), Gt(a, b)),
(And(Gt(a, b), Le(a, b)), S.false),
(And(Gt(a, b), Lt(a, b)), S.false),
(And(Gt(a, b), Ne(a, b)), Gt(a, b)),
(And(Le(a, b), Lt(a, b)), Lt(a, b)),
(And(Le(a, b), Ne(a, b)), Lt(a, b)),
(And(Lt(a, b), Ne(a, b)), Lt(a, b)),
# Min/max
(And(Ge(a, b), Ge(a, c)), Ge(a, Max(b, c))),
(And(Ge(a, b), Gt(a, c)), ITE(b > c, Ge(a, b), Gt(a, c))),
(And(Gt(a, b), Gt(a, c)), Gt(a, Max(b, c))),
(And(Le(a, b), Le(a, c)), Le(a, Min(b, c))),
(And(Le(a, b), Lt(a, c)), ITE(b < c, Le(a, b), Lt(a, c))),
(And(Lt(a, b), Lt(a, c)), Lt(a, Min(b, c))),
# Sign
(And(Eq(a, b), Eq(a, -b)), And(Eq(a, S.Zero), Eq(b, S.Zero))),
)
return _matchers_and
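# Illustrative sketch (an assumption about usage, not necessarily the exact
# code path SymPy follows internally): each entry above is a
# (pattern, simplified) pair built from Wild symbols, so a concrete relational
# conjunction can be rewritten via Wild matching, roughly
#
#     >>> from sympy import Eq, Ge
#     >>> from sympy.abc import x, y
#     >>> pattern, repl = simplify_patterns_and()[0]  # (Eq(a, b) & Ge(a, b), Eq(a, b))
#     >>> m = And(Eq(x, y), Ge(x, y)).match(pattern)
#     >>> repl.xreplace(m) if m else None             # roughly: Eq(x, y)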
def simplify_patterns_or():
from sympy.functions.elementary.miscellaneous import Min, Max
from sympy.core import Wild
from sympy.core.relational import Eq, Ne, Ge, Gt, Le, Lt
a = Wild('a')
b = Wild('b')
c = Wild('c')
_matchers_or = ((Or(Eq(a, b), Ge(a, b)), Ge(a, b)),
(Or(Eq(a, b), Gt(a, b)), Ge(a, b)),
(Or(Eq(a, b), Le(a, b)), Le(a, b)),
(Or(Eq(a, b), Lt(a, b)), Le(a, b)),
(Or(Ge(a, b), Gt(a, b)), Ge(a, b)),
(Or(Ge(a, b), Le(a, b)), S.true),
(Or(Ge(a, b), Lt(a, b)), S.true),
(Or(Ge(a, b), Ne(a, b)), S.true),
(Or(Gt(a, b), Le(a, b)), S.true),
(Or(Gt(a, b), Lt(a, b)), Ne(a, b)),
(Or(Gt(a, b), Ne(a, b)), Ne(a, b)),
(Or(Le(a, b), Lt(a, b)), Le(a, b)),
(Or(Le(a, b), Ne(a, b)), S.true),
(Or(Lt(a, b), Ne(a, b)), Ne(a, b)),
# Min/max
(Or(Ge(a, b), Ge(a, c)), Ge(a, Min(b, c))),
(Or(Ge(a, b), Gt(a, c)), ITE(b > c, Gt(a, c), Ge(a, b))),
(Or(Gt(a, b), Gt(a, c)), Gt(a, Min(b, c))),
(Or(Le(a, b), Le(a, c)), Le(a, Max(b, c))),
(Or(Le(a, b), Lt(a, c)), ITE(b >= c, Le(a, b), Lt(a, c))),
(Or(Lt(a, b), Lt(a, c)), Lt(a, Max(b, c))),
)
return _matchers_or
def simplify_patterns_xor():
from sympy.functions.elementary.miscellaneous import Min, Max
from sympy.core import Wild
from sympy.core.relational import Eq, Ne, Ge, Gt, Le, Lt
a = Wild('a')
b = Wild('b')
c = Wild('c')
_matchers_xor = ((Xor(Eq(a, b), Ge(a, b)), Gt(a, b)),
(Xor(Eq(a, b), Gt(a, b)), Ge(a, b)),
(Xor(Eq(a, b), Le(a, b)), Lt(a, b)),
(Xor(Eq(a, b), Lt(a, b)), Le(a, b)),
(Xor(Ge(a, b), Gt(a, b)), Eq(a, b)),
(Xor(Ge(a, b), Le(a, b)), Ne(a, b)),
(Xor(Ge(a, b), Lt(a, b)), S.true),
(Xor(Ge(a, b), Ne(a, b)), Le(a, b)),
(Xor(Gt(a, b), Le(a, b)), S.true),
(Xor(Gt(a, b), Lt(a, b)), Ne(a, b)),
(Xor(Gt(a, b), Ne(a, b)), Lt(a, b)),
(Xor(Le(a, b), Lt(a, b)), Eq(a, b)),
(Xor(Le(a, b), Ne(a, b)), Ge(a, b)),
(Xor(Lt(a, b), Ne(a, b)), Gt(a, b)),
# Min/max
(Xor(Ge(a, b), Ge(a, c)),
And(Ge(a, Min(b, c)), Lt(a, Max(b, c)))),
(Xor(Ge(a, b), Gt(a, c)),
ITE(b > c, And(Gt(a, c), Lt(a, b)),
And(Ge(a, b), Le(a, c)))),
(Xor(Gt(a, b), Gt(a, c)),
And(Gt(a, Min(b, c)), Le(a, Max(b, c)))),
(Xor(Le(a, b), Le(a, c)),
And(Le(a, Max(b, c)), Gt(a, Min(b, c)))),
(Xor(Le(a, b), Lt(a, c)),
ITE(b < c, And(Lt(a, c), Gt(a, b)),
And(Le(a, b), Ge(a, c)))),
(Xor(Lt(a, b), Lt(a, c)),
And(Lt(a, Max(b, c)), Ge(a, Min(b, c)))),
)
return _matchers_xor
def simplify_univariate(expr):
"""return a simplified version of univariate boolean expression, else ``expr``"""
from sympy.functions.elementary.piecewise import Piecewise
from sympy.core.relational import Eq, Ne
if not isinstance(expr, BooleanFunction):
return expr
if expr.atoms(Eq, Ne):
return expr
c = expr
free = c.free_symbols
if len(free) != 1:
return c
x = free.pop()
ok, i = Piecewise((0, c), evaluate=False
)._intervals(x, err_on_Eq=True)
if not ok:
return c
if not i:
return S.false
args = []
for a, b, _, _ in i:
if a is S.NegativeInfinity:
if b is S.Infinity:
c = S.true
else:
if c.subs(x, b) == True:
c = (x <= b)
else:
c = (x < b)
else:
incl_a = (c.subs(x, a) == True)
incl_b = (c.subs(x, b) == True)
if incl_a and incl_b:
if b.is_infinite:
c = (x >= a)
else:
c = And(a <= x, x <= b)
elif incl_a:
c = And(a <= x, x < b)
elif incl_b:
if b.is_infinite:
c = (x > a)
else:
c = And(a < x, x <= b)
else:
c = And(a < x, x < b)
args.append(c)
return Or(*args)
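# Illustrative sketch (comments only; printed forms are approximate): the
# helper rewrites a boolean combination of relationals in one symbol into
# interval-style conditions, e.g.
#
#     >>> from sympy.abc import x
#     >>> simplify_univariate((x < 1) & (x < 3))    # roughly: x < 1
#     >>> simplify_univariate((x > 5) | (x > 2))    # roughly: x > 2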
|
d7c7844e1e8e6cf0ad789260aebb6b9022b4df5ca60d076f41bbde694ca609c3 | """Inference in propositional logic"""
from sympy.logic.boolalg import And, Not, conjuncts, to_cnf, BooleanFunction
from sympy.core.sorting import ordered
from sympy.core.sympify import sympify
from sympy.external.importtools import import_module
def literal_symbol(literal):
"""
The symbol in this literal (without the negation).
Examples
========
>>> from sympy.abc import A
>>> from sympy.logic.inference import literal_symbol
>>> literal_symbol(A)
A
>>> literal_symbol(~A)
A
"""
if literal is True or literal is False:
return literal
try:
if literal.is_Symbol:
return literal
if literal.is_Not:
return literal_symbol(literal.args[0])
else:
raise ValueError
except (AttributeError, ValueError):
raise ValueError("Argument must be a boolean literal.")
def satisfiable(expr, algorithm=None, all_models=False, minimal=False):
"""
Check satisfiability of a propositional sentence.
Returns a model when it succeeds.
Returns {true: true} for trivially true expressions.
On setting all_models to True, if given expr is satisfiable then
returns a generator of models. However, if expr is unsatisfiable
then returns a generator containing the single element False.
Examples
========
>>> from sympy.abc import A, B
>>> from sympy.logic.inference import satisfiable
>>> satisfiable(A & ~B)
{A: True, B: False}
>>> satisfiable(A & ~A)
False
>>> satisfiable(True)
{True: True}
>>> next(satisfiable(A & ~A, all_models=True))
False
>>> models = satisfiable((A >> B) & B, all_models=True)
>>> next(models)
{A: False, B: True}
>>> next(models)
{A: True, B: True}
>>> def use_models(models):
... for model in models:
... if model:
... # Do something with the model.
... print(model)
... else:
... # Given expr is unsatisfiable.
... print("UNSAT")
>>> use_models(satisfiable(A >> ~A, all_models=True))
{A: False}
>>> use_models(satisfiable(A ^ A, all_models=True))
UNSAT
"""
if algorithm is None or algorithm == "pycosat":
pycosat = import_module('pycosat')
if pycosat is not None:
algorithm = "pycosat"
else:
if algorithm == "pycosat":
raise ImportError("pycosat module is not present")
# Silently fall back to dpll2 if pycosat
# is not installed
algorithm = "dpll2"
if algorithm=="minisat22":
pysat = import_module('pysat')
if pysat is None:
algorithm = "dpll2"
if algorithm == "dpll":
from sympy.logic.algorithms.dpll import dpll_satisfiable
return dpll_satisfiable(expr)
elif algorithm == "dpll2":
from sympy.logic.algorithms.dpll2 import dpll_satisfiable
return dpll_satisfiable(expr, all_models)
elif algorithm == "pycosat":
from sympy.logic.algorithms.pycosat_wrapper import pycosat_satisfiable
return pycosat_satisfiable(expr, all_models)
elif algorithm == "minisat22":
from sympy.logic.algorithms.minisat22_wrapper import minisat22_satisfiable
return minisat22_satisfiable(expr, all_models, minimal)
raise NotImplementedError
def valid(expr):
"""
Check validity of a propositional sentence.
A valid propositional sentence is True under every assignment.
Examples
========
>>> from sympy.abc import A, B
>>> from sympy.logic.inference import valid
>>> valid(A | ~A)
True
>>> valid(A | B)
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Validity
"""
return not satisfiable(Not(expr))
def pl_true(expr, model=None, deep=False):
"""
Returns whether the given assignment is a model or not.
If the assignment does not specify the value for every proposition,
this may return None to indicate 'not obvious'.
Parameters
==========
model : dict, optional, default: {}
Mapping of symbols to boolean values to indicate assignment.
deep: boolean, optional, default: False
Gives the value of the expression under partial assignments
correctly. May still return None to indicate 'not obvious'.
Examples
========
>>> from sympy.abc import A, B
>>> from sympy.logic.inference import pl_true
>>> pl_true( A & B, {A: True, B: True})
True
>>> pl_true(A & B, {A: False})
False
>>> pl_true(A & B, {A: True})
>>> pl_true(A & B, {A: True}, deep=True)
>>> pl_true(A >> (B >> A))
>>> pl_true(A >> (B >> A), deep=True)
True
>>> pl_true(A & ~A)
>>> pl_true(A & ~A, deep=True)
False
>>> pl_true(A & B & (~A | ~B), {A: True})
>>> pl_true(A & B & (~A | ~B), {A: True}, deep=True)
False
"""
from sympy.core.symbol import Symbol
boolean = (True, False)
def _validate(expr):
if isinstance(expr, Symbol) or expr in boolean:
return True
if not isinstance(expr, BooleanFunction):
return False
return all(_validate(arg) for arg in expr.args)
if expr in boolean:
return expr
expr = sympify(expr)
if not _validate(expr):
raise ValueError("%s is not a valid boolean expression" % expr)
if not model:
model = {}
model = {k: v for k, v in model.items() if v in boolean}
result = expr.subs(model)
if result in boolean:
return bool(result)
if deep:
model = {k: True for k in result.atoms()}
if pl_true(result, model):
if valid(result):
return True
else:
if not satisfiable(result):
return False
return None
def entails(expr, formula_set=None):
"""
    Check whether the given formula_set entails an expr.
If formula_set is empty then it returns the validity of expr.
Examples
========
>>> from sympy.abc import A, B, C
>>> from sympy.logic.inference import entails
>>> entails(A, [A >> B, B >> C])
False
>>> entails(C, [A >> B, B >> C, A])
True
>>> entails(A >> B)
False
>>> entails(A >> (B >> A))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Logical_consequence
"""
if formula_set:
formula_set = list(formula_set)
else:
formula_set = []
formula_set.append(Not(expr))
return not satisfiable(And(*formula_set))
class KB:
"""Base class for all knowledge bases"""
def __init__(self, sentence=None):
self.clauses_ = set()
if sentence:
self.tell(sentence)
def tell(self, sentence):
raise NotImplementedError
def ask(self, query):
raise NotImplementedError
def retract(self, sentence):
raise NotImplementedError
@property
def clauses(self):
return list(ordered(self.clauses_))
class PropKB(KB):
"""A KB for Propositional Logic. Inefficient, with no indexing."""
def tell(self, sentence):
"""Add the sentence's clauses to the KB
Examples
========
>>> from sympy.logic.inference import PropKB
>>> from sympy.abc import x, y
>>> l = PropKB()
>>> l.clauses
[]
>>> l.tell(x | y)
>>> l.clauses
[x | y]
>>> l.tell(y)
>>> l.clauses
[y, x | y]
"""
for c in conjuncts(to_cnf(sentence)):
self.clauses_.add(c)
def ask(self, query):
"""Checks if the query is true given the set of clauses.
Examples
========
>>> from sympy.logic.inference import PropKB
>>> from sympy.abc import x, y
>>> l = PropKB()
>>> l.tell(x & ~y)
>>> l.ask(x)
True
>>> l.ask(y)
False
"""
return entails(query, self.clauses_)
def retract(self, sentence):
"""Remove the sentence's clauses from the KB
Examples
========
>>> from sympy.logic.inference import PropKB
>>> from sympy.abc import x, y
>>> l = PropKB()
>>> l.clauses
[]
>>> l.tell(x | y)
>>> l.clauses
[x | y]
>>> l.retract(x | y)
>>> l.clauses
[]
"""
for c in conjuncts(to_cnf(sentence)):
self.clauses_.discard(c)
|
e78b5ff2e7247bdd6b5de0eb275abe47c60a8b02e669ea7a267c08a5ff59cca9 | '''Functions returning normal forms of matrices'''
from sympy.polys.polytools import Poly
from sympy.polys.matrices import DomainMatrix
from sympy.polys.matrices.normalforms import (
smith_normal_form as _snf,
invariant_factors as _invf,
hermite_normal_form as _hnf,
)
def _to_domain(m, domain=None):
"""Convert Matrix to DomainMatrix"""
# XXX: deprecated support for RawMatrix:
ring = getattr(m, "ring", None)
m = m.applyfunc(lambda e: e.as_expr() if isinstance(e, Poly) else e)
dM = DomainMatrix.from_Matrix(m)
domain = domain or ring
if domain is not None:
dM = dM.convert_to(domain)
return dM
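# Illustrative sketch (comments only): the helper wraps a Matrix in a
# DomainMatrix and optionally converts it to a requested domain, e.g.
#
#     >>> from sympy import Matrix, QQ
#     >>> _to_domain(Matrix([[1, 2], [3, 4]]), QQ).domain
#     QQ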
def smith_normal_form(m, domain=None):
'''
Return the Smith Normal Form of a matrix `m` over the ring `domain`.
This will only work if the ring is a principal ideal domain.
Examples
========
>>> from sympy import Matrix, ZZ
>>> from sympy.matrices.normalforms import smith_normal_form
>>> m = Matrix([[12, 6, 4], [3, 9, 6], [2, 16, 14]])
>>> print(smith_normal_form(m, domain=ZZ))
Matrix([[1, 0, 0], [0, 10, 0], [0, 0, -30]])
'''
dM = _to_domain(m, domain)
return _snf(dM).to_Matrix()
def invariant_factors(m, domain=None):
'''
    Return the tuple of abelian invariants for a matrix `m`
    (as in the Smith Normal Form).
References
==========
.. [1] https://en.wikipedia.org/wiki/Smith_normal_form#Algorithm
.. [2] http://sierra.nmsu.edu/morandi/notes/SmithNormalForm.pdf
'''
dM = _to_domain(m, domain)
factors = _invf(dM)
factors = tuple(dM.domain.to_sympy(f) for f in factors)
# XXX: deprecated.
if hasattr(m, "ring"):
if m.ring.is_PolynomialRing:
K = m.ring
to_poly = lambda f: Poly(f, K.symbols, domain=K.domain)
factors = tuple(to_poly(f) for f in factors)
return factors
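# Illustrative sketch (comments only; exact signs/units may differ): the
# invariant factors of the matrix from the smith_normal_form example above
# are its Smith diagonal entries, roughly (1, 10, 30).
#
#     >>> from sympy import Matrix, ZZ
#     >>> invariant_factors(Matrix([[12, 6, 4], [3, 9, 6], [2, 16, 14]]), domain=ZZ)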
def hermite_normal_form(A, *, D=None, check_rank=False):
r'''
Compute the Hermite Normal Form of a Matrix *A* of integers.
Parameters
==========
A: $m \times n$ Matrix of integers.
D: positive integer (optional)
Let $W$ be the HNF of *A*. If known in advance, a positive integer *D*
being any multiple of $\det(W)$ may be provided. In this case, if *A*
also has rank $m$, then we may use an alternative algorithm that works
mod *D* in order to prevent coefficient explosion.
check_rank: boolean (default ``False``)
The basic assumption is that, if you pass a value for *D*, then
you already believe that *A* has rank $m$, so we do not waste time
checking it for you. If you do want this to be checked (and the
ordinary, non-modulo *D* algorithm to be used if the check fails), then
set *check_rank* to ``True``.
Returns
=======
Matrix
The HNF of matrix *A*.
Raises
======
:obj:`sympy.polys.matrices.exceptions.DMDomainError`
If the domain of the matrix is not :ref:`ZZ`.
:obj:`sympy.polys.matrices.exceptions.DMShapeError`
If the mod *D* algorithm is used but the matrix has more rows than
columns.
Examples
========
>>> from sympy import Matrix
>>> from sympy.matrices.normalforms import hermite_normal_form
>>> m = Matrix([[12, 6, 4], [3, 9, 6], [2, 16, 14]])
>>> print(hermite_normal_form(m))
Matrix([[10, 0, 2], [0, 15, 3], [0, 0, 2]])
References
==========
    .. [1] Cohen, H. *A Course in Computational Algebraic Number Theory.*
       (See Algorithms 2.4.5 and 2.4.8.)
'''
return _hnf(A._rep, D=D, check_rank=check_rank).to_Matrix()
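# Illustrative sketch (comments only): if a multiple of det(W) is known in
# advance, the mod-D algorithm can be requested. The value 300 below is an
# assumption matching the example matrix above (its determinant).
#
#     >>> from sympy import Matrix
#     >>> m = Matrix([[12, 6, 4], [3, 9, 6], [2, 16, 14]])
#     >>> hermite_normal_form(m, D=300, check_rank=True)   # same HNF as above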
|
edaa30abb2ec04c7d23f51bec5c92eadf62cfb740e83c8e8c237aef96f03c35a | import copy
from sympy.core import S
from sympy.core.function import expand_mul
from sympy.functions.elementary.miscellaneous import Min, sqrt
from sympy.functions.elementary.complexes import sign
from .common import NonSquareMatrixError, NonPositiveDefiniteMatrixError
from .utilities import _get_intermediate_simp, _iszero
from .determinant import _find_reasonable_pivot_naive
def _rank_decomposition(M, iszerofunc=_iszero, simplify=False):
r"""Returns a pair of matrices (`C`, `F`) with matching rank
such that `A = C F`.
Parameters
==========
iszerofunc : Function, optional
A function used for detecting whether an element can
act as a pivot. ``lambda x: x.is_zero`` is used by default.
simplify : Bool or Function, optional
A function used to simplify elements when looking for a
pivot. By default SymPy's ``simplify`` is used.
Returns
=======
(C, F) : Matrices
        `C` and `F` are full-rank matrices with the same rank as `A`,
whose product gives `A`.
See Notes for additional mathematical details.
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix([
... [1, 3, 1, 4],
... [2, 7, 3, 9],
... [1, 5, 3, 1],
... [1, 2, 0, 8]
... ])
>>> C, F = A.rank_decomposition()
>>> C
Matrix([
[1, 3, 4],
[2, 7, 9],
[1, 5, 1],
[1, 2, 8]])
>>> F
Matrix([
[1, 0, -2, 0],
[0, 1, 1, 0],
[0, 0, 0, 1]])
>>> C * F == A
True
Notes
=====
Obtaining `F`, an RREF of `A`, is equivalent to creating a
product
.. math::
E_n E_{n-1} ... E_1 A = F
where `E_n, E_{n-1}, \dots, E_1` are the elimination matrices or
permutation matrices equivalent to each row-reduction step.
The inverse of the same product of elimination matrices gives
`C`:
.. math::
C = \left(E_n E_{n-1} \dots E_1\right)^{-1}
It is not necessary, however, to actually compute the inverse:
the columns of `C` are those from the original matrix with the
same column indices as the indices of the pivot columns of `F`.
References
==========
.. [1] https://en.wikipedia.org/wiki/Rank_factorization
.. [2] Piziak, R.; Odell, P. L. (1 June 1999).
"Full Rank Factorization of Matrices".
Mathematics Magazine. 72 (3): 193. doi:10.2307/2690882
See Also
========
sympy.matrices.matrices.MatrixReductions.rref
"""
F, pivot_cols = M.rref(simplify=simplify, iszerofunc=iszerofunc,
pivots=True)
rank = len(pivot_cols)
C = M.extract(range(M.rows), pivot_cols)
F = F[:rank, :]
return C, F
def _liupc(M):
"""Liu's algorithm, for pre-determination of the Elimination Tree of
the given matrix, used in row-based symbolic Cholesky factorization.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix([
... [1, 0, 3, 2],
... [0, 0, 1, 0],
... [4, 0, 0, 5],
... [0, 6, 7, 0]])
>>> S.liupc()
([[0], [], [0], [1, 2]], [4, 3, 4, 4])
References
==========
.. [1] Symbolic Sparse Cholesky Factorization using Elimination Trees,
Jeroen Van Grondelle (1999)
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.39.7582
"""
# Algorithm 2.4, p 17 of reference
# get the indices of the elements that are non-zero on or below diag
R = [[] for r in range(M.rows)]
for r, c, _ in M.row_list():
if c <= r:
R[r].append(c)
inf = len(R) # nothing will be this large
parent = [inf]*M.rows
virtual = [inf]*M.rows
for r in range(M.rows):
for c in R[r][:-1]:
while virtual[c] < r:
t = virtual[c]
virtual[c] = r
c = t
if virtual[c] == inf:
parent[c] = virtual[c] = r
return R, parent
def _row_structure_symbolic_cholesky(M):
"""Symbolic cholesky factorization, for pre-determination of the
non-zero structure of the Cholesky factororization.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix([
... [1, 0, 3, 2],
... [0, 0, 1, 0],
... [4, 0, 0, 5],
... [0, 6, 7, 0]])
>>> S.row_structure_symbolic_cholesky()
[[0], [], [0], [1, 2]]
References
==========
.. [1] Symbolic Sparse Cholesky Factorization using Elimination Trees,
Jeroen Van Grondelle (1999)
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.39.7582
"""
R, parent = M.liupc()
inf = len(R) # this acts as infinity
Lrow = copy.deepcopy(R)
for k in range(M.rows):
for j in R[k]:
while j != inf and j != k:
Lrow[k].append(j)
j = parent[j]
Lrow[k] = list(sorted(set(Lrow[k])))
return Lrow
def _cholesky(M, hermitian=True):
"""Returns the Cholesky-type decomposition L of a matrix A
such that L * L.H == A if hermitian flag is True,
or L * L.T == A if hermitian is False.
A must be a Hermitian positive-definite matrix if hermitian is True,
or a symmetric matrix if it is False.
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
>>> A.cholesky()
Matrix([
[ 5, 0, 0],
[ 3, 3, 0],
[-1, 1, 3]])
>>> A.cholesky() * A.cholesky().T
Matrix([
[25, 15, -5],
[15, 18, 0],
[-5, 0, 11]])
The matrix can have complex entries:
>>> from sympy import I
>>> A = Matrix(((9, 3*I), (-3*I, 5)))
>>> A.cholesky()
Matrix([
[ 3, 0],
[-I, 2]])
>>> A.cholesky() * A.cholesky().H
Matrix([
[ 9, 3*I],
[-3*I, 5]])
Non-hermitian Cholesky-type decomposition may be useful when the
matrix is not positive-definite.
>>> A = Matrix([[1, 2], [2, 1]])
>>> L = A.cholesky(hermitian=False)
>>> L
Matrix([
[1, 0],
[2, sqrt(3)*I]])
>>> L*L.T == A
True
See Also
========
sympy.matrices.dense.DenseMatrix.LDLdecomposition
sympy.matrices.matrices.MatrixBase.LUdecomposition
QRdecomposition
"""
from .dense import MutableDenseMatrix
if not M.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if hermitian and not M.is_hermitian:
raise ValueError("Matrix must be Hermitian.")
if not hermitian and not M.is_symmetric():
raise ValueError("Matrix must be symmetric.")
L = MutableDenseMatrix.zeros(M.rows, M.rows)
if hermitian:
for i in range(M.rows):
for j in range(i):
L[i, j] = ((1 / L[j, j])*(M[i, j] -
sum(L[i, k]*L[j, k].conjugate() for k in range(j))))
Lii2 = (M[i, i] -
sum(L[i, k]*L[i, k].conjugate() for k in range(i)))
if Lii2.is_positive is False:
raise NonPositiveDefiniteMatrixError(
"Matrix must be positive-definite")
L[i, i] = sqrt(Lii2)
else:
for i in range(M.rows):
for j in range(i):
L[i, j] = ((1 / L[j, j])*(M[i, j] -
sum(L[i, k]*L[j, k] for k in range(j))))
L[i, i] = sqrt(M[i, i] -
sum(L[i, k]**2 for k in range(i)))
return M._new(L)
def _cholesky_sparse(M, hermitian=True):
"""
Returns the Cholesky decomposition L of a matrix A
such that L * L.T = A
A must be a square, symmetric, positive-definite
and non-singular matrix
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> A = SparseMatrix(((25,15,-5),(15,18,0),(-5,0,11)))
>>> A.cholesky()
Matrix([
[ 5, 0, 0],
[ 3, 3, 0],
[-1, 1, 3]])
>>> A.cholesky() * A.cholesky().T == A
True
The matrix can have complex entries:
>>> from sympy import I
>>> A = SparseMatrix(((9, 3*I), (-3*I, 5)))
>>> A.cholesky()
Matrix([
[ 3, 0],
[-I, 2]])
>>> A.cholesky() * A.cholesky().H
Matrix([
[ 9, 3*I],
[-3*I, 5]])
Non-hermitian Cholesky-type decomposition may be useful when the
matrix is not positive-definite.
>>> A = SparseMatrix([[1, 2], [2, 1]])
>>> L = A.cholesky(hermitian=False)
>>> L
Matrix([
[1, 0],
[2, sqrt(3)*I]])
>>> L*L.T == A
True
See Also
========
sympy.matrices.sparse.SparseMatrix.LDLdecomposition
sympy.matrices.matrices.MatrixBase.LUdecomposition
QRdecomposition
"""
from .dense import MutableDenseMatrix
if not M.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if hermitian and not M.is_hermitian:
raise ValueError("Matrix must be Hermitian.")
if not hermitian and not M.is_symmetric():
raise ValueError("Matrix must be symmetric.")
dps = _get_intermediate_simp(expand_mul, expand_mul)
Crowstruc = M.row_structure_symbolic_cholesky()
C = MutableDenseMatrix.zeros(M.rows)
for i in range(len(Crowstruc)):
for j in Crowstruc[i]:
if i != j:
C[i, j] = M[i, j]
summ = 0
for p1 in Crowstruc[i]:
if p1 < j:
for p2 in Crowstruc[j]:
if p2 < j:
if p1 == p2:
if hermitian:
summ += C[i, p1]*C[j, p1].conjugate()
else:
summ += C[i, p1]*C[j, p1]
else:
break
else:
break
C[i, j] = dps((C[i, j] - summ) / C[j, j])
else: # i == j
C[j, j] = M[j, j]
summ = 0
for k in Crowstruc[j]:
if k < j:
if hermitian:
summ += C[j, k]*C[j, k].conjugate()
else:
summ += C[j, k]**2
else:
break
Cjj2 = dps(C[j, j] - summ)
if hermitian and Cjj2.is_positive is False:
raise NonPositiveDefiniteMatrixError(
"Matrix must be positive-definite")
C[j, j] = sqrt(Cjj2)
return M._new(C)
def _LDLdecomposition(M, hermitian=True):
"""Returns the LDL Decomposition (L, D) of matrix A,
such that L * D * L.H == A if hermitian flag is True, or
L * D * L.T == A if hermitian is False.
This method eliminates the use of square root.
Further this ensures that all the diagonal entries of L are 1.
A must be a Hermitian positive-definite matrix if hermitian is True,
or a symmetric matrix otherwise.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
>>> L, D = A.LDLdecomposition()
>>> L
Matrix([
[ 1, 0, 0],
[ 3/5, 1, 0],
[-1/5, 1/3, 1]])
>>> D
Matrix([
[25, 0, 0],
[ 0, 9, 0],
[ 0, 0, 9]])
>>> L * D * L.T * A.inv() == eye(A.rows)
True
The matrix can have complex entries:
>>> from sympy import I
>>> A = Matrix(((9, 3*I), (-3*I, 5)))
>>> L, D = A.LDLdecomposition()
>>> L
Matrix([
[ 1, 0],
[-I/3, 1]])
>>> D
Matrix([
[9, 0],
[0, 4]])
>>> L*D*L.H == A
True
See Also
========
sympy.matrices.dense.DenseMatrix.cholesky
sympy.matrices.matrices.MatrixBase.LUdecomposition
QRdecomposition
"""
from .dense import MutableDenseMatrix
if not M.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if hermitian and not M.is_hermitian:
raise ValueError("Matrix must be Hermitian.")
if not hermitian and not M.is_symmetric():
raise ValueError("Matrix must be symmetric.")
D = MutableDenseMatrix.zeros(M.rows, M.rows)
L = MutableDenseMatrix.eye(M.rows)
if hermitian:
for i in range(M.rows):
for j in range(i):
L[i, j] = (1 / D[j, j])*(M[i, j] - sum(
L[i, k]*L[j, k].conjugate()*D[k, k] for k in range(j)))
D[i, i] = (M[i, i] -
sum(L[i, k]*L[i, k].conjugate()*D[k, k] for k in range(i)))
if D[i, i].is_positive is False:
raise NonPositiveDefiniteMatrixError(
"Matrix must be positive-definite")
else:
for i in range(M.rows):
for j in range(i):
L[i, j] = (1 / D[j, j])*(M[i, j] - sum(
L[i, k]*L[j, k]*D[k, k] for k in range(j)))
D[i, i] = M[i, i] - sum(L[i, k]**2*D[k, k] for k in range(i))
return M._new(L), M._new(D)
def _LDLdecomposition_sparse(M, hermitian=True):
"""
Returns the LDL Decomposition (matrices ``L`` and ``D``) of matrix
``A``, such that ``L * D * L.T == A``. ``A`` must be a square,
symmetric, positive-definite and non-singular.
This method eliminates the use of square root and ensures that all
the diagonal entries of L are 1.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
>>> L, D = A.LDLdecomposition()
>>> L
Matrix([
[ 1, 0, 0],
[ 3/5, 1, 0],
[-1/5, 1/3, 1]])
>>> D
Matrix([
[25, 0, 0],
[ 0, 9, 0],
[ 0, 0, 9]])
>>> L * D * L.T == A
True
"""
from .dense import MutableDenseMatrix
if not M.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if hermitian and not M.is_hermitian:
raise ValueError("Matrix must be Hermitian.")
if not hermitian and not M.is_symmetric():
raise ValueError("Matrix must be symmetric.")
dps = _get_intermediate_simp(expand_mul, expand_mul)
Lrowstruc = M.row_structure_symbolic_cholesky()
L = MutableDenseMatrix.eye(M.rows)
D = MutableDenseMatrix.zeros(M.rows, M.cols)
for i in range(len(Lrowstruc)):
for j in Lrowstruc[i]:
if i != j:
L[i, j] = M[i, j]
summ = 0
for p1 in Lrowstruc[i]:
if p1 < j:
for p2 in Lrowstruc[j]:
if p2 < j:
if p1 == p2:
if hermitian:
summ += L[i, p1]*L[j, p1].conjugate()*D[p1, p1]
else:
summ += L[i, p1]*L[j, p1]*D[p1, p1]
else:
break
else:
break
L[i, j] = dps((L[i, j] - summ) / D[j, j])
else: # i == j
D[i, i] = M[i, i]
summ = 0
for k in Lrowstruc[i]:
if k < i:
if hermitian:
summ += L[i, k]*L[i, k].conjugate()*D[k, k]
else:
summ += L[i, k]**2*D[k, k]
else:
break
D[i, i] = dps(D[i, i] - summ)
if hermitian and D[i, i].is_positive is False:
raise NonPositiveDefiniteMatrixError(
"Matrix must be positive-definite")
return M._new(L), M._new(D)
def _LUdecomposition(M, iszerofunc=_iszero, simpfunc=None, rankcheck=False):
"""Returns (L, U, perm) where L is a lower triangular matrix with unit
diagonal, U is an upper triangular matrix, and perm is a list of row
swap index pairs. If A is the original matrix, then
``A = (L*U).permuteBkwd(perm)``, and the row permutation matrix P such
that $P A = L U$ can be computed by ``P = eye(A.rows).permuteFwd(perm)``.
    See documentation for LUCombined for details about the keyword arguments
    ``rankcheck``, ``iszerofunc``, and ``simpfunc``.
Parameters
==========
rankcheck : bool, optional
        Determines if this function should detect the rank
        deficiency of the matrix and should raise a
        ``ValueError``.
iszerofunc : function, optional
A function which determines if a given expression is zero.
The function should be a callable that takes a single
SymPy expression and returns a 3-valued boolean value
``True``, ``False``, or ``None``.
It is internally used by the pivot searching algorithm.
        See the notes section for more information about the
        pivot searching algorithm.
simpfunc : function or None, optional
A function that simplifies the input.
If this is specified as a function, this function should be
        a callable that takes a single SymPy expression and returns
        another SymPy expression that is algebraically
equivalent.
If ``None``, it indicates that the pivot search algorithm
should not attempt to simplify any candidate pivots.
It is internally used by the pivot searching algorithm.
        See the notes section for more information about the
        pivot searching algorithm.
Examples
========
>>> from sympy import Matrix
>>> a = Matrix([[4, 3], [6, 3]])
>>> L, U, _ = a.LUdecomposition()
>>> L
Matrix([
[ 1, 0],
[3/2, 1]])
>>> U
Matrix([
[4, 3],
[0, -3/2]])
See Also
========
sympy.matrices.dense.DenseMatrix.cholesky
sympy.matrices.dense.DenseMatrix.LDLdecomposition
QRdecomposition
LUdecomposition_Simple
LUdecompositionFF
LUsolve
"""
combined, p = M.LUdecomposition_Simple(iszerofunc=iszerofunc,
simpfunc=simpfunc, rankcheck=rankcheck)
# L is lower triangular ``M.rows x M.rows``
# U is upper triangular ``M.rows x M.cols``
# L has unit diagonal. For each column in combined, the subcolumn
# below the diagonal of combined is shared by L.
# If L has more columns than combined, then the remaining subcolumns
# below the diagonal of L are zero.
# The upper triangular portion of L and combined are equal.
def entry_L(i, j):
if i < j:
# Super diagonal entry
return M.zero
elif i == j:
return M.one
elif j < combined.cols:
return combined[i, j]
# Subdiagonal entry of L with no corresponding
# entry in combined
return M.zero
def entry_U(i, j):
return M.zero if i > j else combined[i, j]
L = M._new(combined.rows, combined.rows, entry_L)
U = M._new(combined.rows, combined.cols, entry_U)
return L, U, p
def _LUdecomposition_Simple(M, iszerofunc=_iszero, simpfunc=None,
rankcheck=False):
r"""Compute the PLU decomposition of the matrix.
Parameters
==========
rankcheck : bool, optional
        Determines if this function should detect the rank
        deficiency of the matrix and should raise a
        ``ValueError``.
iszerofunc : function, optional
A function which determines if a given expression is zero.
The function should be a callable that takes a single
SymPy expression and returns a 3-valued boolean value
``True``, ``False``, or ``None``.
It is internally used by the pivot searching algorithm.
        See the notes section for more information about the
        pivot searching algorithm.
simpfunc : function or None, optional
A function that simplifies the input.
If this is specified as a function, this function should be
        a callable that takes a single SymPy expression and returns
        another SymPy expression that is algebraically
equivalent.
If ``None``, it indicates that the pivot search algorithm
should not attempt to simplify any candidate pivots.
It is internally used by the pivot searching algorithm.
        See the notes section for more information about the
        pivot searching algorithm.
Returns
=======
(lu, row_swaps) : (Matrix, list)
If the original matrix is a $m, n$ matrix:
        *lu* is a $m, n$ matrix, which contains the result of the
        decomposition in a compressed form. See the notes section
        to see how the matrix is compressed.
        *row_swaps* is an $m$-element list where each element is a
        pair of row exchange indices.
``A = (L*U).permute_backward(perm)``, and the row
permutation matrix $P$ from the formula $P A = L U$ can be
computed by ``P=eye(A.row).permute_forward(perm)``.
Raises
======
ValueError
Raised if ``rankcheck=True`` and the matrix is found to
be rank deficient during the computation.
Notes
=====
About the PLU decomposition:
    PLU decomposition is a generalization of an LU decomposition
    which can be extended to rank-deficient matrices.
It can further be generalized for non-square matrices, and this
is the notation that SymPy is using.
PLU decomposition is a decomposition of a $m, n$ matrix $A$ in
the form of $P A = L U$ where
* $L$ is a $m, m$ lower triangular matrix with unit diagonal
entries.
* $U$ is a $m, n$ upper triangular matrix.
* $P$ is a $m, m$ permutation matrix.
So, for a square matrix, the decomposition would look like:
.. math::
L = \begin{bmatrix}
1 & 0 & 0 & \cdots & 0 \\
L_{1, 0} & 1 & 0 & \cdots & 0 \\
L_{2, 0} & L_{2, 1} & 1 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
L_{n-1, 0} & L_{n-1, 1} & L_{n-1, 2} & \cdots & 1
\end{bmatrix}
.. math::
U = \begin{bmatrix}
U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, n-1} \\
0 & U_{1, 1} & U_{1, 2} & \cdots & U_{1, n-1} \\
0 & 0 & U_{2, 2} & \cdots & U_{2, n-1} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & U_{n-1, n-1}
\end{bmatrix}
And for a matrix with more rows than the columns,
the decomposition would look like:
.. math::
L = \begin{bmatrix}
1 & 0 & 0 & \cdots & 0 & 0 & \cdots & 0 \\
L_{1, 0} & 1 & 0 & \cdots & 0 & 0 & \cdots & 0 \\
L_{2, 0} & L_{2, 1} & 1 & \cdots & 0 & 0 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots & \vdots & \ddots
& \vdots \\
L_{n-1, 0} & L_{n-1, 1} & L_{n-1, 2} & \cdots & 1 & 0
& \cdots & 0 \\
L_{n, 0} & L_{n, 1} & L_{n, 2} & \cdots & L_{n, n-1} & 1
& \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots & \vdots
& \ddots & \vdots \\
L_{m-1, 0} & L_{m-1, 1} & L_{m-1, 2} & \cdots & L_{m-1, n-1}
& 0 & \cdots & 1 \\
\end{bmatrix}
.. math::
U = \begin{bmatrix}
U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, n-1} \\
0 & U_{1, 1} & U_{1, 2} & \cdots & U_{1, n-1} \\
0 & 0 & U_{2, 2} & \cdots & U_{2, n-1} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & U_{n-1, n-1} \\
0 & 0 & 0 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & 0
\end{bmatrix}
Finally, for a matrix with more columns than the rows, the
decomposition would look like:
.. math::
L = \begin{bmatrix}
1 & 0 & 0 & \cdots & 0 \\
L_{1, 0} & 1 & 0 & \cdots & 0 \\
L_{2, 0} & L_{2, 1} & 1 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
L_{m-1, 0} & L_{m-1, 1} & L_{m-1, 2} & \cdots & 1
\end{bmatrix}
.. math::
U = \begin{bmatrix}
U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, m-1}
& \cdots & U_{0, n-1} \\
0 & U_{1, 1} & U_{1, 2} & \cdots & U_{1, m-1}
& \cdots & U_{1, n-1} \\
0 & 0 & U_{2, 2} & \cdots & U_{2, m-1}
& \cdots & U_{2, n-1} \\
\vdots & \vdots & \vdots & \ddots & \vdots
& \cdots & \vdots \\
0 & 0 & 0 & \cdots & U_{m-1, m-1}
& \cdots & U_{m-1, n-1} \\
\end{bmatrix}
About the compressed LU storage:
The results of the decomposition are often stored in compressed
forms rather than returning $L$ and $U$ matrices individually.
    It may be less intuitive, but it is commonly used in many
    numeric libraries because of its efficiency.
The storage matrix is defined as following for this specific
method:
* The subdiagonal elements of $L$ are stored in the subdiagonal
portion of $LU$, that is $LU_{i, j} = L_{i, j}$ whenever
$i > j$.
* The elements on the diagonal of $L$ are all 1, and are not
explicitly stored.
* $U$ is stored in the upper triangular portion of $LU$, that is
      $LU_{i, j} = U_{i, j}$ whenever $i \le j$.
* For a case of $m > n$, the right side of the $L$ matrix is
trivial to store.
* For a case of $m < n$, the below side of the $U$ matrix is
trivial to store.
So, for a square matrix, the compressed output matrix would be:
.. math::
LU = \begin{bmatrix}
U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, n-1} \\
L_{1, 0} & U_{1, 1} & U_{1, 2} & \cdots & U_{1, n-1} \\
L_{2, 0} & L_{2, 1} & U_{2, 2} & \cdots & U_{2, n-1} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
L_{n-1, 0} & L_{n-1, 1} & L_{n-1, 2} & \cdots & U_{n-1, n-1}
\end{bmatrix}
For a matrix with more rows than the columns, the compressed
output matrix would be:
.. math::
LU = \begin{bmatrix}
U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, n-1} \\
L_{1, 0} & U_{1, 1} & U_{1, 2} & \cdots & U_{1, n-1} \\
L_{2, 0} & L_{2, 1} & U_{2, 2} & \cdots & U_{2, n-1} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
L_{n-1, 0} & L_{n-1, 1} & L_{n-1, 2} & \cdots
& U_{n-1, n-1} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
L_{m-1, 0} & L_{m-1, 1} & L_{m-1, 2} & \cdots
& L_{m-1, n-1} \\
\end{bmatrix}
For a matrix with more columns than the rows, the compressed
output matrix would be:
.. math::
LU = \begin{bmatrix}
U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, m-1}
& \cdots & U_{0, n-1} \\
L_{1, 0} & U_{1, 1} & U_{1, 2} & \cdots & U_{1, m-1}
& \cdots & U_{1, n-1} \\
L_{2, 0} & L_{2, 1} & U_{2, 2} & \cdots & U_{2, m-1}
& \cdots & U_{2, n-1} \\
\vdots & \vdots & \vdots & \ddots & \vdots
& \cdots & \vdots \\
L_{m-1, 0} & L_{m-1, 1} & L_{m-1, 2} & \cdots & U_{m-1, m-1}
& \cdots & U_{m-1, n-1} \\
\end{bmatrix}
About the pivot searching algorithm:
When a matrix contains symbolic entries, the pivot search algorithm
differs from the case where every entry can be categorized as zero or
nonzero.
The algorithm searches column by column through the submatrix whose
top left entry coincides with the pivot position.
If it exists, the pivot is the first entry in the current search
column that iszerofunc guarantees is nonzero.
If no such candidate exists, then each candidate pivot is simplified
if simpfunc is not None.
The search is repeated, with the difference that a candidate may be
the pivot if ``iszerofunc()`` cannot guarantee that it is nonzero.
In the second search the pivot is the first candidate that
iszerofunc can guarantee is nonzero.
If no such candidate exists, then the pivot is the first candidate
for which iszerofunc returns None.
If no such candidate exists, then the search is repeated in the next
column to the right.
The pivot search algorithm differs from the one in ``rref()``, which
relies on ``_find_reasonable_pivot()``.
Future versions of ``LUdecomposition_simple()`` may use
``_find_reasonable_pivot()``.
See Also
========
sympy.matrices.matrices.MatrixBase.LUdecomposition
LUdecompositionFF
LUsolve
"""
if rankcheck:
# https://github.com/sympy/sympy/issues/9796
pass
if S.Zero in M.shape:
# Define LU decomposition of a matrix with no entries as a matrix
# of the same dimensions with all zero entries.
return M.zeros(M.rows, M.cols), []
dps = _get_intermediate_simp()
lu = M.as_mutable()
row_swaps = []
pivot_col = 0
for pivot_row in range(0, lu.rows - 1):
        # Search for a pivot. Prefer an entry that iszerofunc determines
        # is nonzero over an entry that iszerofunc cannot guarantee
        # is nonzero.
# XXX ``_find_reasonable_pivot`` uses slow zero testing. Blocked by bug #10279
# Future versions of LUdecomposition_simple can pass iszerofunc and simpfunc
# to _find_reasonable_pivot().
# In pass 3 of _find_reasonable_pivot(), the predicate in ``if x.equals(S.Zero):``
# calls sympy.simplify(), and not the simplification function passed in via
# the keyword argument simpfunc.
iszeropivot = True
while pivot_col != M.cols and iszeropivot:
sub_col = (lu[r, pivot_col] for r in range(pivot_row, M.rows))
pivot_row_offset, pivot_value, is_assumed_non_zero, ind_simplified_pairs =\
_find_reasonable_pivot_naive(sub_col, iszerofunc, simpfunc)
iszeropivot = pivot_value is None
if iszeropivot:
# All candidate pivots in this column are zero.
# Proceed to next column.
pivot_col += 1
if rankcheck and pivot_col != pivot_row:
# All entries including and below the pivot position are
# zero, which indicates that the rank of the matrix is
# strictly less than min(num rows, num cols)
# Mimic behavior of previous implementation, by throwing a
# ValueError.
raise ValueError("Rank of matrix is strictly less than"
" number of rows or columns."
" Pass keyword argument"
" rankcheck=False to compute"
" the LU decomposition of this matrix.")
candidate_pivot_row = None if pivot_row_offset is None else pivot_row + pivot_row_offset
if candidate_pivot_row is None and iszeropivot:
# If candidate_pivot_row is None and iszeropivot is True
# after pivot search has completed, then the submatrix
# below and to the right of (pivot_row, pivot_col) is
# all zeros, indicating that Gaussian elimination is
# complete.
return lu, row_swaps
# Update entries simplified during pivot search.
for offset, val in ind_simplified_pairs:
lu[pivot_row + offset, pivot_col] = val
if pivot_row != candidate_pivot_row:
# Row swap book keeping:
# Record which rows were swapped.
# Update stored portion of L factor by multiplying L on the
# left and right with the current permutation.
# Swap rows of U.
row_swaps.append([pivot_row, candidate_pivot_row])
# Update L.
lu[pivot_row, 0:pivot_row], lu[candidate_pivot_row, 0:pivot_row] = \
lu[candidate_pivot_row, 0:pivot_row], lu[pivot_row, 0:pivot_row]
# Swap pivot row of U with candidate pivot row.
lu[pivot_row, pivot_col:lu.cols], lu[candidate_pivot_row, pivot_col:lu.cols] = \
lu[candidate_pivot_row, pivot_col:lu.cols], lu[pivot_row, pivot_col:lu.cols]
# Introduce zeros below the pivot by adding a multiple of the
# pivot row to a row under it, and store the result in the
# row under it.
# Only entries in the target row whose index is greater than
# start_col may be nonzero.
start_col = pivot_col + 1
for row in range(pivot_row + 1, lu.rows):
# Store factors of L in the subcolumn below
# (pivot_row, pivot_row).
lu[row, pivot_row] = \
dps(lu[row, pivot_col]/lu[pivot_row, pivot_col])
# Form the linear combination of the pivot row and the current
# row below the pivot row that zeros the entries below the pivot.
# Employing slicing instead of a loop here raises
# NotImplementedError: Cannot add Zero to MutableSparseMatrix
# in sympy/matrices/tests/test_sparse.py.
# c = pivot_row + 1 if pivot_row == pivot_col else pivot_col
for c in range(start_col, lu.cols):
lu[row, c] = dps(lu[row, c] - lu[row, pivot_row]*lu[pivot_row, c])
if pivot_row != pivot_col:
# matrix rank < min(num rows, num cols),
# so factors of L are not stored directly below the pivot.
# These entries are zero by construction, so don't bother
# computing them.
for row in range(pivot_row + 1, lu.rows):
lu[row, pivot_col] = M.zero
pivot_col += 1
if pivot_col == lu.cols:
# All candidate pivots are zero implies that Gaussian
# elimination is complete.
return lu, row_swaps
if rankcheck:
if iszerofunc(
lu[Min(lu.rows, lu.cols) - 1, Min(lu.rows, lu.cols) - 1]):
raise ValueError("Rank of matrix is strictly less than"
" number of rows or columns."
" Pass keyword argument"
" rankcheck=False to compute"
" the LU decomposition of this matrix.")
return lu, row_swaps
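# Illustrative sketch (comments only) of the compressed storage described in
# the docstring above:
#
#     >>> from sympy import Matrix, eye
#     >>> A = Matrix([[4, 3], [6, 3]])
#     >>> lu, row_swaps = A.LUdecomposition_Simple()
#     >>> # The strictly lower triangle of ``lu`` holds the multipliers of L,
#     >>> # the upper triangle holds U, and, per the docstring above,
#     >>> # eye(A.rows).permuteFwd(row_swaps) * A equals the product L * U.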
def _LUdecompositionFF(M):
"""Compute a fraction-free LU decomposition.
Returns 4 matrices P, L, D, U such that PA = L D**-1 U.
If the elements of the matrix belong to some integral domain I, then all
elements of L, D and U are guaranteed to belong to I.
See Also
========
sympy.matrices.matrices.MatrixBase.LUdecomposition
LUdecomposition_Simple
LUsolve
References
==========
.. [1] W. Zhou & D.J. Jeffrey, "Fraction-free matrix factors: new forms
for LU and QR factors". Frontiers in Computer Science in China,
Vol 2, no. 1, pp. 67-80, 2008.
"""
from sympy.matrices import SparseMatrix
zeros = SparseMatrix.zeros
eye = SparseMatrix.eye
n, m = M.rows, M.cols
U, L, P = M.as_mutable(), eye(n), eye(n)
DD = zeros(n, n)
oldpivot = 1
for k in range(n - 1):
if U[k, k] == 0:
for kpivot in range(k + 1, n):
if U[kpivot, k]:
break
else:
raise ValueError("Matrix is not full rank")
U[k, k:], U[kpivot, k:] = U[kpivot, k:], U[k, k:]
L[k, :k], L[kpivot, :k] = L[kpivot, :k], L[k, :k]
P[k, :], P[kpivot, :] = P[kpivot, :], P[k, :]
        L[k, k] = Ukk = U[k, k]
DD[k, k] = oldpivot * Ukk
for i in range(k + 1, n):
L[i, k] = Uik = U[i, k]
for j in range(k + 1, m):
U[i, j] = (Ukk * U[i, j] - U[k, j] * Uik) / oldpivot
U[i, k] = 0
oldpivot = Ukk
DD[n - 1, n - 1] = oldpivot
return P, L, DD, U
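# Illustrative sketch (comments only) of the identity documented above,
# P*A == L*D**-1*U, on a small full-rank integer matrix:
#
#     >>> from sympy import Matrix
#     >>> A = Matrix([[1, 2], [3, 4]])
#     >>> P, L, D, U = A.LUdecompositionFF()
#     >>> P*A == L*D.inv()*U
#     True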
def _singular_value_decomposition(A):
r"""Returns a Condensed Singular Value decomposition.
Explanation
===========
A Singular Value decomposition is a decomposition in the form $A = U \Sigma V$
where
    - $U, V$ are column orthogonal matrices.
- $\Sigma$ is a diagonal matrix, where the main diagonal contains singular
values of matrix A.
A column orthogonal matrix satisfies
    $\mathbb{I} = U^H U$ while a full orthogonal matrix satisfies the
    relation $\mathbb{I} = U U^H = U^H U$ where $\mathbb{I}$ is an identity
matrix with matching dimensions.
For matrices which are not square or are rank-deficient, it is
sufficient to return a column orthogonal matrix because augmenting
them may introduce redundant computations.
    In condensed Singular Value Decomposition we only return column orthogonal
    matrices for this reason.
If you want to augment the results to return a full orthogonal
decomposition, you should use the following procedures.
    - Augment the $U, V$ matrices with columns that are orthogonal to every
      other column and make them square.
    - Augment the $\Sigma$ matrix with zero rows to make it have the same
      shape as the original matrix.
The procedure will be illustrated in the examples section.
Examples
========
    We take a full rank matrix first:
>>> from sympy import Matrix
>>> A = Matrix([[1, 2],[2,1]])
>>> U, S, V = A.singular_value_decomposition()
>>> U
Matrix([
[ sqrt(2)/2, sqrt(2)/2],
[-sqrt(2)/2, sqrt(2)/2]])
>>> S
Matrix([
[1, 0],
[0, 3]])
>>> V
Matrix([
[-sqrt(2)/2, sqrt(2)/2],
[ sqrt(2)/2, sqrt(2)/2]])
    If a matrix is square and full rank, both U and V
    are orthogonal in both directions
>>> U * U.H
Matrix([
[1, 0],
[0, 1]])
>>> U.H * U
Matrix([
[1, 0],
[0, 1]])
>>> V * V.H
Matrix([
[1, 0],
[0, 1]])
>>> V.H * V
Matrix([
[1, 0],
[0, 1]])
>>> A == U * S * V.H
True
>>> C = Matrix([
... [1, 0, 0, 0, 2],
... [0, 0, 3, 0, 0],
... [0, 0, 0, 0, 0],
... [0, 2, 0, 0, 0],
... ])
>>> U, S, V = C.singular_value_decomposition()
>>> V.H * V
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> V * V.H
Matrix([
[1/5, 0, 0, 0, 2/5],
[ 0, 1, 0, 0, 0],
[ 0, 0, 1, 0, 0],
[ 0, 0, 0, 0, 0],
[2/5, 0, 0, 0, 4/5]])
    If you want to augment the results to be a full orthogonal
    decomposition, you should augment $V$ with another orthogonal
    column.
    You can append arbitrary standard basis vectors that are linearly
    independent of the existing columns and then run the Gram-Schmidt
    process to turn the augmented set into an orthogonal basis.
>>> V_aug = V.row_join(Matrix([[0,0,0,0,1],
... [0,0,0,1,0]]).H)
>>> V_aug = V_aug.QRdecomposition()[0]
>>> V_aug
Matrix([
[0, sqrt(5)/5, 0, -2*sqrt(5)/5, 0],
[1, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 1],
[0, 2*sqrt(5)/5, 0, sqrt(5)/5, 0]])
>>> V_aug.H * V_aug
Matrix([
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1]])
>>> V_aug * V_aug.H
Matrix([
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1]])
    Similarly, we augment U:
>>> U_aug = U.row_join(Matrix([0,0,1,0]))
>>> U_aug = U_aug.QRdecomposition()[0]
>>> U_aug
Matrix([
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[1, 0, 0, 0]])
>>> U_aug.H * U_aug
Matrix([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
>>> U_aug * U_aug.H
Matrix([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
We add 2 zero columns and one row to S
>>> S_aug = S.col_join(Matrix([[0,0,0]]))
>>> S_aug = S_aug.row_join(Matrix([[0,0,0,0],
... [0,0,0,0]]).H)
>>> S_aug
Matrix([
[2, 0, 0, 0, 0],
[0, sqrt(5), 0, 0, 0],
[0, 0, 3, 0, 0],
[0, 0, 0, 0, 0]])
>>> U_aug * S_aug * V_aug.H == C
True
"""
AH = A.H
m, n = A.shape
if m >= n:
V, S = (AH * A).diagonalize()
ranked = []
for i, x in enumerate(S.diagonal()):
if not x.is_zero:
ranked.append(i)
V = V[:, ranked]
Singular_vals = [sqrt(S[i, i]) for i in range(S.rows) if i in ranked]
S = S.zeros(len(Singular_vals))
for i in range(len(Singular_vals)):
S[i, i] = Singular_vals[i]
V, _ = V.QRdecomposition()
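# Recover the remaining factor from A = U*S*V^H  =>  U = A*V*S^{-1}.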
U = A * V * S.inv()
else:
U, S = (A * AH).diagonalize()
ranked = []
for i, x in enumerate(S.diagonal()):
if not x.is_zero:
ranked.append(i)
U = U[:, ranked]
Singular_vals = [sqrt(S[i, i]) for i in range(S.rows) if i in ranked]
S = S.zeros(len(Singular_vals))
for i in range(len(Singular_vals)):
S[i, i] = Singular_vals[i]
U, _ = U.QRdecomposition()
V = AH * U * S.inv()
return U, S, V
def _QRdecomposition_optional(M, normalize=True):
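# Gram-Schmidt orthogonalization of the columns of M, performed column by
# column: projection coefficients are recorded in R, columns that become
# zero (rank deficiency) are dropped, and the surviving columns are
# normalized only when ``normalize`` is True.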
def dot(u, v):
return u.dot(v, hermitian=True)
dps = _get_intermediate_simp(expand_mul, expand_mul)
A = M.as_mutable()
ranked = list()
Q = A
R = A.zeros(A.cols)
for j in range(A.cols):
for i in range(j):
if Q[:, i].is_zero_matrix:
continue
R[i, j] = dot(Q[:, i], Q[:, j]) / dot(Q[:, i], Q[:, i])
R[i, j] = dps(R[i, j])
Q[:, j] -= Q[:, i] * R[i, j]
Q[:, j] = dps(Q[:, j])
if Q[:, j].is_zero_matrix is False:
ranked.append(j)
R[j, j] = M.one
Q = Q.extract(range(Q.rows), ranked)
R = R.extract(ranked, range(R.cols))
if normalize:
# Normalization
for i in range(Q.cols):
norm = Q[:, i].norm()
Q[:, i] /= norm
R[i, :] *= norm
return M.__class__(Q), M.__class__(R)
def _QRdecomposition(M):
r"""Returns a QR decomposition.
Explanation
===========
A QR decomposition is a decomposition in the form $A = Q R$
where
- $Q$ is a column orthogonal matrix.
- $R$ is an upper triangular (trapezoidal) matrix.
A column orthogonal matrix satisfies
$\mathbb{I} = Q^H Q$ while a full orthogonal matrix satisfies
relation $\mathbb{I} = Q Q^H = Q^H Q$ where $\mathbb{I}$ is an identity
matrix with matching dimensions.
For matrices which are not square or are rank-deficient, it is
sufficient to return a column orthogonal matrix because augmenting
them may introduce redundant computations.
Another advantage of this is that you can easily inspect the
matrix rank by counting the number of columns of $Q$.
If you want to augment the results to return a full orthogonal
decomposition, you should use the following procedures.
- Augment the $Q$ matrix with columns that are orthogonal to every
other column and make it square.
- Augment the $R$ matrix with zero rows to make it have the same
shape as the original matrix.
The procedure will be illustrated in the examples section.
Examples
========
A full rank matrix example:
>>> from sympy import Matrix
>>> A = Matrix([[12, -51, 4], [6, 167, -68], [-4, 24, -41]])
>>> Q, R = A.QRdecomposition()
>>> Q
Matrix([
[ 6/7, -69/175, -58/175],
[ 3/7, 158/175, 6/175],
[-2/7, 6/35, -33/35]])
>>> R
Matrix([
[14, 21, -14],
[ 0, 175, -70],
[ 0, 0, 35]])
If the matrix is square and full rank, the $Q$ matrix becomes
orthogonal in both directions, and needs no augmentation.
>>> Q * Q.H
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> Q.H * Q
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> A == Q*R
True
A rank deficient matrix example:
>>> A = Matrix([[12, -51, 0], [6, 167, 0], [-4, 24, 0]])
>>> Q, R = A.QRdecomposition()
>>> Q
Matrix([
[ 6/7, -69/175],
[ 3/7, 158/175],
[-2/7, 6/35]])
>>> R
Matrix([
[14, 21, 0],
[ 0, 175, 0]])
QRdecomposition might return a matrix Q that is rectangular.
In this case the orthogonality condition is satisfied as
$\mathbb{I} = Q.H*Q$, but not for the reversed product
$Q * Q.H$.
>>> Q.H * Q
Matrix([
[1, 0],
[0, 1]])
>>> Q * Q.H
Matrix([
[27261/30625, 348/30625, -1914/6125],
[ 348/30625, 30589/30625, 198/6125],
[ -1914/6125, 198/6125, 136/1225]])
If you want to augment the results to be a full orthogonal
decomposition, you should augment $Q$ with another orthogonal
column.
You can append arbitrary standard basis vectors that are linearly
independent of the existing columns and run the Gram-Schmidt
process to turn the augmented set into an orthogonal basis.
>>> Q_aug = Q.row_join(Matrix([0, 0, 1]))
>>> Q_aug = Q_aug.QRdecomposition()[0]
>>> Q_aug
Matrix([
[ 6/7, -69/175, 58/175],
[ 3/7, 158/175, -6/175],
[-2/7, 6/35, 33/35]])
>>> Q_aug.H * Q_aug
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> Q_aug * Q_aug.H
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
Augmenting the $R$ matrix with a zero row is straightforward.
>>> R_aug = R.col_join(Matrix([[0, 0, 0]]))
>>> R_aug
Matrix([
[14, 21, 0],
[ 0, 175, 0],
[ 0, 0, 0]])
>>> Q_aug * R_aug == A
True
A zero matrix example:
>>> from sympy import Matrix
>>> A = Matrix.zeros(3, 4)
>>> Q, R = A.QRdecomposition()
They may return matrices with zero rows and columns.
>>> Q
Matrix(3, 0, [])
>>> R
Matrix(0, 4, [])
>>> Q*R
Matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
Following the same augmentation rule described above, $Q$ can be augmented
with columns of an identity matrix and $R$ can be augmented with
rows of a zero matrix.
>>> Q_aug = Q.row_join(Matrix.eye(3))
>>> R_aug = R.col_join(Matrix.zeros(3, 4))
>>> Q_aug * Q_aug.T
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> R_aug
Matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
>>> Q_aug * R_aug == A
True
See Also
========
sympy.matrices.dense.DenseMatrix.cholesky
sympy.matrices.dense.DenseMatrix.LDLdecomposition
sympy.matrices.matrices.MatrixBase.LUdecomposition
QRsolve
"""
return _QRdecomposition_optional(M, normalize=True)
def _upper_hessenberg_decomposition(A):
"""Converts a matrix into Hessenberg matrix H
Returns 2 matrices H, P s.t.
$P H P^{T} = A$, where H is an upper hessenberg matrix
and P is an orthogonal matrix
Examples
========
>>> from sympy import Matrix
>>> A = Matrix([
... [1,2,3],
... [-3,5,6],
... [4,-8,9],
... ])
>>> H, P = A.upper_hessenberg_decomposition()
>>> H
Matrix([
[1, 6/5, 17/5],
[5, 213/25, -134/25],
[0, 216/25, 137/25]])
>>> P
Matrix([
[1, 0, 0],
[0, -3/5, 4/5],
[0, 4/5, 3/5]])
>>> P * H * P.H == A
True
References
==========
.. [#] https://mathworld.wolfram.com/HessenbergDecomposition.html
"""
M = A.as_mutable()
if not M.is_square:
raise NonSquareMatrixError("Matrix must be square.")
n = M.cols
P = M.eye(n)
H = M
for j in range(n - 2):
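# Build a Householder reflector from the part of column j below the
# diagonal and apply it from both sides, zeroing the entries below the
# first subdiagonal while accumulating the transformation in P.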
u = H[j + 1:, j]
if u[1:, :].is_zero_matrix:
continue
if sign(u[0]) != 0:
u[0] = u[0] + sign(u[0]) * u.norm()
else:
u[0] = u[0] + u.norm()
v = u / u.norm()
H[j + 1:, :] = H[j + 1:, :] - 2 * v * (v.H * H[j + 1:, :])
H[:, j + 1:] = H[:, j + 1:] - (H[:, j + 1:] * (2 * v)) * v.H
P[:, j + 1:] = P[:, j + 1:] - (P[:, j + 1:] * (2 * v)) * v.H
return H, P
from types import FunctionType
from sympy.core.numbers import Float, Integer
from sympy.core.singleton import S
from sympy.core.symbol import uniquely_named_symbol
from sympy.core.mul import Mul
from sympy.polys import PurePoly, cancel
from sympy.simplify.simplify import (simplify as _simplify,
dotprodsimp as _dotprodsimp)
from sympy.core.sympify import sympify
from sympy.functions.combinatorial.numbers import nC
from sympy.polys.matrices.domainmatrix import DomainMatrix
from .common import NonSquareMatrixError
from .utilities import (
_get_intermediate_simp, _get_intermediate_simp_bool,
_iszero, _is_zero_after_expand_mul)
def _find_reasonable_pivot(col, iszerofunc=_iszero, simpfunc=_simplify):
""" Find the lowest index of an item in ``col`` that is
suitable for a pivot. If ``col`` consists only of
Floats, the pivot with the largest norm is returned.
Otherwise, the first element where ``iszerofunc`` returns
False is used. If ``iszerofunc`` does not return False for any element,
the items are simplified and retested until a suitable
pivot is found.
Returns a 4-tuple
(pivot_offset, pivot_val, assumed_nonzero, newly_determined)
where pivot_offset is the index of the pivot, pivot_val is
the (possibly simplified) value of the pivot, assumed_nonzero
is True if an assumption that the pivot was non-zero
was made without being proved, and newly_determined are
elements that were simplified during the process of pivot
finding."""
newly_determined = []
col = list(col)
# a column that contains a mix of floats and integers
# but at least one float is considered a numerical
# column, and so we do partial pivoting
if all(isinstance(x, (Float, Integer)) for x in col) and any(
isinstance(x, Float) for x in col):
col_abs = [abs(x) for x in col]
max_value = max(col_abs)
if iszerofunc(max_value):
# just because iszerofunc returned True, doesn't
# mean the value is numerically zero. Make sure
# to replace all entries with numerical zeros
if max_value != 0:
newly_determined = [(i, 0) for i, x in enumerate(col) if x != 0]
return (None, None, False, newly_determined)
index = col_abs.index(max_value)
return (index, col[index], False, newly_determined)
# PASS 1 (iszerofunc directly)
possible_zeros = []
for i, x in enumerate(col):
is_zero = iszerofunc(x)
# if someone wrote a custom iszerofunc, it may return
# BooleanFalse or BooleanTrue instead of True or False,
# so use == for comparison instead of `is`
if is_zero == False:
# we found something that is definitely not zero
return (i, x, False, newly_determined)
possible_zeros.append(is_zero)
# by this point, we've found no certain non-zeros
if all(possible_zeros):
# if everything is definitely zero, we have
# no pivot
return (None, None, False, newly_determined)
# PASS 2 (iszerofunc after simplify)
# we haven't found any for-sure non-zeros, so
# go through the elements iszerofunc couldn't
# make a determination about and opportunistically
# simplify to see if we find something
for i, x in enumerate(col):
if possible_zeros[i] is not None:
continue
simped = simpfunc(x)
is_zero = iszerofunc(simped)
if is_zero in (True, False):
newly_determined.append((i, simped))
if is_zero == False:
return (i, simped, False, newly_determined)
possible_zeros[i] = is_zero
# after simplifying, some things that were not previously
# recognized as zeros might now be known to be zeros
if all(possible_zeros):
# if everything is definitely zero, we have
# no pivot
return (None, None, False, newly_determined)
# PASS 3 (.equals(0))
# some expressions fail to simplify to zero, but
# ``.equals(0)`` evaluates to True. As a last-ditch
# attempt, apply ``.equals`` to these expressions
for i, x in enumerate(col):
if possible_zeros[i] is not None:
continue
if x.equals(S.Zero):
# ``.iszero`` may return False with
# an implicit assumption (e.g., ``x.equals(0)``
# when ``x`` is a symbol), so only treat it
# as proved when ``.equals(0)`` returns True
possible_zeros[i] = True
newly_determined.append((i, S.Zero))
if all(possible_zeros):
return (None, None, False, newly_determined)
# at this point there is nothing that could definitely
# be a pivot. To maintain compatibility with existing
# behavior, we'll assume that an illdetermined thing is
# non-zero. We should probably raise a warning in this case
i = possible_zeros.index(None)
return (i, col[i], True, newly_determined)
def _find_reasonable_pivot_naive(col, iszerofunc=_iszero, simpfunc=None):
"""
Helper that computes the pivot value and location from a
sequence of contiguous matrix column elements. As a side effect
of the pivot search, this function may simplify some of the elements
of the input column. A list of these simplified entries and their
indices are also returned.
This function mimics the behavior of _find_reasonable_pivot(),
but does less work trying to determine if an indeterminate candidate
pivot simplifies to zero. This more naive approach can be much faster,
with the trade-off that it may erroneously return a pivot that is zero.
``col`` is a sequence of contiguous column entries to be searched for
a suitable pivot.
``iszerofunc`` is a callable that returns a Boolean that indicates
if its input is zero, or None if no such determination can be made.
``simpfunc`` is a callable that simplifies its input. It must return
its input if it does not simplify its input. Passing in
``simpfunc=None`` indicates that the pivot search should not attempt
to simplify any candidate pivots.
Returns a 4-tuple:
(pivot_offset, pivot_val, assumed_nonzero, newly_determined)
``pivot_offset`` is the sequence index of the pivot.
``pivot_val`` is the value of the pivot.
pivot_val and col[pivot_index] are equivalent, but will be different
when col[pivot_index] was simplified during the pivot search.
``assumed_nonzero`` is a boolean indicating if the pivot cannot be
guaranteed to be zero. If assumed_nonzero is true, then the pivot
may or may not be non-zero. If assumed_nonzero is false, then
the pivot is non-zero.
``newly_determined`` is a list of index-value pairs of pivot candidates
that were simplified during the pivot search.
"""
# indeterminates holds the index-value pairs of each pivot candidate
# that is neither zero nor non-zero, as determined by iszerofunc().
# If iszerofunc() indicates that a candidate pivot is guaranteed
# non-zero, or that every candidate pivot is zero then the contents
# of indeterminates are unused.
# Otherwise, the only viable candidate pivots are symbolic.
# In this case, indeterminates will have at least one entry,
# and all but the first entry are ignored when simpfunc is None.
indeterminates = []
for i, col_val in enumerate(col):
col_val_is_zero = iszerofunc(col_val)
if col_val_is_zero == False:
# This pivot candidate is non-zero.
return i, col_val, False, []
elif col_val_is_zero is None:
# The candidate pivot's comparison with zero
# is indeterminate.
indeterminates.append((i, col_val))
if len(indeterminates) == 0:
# All candidate pivots are guaranteed to be zero, i.e. there is
# no pivot.
return None, None, False, []
if simpfunc is None:
# Caller did not pass in a simplification function that might
# determine if an indeterminate pivot candidate is guaranteed
# to be nonzero, so assume the first indeterminate candidate
# is non-zero.
return indeterminates[0][0], indeterminates[0][1], True, []
# newly_determined holds index-value pairs of candidate pivots
# that were simplified during the search for a non-zero pivot.
newly_determined = []
for i, col_val in indeterminates:
tmp_col_val = simpfunc(col_val)
if id(col_val) != id(tmp_col_val):
# simpfunc() simplified this candidate pivot.
newly_determined.append((i, tmp_col_val))
if iszerofunc(tmp_col_val) == False:
# Candidate pivot simplified to a guaranteed non-zero value.
return i, tmp_col_val, False, newly_determined
return indeterminates[0][0], indeterminates[0][1], True, newly_determined
# This function is a candidate for caching if it gets implemented for matrices.
def _berkowitz_toeplitz_matrix(M):
"""Return (A,T) where T the Toeplitz matrix used in the Berkowitz algorithm
corresponding to ``M`` and A is the first principal submatrix.
"""
# the 0 x 0 case is trivial
if M.rows == 0 and M.cols == 0:
return M._new(1,1, [M.one])
#
# Partition M = [ a_11 R ]
# [ C A ]
#
a, R = M[0,0], M[0, 1:]
C, A = M[1:, 0], M[1:,1:]
#
# The Toeplitz matrix looks like
#
#  [  1                                   ]
#  [ -a        1                          ]
#  [ -RC      -a       1                  ]
#  [ -RAC     -RC     -a      1           ]
#  [ -RA**2C  -RAC    -RC    -a      1    ]
# etc.
# Compute the diagonal entries.
# Because multiplying matrix times vector is so much
# more efficient than matrix times matrix, recursively
# compute -R * A**n * C.
diags = [C]
for i in range(M.rows - 2):
diags.append(A.multiply(diags[i], dotprodsimp=None))
diags = [(-R).multiply(d, dotprodsimp=None)[0, 0] for d in diags]
diags = [M.one, -a] + diags
def entry(i,j):
if j > i:
return M.zero
return diags[i - j]
toeplitz = M._new(M.cols + 1, M.rows, entry)
return (A, toeplitz)
# This function is a candidate for caching if it gets implemented for matrices.
def _berkowitz_vector(M):
""" Run the Berkowitz algorithm and return a vector whose entries
are the coefficients of the characteristic polynomial of ``M``.
Given N x N matrix, efficiently compute
coefficients of characteristic polynomials of ``M``
without division in the ground domain.
This method is particularly useful for computing determinant,
principal minors and characteristic polynomial when ``M``
has complicated coefficients e.g. polynomials. Semi-direct
usage of this algorithm is also important in computing
efficiently sub-resultant PRS.
Assuming that M is a square matrix of dimension N x N and
I is N x N identity matrix, then the Berkowitz vector is
an N x 1 vector whose entries are coefficients of the
polynomial
charpoly(M) = det(t*I - M)
As a consequence, all polynomials generated by Berkowitz
algorithm are monic.
For more information on the implemented algorithm refer to:
[1] S.J. Berkowitz, On computing the determinant in small
parallel time using a small number of processors, ACM,
Information Processing Letters 18, 1984, pp. 147-150
[2] M. Keber, Division-Free computation of sub-resultants
using Bezout matrices, Tech. Report MPI-I-2006-1-006,
Saarbrucken, 2006
"""
# handle the trivial cases
if M.rows == 0 and M.cols == 0:
return M._new(1, 1, [M.one])
elif M.rows == 1 and M.cols == 1:
return M._new(2, 1, [M.one, -M[0,0]])
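# Recursive step of the Berkowitz algorithm: the coefficient vector of
# M is the Toeplitz matrix of M applied to the coefficient vector of
# its first principal submatrix.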
submat, toeplitz = _berkowitz_toeplitz_matrix(M)
return toeplitz.multiply(_berkowitz_vector(submat), dotprodsimp=None)
def _adjugate(M, method="berkowitz"):
"""Returns the adjugate, or classical adjoint, of
a matrix. That is, the transpose of the matrix of cofactors.
https://en.wikipedia.org/wiki/Adjugate
Parameters
==========
method : string, optional
Method to use to find the cofactors, can be "bareiss", "berkowitz" or
"lu".
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 2], [3, 4]])
>>> M.adjugate()
Matrix([
[ 4, -2],
[-3, 1]])
See Also
========
cofactor_matrix
sympy.matrices.common.MatrixCommon.transpose
"""
return M.cofactor_matrix(method=method).transpose()
# This function is a candidate for caching if it gets implemented for matrices.
def _charpoly(M, x='lambda', simplify=_simplify):
"""Computes characteristic polynomial det(x*I - M) where I is
the identity matrix.
A PurePoly is returned, so using different variables for ``x`` does
not affect the comparison or the polynomials:
Parameters
==========
x : string, optional
Name for the "lambda" variable, defaults to "lambda".
simplify : function, optional
Simplification function to use on the characteristic polynomial
calculated. Defaults to ``simplify``.
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[1, 3], [2, 0]])
>>> M.charpoly()
PurePoly(lambda**2 - lambda - 6, lambda, domain='ZZ')
>>> M.charpoly(x) == M.charpoly(y)
True
Specifying ``x`` is optional; a symbol named ``lambda`` is used by
default (which looks good when pretty-printed in unicode):
>>> M.charpoly().as_expr()
lambda**2 - lambda - 6
And if ``x`` clashes with an existing symbol, underscores will
be prepended to the name to make it unique:
>>> M = Matrix([[1, 2], [x, 0]])
>>> M.charpoly(x).as_expr()
_x**2 - _x - 2*x
Whether you pass a symbol or not, the generator can be obtained
with the gen attribute since it may not be the same as the symbol
that was passed:
>>> M.charpoly(x).gen
_x
>>> M.charpoly(x).gen == x
False
Notes
=====
The Samuelson-Berkowitz algorithm is used to compute
the characteristic polynomial efficiently and without any
division operations. Thus the characteristic polynomial over any
commutative ring without zero divisors can be computed.
If the determinant det(x*I - M) can be found out easily, as
in the case of an upper or a lower triangular matrix, then
instead of the Samuelson-Berkowitz algorithm the eigenvalues are computed
and the characteristic polynomial is constructed from them.
See Also
========
det
"""
if not M.is_square:
raise NonSquareMatrixError()
if M.is_lower or M.is_upper:
diagonal_elements = M.diagonal()
x = uniquely_named_symbol(x, diagonal_elements, modify=lambda s: '_' + s)
m = 1
for i in diagonal_elements:
m = m * (x - simplify(i))
return PurePoly(m, x)
berk_vector = _berkowitz_vector(M)
x = uniquely_named_symbol(x, berk_vector, modify=lambda s: '_' + s)
return PurePoly([simplify(a) for a in berk_vector], x)
def _cofactor(M, i, j, method="berkowitz"):
"""Calculate the cofactor of an element.
Parameters
==========
method : string, optional
Method to use to find the cofactors, can be "bareiss", "berkowitz" or
"lu".
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 2], [3, 4]])
>>> M.cofactor(0, 1)
-3
See Also
========
cofactor_matrix
minor
minor_submatrix
"""
if not M.is_square or M.rows < 1:
raise NonSquareMatrixError()
return S.NegativeOne**((i + j) % 2) * M.minor(i, j, method)
def _cofactor_matrix(M, method="berkowitz"):
"""Return a matrix containing the cofactor of each element.
Parameters
==========
method : string, optional
Method to use to find the cofactors, can be "bareiss", "berkowitz" or
"lu".
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 2], [3, 4]])
>>> M.cofactor_matrix()
Matrix([
[ 4, -3],
[-2, 1]])
See Also
========
cofactor
minor
minor_submatrix
"""
if not M.is_square or M.rows < 1:
raise NonSquareMatrixError()
return M._new(M.rows, M.cols,
lambda i, j: M.cofactor(i, j, method))
def _per(M):
"""Returns the permanent of a matrix. Unlike determinant,
permanent is defined for both square and non-square matrices.
For an m x n matrix, with m less than or equal to n,
it is given as the sum over the permutations s of size
less than or equal to m on [1, 2, . . . n] of the product
from i = 1 to m of M[i, s[i]]. Taking the transpose will
not affect the value of the permanent.
In the case of a square matrix, this is the same as the permutation
definition of the determinant, but it does not take the sign of the
permutation into account. Computing the permanent with this definition
is quite inefficient, so here the Ryser formula is used.
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> M.per()
450
>>> M = Matrix([1, 5, 7])
>>> M.per()
13
References
==========
.. [1] Prof. Frank Ben's notes: https://math.berkeley.edu/~bernd/ban275.pdf
.. [2] Wikipedia article on Permanent: https://en.wikipedia.org/wiki/Permanent_(mathematics)
.. [3] https://reference.wolfram.com/language/ref/Permanent.html
.. [4] Permanent of a rectangular matrix : https://arxiv.org/pdf/0904.3251.pdf
"""
import itertools
m, n = M.shape
if m > n:
M = M.T
m, n = n, m
s = list(range(n))
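# Ryser-style inclusion-exclusion over column subsets: for each subset,
# take the product over the rows of the row sums restricted to that
# subset, weighted by a sign and a binomial correction for rectangular
# matrices.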
subsets = []
for i in range(1, m + 1):
subsets += list(map(list, itertools.combinations(s, i)))
perm = 0
for subset in subsets:
prod = 1
sub_len = len(subset)
for i in range(m):
prod *= sum([M[i, j] for j in subset])
perm += prod * S.NegativeOne**sub_len * nC(n - sub_len, m - sub_len)
perm *= S.NegativeOne**m
perm = sympify(perm)
return perm.simplify()
def _det_DOM(M):
DOM = DomainMatrix.from_Matrix(M, field=True, extension=True)
K = DOM.domain
return K.to_sympy(DOM.det())
# This function is a candidate for caching if it gets implemented for matrices.
def _det(M, method="bareiss", iszerofunc=None):
"""Computes the determinant of a matrix if ``M`` is a concrete matrix object
otherwise return an expressions ``Determinant(M)`` if ``M`` is a
``MatrixSymbol`` or other expression.
Parameters
==========
method : string, optional
Specifies the algorithm used for computing the matrix determinant.
If the matrix is at most 3x3, a hard-coded formula is used and the
specified method is ignored. Otherwise, it defaults to
``'bareiss'``.
Also, if the matrix is an upper or a lower triangular matrix, determinant
is computed by simple multiplication of diagonal elements, and the
specified method is ignored.
If it is set to ``'domain-ge'``, then Gaussian elimination method will
be used via using DomainMatrix.
If it is set to ``'bareiss'``, Bareiss' fraction-free algorithm will
be used.
If it is set to ``'berkowitz'``, Berkowitz' algorithm will be used.
Otherwise, if it is set to ``'lu'``, LU decomposition will be used.
.. note::
For backward compatibility, legacy keys like "bareis" and
"det_lu" can still be used to indicate the corresponding
methods.
And the keys are also case-insensitive for now. However, it is
suggested to use the precise keys for specifying the method.
iszerofunc : FunctionType or None, optional
If it is set to ``None``, it will default to ``_is_zero_after_expand_mul``
if the method is set to ``'bareiss'``, and to ``_iszero`` if
the method is set to ``'lu'``.
It can also accept any user-specified zero testing function, if it
is formatted as a function which accepts a single symbolic argument
and returns ``True`` if it is tested as zero, ``False`` if it is
tested as non-zero, and ``None`` if it is undecidable.
Returns
=======
det : Basic
Result of determinant.
Raises
======
ValueError
If unrecognized keys are given for ``method`` or ``iszerofunc``.
NonSquareMatrixError
If attempted to calculate determinant from a non-square matrix.
Examples
========
>>> from sympy import Matrix, eye, det
>>> I3 = eye(3)
>>> det(I3)
1
>>> M = Matrix([[1, 2], [3, 4]])
>>> det(M)
-2
>>> det(M) == M.det()
True
>>> M.det(method="domain-ge")
-2
"""
# sanitize `method`
method = method.lower()
if method == "bareis":
method = "bareiss"
elif method == "det_lu":
method = "lu"
if method not in ("bareiss", "berkowitz", "lu", "domain-ge"):
raise ValueError("Determinant method '%s' unrecognized" % method)
if iszerofunc is None:
if method == "bareiss":
iszerofunc = _is_zero_after_expand_mul
elif method == "lu":
iszerofunc = _iszero
elif not isinstance(iszerofunc, FunctionType):
raise ValueError("Zero testing method '%s' unrecognized" % iszerofunc)
n = M.rows
if n == M.cols: # square check is done in individual method functions
if n == 0:
return M.one
elif n == 1:
return M[0, 0]
elif n == 2:
m = M[0, 0] * M[1, 1] - M[0, 1] * M[1, 0]
return _get_intermediate_simp(_dotprodsimp)(m)
elif n == 3:
m = (M[0, 0] * M[1, 1] * M[2, 2]
+ M[0, 1] * M[1, 2] * M[2, 0]
+ M[0, 2] * M[1, 0] * M[2, 1]
- M[0, 2] * M[1, 1] * M[2, 0]
- M[0, 0] * M[1, 2] * M[2, 1]
- M[0, 1] * M[1, 0] * M[2, 2])
return _get_intermediate_simp(_dotprodsimp)(m)
dets = []
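# M can be permuted into block triangular form with one diagonal block
# per strongly connected component, so det(M) is the product of the
# determinants of those blocks.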
for b in M.strongly_connected_components():
if method == "domain-ge": # uses DomainMatrix to evalute determinant
det = _det_DOM(M[b, b])
elif method == "bareiss":
det = M[b, b]._eval_det_bareiss(iszerofunc=iszerofunc)
elif method == "berkowitz":
det = M[b, b]._eval_det_berkowitz()
elif method == "lu":
det = M[b, b]._eval_det_lu(iszerofunc=iszerofunc)
dets.append(det)
return Mul(*dets)
# This function is a candidate for caching if it gets implemented for matrices.
def _det_bareiss(M, iszerofunc=_is_zero_after_expand_mul):
"""Compute matrix determinant using Bareiss' fraction-free
algorithm which is an extension of the well known Gaussian
elimination method. This approach is best suited for dense
symbolic matrices and will result in a determinant with
minimal number of fractions. It means that less term
rewriting is needed on resulting formulae.
Parameters
==========
iszerofunc : function, optional
The function to use to determine zeros during the pivot search.
Defaults to ``_is_zero_after_expand_mul``.
TODO: Implement algorithm for sparse matrices (SFF),
http://www.eecis.udel.edu/~saunders/papers/sffge/it5.ps.
"""
# Recursively implemented Bareiss' algorithm as per Deanna Richelle Leggett's
# thesis http://www.math.usm.edu/perry/Research/Thesis_DRL.pdf
def bareiss(mat, cumm=1):
if mat.rows == 0:
return mat.one
elif mat.rows == 1:
return mat[0, 0]
# find a pivot and extract the remaining matrix
# With the default iszerofunc, _find_reasonable_pivot slows down
# the computation by the factor of 2.5 in one test.
# Relevant issues: #10279 and #13877.
pivot_pos, pivot_val, _, _ = _find_reasonable_pivot(mat[:, 0], iszerofunc=iszerofunc)
if pivot_pos is None:
return mat.zero
# if we have a valid pivot, we'll do a "row swap", so keep the
# sign of the det
sign = (-1) ** (pivot_pos % 2)
# we want every row but the pivot row and every column
rows = list(i for i in range(mat.rows) if i != pivot_pos)
cols = list(range(mat.cols))
tmp_mat = mat.extract(rows, cols)
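# Bareiss update: each new entry is a 2x2 "cross" determinant divided
# by the previous pivot ``cumm``, which keeps intermediate results
# fraction-free.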
def entry(i, j):
ret = (pivot_val*tmp_mat[i, j + 1] - mat[pivot_pos, j + 1]*tmp_mat[i, 0]) / cumm
if _get_intermediate_simp_bool(True):
return _dotprodsimp(ret)
elif not ret.is_Atom:
return cancel(ret)
return ret
return sign*bareiss(M._new(mat.rows - 1, mat.cols - 1, entry), pivot_val)
if not M.is_square:
raise NonSquareMatrixError()
if M.rows == 0:
return M.one
# sympy/matrices/tests/test_matrices.py contains a test that
# suggests that the determinant of a 0 x 0 matrix is one, by
# convention.
return bareiss(M)
def _det_berkowitz(M):
""" Use the Berkowitz algorithm to compute the determinant."""
if not M.is_square:
raise NonSquareMatrixError()
if M.rows == 0:
return M.one
# sympy/matrices/tests/test_matrices.py contains a test that
# suggests that the determinant of a 0 x 0 matrix is one, by
# convention.
berk_vector = _berkowitz_vector(M)
return (-1)**(len(berk_vector) - 1) * berk_vector[-1]
# This function is a candidate for caching if it gets implemented for matrices.
def _det_LU(M, iszerofunc=_iszero, simpfunc=None):
""" Computes the determinant of a matrix from its LU decomposition.
This function uses the LU decomposition computed by
LUDecomposition_Simple().
The keyword arguments iszerofunc and simpfunc are passed to
LUDecomposition_Simple().
iszerofunc is a callable that returns a boolean indicating if its
input is zero, or None if it cannot make the determination.
simpfunc is a callable that simplifies its input.
The default is simpfunc=None, which indicates that the pivot search
algorithm should not attempt to simplify any candidate pivots.
If simpfunc fails to simplify its input, then it must return its input
instead of a copy.
Parameters
==========
iszerofunc : function, optional
The function to use to determine zeros when doing an LU decomposition.
Defaults to ``lambda x: x.is_zero``.
simpfunc : function, optional
The simplification function to use when looking for zeros for pivots.
"""
if not M.is_square:
raise NonSquareMatrixError()
if M.rows == 0:
return M.one
# sympy/matrices/tests/test_matrices.py contains a test that
# suggests that the determinant of a 0 x 0 matrix is one, by
# convention.
lu, row_swaps = M.LUdecomposition_Simple(iszerofunc=iszerofunc,
simpfunc=simpfunc)
# P*A = L*U => det(A) = det(L)*det(U)/det(P) = det(P)*det(U).
# Lower triangular factor L encoded in lu has unit diagonal => det(L) = 1.
# P is a permutation matrix => det(P) in {-1, 1} => 1/det(P) = det(P).
# LUdecomposition_Simple() returns a list of row exchange index pairs, rather
# than a permutation matrix, but det(P) = (-1)**len(row_swaps).
# Avoid forming the potentially time consuming product of U's diagonal entries
# if the product is zero.
# Bottom right entry of U is 0 => det(A) = 0.
# It may be impossible to determine if this entry of U is zero when it is symbolic.
if iszerofunc(lu[lu.rows-1, lu.rows-1]):
return M.zero
# Compute det(P)
det = -M.one if len(row_swaps)%2 else M.one
# Compute det(U) by calculating the product of U's diagonal entries.
# The upper triangular portion of lu is the upper triangular portion of the
# U factor in the LU decomposition.
for k in range(lu.rows):
det *= lu[k, k]
# return det(P)*det(U)
return det
def _minor(M, i, j, method="berkowitz"):
"""Return the (i,j) minor of ``M``. That is,
return the determinant of the matrix obtained by deleting
the `i`th row and `j`th column from ``M``.
Parameters
==========
i, j : int
The row and column to exclude to obtain the submatrix.
method : string, optional
Method to use to find the determinant of the submatrix, can be
"bareiss", "berkowitz" or "lu".
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> M.minor(1, 1)
-12
See Also
========
minor_submatrix
cofactor
det
"""
if not M.is_square:
raise NonSquareMatrixError()
return M.minor_submatrix(i, j).det(method=method)
def _minor_submatrix(M, i, j):
"""Return the submatrix obtained by removing the `i`th row
and `j`th column from ``M`` (works with Pythonic negative indices).
Parameters
==========
i, j : int
The row and column to exclude to obtain the submatrix.
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> M.minor_submatrix(1, 1)
Matrix([
[1, 3],
[7, 9]])
See Also
========
minor
cofactor
"""
if i < 0:
i += M.rows
if j < 0:
j += M.cols
if not 0 <= i < M.rows or not 0 <= j < M.cols:
raise ValueError("`i` and `j` must satisfy 0 <= i < ``M.rows`` "
"(%d)" % M.rows + "and 0 <= j < ``M.cols`` (%d)." % M.cols)
rows = [a for a in range(M.rows) if a != i]
cols = [a for a in range(M.cols) if a != j]
return M.extract(rows, cols)
from sympy.core.numbers import mod_inverse
from .common import MatrixError, NonSquareMatrixError, NonInvertibleMatrixError
from .utilities import _iszero
def _pinv_full_rank(M):
"""Subroutine for full row or column rank matrices.
For full row rank matrices, inverse of ``A * A.H`` Exists.
For full column rank matrices, inverse of ``A.H * A`` Exists.
This routine can apply for both cases by checking the shape
and have small decision.
"""
if M.is_zero_matrix:
return M.H
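# Full column rank:  A^+ = (A^H A)^{-1} A^H
# Full row rank:     A^+ = A^H (A A^H)^{-1}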
if M.rows >= M.cols:
return M.H.multiply(M).inv().multiply(M.H)
else:
return M.H.multiply(M.multiply(M.H).inv())
def _pinv_rank_decomposition(M):
"""Subroutine for rank decomposition
With a rank decomposition, `A` can be decomposed into two full-
rank matrices, and the pseudoinverse of each factor can be computed
individually.
"""
if M.is_zero_matrix:
return M.H
B, C = M.rank_decomposition()
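# A = B*C with B of full column rank and C of full row rank, so
# A^+ = C^+ * B^+.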
Bp = _pinv_full_rank(B)
Cp = _pinv_full_rank(C)
return Cp.multiply(Bp)
def _pinv_diagonalization(M):
"""Subroutine using diagonalization
This routine can sometimes fail if SymPy's eigenvalue
computation is not reliable.
"""
if M.is_zero_matrix:
return M.H
A = M
AH = M.H
try:
if M.rows >= M.cols:
P, D = AH.multiply(A).diagonalize(normalize=True)
D_pinv = D.applyfunc(lambda x: 0 if _iszero(x) else 1 / x)
return P.multiply(D_pinv).multiply(P.H).multiply(AH)
else:
P, D = A.multiply(AH).diagonalize(
normalize=True)
D_pinv = D.applyfunc(lambda x: 0 if _iszero(x) else 1 / x)
return AH.multiply(P).multiply(D_pinv).multiply(P.H)
except MatrixError:
raise NotImplementedError(
'pinv for rank-deficient matrices where '
'diagonalization of A.H*A fails is not supported yet.')
def _pinv(M, method='RD'):
"""Calculate the Moore-Penrose pseudoinverse of the matrix.
The Moore-Penrose pseudoinverse exists and is unique for any matrix.
If the matrix is invertible, the pseudoinverse is the same as the
inverse.
Parameters
==========
method : String, optional
Specifies the method for computing the pseudoinverse.
If ``'RD'``, Rank-Decomposition will be used.
If ``'ED'``, Diagonalization will be used.
Examples
========
Computing pseudoinverse by rank decomposition :
>>> from sympy import Matrix
>>> A = Matrix([[1, 2, 3], [4, 5, 6]])
>>> A.pinv()
Matrix([
[-17/18, 4/9],
[ -1/9, 1/9],
[ 13/18, -2/9]])
Computing pseudoinverse by diagonalization :
>>> B = A.pinv(method='ED')
>>> B.simplify()
>>> B
Matrix([
[-17/18, 4/9],
[ -1/9, 1/9],
[ 13/18, -2/9]])
See Also
========
inv
pinv_solve
References
==========
.. [1] https://en.wikipedia.org/wiki/Moore-Penrose_pseudoinverse
"""
# Trivial case: pseudoinverse of all-zero matrix is its transpose.
if M.is_zero_matrix:
return M.H
if method == 'RD':
return _pinv_rank_decomposition(M)
elif method == 'ED':
return _pinv_diagonalization(M)
else:
raise ValueError('invalid pinv method %s' % repr(method))
def _inv_mod(M, m):
r"""
Returns the inverse of the matrix `K` (mod `m`), if it exists.
Method to find the matrix inverse of `K` (mod `m`) implemented in this function:
* Compute `\mathrm{adj}(K) = \mathrm{cof}(K)^t`, the adjoint matrix of `K`.
* Compute `r = 1/\mathrm{det}(K) \pmod m`.
* `K^{-1} = r\cdot \mathrm{adj}(K) \pmod m`.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix(2, 2, [1, 2, 3, 4])
>>> A.inv_mod(5)
Matrix([
[3, 1],
[4, 2]])
>>> A.inv_mod(3)
Matrix([
[1, 1],
[0, 1]])
"""
if not M.is_square:
raise NonSquareMatrixError()
N = M.cols
det_K = M.det()
det_inv = None
try:
det_inv = mod_inverse(det_K, m)
except ValueError:
raise NonInvertibleMatrixError('Matrix is not invertible (mod %d)' % m)
K_adj = M.adjugate()
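# K^{-1} = det(K)^{-1} * adj(K), with every entry reduced mod m.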
K_inv = M.__class__(N, N,
[det_inv * K_adj[i, j] % m for i in range(N) for j in range(N)])
return K_inv
def _verify_invertible(M, iszerofunc=_iszero):
"""Initial check to see if a matrix is invertible. Raises or returns
determinant for use in _inv_ADJ."""
if not M.is_square:
raise NonSquareMatrixError("A Matrix must be square to invert.")
d = M.det(method='berkowitz')
zero = d.equals(0)
if zero is None: # if equals() can't decide, will rref be able to?
ok = M.rref(simplify=True)[0]
zero = any(iszerofunc(ok[j, j]) for j in range(ok.rows))
if zero:
raise NonInvertibleMatrixError("Matrix det == 0; not invertible.")
return d
def _inv_ADJ(M, iszerofunc=_iszero):
"""Calculates the inverse using the adjugate matrix and a determinant.
See Also
========
inv
inverse_GE
inverse_LU
inverse_CH
inverse_LDL
"""
d = _verify_invertible(M, iszerofunc=iszerofunc)
return M.adjugate() / d
def _inv_GE(M, iszerofunc=_iszero):
"""Calculates the inverse using Gaussian elimination.
See Also
========
inv
inverse_ADJ
inverse_LU
inverse_CH
inverse_LDL
"""
from .dense import Matrix
if not M.is_square:
raise NonSquareMatrixError("A Matrix must be square to invert.")
big = Matrix.hstack(M.as_mutable(), Matrix.eye(M.rows))
red = big.rref(iszerofunc=iszerofunc, simplify=True)[0]
if any(iszerofunc(red[j, j]) for j in range(red.rows)):
raise NonInvertibleMatrixError("Matrix det == 0; not invertible.")
return M._new(red[:, big.rows:])
def _inv_LU(M, iszerofunc=_iszero):
"""Calculates the inverse using LU decomposition.
See Also
========
inv
inverse_ADJ
inverse_GE
inverse_CH
inverse_LDL
"""
if not M.is_square:
raise NonSquareMatrixError("A Matrix must be square to invert.")
if M.free_symbols:
_verify_invertible(M, iszerofunc=iszerofunc)
return M.LUsolve(M.eye(M.rows), iszerofunc=_iszero)
def _inv_CH(M, iszerofunc=_iszero):
"""Calculates the inverse using cholesky decomposition.
See Also
========
inv
inverse_ADJ
inverse_GE
inverse_LU
inverse_LDL
"""
_verify_invertible(M, iszerofunc=iszerofunc)
return M.cholesky_solve(M.eye(M.rows))
def _inv_LDL(M, iszerofunc=_iszero):
"""Calculates the inverse using LDL decomposition.
See Also
========
inv
inverse_ADJ
inverse_GE
inverse_LU
inverse_CH
"""
_verify_invertible(M, iszerofunc=iszerofunc)
return M.LDLsolve(M.eye(M.rows))
def _inv_QR(M, iszerofunc=_iszero):
"""Calculates the inverse using QR decomposition.
See Also
========
inv
inverse_ADJ
inverse_GE
inverse_CH
inverse_LDL
"""
_verify_invertible(M, iszerofunc=iszerofunc)
return M.QRsolve(M.eye(M.rows))
def _inv_block(M, iszerofunc=_iszero):
"""Calculates the inverse using BLOCKWISE inversion.
See Also
========
inv
inverse_ADJ
inverse_GE
inverse_CH
inverse_LDL
"""
from sympy.matrices.expressions.blockmatrix import BlockMatrix
i = M.shape[0]
if i <= 20 :
return M.inv(method="LU", iszerofunc=_iszero)
A = M[:i // 2, :i //2]
B = M[:i // 2, i // 2:]
C = M[i // 2:, :i // 2]
D = M[i // 2:, i // 2:]
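# Blockwise inversion via the Schur complement S = A - B*D^{-1}*C:
#   M^{-1} = [  S^{-1}             -S^{-1}*B*D^{-1}                    ]
#            [ -D^{-1}*C*S^{-1}     D^{-1} + D^{-1}*C*S^{-1}*B*D^{-1}  ]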
try:
D_inv = _inv_block(D)
except NonInvertibleMatrixError:
return M.inv(method="LU", iszerofunc=_iszero)
B_D_i = B*D_inv
BDC = B_D_i*C
A_n = A - BDC
try:
A_n = _inv_block(A_n)
except NonInvertibleMatrixError:
return M.inv(method="LU", iszerofunc=_iszero)
B_n = -A_n*B_D_i
dc = D_inv*C
C_n = -dc*A_n
D_n = D_inv + dc*-B_n
nn = BlockMatrix([[A_n, B_n], [C_n, D_n]]).as_explicit()
return nn
def _inv(M, method=None, iszerofunc=_iszero, try_block_diag=False):
"""
Return the inverse of a matrix using the method indicated. The default for
dense matrices is Gauss elimination; the default for sparse matrices is LDL.
Parameters
==========
method : ('GE', 'LU', 'ADJ', 'CH', 'LDL', 'QR', 'BLOCK')
iszerofunc : function, optional
Zero-testing function to use.
try_block_diag : bool, optional
If True then will try to form block diagonal matrices using the
method get_diag_blocks(), invert these individually, and then
reconstruct the full inverse matrix.
Examples
========
>>> from sympy import SparseMatrix, Matrix
>>> A = SparseMatrix([
... [ 2, -1, 0],
... [-1, 2, -1],
... [ 0, 0, 2]])
>>> A.inv('CH')
Matrix([
[2/3, 1/3, 1/6],
[1/3, 2/3, 1/3],
[ 0, 0, 1/2]])
>>> A.inv(method='LDL') # use of 'method=' is optional
Matrix([
[2/3, 1/3, 1/6],
[1/3, 2/3, 1/3],
[ 0, 0, 1/2]])
>>> A * _
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> A = Matrix(A)
>>> A.inv('CH')
Matrix([
[2/3, 1/3, 1/6],
[1/3, 2/3, 1/3],
[ 0, 0, 1/2]])
>>> A.inv('ADJ') == A.inv('GE') == A.inv('LU') == A.inv('CH') == A.inv('LDL') == A.inv('QR')
True
Notes
=====
According to the ``method`` keyword, it calls the appropriate method:
GE .... inverse_GE(); default for dense matrices
LU .... inverse_LU()
ADJ ... inverse_ADJ()
CH ... inverse_CH()
LDL ... inverse_LDL(); default for sparse matrices
QR ... inverse_QR()
Note, the GE and LU methods may require the matrix to be simplified
before it is inverted in order to properly detect zeros during
pivoting. In difficult cases a custom zero detection function can
be provided by setting the ``iszerofunc`` argument to a function that
should return True if its argument is zero. The ADJ routine computes
the determinant and uses that to detect singular matrices in addition
to testing for zeros on the diagonal.
See Also
========
inverse_ADJ
inverse_GE
inverse_LU
inverse_CH
inverse_LDL
Raises
======
ValueError
If the determinant of the matrix is zero.
"""
from sympy.matrices import diag, SparseMatrix
if method is None:
method = 'LDL' if isinstance(M, SparseMatrix) else 'GE'
if try_block_diag:
blocks = M.get_diag_blocks()
r = []
for block in blocks:
r.append(block.inv(method=method, iszerofunc=iszerofunc))
return diag(*r)
if method == "GE":
rv = M.inverse_GE(iszerofunc=iszerofunc)
elif method == "LU":
rv = M.inverse_LU(iszerofunc=iszerofunc)
elif method == "ADJ":
rv = M.inverse_ADJ(iszerofunc=iszerofunc)
elif method == "CH":
rv = M.inverse_CH(iszerofunc=iszerofunc)
elif method == "LDL":
rv = M.inverse_LDL(iszerofunc=iszerofunc)
elif method == "QR":
rv = M.inverse_QR(iszerofunc=iszerofunc)
elif method == "BLOCK":
rv = M.inverse_BLOCK(iszerofunc=iszerofunc)
else:
raise ValueError("Inversion method unrecognized")
return M._new(rv)
"""
Fundamental operations of dense matrices.
The dense matrix is stored as a list of lists.
"""
from sympy.utilities.exceptions import SymPyDeprecationWarning
SymPyDeprecationWarning(
feature="densetools",
issue=12695,
deprecated_since_version="1.1").warn()
def trace(matlist, K):
"""
Returns the trace of a matrix.
Examples
========
>>> from sympy.matrices.densetools import trace, eye
>>> from sympy import ZZ
>>> a = [
... [ZZ(3), ZZ(7), ZZ(4)],
... [ZZ(2), ZZ(4), ZZ(5)],
... [ZZ(6), ZZ(2), ZZ(3)]]
>>> b = eye(4, ZZ)
>>> trace(a, ZZ)
10
>>> trace(b, ZZ)
4
"""
result = K.zero
for i in range(len(matlist)):
result += matlist[i][i]
return result
def transpose(matlist, K):
"""
Returns the transpose of a matrix
Examples
========
>>> from sympy.matrices.densetools import transpose
>>> from sympy import ZZ
>>> a = [
... [ZZ(3), ZZ(7), ZZ(4)],
... [ZZ(2), ZZ(4), ZZ(5)],
... [ZZ(6), ZZ(2), ZZ(3)]]
>>> transpose(a, ZZ)
[[3, 2, 6], [7, 4, 2], [4, 5, 3]]
"""
return [list(a) for a in (zip(*matlist))]
def conjugate(matlist, K):
"""
Returns the conjugate of a matrix row-wise.
Examples
========
>>> from sympy.matrices.densetools import conjugate
>>> from sympy import ZZ
>>> a = [
... [ZZ(3), ZZ(2), ZZ(6)],
... [ZZ(7), ZZ(4), ZZ(2)],
... [ZZ(4), ZZ(5), ZZ(3)]]
>>> conjugate(a, ZZ)
[[3, 2, 6], [7, 4, 2], [4, 5, 3]]
See Also
========
conjugate_row
"""
return [conjugate_row(row, K) for row in matlist]
def conjugate_row(row, K):
"""
Returns the conjugate of a row element-wise
Examples
========
>>> from sympy.matrices.densetools import conjugate_row
>>> from sympy import ZZ
>>> a = [ZZ(3), ZZ(2), ZZ(6)]
>>> conjugate_row(a, ZZ)
[3, 2, 6]
"""
result = []
for r in row:
conj = getattr(r, 'conjugate', None)
if conj is not None:
conjrow = conj()
else:
conjrow = r
result.append(conjrow)
return result
def conjugate_transpose(matlist, K):
"""
Returns the conjugate-transpose of a matrix
Examples
========
>>> from sympy import ZZ
>>> from sympy.matrices.densetools import conjugate_transpose
>>> a = [
... [ZZ(3), ZZ(7), ZZ(4)],
... [ZZ(2), ZZ(4), ZZ(5)],
... [ZZ(6), ZZ(2), ZZ(3)]]
>>> conjugate_transpose(a, ZZ)
[[3, 2, 6], [7, 4, 2], [4, 5, 3]]
"""
return conjugate(transpose(matlist, K), K)
def augment(matlist, column, K):
"""
Augments a matrix and a column.
Examples
========
>>> from sympy.matrices.densetools import augment
>>> from sympy import ZZ
>>> a = [
... [ZZ(3), ZZ(7), ZZ(4)],
... [ZZ(2), ZZ(4), ZZ(5)],
... [ZZ(6), ZZ(2), ZZ(3)]]
>>> b = [
... [ZZ(4)],
... [ZZ(5)],
... [ZZ(6)]]
>>> augment(a, b, ZZ)
[[3, 7, 4, 4], [2, 4, 5, 5], [6, 2, 3, 6]]
"""
return [row + element for row, element in zip(matlist, column)]
def eye(n, K):
"""
Returns an identity matrix of size n.
Examples
========
>>> from sympy.matrices.densetools import eye
>>> from sympy import ZZ
>>> eye(3, ZZ)
[[1, 0, 0], [0, 1, 0], [0, 0, 1]]
"""
result = []
for i in range(n):
result.append([])
for j in range(n):
if (i == j):
result[i].append(K(1))
else:
result[i].append(K.zero)
return result
def row(matlist, i):
"""
Returns the ith row of a matrix
Examples
========
>>> from sympy.matrices.densetools import row
>>> from sympy import ZZ
>>> a = [
... [ZZ(3), ZZ(7), ZZ(4)],
... [ZZ(2), ZZ(4), ZZ(5)],
... [ZZ(6), ZZ(2), ZZ(3)]]
>>> row(a, 2)
[6, 2, 3]
"""
return matlist[i]
def col(matlist, i):
"""
Returns the ith column of a matrix
Note: Currently very expensive
Examples
========
>>> from sympy.matrices.densetools import col
>>> from sympy import ZZ
>>> a = [
... [ZZ(3), ZZ(7), ZZ(4)],
... [ZZ(2), ZZ(4), ZZ(5)],
... [ZZ(6), ZZ(2), ZZ(3)]]
>>> col(a, 1)
[[7], [4], [2]]
"""
matcol = [list(l) for l in zip(*matlist)]
return [[l] for l in matcol[i]]
def rowswap(matlist, index1, index2, K):
"""
Returns the matrix with index1 row and index2 row swapped
"""
matlist[index1], matlist[index2] = matlist[index2], matlist[index1]
return matlist
def rowmul(matlist, index, k, K):
"""
Multiplies the index row by k
"""
for i in range(len(matlist[index])):
matlist[index][i] = k*matlist[index][i]
return matlist
def rowadd(matlist, index1, index2, k, K):
"""
Adds k times the index2 row to the index1 row
"""
for i in range(len(matlist[index1])):
matlist[index1][i] = (matlist[index1][i] + k*matlist[index2][i])
return matlist
def isHermitian(matlist, K):
"""
Checks whether matrix is hermitian
Examples
========
>>> from sympy.matrices.densetools import isHermitian
>>> from sympy import QQ
>>> a = [
... [QQ(2,1), QQ(-1,1), QQ(-1,1)],
... [QQ(0,1), QQ(4,1), QQ(-1,1)],
... [QQ(0,1), QQ(0,1), QQ(3,1)]]
>>> isHermitian(a, QQ)
False
"""
return conjugate_transpose(matlist, K) == matlist
from mpmath.matrices.matrices import _matrix
from sympy.core import Basic, Dict, Tuple
from sympy.core.numbers import Integer
from sympy.core.cache import cacheit
from sympy.core.sympify import converter as sympify_converter, _sympify
from sympy.matrices.dense import DenseMatrix
from sympy.matrices.expressions import MatrixExpr
from sympy.matrices.matrices import MatrixBase
from sympy.matrices.repmatrix import RepMatrix
from sympy.matrices.sparse import SparseRepMatrix
from sympy.multipledispatch import dispatch
def sympify_matrix(arg):
return arg.as_immutable()
sympify_converter[MatrixBase] = sympify_matrix
def sympify_mpmath_matrix(arg):
mat = [_sympify(x) for x in arg]
return ImmutableDenseMatrix(arg.rows, arg.cols, mat)
sympify_converter[_matrix] = sympify_mpmath_matrix
class ImmutableRepMatrix(RepMatrix, MatrixExpr): # type: ignore
"""Immutable matrix based on RepMatrix
Uses DomainMatrix as the internal representation.
"""
#
# This is a subclass of RepMatrix that adds/overrides some methods to make
# the instances Basic and immutable. ImmutableRepMatrix is a superclass for
# both ImmutableDenseMatrix and ImmutableSparseMatrix.
#
def __new__(cls, *args, **kwargs):
return cls._new(*args, **kwargs)
__hash__ = MatrixExpr.__hash__
def copy(self):
return self
@property
def cols(self):
return self._cols
@property
def rows(self):
return self._rows
@property
def shape(self):
return self._rows, self._cols
def as_immutable(self):
return self
def _entry(self, i, j, **kwargs):
return self[i, j]
def __setitem__(self, *args):
raise TypeError("Cannot set values of {}".format(self.__class__))
def is_diagonalizable(self, reals_only=False, **kwargs):
return super().is_diagonalizable(
reals_only=reals_only, **kwargs)
is_diagonalizable.__doc__ = SparseRepMatrix.is_diagonalizable.__doc__
is_diagonalizable = cacheit(is_diagonalizable)
class ImmutableDenseMatrix(DenseMatrix, ImmutableRepMatrix): # type: ignore
"""Create an immutable version of a matrix.
Examples
========
>>> from sympy import eye
>>> from sympy.matrices import ImmutableMatrix
>>> ImmutableMatrix(eye(3))
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> _[0, 0] = 42
Traceback (most recent call last):
...
TypeError: Cannot set values of ImmutableDenseMatrix
"""
# MatrixExpr is set as NotIterable, but we want explicit matrices to be
# iterable
_iterable = True
_class_priority = 8
_op_priority = 10.001
@classmethod
def _new(cls, *args, **kwargs):
if len(args) == 1 and isinstance(args[0], ImmutableDenseMatrix):
return args[0]
if kwargs.get('copy', True) is False:
if len(args) != 3:
raise TypeError("'copy=False' requires a matrix be initialized as rows,cols,[list]")
rows, cols, flat_list = args
else:
rows, cols, flat_list = cls._handle_creation_inputs(*args, **kwargs)
flat_list = list(flat_list) # create a shallow copy
rep = cls._flat_list_to_DomainMatrix(rows, cols, flat_list)
return cls._fromrep(rep)
@classmethod
def _fromrep(cls, rep):
rows, cols = rep.shape
flat_list = rep.to_sympy().to_list_flat()
obj = Basic.__new__(cls,
Integer(rows),
Integer(cols),
Tuple(*flat_list, sympify=False))
obj._rows = rows
obj._cols = cols
obj._rep = rep
return obj
# make sure ImmutableDenseMatrix is aliased as ImmutableMatrix
ImmutableMatrix = ImmutableDenseMatrix
class ImmutableSparseMatrix(SparseRepMatrix, ImmutableRepMatrix): # type:ignore
"""Create an immutable version of a sparse matrix.
Examples
========
>>> from sympy import eye
>>> from sympy.matrices.immutable import ImmutableSparseMatrix
>>> ImmutableSparseMatrix(1, 1, {})
Matrix([[0]])
>>> ImmutableSparseMatrix(eye(3))
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> _[0, 0] = 42
Traceback (most recent call last):
...
TypeError: Cannot set values of ImmutableSparseMatrix
>>> _.shape
(3, 3)
"""
is_Matrix = True
_class_priority = 9
@classmethod
def _new(cls, *args, **kwargs):
rows, cols, smat = cls._handle_creation_inputs(*args, **kwargs)
rep = cls._smat_to_DomainMatrix(rows, cols, smat)
return cls._fromrep(rep)
@classmethod
def _fromrep(cls, rep):
rows, cols = rep.shape
smat = rep.to_sympy().to_dok()
obj = Basic.__new__(cls, Integer(rows), Integer(cols), Dict(smat))
obj._rows = rows
obj._cols = cols
obj._rep = rep
return obj
@dispatch(ImmutableDenseMatrix, ImmutableDenseMatrix)
def _eval_is_eq(lhs, rhs): # noqa:F811
"""Helper method for Equality with matrices.sympy.
Relational automatically converts matrices to ImmutableDenseMatrix
instances, so this method only applies here. Returns True if the
matrices are definitively the same, False if they are definitively
different, and None if undetermined (e.g. if they contain Symbols).
Returning None triggers default handling of Equalities.
"""
if lhs.shape != rhs.shape:
return False
return (lhs - rhs).is_zero_matrix
"""
Basic methods common to all matrices to be used
when creating more advanced matrices (e.g., matrices over rings,
etc.).
"""
from collections import defaultdict
from collections.abc import Iterable
from inspect import isfunction
from functools import reduce
from sympy.assumptions.refine import refine
from sympy.core import SympifyError, Add
from sympy.core.basic import Atom
from sympy.core.decorators import call_highest_priority
from sympy.core.kind import Kind, NumberKind
from sympy.core.logic import fuzzy_and, FuzzyBool
from sympy.core.mod import Mod
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.core.sympify import sympify
from sympy.functions import Abs
from sympy.polys.polytools import Poly
from sympy.simplify import simplify as _simplify
from sympy.simplify.simplify import dotprodsimp as _dotprodsimp
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import flatten, is_sequence
from sympy.utilities.misc import as_int, filldedent
from sympy.tensor.array import NDimArray
from .utilities import _get_intermediate_simp_bool
class MatrixError(Exception):
pass
class ShapeError(ValueError, MatrixError):
"""Wrong matrix shape"""
pass
class NonSquareMatrixError(ShapeError):
pass
class NonInvertibleMatrixError(ValueError, MatrixError):
"""The matrix in not invertible (division by multidimensional zero error)."""
pass
class NonPositiveDefiniteMatrixError(ValueError, MatrixError):
"""The matrix is not a positive-definite matrix."""
pass
class MatrixRequired:
"""All subclasses of matrix objects must implement the
required matrix properties listed here."""
rows = None # type: int
cols = None # type: int
_simplify = None
@classmethod
def _new(cls, *args, **kwargs):
"""`_new` must, at minimum, be callable as
`_new(rows, cols, mat)` where mat is a flat list of the
elements of the matrix."""
raise NotImplementedError("Subclasses must implement this.")
def __eq__(self, other):
raise NotImplementedError("Subclasses must implement this.")
def __getitem__(self, key):
"""Implementations of __getitem__ should accept ints, in which
case the matrix is indexed as a flat list, tuples (i,j) in which
case the (i,j) entry is returned, slices, or mixed tuples (a,b)
where a and b are any combination of slices and integers."""
raise NotImplementedError("Subclasses must implement this.")
def __len__(self):
"""The total number of entries in the matrix."""
raise NotImplementedError("Subclasses must implement this.")
@property
def shape(self):
raise NotImplementedError("Subclasses must implement this.")
class MatrixShaping(MatrixRequired):
"""Provides basic matrix shaping and extracting of submatrices"""
def _eval_col_del(self, col):
def entry(i, j):
return self[i, j] if j < col else self[i, j + 1]
return self._new(self.rows, self.cols - 1, entry)
def _eval_col_insert(self, pos, other):
def entry(i, j):
if j < pos:
return self[i, j]
elif pos <= j < pos + other.cols:
return other[i, j - pos]
return self[i, j - other.cols]
return self._new(self.rows, self.cols + other.cols, entry)
def _eval_col_join(self, other):
rows = self.rows
def entry(i, j):
if i < rows:
return self[i, j]
return other[i - rows, j]
return classof(self, other)._new(self.rows + other.rows, self.cols,
entry)
def _eval_extract(self, rowsList, colsList):
mat = list(self)
cols = self.cols
indices = (i * cols + j for i in rowsList for j in colsList)
return self._new(len(rowsList), len(colsList),
list(mat[i] for i in indices))
def _eval_get_diag_blocks(self):
sub_blocks = []
def recurse_sub_blocks(M):
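# Grow the leading square block until no nonzero entries couple it to
# the rest of the matrix, record it, then recurse on the remainder.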
i = 1
while i <= M.shape[0]:
if i == 1:
to_the_right = M[0, i:]
to_the_bottom = M[i:, 0]
else:
to_the_right = M[:i, i:]
to_the_bottom = M[i:, :i]
if any(to_the_right) or any(to_the_bottom):
i += 1
continue
else:
sub_blocks.append(M[:i, :i])
if M.shape == M[:i, :i].shape:
return
else:
recurse_sub_blocks(M[i:, i:])
return
recurse_sub_blocks(self)
return sub_blocks
def _eval_row_del(self, row):
def entry(i, j):
return self[i, j] if i < row else self[i + 1, j]
return self._new(self.rows - 1, self.cols, entry)
def _eval_row_insert(self, pos, other):
entries = list(self)
insert_pos = pos * self.cols
entries[insert_pos:insert_pos] = list(other)
return self._new(self.rows + other.rows, self.cols, entries)
def _eval_row_join(self, other):
cols = self.cols
def entry(i, j):
if j < cols:
return self[i, j]
return other[i, j - cols]
return classof(self, other)._new(self.rows, self.cols + other.cols,
entry)
def _eval_tolist(self):
return [list(self[i,:]) for i in range(self.rows)]
def _eval_todok(self):
dok = {}
rows, cols = self.shape
for i in range(rows):
for j in range(cols):
val = self[i, j]
if val != self.zero:
dok[i, j] = val
return dok
def _eval_vec(self):
rows = self.rows
def entry(n, _):
# we want to read off the columns first
j = n // rows
i = n - j * rows
return self[i, j]
return self._new(len(self), 1, entry)
def _eval_vech(self, diagonal):
c = self.cols
v = []
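# Stack the columns of the lower triangle (including the main diagonal
# when ``diagonal`` is True) into a single column vector.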
if diagonal:
for j in range(c):
for i in range(j, c):
v.append(self[i, j])
else:
for j in range(c):
for i in range(j + 1, c):
v.append(self[i, j])
return self._new(len(v), 1, v)
def col_del(self, col):
"""Delete the specified column."""
if col < 0:
col += self.cols
if not 0 <= col < self.cols:
raise IndexError("Column {} is out of range.".format(col))
return self._eval_col_del(col)
def col_insert(self, pos, other):
"""Insert one or more columns at the given column position.
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(3, 1)
>>> M.col_insert(1, V)
Matrix([
[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 0, 0]])
See Also
========
col
row_insert
"""
# Allows you to build a matrix even if it is a null matrix
if not self:
return type(self)(other)
pos = as_int(pos)
if pos < 0:
pos = self.cols + pos
if pos < 0:
pos = 0
elif pos > self.cols:
pos = self.cols
if self.rows != other.rows:
raise ShapeError(
"`self` and `other` must have the same number of rows.")
return self._eval_col_insert(pos, other)
def col_join(self, other):
"""Concatenates two matrices along self's last and other's first row.
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(1, 3)
>>> M.col_join(V)
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[1, 1, 1]])
See Also
========
col
row_join
"""
# A null matrix can always be stacked (see #10770)
if self.rows == 0 and self.cols != other.cols:
return self._new(0, other.cols, []).col_join(other)
if self.cols != other.cols:
raise ShapeError(
"`self` and `other` must have the same number of columns.")
return self._eval_col_join(other)
def col(self, j):
"""Elementary column selector.
Examples
========
>>> from sympy import eye
>>> eye(2).col(0)
Matrix([
[1],
[0]])
See Also
========
row
col_del
col_join
col_insert
"""
return self[:, j]
def extract(self, rowsList, colsList):
"""Return a submatrix by specifying a list of rows and columns.
Negative indices can be given. All indices must be in the range
-n <= i < n where n is the number of rows or columns.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(4, 3, range(12))
>>> m
Matrix([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9, 10, 11]])
>>> m.extract([0, 1, 3], [0, 1])
Matrix([
[0, 1],
[3, 4],
[9, 10]])
Rows or columns can be repeated:
>>> m.extract([0, 0, 1], [-1])
Matrix([
[2],
[2],
[5]])
Every other row can be taken by using range to provide the indices:
>>> m.extract(range(0, m.rows, 2), [-1])
Matrix([
[2],
[8]])
rowsList or colsList can also be a list of booleans, in which case
the rows or columns corresponding to the True values will be selected:
>>> m.extract([0, 1, 2, 3], [True, False, True])
Matrix([
[0, 2],
[3, 5],
[6, 8],
[9, 11]])
"""
if not is_sequence(rowsList) or not is_sequence(colsList):
raise TypeError("rowsList and colsList must be iterable")
# ensure rowsList and colsList are lists of integers
if rowsList and all(isinstance(i, bool) for i in rowsList):
rowsList = [index for index, item in enumerate(rowsList) if item]
if colsList and all(isinstance(i, bool) for i in colsList):
colsList = [index for index, item in enumerate(colsList) if item]
# ensure everything is in range
rowsList = [a2idx(k, self.rows) for k in rowsList]
colsList = [a2idx(k, self.cols) for k in colsList]
return self._eval_extract(rowsList, colsList)
def get_diag_blocks(self):
"""Obtains the square sub-matrices on the main diagonal of a square matrix.
Useful for inverting symbolic matrices or solving systems of
linear equations which may be decoupled by having a block diagonal
structure.
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x, y, z
>>> A = Matrix([[1, 3, 0, 0], [y, z*z, 0, 0], [0, 0, x, 0], [0, 0, 0, 0]])
>>> a1, a2, a3 = A.get_diag_blocks()
>>> a1
Matrix([
[1, 3],
[y, z**2]])
>>> a2
Matrix([[x]])
>>> a3
Matrix([[0]])
"""
return self._eval_get_diag_blocks()
@classmethod
def hstack(cls, *args):
"""Return a matrix formed by joining args horizontally (i.e.
by repeated application of row_join).
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> Matrix.hstack(eye(2), 2*eye(2))
Matrix([
[1, 0, 2, 0],
[0, 1, 0, 2]])
"""
if len(args) == 0:
return cls._new()
kls = type(args[0])
return reduce(kls.row_join, args)
def reshape(self, rows, cols):
"""Reshape the matrix. Total number of elements must remain the same.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 3, lambda i, j: 1)
>>> m
Matrix([
[1, 1, 1],
[1, 1, 1]])
>>> m.reshape(1, 6)
Matrix([[1, 1, 1, 1, 1, 1]])
>>> m.reshape(3, 2)
Matrix([
[1, 1],
[1, 1],
[1, 1]])
"""
if self.rows * self.cols != rows * cols:
raise ValueError("Invalid reshape parameters %d %d" % (rows, cols))
return self._new(rows, cols, lambda i, j: self[i * cols + j])
def row_del(self, row):
"""Delete the specified row."""
if row < 0:
row += self.rows
if not 0 <= row < self.rows:
raise IndexError("Row {} is out of range.".format(row))
return self._eval_row_del(row)
def row_insert(self, pos, other):
"""Insert one or more rows at the given row position.
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(1, 3)
>>> M.row_insert(1, V)
Matrix([
[0, 0, 0],
[1, 1, 1],
[0, 0, 0],
[0, 0, 0]])
See Also
========
row
col_insert
"""
# Allows you to build a matrix even if it is a null matrix
if not self:
return self._new(other)
pos = as_int(pos)
if pos < 0:
pos = self.rows + pos
if pos < 0:
pos = 0
elif pos > self.rows:
pos = self.rows
if self.cols != other.cols:
raise ShapeError(
"`self` and `other` must have the same number of columns.")
return self._eval_row_insert(pos, other)
def row_join(self, other):
"""Concatenates two matrices along self's last and rhs's first column
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(3, 1)
>>> M.row_join(V)
Matrix([
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1]])
See Also
========
row
col_join
"""
# A null matrix can always be stacked (see #10770)
if self.cols == 0 and self.rows != other.rows:
return self._new(other.rows, 0, []).row_join(other)
if self.rows != other.rows:
raise ShapeError(
"`self` and `rhs` must have the same number of rows.")
return self._eval_row_join(other)
def diagonal(self, k=0):
"""Returns the kth diagonal of self. The main diagonal
corresponds to `k=0`; diagonals above and below correspond to
`k > 0` and `k < 0`, respectively. The values of `self[i, j]`
for which `j - i = k`, are returned in order of increasing
`i + j`, starting with `i + j = |k|`.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(3, 3, lambda i, j: j - i); m
Matrix([
[ 0, 1, 2],
[-1, 0, 1],
[-2, -1, 0]])
>>> _.diagonal()
Matrix([[0, 0, 0]])
>>> m.diagonal(1)
Matrix([[1, 1]])
>>> m.diagonal(-2)
Matrix([[-2]])
Even though the diagonal is returned as a Matrix, the element
retrieval can be done with a single index:
>>> Matrix.diag(1, 2, 3).diagonal()[1] # instead of [0, 1]
2
See Also
========
diag - to create a diagonal matrix
"""
rv = []
k = as_int(k)
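# The kth diagonal starts at row -k, column 0 when k <= 0, and at
# row 0, column k when k > 0; walk down and to the right until a
# border of the matrix is reached.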
r = 0 if k > 0 else -k
c = 0 if r else k
while True:
if r == self.rows or c == self.cols:
break
rv.append(self[r, c])
r += 1
c += 1
if not rv:
raise ValueError(filldedent('''
The %s diagonal is out of range [%s, %s]''' % (
k, 1 - self.rows, self.cols - 1)))
return self._new(1, len(rv), rv)
def row(self, i):
"""Elementary row selector.
Examples
========
>>> from sympy import eye
>>> eye(2).row(0)
Matrix([[1, 0]])
See Also
========
col
row_del
row_join
row_insert
"""
return self[i, :]
@property
def shape(self):
"""The shape (dimensions) of the matrix as the 2-tuple (rows, cols).
Examples
========
>>> from sympy.matrices import zeros
>>> M = zeros(2, 3)
>>> M.shape
(2, 3)
>>> M.rows
2
>>> M.cols
3
"""
return (self.rows, self.cols)
def todok(self):
"""Return the matrix as dictionary of keys.
Examples
========
>>> from sympy import Matrix
>>> M = Matrix.eye(3)
>>> M.todok()
{(0, 0): 1, (1, 1): 1, (2, 2): 1}
"""
return self._eval_todok()
def tolist(self):
"""Return the Matrix as a nested Python list.
Examples
========
>>> from sympy import Matrix, ones
>>> m = Matrix(3, 3, range(9))
>>> m
Matrix([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> m.tolist()
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
>>> ones(3, 0).tolist()
[[], [], []]
When there are no rows then it will not be possible to tell how
many columns were in the original matrix:
>>> ones(0, 3).tolist()
[]
"""
if not self.rows:
return []
if not self.cols:
return [[] for i in range(self.rows)]
return self._eval_tolist()
def todod(M):
"""Returns matrix as dict of dicts containing non-zero elements of the Matrix
Examples
========
>>> from sympy import Matrix
>>> A = Matrix([[0, 1],[0, 3]])
>>> A
Matrix([
[0, 1],
[0, 3]])
>>> A.todod()
{0: {1: 1}, 1: {1: 3}}
"""
rowsdict = {}
Mlol = M.tolist()
for i, Mi in enumerate(Mlol):
row = {j: Mij for j, Mij in enumerate(Mi) if Mij}
if row:
rowsdict[i] = row
return rowsdict
def vec(self):
"""Return the Matrix converted into a one column matrix by stacking columns
Examples
========
>>> from sympy import Matrix
>>> m=Matrix([[1, 3], [2, 4]])
>>> m
Matrix([
[1, 3],
[2, 4]])
>>> m.vec()
Matrix([
[1],
[2],
[3],
[4]])
See Also
========
vech
"""
return self._eval_vec()
def vech(self, diagonal=True, check_symmetry=True):
"""Reshapes the matrix into a column vector by stacking the
elements in the lower triangle.
Parameters
==========
diagonal : bool, optional
If ``True``, it includes the diagonal elements.
check_symmetry : bool, optional
If ``True``, it checks whether the matrix is symmetric.
Examples
========
>>> from sympy import Matrix
>>> m=Matrix([[1, 2], [2, 3]])
>>> m
Matrix([
[1, 2],
[2, 3]])
>>> m.vech()
Matrix([
[1],
[2],
[3]])
>>> m.vech(diagonal=False)
Matrix([[2]])
Notes
=====
This should work for symmetric matrices and ``vech`` can
represent symmetric matrices in vector form more compactly than
``vec``.
See Also
========
vec
"""
if not self.is_square:
raise NonSquareMatrixError
if check_symmetry and not self.is_symmetric():
raise ValueError("The matrix is not symmetric.")
return self._eval_vech(diagonal)
@classmethod
def vstack(cls, *args):
"""Return a matrix formed by joining args vertically (i.e.
by repeated application of col_join).
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> Matrix.vstack(eye(2), 2*eye(2))
Matrix([
[1, 0],
[0, 1],
[2, 0],
[0, 2]])
"""
if len(args) == 0:
return cls._new()
kls = type(args[0])
return reduce(kls.col_join, args)
class MatrixSpecial(MatrixRequired):
"""Construction of special matrices"""
@classmethod
def _eval_diag(cls, rows, cols, diag_dict):
"""diag_dict is a defaultdict containing
all the entries of the diagonal matrix."""
def entry(i, j):
return diag_dict[(i, j)]
return cls._new(rows, cols, entry)
@classmethod
def _eval_eye(cls, rows, cols):
vals = [cls.zero]*(rows*cols)
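# In a flat row-major list the main diagonal sits at stride cols + 1,
# so a single slice assignment writes the ones at (0, 0), (1, 1), ...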
vals[::cols+1] = [cls.one]*min(rows, cols)
return cls._new(rows, cols, vals, copy=False)
@classmethod
def _eval_jordan_block(cls, rows, cols, eigenvalue, band='upper'):
if band == 'lower':
def entry(i, j):
if i == j:
return eigenvalue
elif j + 1 == i:
return cls.one
return cls.zero
else:
def entry(i, j):
if i == j:
return eigenvalue
elif i + 1 == j:
return cls.one
return cls.zero
return cls._new(rows, cols, entry)
@classmethod
def _eval_ones(cls, rows, cols):
def entry(i, j):
return cls.one
return cls._new(rows, cols, entry)
@classmethod
def _eval_zeros(cls, rows, cols):
return cls._new(rows, cols, [cls.zero]*(rows*cols), copy=False)
@classmethod
def _eval_wilkinson(cls, n):
def entry(i, j):
return cls.one if i + 1 == j else cls.zero
D = cls._new(2*n + 1, 2*n + 1, entry)
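# D is the superdiagonal of ones; adding D and D.T to diag(-n, ..., n)
# gives the Wilkinson matrix W^-, and taking the absolute value of the
# diagonal first gives W^+.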
wminus = cls.diag([i for i in range(-n, n + 1)], unpack=True) + D + D.T
wplus = abs(cls.diag([i for i in range(-n, n + 1)], unpack=True)) + D + D.T
return wminus, wplus
@classmethod
def diag(kls, *args, strict=False, unpack=True, rows=None, cols=None, **kwargs):
"""Returns a matrix with the specified diagonal.
If matrices are passed, a block-diagonal matrix
is created (i.e. the "direct sum" of the matrices).
kwargs
======
rows : rows of the resulting matrix; computed if
not given.
cols : columns of the resulting matrix; computed if
not given.
cls : class for the resulting matrix
unpack : bool which, when True (default), unpacks a single
sequence rather than interpreting it as a Matrix.
strict : bool which, when False (default), allows Matrices to
have variable-length rows.
Examples
========
>>> from sympy.matrices import Matrix
>>> Matrix.diag(1, 2, 3)
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
The current default is to unpack a single sequence. If this is
not desired, set `unpack=False` and it will be interpreted as
a matrix.
>>> Matrix.diag([1, 2, 3]) == Matrix.diag(1, 2, 3)
True
When more than one element is passed, each is interpreted as
something to put on the diagonal. Lists are converted to
matrices. Filling of the diagonal always continues from
the bottom right hand corner of the previous item: this
will create a block-diagonal matrix whether the matrices
are square or not.
>>> col = [1, 2, 3]
>>> row = [[4, 5]]
>>> Matrix.diag(col, row)
Matrix([
[1, 0, 0],
[2, 0, 0],
[3, 0, 0],
[0, 4, 5]])
When `unpack` is False, elements within a list need not all be
of the same length. Setting `strict` to True would raise a
ValueError for the following:
>>> Matrix.diag([[1, 2, 3], [4, 5], [6]], unpack=False)
Matrix([
[1, 2, 3],
[4, 5, 0],
[6, 0, 0]])
The type of the returned matrix can be set with the ``cls``
keyword.
>>> from sympy.matrices import ImmutableMatrix
>>> from sympy.utilities.misc import func_name
>>> func_name(Matrix.diag(1, cls=ImmutableMatrix))
'ImmutableDenseMatrix'
A zero dimension matrix can be used to position the start of
the filling at the start of an arbitrary row or column:
>>> from sympy import ones
>>> r2 = ones(0, 2)
>>> Matrix.diag(r2, 1, 2)
Matrix([
[0, 0, 1, 0],
[0, 0, 0, 2]])
See Also
========
eye
diagonal - to extract a diagonal
.dense.diag
.expressions.blockmatrix.BlockMatrix
.sparsetools.banded - to create multi-diagonal matrices
"""
from sympy.matrices.matrices import MatrixBase
from sympy.matrices.dense import Matrix
from sympy.matrices import SparseMatrix
klass = kwargs.get('cls', kls)
if unpack and len(args) == 1 and is_sequence(args[0]) and \
not isinstance(args[0], MatrixBase):
args = args[0]
# fill a default dict with the diagonal entries
diag_entries = defaultdict(int)
rmax = cmax = 0 # keep track of the biggest index seen
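# Each argument is placed with its top-left corner at (rmax, cmax);
# after it is written, rmax and cmax advance by the argument's shape
# so the next item starts at the bottom-right of the previous one.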
for m in args:
if isinstance(m, list):
if strict:
# if malformed, Matrix will raise an error
_ = Matrix(m)
r, c = _.shape
m = _.tolist()
else:
r, c, smat = SparseMatrix._handle_creation_inputs(m)
for (i, j), _ in smat.items():
diag_entries[(i + rmax, j + cmax)] = _
m = [] # to skip process below
elif hasattr(m, 'shape'): # a Matrix
# convert to list of lists
r, c = m.shape
m = m.tolist()
else: # in this case, we're a single value
diag_entries[(rmax, cmax)] = m
rmax += 1
cmax += 1
continue
# process list of lists
for i in range(len(m)):
for j, _ in enumerate(m[i]):
diag_entries[(i + rmax, j + cmax)] = _
rmax += r
cmax += c
if rows is None:
rows, cols = cols, rows
if rows is None:
rows, cols = rmax, cmax
else:
cols = rows if cols is None else cols
if rows < rmax or cols < cmax:
raise ValueError(filldedent('''
The constructed matrix is {} x {} but a size of {} x {}
was specified.'''.format(rmax, cmax, rows, cols)))
return klass._eval_diag(rows, cols, diag_entries)
@classmethod
def eye(kls, rows, cols=None, **kwargs):
"""Returns an identity matrix.
Args
====
rows : rows of the matrix
cols : cols of the matrix (if None, cols=rows)
kwargs
======
cls : class of the returned matrix
"""
if cols is None:
cols = rows
if rows < 0 or cols < 0:
raise ValueError("Cannot create a {} x {} matrix. "
"Both dimensions must be positive".format(rows, cols))
klass = kwargs.get('cls', kls)
rows, cols = as_int(rows), as_int(cols)
return klass._eval_eye(rows, cols)
@classmethod
def jordan_block(kls, size=None, eigenvalue=None, *, band='upper', **kwargs):
"""Returns a Jordan block
Parameters
==========
size : Integer, optional
Specifies the shape of the Jordan block matrix.
eigenvalue : Number or Symbol
Specifies the value for the main diagonal of the matrix.
.. note::
The keyword ``eigenval`` is also accepted as an alias of
this keyword, but its use is not recommended. The alias
may be deprecated in a later release.
band : 'upper' or 'lower', optional
Specifies the position of the off-diagonal on which to put the `1` s.
cls : Matrix, optional
Specifies the matrix class of the output form.
If it is not specified, the class type where the method is
being executed on will be returned.
rows, cols : Integer, optional
Specifies the shape of the Jordan block matrix. See the Notes
section for details of how these keys work.
.. note::
This feature will be deprecated in the future.
Returns
=======
Matrix
A Jordan block matrix.
Raises
======
ValueError
If insufficient arguments are given for matrix size
specification, or no eigenvalue is given.
Examples
========
Creating a default Jordan block:
>>> from sympy import Matrix
>>> from sympy.abc import x
>>> Matrix.jordan_block(4, x)
Matrix([
[x, 1, 0, 0],
[0, x, 1, 0],
[0, 0, x, 1],
[0, 0, 0, x]])
Creating an alternative Jordan block matrix where `1` is on
lower off-diagonal:
>>> Matrix.jordan_block(4, x, band='lower')
Matrix([
[x, 0, 0, 0],
[1, x, 0, 0],
[0, 1, x, 0],
[0, 0, 1, x]])
Creating a Jordan block with keyword arguments
>>> Matrix.jordan_block(size=4, eigenvalue=x)
Matrix([
[x, 1, 0, 0],
[0, x, 1, 0],
[0, 0, x, 1],
[0, 0, 0, x]])
Notes
=====
.. note::
This feature will be deprecated in the future.
The keyword arguments ``size``, ``rows``, ``cols`` relate to
the Jordan block size specifications.
If you want to create a square Jordan block, specify any
one of the three arguments.
If you want to create a rectangular Jordan block, specify
``rows`` and ``cols`` individually.
+--------------------------------+---------------------+
|        Arguments Given         |    Matrix Shape     |
+----------+----------+----------+----------+----------+
| size     | rows     | cols     | rows     | cols     |
+==========+==========+==========+==========+==========+
| size     | Any                 | size     | size     |
+----------+----------+----------+----------+----------+
|          | None                | ValueError          |
|          +----------+----------+----------+----------+
| None     | rows     | None     | rows     | rows     |
|          +----------+----------+----------+----------+
|          | None     | cols     | cols     | cols     |
+          +----------+----------+----------+----------+
|          | rows     | cols     | rows     | cols     |
+----------+----------+----------+----------+----------+
References
==========
.. [1] https://en.wikipedia.org/wiki/Jordan_matrix
"""
if 'rows' in kwargs or 'cols' in kwargs:
SymPyDeprecationWarning(
feature="Keyword arguments 'rows' or 'cols'",
issue=16102,
useinstead="a more generic banded matrix constructor",
deprecated_since_version="1.4"
).warn()
klass = kwargs.pop('cls', kls)
rows = kwargs.pop('rows', None)
cols = kwargs.pop('cols', None)
eigenval = kwargs.get('eigenval', None)
if eigenvalue is None and eigenval is None:
raise ValueError("Must supply an eigenvalue")
elif eigenvalue != eigenval and None not in (eigenval, eigenvalue):
raise ValueError(
"Inconsistent values are given: 'eigenval'={}, "
"'eigenvalue'={}".format(eigenval, eigenvalue))
else:
if eigenval is not None:
eigenvalue = eigenval
if (size, rows, cols) == (None, None, None):
raise ValueError("Must supply a matrix size")
if size is not None:
rows, cols = size, size
elif rows is not None and cols is None:
cols = rows
elif cols is not None and rows is None:
rows = cols
rows, cols = as_int(rows), as_int(cols)
return klass._eval_jordan_block(rows, cols, eigenvalue, band)
@classmethod
def ones(kls, rows, cols=None, **kwargs):
"""Returns a matrix of ones.
Args
====
rows : rows of the matrix
cols : cols of the matrix (if None, cols=rows)
kwargs
======
cls : class of the returned matrix
"""
if cols is None:
cols = rows
klass = kwargs.get('cls', kls)
rows, cols = as_int(rows), as_int(cols)
return klass._eval_ones(rows, cols)
@classmethod
def zeros(kls, rows, cols=None, **kwargs):
"""Returns a matrix of zeros.
Args
====
rows : rows of the matrix
cols : cols of the matrix (if None, cols=rows)
kwargs
======
cls : class of the returned matrix
"""
if cols is None:
cols = rows
if rows < 0 or cols < 0:
raise ValueError("Cannot create a {} x {} matrix. "
"Both dimensions must be positive".format(rows, cols))
klass = kwargs.get('cls', kls)
rows, cols = as_int(rows), as_int(cols)
return klass._eval_zeros(rows, cols)
@classmethod
def companion(kls, poly):
"""Returns a companion matrix of a polynomial.
Examples
========
>>> from sympy import Matrix, Poly, Symbol, symbols
>>> x = Symbol('x')
>>> c0, c1, c2, c3, c4 = symbols('c0:5')
>>> p = Poly(c0 + c1*x + c2*x**2 + c3*x**3 + c4*x**4 + x**5, x)
>>> Matrix.companion(p)
Matrix([
[0, 0, 0, 0, -c0],
[1, 0, 0, 0, -c1],
[0, 1, 0, 0, -c2],
[0, 0, 1, 0, -c3],
[0, 0, 0, 1, -c4]])
"""
poly = kls._sympify(poly)
if not isinstance(poly, Poly):
raise ValueError("{} must be a Poly instance.".format(poly))
if not poly.is_monic:
raise ValueError("{} must be a monic polynomial.".format(poly))
if not poly.is_univariate:
raise ValueError(
"{} must be a univariate polynomial.".format(poly))
size = poly.degree()
if not size >= 1:
raise ValueError(
"{} must have degree not less than 1.".format(poly))
coeffs = poly.all_coeffs()
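# The companion matrix has ones on the first subdiagonal and the
# negated coefficients of the monic polynomial (constant term at the
# top) in its last column.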
def entry(i, j):
if j == size - 1:
return -coeffs[-1 - i]
elif i == j + 1:
return kls.one
return kls.zero
return kls._new(size, size, entry)
@classmethod
def wilkinson(kls, n, **kwargs):
"""Returns two square Wilkinson Matrix of size 2*n + 1
$W_{2n + 1}^-, W_{2n + 1}^+ =$ Wilkinson(n)
Examples
========
>>> from sympy.matrices import Matrix
>>> wminus, wplus = Matrix.wilkinson(3)
>>> wminus
Matrix([
[-3, 1, 0, 0, 0, 0, 0],
[ 1, -2, 1, 0, 0, 0, 0],
[ 0, 1, -1, 1, 0, 0, 0],
[ 0, 0, 1, 0, 1, 0, 0],
[ 0, 0, 0, 1, 1, 1, 0],
[ 0, 0, 0, 0, 1, 2, 1],
[ 0, 0, 0, 0, 0, 1, 3]])
>>> wplus
Matrix([
[3, 1, 0, 0, 0, 0, 0],
[1, 2, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 1, 2, 1],
[0, 0, 0, 0, 0, 1, 3]])
References
==========
.. [1] https://blogs.mathworks.com/cleve/2013/04/15/wilkinsons-matrices-2/
.. [2] J. H. Wilkinson, The Algebraic Eigenvalue Problem, Claredon Press, Oxford, 1965, 662 pp.
"""
klass = kwargs.get('cls', kls)
n = as_int(n)
return klass._eval_wilkinson(n)
class MatrixProperties(MatrixRequired):
"""Provides basic properties of a matrix."""
def _eval_atoms(self, *types):
result = set()
for i in self:
result.update(i.atoms(*types))
return result
def _eval_free_symbols(self):
return set().union(*(i.free_symbols for i in self if i))
def _eval_has(self, *patterns):
return any(a.has(*patterns) for a in self)
def _eval_is_anti_symmetric(self, simpfunc):
if not all(simpfunc(self[i, j] + self[j, i]).is_zero for i in range(self.rows) for j in range(self.cols)):
return False
return True
def _eval_is_diagonal(self):
for i in range(self.rows):
for j in range(self.cols):
if i != j and self[i, j]:
return False
return True
# _eval_is_hermitian is called by some general SymPy
# routines and has a different *args signature. Make
# sure the names don't clash by adding `_matrix_` in name.
def _eval_is_matrix_hermitian(self, simpfunc):
mat = self._new(self.rows, self.cols, lambda i, j: simpfunc(self[i, j] - self[j, i].conjugate()))
return mat.is_zero_matrix
def _eval_is_Identity(self) -> FuzzyBool:
def dirac(i, j):
if i == j:
return 1
return 0
return all(self[i, j] == dirac(i, j)
for i in range(self.rows)
for j in range(self.cols))
def _eval_is_lower_hessenberg(self):
return all(self[i, j].is_zero
for i in range(self.rows)
for j in range(i + 2, self.cols))
def _eval_is_lower(self):
return all(self[i, j].is_zero
for i in range(self.rows)
for j in range(i + 1, self.cols))
def _eval_is_symbolic(self):
return self.has(Symbol)
def _eval_is_symmetric(self, simpfunc):
mat = self._new(self.rows, self.cols, lambda i, j: simpfunc(self[i, j] - self[j, i]))
return mat.is_zero_matrix
def _eval_is_zero_matrix(self):
if any(i.is_zero == False for i in self):
return False
if any(i.is_zero is None for i in self):
return None
return True
def _eval_is_upper_hessenberg(self):
return all(self[i, j].is_zero
for i in range(2, self.rows)
for j in range(min(self.cols, (i - 1))))
def _eval_values(self):
return [i for i in self if not i.is_zero]
def _has_positive_diagonals(self):
diagonal_entries = (self[i, i] for i in range(self.rows))
return fuzzy_and(x.is_positive for x in diagonal_entries)
def _has_nonnegative_diagonals(self):
diagonal_entries = (self[i, i] for i in range(self.rows))
return fuzzy_and(x.is_nonnegative for x in diagonal_entries)
def atoms(self, *types):
"""Returns the atoms that form the current object.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.matrices import Matrix
>>> Matrix([[x]])
Matrix([[x]])
>>> _.atoms()
{x}
>>> Matrix([[x, y], [y, x]])
Matrix([
[x, y],
[y, x]])
>>> _.atoms()
{x, y}
"""
types = tuple(t if isinstance(t, type) else type(t) for t in types)
if not types:
types = (Atom,)
return self._eval_atoms(*types)
@property
def free_symbols(self):
"""Returns the free symbols within the matrix.
Examples
========
>>> from sympy.abc import x
>>> from sympy.matrices import Matrix
>>> Matrix([[x], [1]]).free_symbols
{x}
"""
return self._eval_free_symbols()
def has(self, *patterns):
"""Test whether any subexpression matches any of the patterns.
Examples
========
>>> from sympy import Matrix, SparseMatrix, Float
>>> from sympy.abc import x, y
>>> A = Matrix(((1, x), (0.2, 3)))
>>> B = SparseMatrix(((1, x), (0.2, 3)))
>>> A.has(x)
True
>>> A.has(y)
False
>>> A.has(Float)
True
>>> B.has(x)
True
>>> B.has(y)
False
>>> B.has(Float)
True
"""
return self._eval_has(*patterns)
def is_anti_symmetric(self, simplify=True):
"""Check if matrix M is an antisymmetric matrix,
that is, M is a square matrix with all M[i, j] == -M[j, i].
When ``simplify=True`` (default), the sum M[i, j] + M[j, i] is
simplified before testing to see if it is zero. By default,
the SymPy simplify function is used. To use a custom function
set simplify to a function that accepts a single argument which
returns a simplified expression. To skip simplification, set
simplify to False but note that although this will be faster,
it may induce false negatives.
Examples
========
>>> from sympy import Matrix, symbols
>>> m = Matrix(2, 2, [0, 1, -1, 0])
>>> m
Matrix([
[ 0, 1],
[-1, 0]])
>>> m.is_anti_symmetric()
True
>>> x, y = symbols('x y')
>>> m = Matrix(2, 3, [0, 0, x, -y, 0, 0])
>>> m
Matrix([
[ 0, 0, x],
[-y, 0, 0]])
>>> m.is_anti_symmetric()
False
>>> from sympy.abc import x, y
>>> m = Matrix(3, 3, [0, x**2 + 2*x + 1, y,
... -(x + 1)**2, 0, x*y,
... -y, -x*y, 0])
Simplification of matrix elements is done by default so even
though two elements which should be equal and opposite wouldn't
pass an equality test, the matrix is still reported as
anti-symmetric:
>>> m[0, 1] == -m[1, 0]
False
>>> m.is_anti_symmetric()
True
If 'simplify=False' is used for the case when a Matrix is already
simplified, this will speed things up. Here, we see that without
simplification the matrix does not appear anti-symmetric:
>>> m.is_anti_symmetric(simplify=False)
False
But if the matrix were already expanded, then it would appear
anti-symmetric and simplification in the is_anti_symmetric routine
is not needed:
>>> m = m.expand()
>>> m.is_anti_symmetric(simplify=False)
True
"""
# accept custom simplification
simpfunc = simplify
if not isfunction(simplify):
simpfunc = _simplify if simplify else lambda x: x
if not self.is_square:
return False
return self._eval_is_anti_symmetric(simpfunc)
def is_diagonal(self):
"""Check if matrix is diagonal,
that is matrix in which the entries outside the main diagonal are all zero.
Examples
========
>>> from sympy import Matrix, diag
>>> m = Matrix(2, 2, [1, 0, 0, 2])
>>> m
Matrix([
[1, 0],
[0, 2]])
>>> m.is_diagonal()
True
>>> m = Matrix(2, 2, [1, 1, 0, 2])
>>> m
Matrix([
[1, 1],
[0, 2]])
>>> m.is_diagonal()
False
>>> m = diag(1, 2, 3)
>>> m
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> m.is_diagonal()
True
See Also
========
is_lower
is_upper
sympy.matrices.matrices.MatrixEigen.is_diagonalizable
diagonalize
"""
return self._eval_is_diagonal()
@property
def is_weakly_diagonally_dominant(self):
r"""Tests if the matrix is row weakly diagonally dominant.
Explanation
===========
An $n, n$ matrix $A$ is row weakly diagonally dominant if
.. math::
\left|A_{i, i}\right| \ge \sum_{j = 0, j \neq i}^{n-1}
\left|A_{i, j}\right| \quad {\text{for all }}
i \in \{ 0, ..., n-1 \}
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix([[3, -2, 1], [1, -3, 2], [-1, 2, 4]])
>>> A.is_weakly_diagonally_dominant
True
>>> A = Matrix([[-2, 2, 1], [1, 3, 2], [1, -2, 0]])
>>> A.is_weakly_diagonally_dominant
False
>>> A = Matrix([[-4, 2, 1], [1, 6, 2], [1, -2, 5]])
>>> A.is_weakly_diagonally_dominant
True
Notes
=====
If you want to test whether a matrix is column diagonally
dominant, you can apply the test after transposing the matrix.
"""
if not self.is_square:
return False
rows, cols = self.shape
def test_row(i):
summation = self.zero
for j in range(cols):
if i != j:
summation += Abs(self[i, j])
return (Abs(self[i, i]) - summation).is_nonnegative
return fuzzy_and(test_row(i) for i in range(rows))
@property
def is_strongly_diagonally_dominant(self):
r"""Tests if the matrix is row strongly diagonally dominant.
Explanation
===========
An $n, n$ matrix $A$ is row strongly diagonally dominant if
.. math::
\left|A_{i, i}\right| > \sum_{j = 0, j \neq i}^{n-1}
\left|A_{i, j}\right| \quad {\text{for all }}
i \in \{ 0, ..., n-1 \}
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix([[3, -2, 1], [1, -3, 2], [-1, 2, 4]])
>>> A.is_strongly_diagonally_dominant
False
>>> A = Matrix([[-2, 2, 1], [1, 3, 2], [1, -2, 0]])
>>> A.is_strongly_diagonally_dominant
False
>>> A = Matrix([[-4, 2, 1], [1, 6, 2], [1, -2, 5]])
>>> A.is_strongly_diagonally_dominant
True
Notes
=====
If you want to test whether a matrix is column diagonally
dominant, you can apply the test after transposing the matrix.
"""
if not self.is_square:
return False
rows, cols = self.shape
def test_row(i):
summation = self.zero
for j in range(cols):
if i != j:
summation += Abs(self[i, j])
return (Abs(self[i, i]) - summation).is_positive
return fuzzy_and(test_row(i) for i in range(rows))
@property
def is_hermitian(self):
"""Checks if the matrix is Hermitian.
In a Hermitian matrix, element i,j is the complex conjugate of
element j,i.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy import I
>>> from sympy.abc import x
>>> a = Matrix([[1, I], [-I, 1]])
>>> a
Matrix([
[ 1, I],
[-I, 1]])
>>> a.is_hermitian
True
>>> a[0, 0] = 2*I
>>> a.is_hermitian
False
>>> a[0, 0] = x
>>> a.is_hermitian
>>> a[0, 1] = a[1, 0]*I
>>> a.is_hermitian
False
"""
if not self.is_square:
return False
return self._eval_is_matrix_hermitian(_simplify)
@property
def is_Identity(self) -> FuzzyBool:
if not self.is_square:
return False
return self._eval_is_Identity()
@property
def is_lower_hessenberg(self):
r"""Checks if the matrix is in the lower-Hessenberg form.
A lower-Hessenberg matrix has zero entries
above the first superdiagonal.
Examples
========
>>> from sympy.matrices import Matrix
>>> a = Matrix([[1, 2, 0, 0], [5, 2, 3, 0], [3, 4, 3, 7], [5, 6, 1, 1]])
>>> a
Matrix([
[1, 2, 0, 0],
[5, 2, 3, 0],
[3, 4, 3, 7],
[5, 6, 1, 1]])
>>> a.is_lower_hessenberg
True
See Also
========
is_upper_hessenberg
is_lower
"""
return self._eval_is_lower_hessenberg()
@property
def is_lower(self):
"""Check if matrix is a lower triangular matrix. True can be returned
even if the matrix is not square.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, [1, 0, 0, 1])
>>> m
Matrix([
[1, 0],
[0, 1]])
>>> m.is_lower
True
>>> m = Matrix(4, 3, [0, 0, 0, 2, 0, 0, 1, 4, 0, 6, 6, 5])
>>> m
Matrix([
[0, 0, 0],
[2, 0, 0],
[1, 4, 0],
[6, 6, 5]])
>>> m.is_lower
True
>>> from sympy.abc import x, y
>>> m = Matrix(2, 2, [x**2 + y, y**2 + x, 0, x + y])
>>> m
Matrix([
[x**2 + y, x + y**2],
[ 0, x + y]])
>>> m.is_lower
False
See Also
========
is_upper
is_diagonal
is_lower_hessenberg
"""
return self._eval_is_lower()
@property
def is_square(self):
"""Checks if a matrix is square.
A matrix is square if the number of rows equals the number of columns.
The empty matrix is square by definition, since the number of rows and
the number of columns are both zero.
Examples
========
>>> from sympy import Matrix
>>> a = Matrix([[1, 2, 3], [4, 5, 6]])
>>> b = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> c = Matrix([])
>>> a.is_square
False
>>> b.is_square
True
>>> c.is_square
True
"""
return self.rows == self.cols
def is_symbolic(self):
"""Checks if any elements contain Symbols.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.is_symbolic()
True
"""
return self._eval_is_symbolic()
def is_symmetric(self, simplify=True):
"""Check if matrix is symmetric matrix,
that is square matrix and is equal to its transpose.
By default, simplifications occur before testing symmetry.
They can be skipped using 'simplify=False'; while speeding things a bit,
this may however induce false negatives.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, [0, 1, 1, 2])
>>> m
Matrix([
[0, 1],
[1, 2]])
>>> m.is_symmetric()
True
>>> m = Matrix(2, 2, [0, 1, 2, 0])
>>> m
Matrix([
[0, 1],
[2, 0]])
>>> m.is_symmetric()
False
>>> m = Matrix(2, 3, [0, 0, 0, 0, 0, 0])
>>> m
Matrix([
[0, 0, 0],
[0, 0, 0]])
>>> m.is_symmetric()
False
>>> from sympy.abc import x, y
>>> m = Matrix(3, 3, [1, x**2 + 2*x + 1, y, (x + 1)**2, 2, 0, y, 0, 3])
>>> m
Matrix([
[ 1, x**2 + 2*x + 1, y],
[(x + 1)**2, 2, 0],
[ y, 0, 3]])
>>> m.is_symmetric()
True
If the matrix is already simplified, you may speed up the
is_symmetric() test by using 'simplify=False'.
>>> bool(m.is_symmetric(simplify=False))
False
>>> m1 = m.expand()
>>> m1.is_symmetric(simplify=False)
True
"""
simpfunc = simplify
if not isfunction(simplify):
simpfunc = _simplify if simplify else lambda x: x
if not self.is_square:
return False
return self._eval_is_symmetric(simpfunc)
@property
def is_upper_hessenberg(self):
"""Checks if the matrix is the upper-Hessenberg form.
The upper hessenberg matrix has zero entries
below the first subdiagonal.
Examples
========
>>> from sympy.matrices import Matrix
>>> a = Matrix([[1, 4, 2, 3], [3, 4, 1, 7], [0, 2, 3, 4], [0, 0, 1, 3]])
>>> a
Matrix([
[1, 4, 2, 3],
[3, 4, 1, 7],
[0, 2, 3, 4],
[0, 0, 1, 3]])
>>> a.is_upper_hessenberg
True
See Also
========
is_lower_hessenberg
is_upper
"""
return self._eval_is_upper_hessenberg()
@property
def is_upper(self):
"""Check if matrix is an upper triangular matrix. True can be returned
even if the matrix is not square.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, [1, 0, 0, 1])
>>> m
Matrix([
[1, 0],
[0, 1]])
>>> m.is_upper
True
>>> m = Matrix(4, 3, [5, 1, 9, 0, 4, 6, 0, 0, 5, 0, 0, 0])
>>> m
Matrix([
[5, 1, 9],
[0, 4, 6],
[0, 0, 5],
[0, 0, 0]])
>>> m.is_upper
True
>>> m = Matrix(2, 3, [4, 2, 5, 6, 1, 1])
>>> m
Matrix([
[4, 2, 5],
[6, 1, 1]])
>>> m.is_upper
False
See Also
========
is_lower
is_diagonal
is_upper_hessenberg
"""
return all(self[i, j].is_zero
for i in range(1, self.rows)
for j in range(min(i, self.cols)))
@property
def is_zero_matrix(self):
"""Checks if a matrix is a zero matrix.
A matrix is zero if every element is zero. A matrix need not be square
to be considered zero. The empty matrix is zero by the principle of
vacuous truth. For a matrix that may or may not be zero (e.g.
contains a symbol), this will be None.
Examples
========
>>> from sympy import Matrix, zeros
>>> from sympy.abc import x
>>> a = Matrix([[0, 0], [0, 0]])
>>> b = zeros(3, 4)
>>> c = Matrix([[0, 1], [0, 0]])
>>> d = Matrix([])
>>> e = Matrix([[x, 0], [0, 0]])
>>> a.is_zero_matrix
True
>>> b.is_zero_matrix
True
>>> c.is_zero_matrix
False
>>> d.is_zero_matrix
True
>>> e.is_zero_matrix
"""
return self._eval_is_zero_matrix()
def values(self):
"""Return non-zero values of self."""
return self._eval_values()
class MatrixOperations(MatrixRequired):
"""Provides basic matrix shape and elementwise
operations. Should not be instantiated directly."""
def _eval_adjoint(self):
return self.transpose().conjugate()
def _eval_applyfunc(self, f):
out = self._new(self.rows, self.cols, [f(x) for x in self])
return out
def _eval_as_real_imag(self): # type: ignore
from sympy.functions.elementary.complexes import re, im
return (self.applyfunc(re), self.applyfunc(im))
def _eval_conjugate(self):
return self.applyfunc(lambda x: x.conjugate())
def _eval_permute_cols(self, perm):
# apply the permutation to a list
mapping = list(perm)
def entry(i, j):
return self[i, mapping[j]]
return self._new(self.rows, self.cols, entry)
def _eval_permute_rows(self, perm):
# apply the permutation to a list
mapping = list(perm)
def entry(i, j):
return self[mapping[i], j]
return self._new(self.rows, self.cols, entry)
def _eval_trace(self):
return sum(self[i, i] for i in range(self.rows))
def _eval_transpose(self):
return self._new(self.cols, self.rows, lambda i, j: self[j, i])
def adjoint(self):
"""Conjugate transpose or Hermitian conjugation."""
return self._eval_adjoint()
def applyfunc(self, f):
"""Apply a function to each element of the matrix.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, lambda i, j: i*2+j)
>>> m
Matrix([
[0, 1],
[2, 3]])
>>> m.applyfunc(lambda i: 2*i)
Matrix([
[0, 2],
[4, 6]])
"""
if not callable(f):
raise TypeError("`f` must be callable.")
return self._eval_applyfunc(f)
def as_real_imag(self, deep=True, **hints):
"""Returns a tuple containing the (real, imaginary) part of matrix."""
# XXX: Ignoring deep and hints...
return self._eval_as_real_imag()
def conjugate(self):
"""Return the by-element conjugation.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> from sympy import I
>>> a = SparseMatrix(((1, 2 + I), (3, 4), (I, -I)))
>>> a
Matrix([
[1, 2 + I],
[3, 4],
[I, -I]])
>>> a.C
Matrix([
[ 1, 2 - I],
[ 3, 4],
[-I, I]])
See Also
========
transpose: Matrix transposition
H: Hermite conjugation
sympy.matrices.matrices.MatrixBase.D: Dirac conjugation
"""
return self._eval_conjugate()
def doit(self, **kwargs):
return self.applyfunc(lambda x: x.doit())
def evalf(self, n=15, subs=None, maxn=100, chop=False, strict=False, quad=None, verbose=False):
"""Apply evalf() to each element of self."""
options = {'subs':subs, 'maxn':maxn, 'chop':chop, 'strict':strict,
'quad':quad, 'verbose':verbose}
return self.applyfunc(lambda i: i.evalf(n, **options))
def expand(self, deep=True, modulus=None, power_base=True, power_exp=True,
mul=True, log=True, multinomial=True, basic=True, **hints):
"""Apply core.function.expand to each entry of the matrix.
Examples
========
>>> from sympy.abc import x
>>> from sympy.matrices import Matrix
>>> Matrix(1, 1, [x*(x+1)])
Matrix([[x*(x + 1)]])
>>> _.expand()
Matrix([[x**2 + x]])
"""
return self.applyfunc(lambda x: x.expand(
deep, modulus, power_base, power_exp, mul, log, multinomial, basic,
**hints))
@property
def H(self):
"""Return Hermite conjugate.
Examples
========
>>> from sympy import Matrix, I
>>> m = Matrix((0, 1 + I, 2, 3))
>>> m
Matrix([
[ 0],
[1 + I],
[ 2],
[ 3]])
>>> m.H
Matrix([[0, 1 - I, 2, 3]])
See Also
========
conjugate: By-element conjugation
sympy.matrices.matrices.MatrixBase.D: Dirac conjugation
"""
return self.T.C
def permute(self, perm, orientation='rows', direction='forward'):
r"""Permute the rows or columns of a matrix by the given list of
swaps.
Parameters
==========
perm : Permutation, list, or list of lists
A representation for the permutation.
If it is ``Permutation``, it is used directly with some
resizing with respect to the matrix size.
If it is specified as a list of lists,
(e.g., ``[[0, 1], [0, 2]]``), then the permutation is formed
from applying the product of cycles. The direction in which the
cyclic product is applied is described below.
If it is specified as a list, the list should represent
an array form of a permutation (e.g., ``[1, 2, 0]``), which
would form the swapping function
`0 \mapsto 1, 1 \mapsto 2, 2 \mapsto 0`.
orientation : 'rows', 'cols'
A flag to control whether to permute the rows or the columns
direction : 'forward', 'backward'
A flag to control whether to apply the permutations from
the start of the list first, or from the back of the list
first.
For example, if the permutation specification is
``[[0, 1], [0, 2]]``,
If the flag is set to ``'forward'``, the cycle would be
formed as `0 \mapsto 2, 2 \mapsto 1, 1 \mapsto 0`.
If the flag is set to ``'backward'``, the cycle would be
formed as `0 \mapsto 1, 1 \mapsto 2, 2 \mapsto 0`.
If the argument ``perm`` is not in the form of a list of lists,
this flag has no effect.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.permute([[0, 1], [0, 2]], orientation='rows', direction='forward')
Matrix([
[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.permute([[0, 1], [0, 2]], orientation='rows', direction='backward')
Matrix([
[0, 1, 0],
[0, 0, 1],
[1, 0, 0]])
Notes
=====
Let a bijective function
`\sigma : \mathbb{N}_0 \rightarrow \mathbb{N}_0` denote the
permutation.
If the matrix `A` is the matrix to permute, represented as
a horizontal or a vertical stack of vectors:
.. math::
A =
\begin{bmatrix}
a_0 \\ a_1 \\ \vdots \\ a_{n-1}
\end{bmatrix} =
\begin{bmatrix}
\alpha_0 & \alpha_1 & \cdots & \alpha_{n-1}
\end{bmatrix}
If the matrix `B` is the result, the permutation of matrix rows
is defined as:
.. math::
B := \begin{bmatrix}
a_{\sigma(0)} \\ a_{\sigma(1)} \\ \vdots \\ a_{\sigma(n-1)}
\end{bmatrix}
And the permutation of matrix columns is defined as:
.. math::
B := \begin{bmatrix}
\alpha_{\sigma(0)} & \alpha_{\sigma(1)} &
\cdots & \alpha_{\sigma(n-1)}
\end{bmatrix}
"""
from sympy.combinatorics import Permutation
# allow british variants and `columns`
if direction == 'forwards':
direction = 'forward'
if direction == 'backwards':
direction = 'backward'
if orientation == 'columns':
orientation = 'cols'
if direction not in ('forward', 'backward'):
raise TypeError("direction='{}' is an invalid kwarg. "
"Try 'forward' or 'backward'".format(direction))
if orientation not in ('rows', 'cols'):
raise TypeError("orientation='{}' is an invalid kwarg. "
"Try 'rows' or 'cols'".format(orientation))
if not isinstance(perm, (Permutation, Iterable)):
raise ValueError(
"{} must be a list, a list of lists, "
"or a SymPy permutation object.".format(perm))
# ensure all swaps are in range
max_index = self.rows if orientation == 'rows' else self.cols
if not all(0 <= t <= max_index for t in flatten(list(perm))):
raise IndexError("`swap` indices out of range.")
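# For a list of cycles, direction='forward' applies the first listed
# cycle first, which requires reversing the list before the
# Permutation is constructed from it.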
if perm and not isinstance(perm, Permutation) and \
isinstance(perm[0], Iterable):
if direction == 'forward':
perm = list(reversed(perm))
perm = Permutation(perm, size=max_index+1)
else:
perm = Permutation(perm, size=max_index+1)
if orientation == 'rows':
return self._eval_permute_rows(perm)
if orientation == 'cols':
return self._eval_permute_cols(perm)
def permute_cols(self, swaps, direction='forward'):
"""Alias for
``self.permute(swaps, orientation='cols', direction=direction)``
See Also
========
permute
"""
return self.permute(swaps, orientation='cols', direction=direction)
def permute_rows(self, swaps, direction='forward'):
"""Alias for
``self.permute(swaps, orientation='rows', direction=direction)``
See Also
========
permute
"""
return self.permute(swaps, orientation='rows', direction=direction)
def refine(self, assumptions=True):
"""Apply refine to each element of the matrix.
Examples
========
>>> from sympy import Symbol, Matrix, Abs, sqrt, Q
>>> x = Symbol('x')
>>> Matrix([[Abs(x)**2, sqrt(x**2)],[sqrt(x**2), Abs(x)**2]])
Matrix([
[ Abs(x)**2, sqrt(x**2)],
[sqrt(x**2), Abs(x)**2]])
>>> _.refine(Q.real(x))
Matrix([
[ x**2, Abs(x)],
[Abs(x), x**2]])
"""
return self.applyfunc(lambda x: refine(x, assumptions))
def replace(self, F, G, map=False, simultaneous=True, exact=None):
"""Replaces Function F in Matrix entries with Function G.
Examples
========
>>> from sympy import symbols, Function, Matrix
>>> F, G = symbols('F, G', cls=Function)
>>> M = Matrix(2, 2, lambda i, j: F(i+j)) ; M
Matrix([
[F(0), F(1)],
[F(1), F(2)]])
>>> N = M.replace(F,G)
>>> N
Matrix([
[G(0), G(1)],
[G(1), G(2)]])
"""
return self.applyfunc(
lambda x: x.replace(F, G, map=map, simultaneous=simultaneous, exact=exact))
def rot90(self, k=1):
"""Rotates Matrix by 90 degrees
Parameters
==========
k : int
Specifies how many times the matrix is rotated by 90 degrees
(clockwise when positive, counter-clockwise when negative).
Examples
========
>>> from sympy import Matrix, symbols
>>> A = Matrix(2, 2, symbols('a:d'))
>>> A
Matrix([
[a, b],
[c, d]])
Rotating the matrix clockwise one time:
>>> A.rot90(1)
Matrix([
[c, a],
[d, b]])
Rotating the matrix anticlockwise two times:
>>> A.rot90(-2)
Matrix([
[d, c],
[b, a]])
"""
mod = k%4
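# A clockwise quarter turn is "reverse the rows, then transpose";
# two quarter turns reverse both axes; three turns are "reverse the
# columns, then transpose".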
if mod == 0:
return self
if mod == 1:
return self[::-1, ::].T
if mod == 2:
return self[::-1, ::-1]
if mod == 3:
return self[::, ::-1].T
def simplify(self, **kwargs):
"""Apply simplify to each element of the matrix.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import sin, cos
>>> from sympy.matrices import SparseMatrix
>>> SparseMatrix(1, 1, [x*sin(y)**2 + x*cos(y)**2])
Matrix([[x*sin(y)**2 + x*cos(y)**2]])
>>> _.simplify()
Matrix([[x]])
"""
return self.applyfunc(lambda x: x.simplify(**kwargs))
def subs(self, *args, **kwargs): # should mirror core.basic.subs
"""Return a new matrix with subs applied to each entry.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.matrices import SparseMatrix, Matrix
>>> SparseMatrix(1, 1, [x])
Matrix([[x]])
>>> _.subs(x, y)
Matrix([[y]])
>>> Matrix(_).subs(y, x)
Matrix([[x]])
"""
if len(args) == 1 and not isinstance(args[0], (dict, set)) and iter(args[0]) and not is_sequence(args[0]):
args = (list(args[0]),)
return self.applyfunc(lambda x: x.subs(*args, **kwargs))
def trace(self):
"""
Returns the trace of a square matrix i.e. the sum of the
diagonal elements.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix(2, 2, [1, 2, 3, 4])
>>> A.trace()
5
"""
if self.rows != self.cols:
raise NonSquareMatrixError()
return self._eval_trace()
def transpose(self):
"""
Returns the transpose of the matrix.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix(2, 2, [1, 2, 3, 4])
>>> A.transpose()
Matrix([
[1, 3],
[2, 4]])
>>> from sympy import Matrix, I
>>> m=Matrix(((1, 2+I), (3, 4)))
>>> m
Matrix([
[1, 2 + I],
[3, 4]])
>>> m.transpose()
Matrix([
[ 1, 3],
[2 + I, 4]])
>>> m.T == m.transpose()
True
See Also
========
conjugate: By-element conjugation
"""
return self._eval_transpose()
@property
def T(self):
'''Matrix transposition'''
return self.transpose()
@property
def C(self):
'''By-element conjugation'''
return self.conjugate()
def n(self, *args, **kwargs):
"""Apply evalf() to each element of self."""
return self.evalf(*args, **kwargs)
def xreplace(self, rule): # should mirror core.basic.xreplace
"""Return a new matrix with xreplace applied to each entry.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.matrices import SparseMatrix, Matrix
>>> SparseMatrix(1, 1, [x])
Matrix([[x]])
>>> _.xreplace({x: y})
Matrix([[y]])
>>> Matrix(_).xreplace({y: x})
Matrix([[x]])
"""
return self.applyfunc(lambda x: x.xreplace(rule))
def _eval_simplify(self, **kwargs):
# XXX: We can't use self.simplify here as mutable subclasses will
# override simplify and have it return None
return MatrixOperations.simplify(self, **kwargs)
def _eval_trigsimp(self, **opts):
from sympy.simplify import trigsimp
return self.applyfunc(lambda x: trigsimp(x, **opts))
def upper_triangular(self, k=0):
"""returns the elements on and above the kth diagonal of a matrix.
If k is not specified then simply returns upper-triangular portion
of a matrix
Examples
========
>>> from sympy import ones
>>> A = ones(4)
>>> A.upper_triangular()
Matrix([
[1, 1, 1, 1],
[0, 1, 1, 1],
[0, 0, 1, 1],
[0, 0, 0, 1]])
>>> A.upper_triangular(2)
Matrix([
[0, 0, 1, 1],
[0, 0, 0, 1],
[0, 0, 0, 0],
[0, 0, 0, 0]])
>>> A.upper_triangular(-1)
Matrix([
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 1, 1, 1],
[0, 0, 1, 1]])
"""
def entry(i, j):
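# Keep entries on or above the kth diagonal, i.e. those with j >= i + k.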
return self[i, j] if i + k <= j else self.zero
return self._new(self.rows, self.cols, entry)
def lower_triangular(self, k=0):
"""returns the elements on and below the kth diagonal of a matrix.
If k is not specified then simply returns lower-triangular portion
of a matrix
Examples
========
>>> from sympy import ones
>>> A = ones(4)
>>> A.lower_triangular()
Matrix([
[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 0],
[1, 1, 1, 1]])
>>> A.lower_triangular(-2)
Matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 0, 0],
[1, 1, 0, 0]])
>>> A.lower_triangular(1)
Matrix([
[1, 1, 0, 0],
[1, 1, 1, 0],
[1, 1, 1, 1],
[1, 1, 1, 1]])
"""
def entry(i, j):
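# Keep entries on or below the kth diagonal, i.e. those with j <= i + k.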
return self[i, j] if i + k >= j else self.zero
return self._new(self.rows, self.cols, entry)
class MatrixArithmetic(MatrixRequired):
"""Provides basic matrix arithmetic operations.
Should not be instantiated directly."""
_op_priority = 10.01
def _eval_Abs(self):
return self._new(self.rows, self.cols, lambda i, j: Abs(self[i, j]))
def _eval_add(self, other):
return self._new(self.rows, self.cols,
lambda i, j: self[i, j] + other[i, j])
def _eval_matrix_mul(self, other):
def entry(i, j):
vec = [self[i,k]*other[k,j] for k in range(self.cols)]
try:
return Add(*vec)
except (TypeError, SympifyError):
# Some matrices don't work with `sum` or `Add`
# They don't work with `sum` because `sum` tries to add `0`
# Fall back to a safe way to multiply if the `Add` fails.
return reduce(lambda a, b: a + b, vec)
return self._new(self.rows, other.cols, entry)
def _eval_matrix_mul_elementwise(self, other):
return self._new(self.rows, self.cols, lambda i, j: self[i,j]*other[i,j])
def _eval_matrix_rmul(self, other):
def entry(i, j):
return sum(other[i,k]*self[k,j] for k in range(other.cols))
return self._new(other.rows, self.cols, entry)
def _eval_pow_by_recursion(self, num):
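# Exponentiation by squaring: odd exponents peel off one factor,
# even exponents square the half power.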
if num == 1:
return self
if num % 2 == 1:
a, b = self, self._eval_pow_by_recursion(num - 1)
else:
a = b = self._eval_pow_by_recursion(num // 2)
return a.multiply(b)
def _eval_pow_by_cayley(self, exp):
from sympy.discrete.recurrences import linrec_coeffs
row = self.shape[0]
p = self.charpoly()
coeffs = (-p).all_coeffs()[1:]
coeffs = linrec_coeffs(coeffs, exp)
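# By the Cayley-Hamilton theorem, self**exp is a linear combination of
# I, self, ..., self**(n-1); linrec_coeffs derives the coefficients of
# that combination from the characteristic polynomial.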
new_mat = self.eye(row)
ans = self.zeros(row)
for i in range(row):
ans += coeffs[i]*new_mat
new_mat *= self
return ans
def _eval_pow_by_recursion_dotprodsimp(self, num, prevsimp=None):
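# prevsimp tracks, per entry, whether intermediate simplification is
# still paying off; once _dotprodsimp reports it did not help for an
# entry, that entry is left untouched in later squarings.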
if prevsimp is None:
prevsimp = [True]*len(self)
if num == 1:
return self
if num % 2 == 1:
a, b = self, self._eval_pow_by_recursion_dotprodsimp(num - 1,
prevsimp=prevsimp)
else:
a = b = self._eval_pow_by_recursion_dotprodsimp(num // 2,
prevsimp=prevsimp)
m = a.multiply(b, dotprodsimp=False)
lenm = len(m)
elems = [None]*lenm
for i in range(lenm):
if prevsimp[i]:
elems[i], prevsimp[i] = _dotprodsimp(m[i], withsimp=True)
else:
elems[i] = m[i]
return m._new(m.rows, m.cols, elems)
def _eval_scalar_mul(self, other):
return self._new(self.rows, self.cols, lambda i, j: self[i,j]*other)
def _eval_scalar_rmul(self, other):
return self._new(self.rows, self.cols, lambda i, j: other*self[i,j])
def _eval_Mod(self, other):
return self._new(self.rows, self.cols, lambda i, j: Mod(self[i, j], other))
# Python arithmetic functions
def __abs__(self):
"""Returns a new matrix with entry-wise absolute values."""
return self._eval_Abs()
@call_highest_priority('__radd__')
def __add__(self, other):
"""Return self + other, raising ShapeError if shapes do not match."""
if isinstance(other, NDimArray): # Matrix and array addition is currently not implemented
return NotImplemented
other = _matrixify(other)
# matrix-like objects can have shapes. This is
# our first sanity check.
if hasattr(other, 'shape'):
if self.shape != other.shape:
raise ShapeError("Matrix size mismatch: %s + %s" % (
self.shape, other.shape))
# honest SymPy matrices defer to their class's routine
if getattr(other, 'is_Matrix', False):
# call the highest-priority class's _eval_add
a, b = self, other
if a.__class__ != classof(a, b):
b, a = a, b
return a._eval_add(b)
# Matrix-like objects can be passed to CommonMatrix routines directly.
if getattr(other, 'is_MatrixLike', False):
return MatrixArithmetic._eval_add(self, other)
raise TypeError('cannot add %s and %s' % (type(self), type(other)))
@call_highest_priority('__rtruediv__')
def __truediv__(self, other):
return self * (self.one / other)
@call_highest_priority('__rmatmul__')
def __matmul__(self, other):
other = _matrixify(other)
if not getattr(other, 'is_Matrix', False) and not getattr(other, 'is_MatrixLike', False):
return NotImplemented
return self.__mul__(other)
def __mod__(self, other):
return self.applyfunc(lambda x: x % other)
@call_highest_priority('__rmul__')
def __mul__(self, other):
"""Return self*other where other is either a scalar or a matrix
of compatible dimensions.
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix([[1, 2, 3], [4, 5, 6]])
>>> 2*A == A*2 == Matrix([[2, 4, 6], [8, 10, 12]])
True
>>> B = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> A*B
Matrix([
[30, 36, 42],
[66, 81, 96]])
>>> B*A
Traceback (most recent call last):
...
ShapeError: Matrices size mismatch.
>>>
See Also
========
matrix_multiply_elementwise
"""
return self.multiply(other)
def multiply(self, other, dotprodsimp=None):
"""Same as __mul__() but with optional simplification.
Parameters
==========
dotprodsimp : bool, optional
Specifies whether intermediate term algebraic simplification is used
during matrix multiplications to control expression blowup and thus
speed up calculation. Default is off.
"""
isimpbool = _get_intermediate_simp_bool(False, dotprodsimp)
other = _matrixify(other)
# matrix-like objects can have shapes. This is
# our first sanity check. Double check other is not explicitly not a Matrix.
if (hasattr(other, 'shape') and len(other.shape) == 2 and
(getattr(other, 'is_Matrix', True) or
getattr(other, 'is_MatrixLike', True))):
if self.shape[1] != other.shape[0]:
raise ShapeError("Matrix size mismatch: %s * %s." % (
self.shape, other.shape))
# honest SymPy matrices defer to their class's routine
if getattr(other, 'is_Matrix', False):
m = self._eval_matrix_mul(other)
if isimpbool:
return m._new(m.rows, m.cols, [_dotprodsimp(e) for e in m])
return m
# Matrix-like objects can be passed to CommonMatrix routines directly.
if getattr(other, 'is_MatrixLike', False):
return MatrixArithmetic._eval_matrix_mul(self, other)
# if 'other' is not iterable then scalar multiplication.
if not isinstance(other, Iterable):
try:
return self._eval_scalar_mul(other)
except TypeError:
pass
return NotImplemented
def multiply_elementwise(self, other):
"""Return the Hadamard product (elementwise product) of A and B
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix([[0, 1, 2], [3, 4, 5]])
>>> B = Matrix([[1, 10, 100], [100, 10, 1]])
>>> A.multiply_elementwise(B)
Matrix([
[ 0, 10, 200],
[300, 40, 5]])
See Also
========
sympy.matrices.matrices.MatrixBase.cross
sympy.matrices.matrices.MatrixBase.dot
multiply
"""
if self.shape != other.shape:
raise ShapeError("Matrix shapes must agree {} != {}".format(self.shape, other.shape))
return self._eval_matrix_mul_elementwise(other)
def __neg__(self):
return self._eval_scalar_mul(-1)
@call_highest_priority('__rpow__')
def __pow__(self, exp):
"""Return self**exp a scalar or symbol."""
return self.pow(exp)
def pow(self, exp, method=None):
r"""Return self**exp a scalar or symbol.
Parameters
==========
method : multiply, mulsimp, jordan, cayley
If multiply then it returns exponentiation using recursion.
If jordan then Jordan form exponentiation will be used.
If cayley then the exponentiation is done using Cayley-Hamilton
theorem.
If mulsimp then the exponentiation is done using recursion
with dotprodsimp. This specifies whether intermediate term
algebraic simplification is used during naive matrix power to
control expression blowup and thus speed up calculation.
If None, then it heuristically decides which method to use.
"""
if method is not None and method not in ['multiply', 'mulsimp', 'jordan', 'cayley']:
raise TypeError('No such method')
if self.rows != self.cols:
raise NonSquareMatrixError()
a = self
jordan_pow = getattr(a, '_matrix_pow_by_jordan_blocks', None)
exp = sympify(exp)
if exp.is_zero:
return a._new(a.rows, a.cols, lambda i, j: int(i == j))
if exp == 1:
return a
diagonal = getattr(a, 'is_diagonal', None)
if diagonal is not None and diagonal():
return a._new(a.rows, a.cols, lambda i, j: a[i,j]**exp if i == j else 0)
if exp.is_Number and exp % 1 == 0:
if a.rows == 1:
return a._new([[a[0]**exp]])
if exp < 0:
exp = -exp
a = a.inv()
# When certain conditions are met,
# Jordan block algorithm is faster than
# computation by recursion.
if method == 'jordan':
try:
return jordan_pow(exp)
except MatrixError:
if method == 'jordan':
raise
elif method == 'cayley':
if not exp.is_Number or exp % 1 != 0:
raise ValueError("cayley method is only valid for integer powers")
return a._eval_pow_by_cayley(exp)
elif method == "mulsimp":
if not exp.is_Number or exp % 1 != 0:
raise ValueError("mulsimp method is only valid for integer powers")
return a._eval_pow_by_recursion_dotprodsimp(exp)
elif method == "multiply":
if not exp.is_Number or exp % 1 != 0:
raise ValueError("multiply method is only valid for integer powers")
return a._eval_pow_by_recursion(exp)
elif method is None and exp.is_Number and exp % 1 == 0:
# Decide heuristically which method to apply
if a.rows == 2 and exp > 100000:
return jordan_pow(exp)
elif _get_intermediate_simp_bool(True, None):
return a._eval_pow_by_recursion_dotprodsimp(exp)
elif exp > 10000:
return a._eval_pow_by_cayley(exp)
else:
return a._eval_pow_by_recursion(exp)
if jordan_pow:
try:
return jordan_pow(exp)
except NonInvertibleMatrixError:
# Raised by jordan_pow on zero determinant matrix unless exp is
# definitely known to be a non-negative integer.
# Here we raise if exp is definitely not a non-negative integer
# but otherwise we can leave this as an unevaluated MatPow.
if exp.is_integer is False or exp.is_nonnegative is False:
raise
from sympy.matrices.expressions import MatPow
return MatPow(a, exp)
@call_highest_priority('__add__')
def __radd__(self, other):
return self + other
@call_highest_priority('__matmul__')
def __rmatmul__(self, other):
other = _matrixify(other)
if not getattr(other, 'is_Matrix', False) and not getattr(other, 'is_MatrixLike', False):
return NotImplemented
return self.__rmul__(other)
@call_highest_priority('__mul__')
def __rmul__(self, other):
return self.rmultiply(other)
def rmultiply(self, other, dotprodsimp=None):
"""Same as __rmul__() but with optional simplification.
Parameters
==========
dotprodsimp : bool, optional
Specifies whether intermediate term algebraic simplification is used
during matrix multiplications to control expression blowup and thus
speed up calculation. Default is off.
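Examples
========
A minimal sketch with small dense matrices; the result is
``other * self``:
>>> from sympy import Matrix
>>> A = Matrix([[1, 2], [3, 4]])
>>> A.rmultiply(Matrix([[0, 1], [1, 0]]))
Matrix([
[3, 4],
[1, 2]])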
"""
isimpbool = _get_intermediate_simp_bool(False, dotprodsimp)
other = _matrixify(other)
# matrix-like objects can have shapes. This is
# our first sanity check. Also make sure ``other`` is not explicitly
# flagged as a non-matrix.
if (hasattr(other, 'shape') and len(other.shape) == 2 and
(getattr(other, 'is_Matrix', True) or
getattr(other, 'is_MatrixLike', True))):
if self.shape[0] != other.shape[1]:
raise ShapeError("Matrix size mismatch.")
# honest SymPy matrices defer to their class's routine
if getattr(other, 'is_Matrix', False):
m = self._eval_matrix_rmul(other)
if isimpbool:
return m._new(m.rows, m.cols, [_dotprodsimp(e) for e in m])
return m
# Matrix-like objects can be passed to CommonMatrix routines directly.
if getattr(other, 'is_MatrixLike', False):
return MatrixArithmetic._eval_matrix_rmul(self, other)
# if 'other' is not iterable then scalar multiplication.
if not isinstance(other, Iterable):
try:
return self._eval_scalar_rmul(other)
except TypeError:
pass
return NotImplemented
@call_highest_priority('__sub__')
def __rsub__(self, a):
return (-self) + a
@call_highest_priority('__rsub__')
def __sub__(self, a):
return self + (-a)
class MatrixCommon(MatrixArithmetic, MatrixOperations, MatrixProperties,
MatrixSpecial, MatrixShaping):
"""All common matrix operations including basic arithmetic, shaping,
and special matrices like `zeros` and `eye`."""
_diff_wrt = True # type: bool
class _MinimalMatrix:
"""Class providing the minimum functionality
for a matrix-like object and implementing every method
required for a `MatrixRequired`. This class does not have everything
needed to become a full-fledged SymPy object, but it will satisfy the
requirements of anything inheriting from `MatrixRequired`. If you wish
to make a specialized matrix type, make sure to implement these
methods and properties with the exception of `__init__` and `__repr__`
which are included for convenience."""
is_MatrixLike = True
_sympify = staticmethod(sympify)
_class_priority = 3
zero = S.Zero
one = S.One
is_Matrix = True
is_MatrixExpr = False
@classmethod
def _new(cls, *args, **kwargs):
return cls(*args, **kwargs)
def __init__(self, rows, cols=None, mat=None, copy=False):
if isfunction(mat):
# if we passed in a function, use that to populate the indices
mat = list(mat(i, j) for i in range(rows) for j in range(cols))
if cols is None and mat is None:
mat = rows
rows, cols = getattr(mat, 'shape', (rows, cols))
try:
# if we passed in a list of lists, flatten it and set the size
if cols is None and mat is None:
mat = rows
cols = len(mat[0])
rows = len(mat)
mat = [x for l in mat for x in l]
except (IndexError, TypeError):
pass
self.mat = tuple(self._sympify(x) for x in mat)
self.rows, self.cols = rows, cols
if self.rows is None or self.cols is None:
raise NotImplementedError("Cannot initialize matrix with given parameters")
def __getitem__(self, key):
def _normalize_slices(row_slice, col_slice):
"""Ensure that row_slice and col_slice do not have
`None` in their arguments. Any integers are converted
to slices of length 1"""
if not isinstance(row_slice, slice):
row_slice = slice(row_slice, row_slice + 1, None)
row_slice = slice(*row_slice.indices(self.rows))
if not isinstance(col_slice, slice):
col_slice = slice(col_slice, col_slice + 1, None)
col_slice = slice(*col_slice.indices(self.cols))
return (row_slice, col_slice)
def _coord_to_index(i, j):
"""Return the index in _mat corresponding
to the (i,j) position in the matrix. """
return i * self.cols + j
if isinstance(key, tuple):
i, j = key
if isinstance(i, slice) or isinstance(j, slice):
# if the coordinates are not slices, make them so
# and expand the slices so they don't contain `None`
i, j = _normalize_slices(i, j)
rowsList, colsList = list(range(self.rows))[i], \
list(range(self.cols))[j]
indices = (i * self.cols + j for i in rowsList for j in
colsList)
return self._new(len(rowsList), len(colsList),
list(self.mat[i] for i in indices))
# if the key is a tuple of ints, change
# it to an array index
key = _coord_to_index(i, j)
return self.mat[key]
def __eq__(self, other):
try:
classof(self, other)
except TypeError:
return False
return (
self.shape == other.shape and list(self) == list(other))
def __len__(self):
return self.rows*self.cols
def __repr__(self):
return "_MinimalMatrix({}, {}, {})".format(self.rows, self.cols,
self.mat)
@property
def shape(self):
return (self.rows, self.cols)
class _CastableMatrix: # this is needed here ONLY FOR TESTS.
def as_mutable(self):
return self
def as_immutable(self):
return self
class _MatrixWrapper:
"""Wrapper class providing the minimum functionality for a matrix-like
object: .rows, .cols, .shape, indexability, and iterability. CommonMatrix
math operations should work on matrix-like objects. This one is intended for
matrix-like objects which use the same indexing format as SymPy with respect
to returning matrix elements instead of rows for non-tuple indexes.
"""
is_Matrix = False # needs to be here because of __getattr__
is_MatrixLike = True
def __init__(self, mat, shape):
self.mat = mat
self.shape = shape
self.rows, self.cols = shape
def __getitem__(self, key):
if isinstance(key, tuple):
return sympify(self.mat.__getitem__(key))
# integer (flat) keys index the matrix in row-major order
return sympify(self.mat.__getitem__((key // self.cols, key % self.cols)))
def __iter__(self): # supports numpy.matrix and numpy.array
mat = self.mat
cols = self.cols
return iter(sympify(mat[r, c]) for r in range(self.rows) for c in range(cols))
class MatrixKind(Kind):
"""
Kind for all matrices in SymPy.
Basic class for this kind is ``MatrixBase`` and ``MatrixExpr``,
but any expression representing the matrix can have this.
Parameters
==========
element_kind : Kind
Kind of the element. Default is :obj:`NumberKind <sympy.core.kind.NumberKind>`,
which means that the matrix contains only numbers.
Examples
========
Any instance of a matrix class has ``MatrixKind``.
>>> from sympy import MatrixSymbol
>>> A = MatrixSymbol('A', 2,2)
>>> A.kind
MatrixKind(NumberKind)
Although an expression representing a matrix may not be an instance of
a matrix class, it will have ``MatrixKind`` as well.
>>> from sympy import Integral
>>> from sympy.matrices.expressions import MatrixExpr
>>> from sympy.abc import x
>>> intM = Integral(A, x)
>>> isinstance(intM, MatrixExpr)
False
>>> intM.kind
MatrixKind(NumberKind)
Use ``isinstance()`` to check for ``MatrixKind`` without specifying
the element kind. Use ``is`` to check with a specific element kind.
>>> from sympy import Matrix
>>> from sympy.core import NumberKind
>>> from sympy.matrices import MatrixKind
>>> M = Matrix([1, 2])
>>> isinstance(M.kind, MatrixKind)
True
>>> M.kind is MatrixKind(NumberKind)
True
See Also
========
shape : Function to return the shape of objects with ``MatrixKind``.
"""
def __new__(cls, element_kind=NumberKind):
obj = super().__new__(cls, element_kind)
obj.element_kind = element_kind
return obj
def __repr__(self):
return "MatrixKind(%s)" % self.element_kind
def _matrixify(mat):
"""If `mat` is a Matrix or is matrix-like,
return a Matrix or MatrixWrapper object. Otherwise
`mat` is passed through without modification."""
if getattr(mat, 'is_Matrix', False) or getattr(mat, 'is_MatrixLike', False):
return mat
if not(getattr(mat, 'is_Matrix', True) or getattr(mat, 'is_MatrixLike', True)):
return mat
shape = None
if hasattr(mat, 'shape'): # numpy, scipy.sparse
if len(mat.shape) == 2:
shape = mat.shape
elif hasattr(mat, 'rows') and hasattr(mat, 'cols'): # mpmath
shape = (mat.rows, mat.cols)
if shape:
return _MatrixWrapper(mat, shape)
return mat
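# Illustrative sketch (assumes NumPy is installed): a 2-D ndarray is not a
# SymPy matrix, so it is wrapped in _MatrixWrapper and can then be indexed
# by the CommonMatrix routines, e.g.
#
#     import numpy as np
#     w = _matrixify(np.arange(4).reshape(2, 2))
#     isinstance(w, _MatrixWrapper)   # True
#     w[1, 1]                         # Integer 3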
def a2idx(j, n=None):
"""Return integer after making positive and validating against n."""
if not isinstance(j, int):
jindex = getattr(j, '__index__', None)
if jindex is not None:
j = jindex()
else:
raise IndexError("Invalid index a[%r]" % (j,))
if n is not None:
if j < 0:
j += n
if not (j >= 0 and j < n):
raise IndexError("Index out of range: a[%s]" % (j,))
return int(j)
def classof(A, B):
"""
Get the type of the result when combining matrices of different types.
Currently the strategy is that immutability is contagious.
Examples
========
>>> from sympy import Matrix, ImmutableMatrix
>>> from sympy.matrices.common import classof
>>> M = Matrix([[1, 2], [3, 4]]) # a Mutable Matrix
>>> IM = ImmutableMatrix([[1, 2], [3, 4]])
>>> classof(M, IM)
<class 'sympy.matrices.immutable.ImmutableDenseMatrix'>
"""
priority_A = getattr(A, '_class_priority', None)
priority_B = getattr(B, '_class_priority', None)
if None not in (priority_A, priority_B):
if A._class_priority > B._class_priority:
return A.__class__
else:
return B.__class__
try:
import numpy
except ImportError:
pass
else:
if isinstance(A, numpy.ndarray):
return B.__class__
if isinstance(B, numpy.ndarray):
return A.__class__
raise TypeError("Incompatible classes %s, %s" % (A.__class__, B.__class__))
|
638bf9953735cd2634106259776e2347cf6738f329bf3b50cfb462f2dd640361 | import random
from sympy.core.basic import Basic
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.core.sympify import sympify
from sympy.functions.elementary.trigonometric import cos, sin
from sympy.simplify.simplify import simplify as _simplify
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import is_sequence
from .common import ShapeError
from .decompositions import _cholesky, _LDLdecomposition
from .matrices import MatrixBase
from .repmatrix import MutableRepMatrix, RepMatrix
from .solvers import _lower_triangular_solve, _upper_triangular_solve
def _iszero(x):
"""Returns True if x is zero."""
return x.is_zero
class DenseMatrix(RepMatrix):
"""Matrix implementation based on DomainMatrix as the internal representation"""
#
# DenseMatrix is a superclass for both MutableDenseMatrix and
# ImmutableDenseMatrix. Methods shared by both classes but not for the
# Sparse classes should be implemented here.
#
is_MatrixExpr = False # type: bool
_op_priority = 10.01
_class_priority = 4
@property
def _mat(self):
SymPyDeprecationWarning(
feature="The private _mat attribute of Matrix",
useinstead="the .flat() method",
issue=21715,
deprecated_since_version="1.9").warn()
return self.flat()
def _eval_inverse(self, **kwargs):
return self.inv(method=kwargs.get('method', 'GE'),
iszerofunc=kwargs.get('iszerofunc', _iszero),
try_block_diag=kwargs.get('try_block_diag', False))
def as_immutable(self):
"""Returns an Immutable version of this Matrix
"""
from .immutable import ImmutableDenseMatrix as cls
return cls._fromrep(self._rep.copy())
def as_mutable(self):
"""Returns a mutable version of this matrix
Examples
========
>>> from sympy import ImmutableMatrix
>>> X = ImmutableMatrix([[1, 2], [3, 4]])
>>> Y = X.as_mutable()
>>> Y[1, 1] = 5 # Can set values in Y
>>> Y
Matrix([
[1, 2],
[3, 5]])
"""
return Matrix(self)
def cholesky(self, hermitian=True):
return _cholesky(self, hermitian=hermitian)
def LDLdecomposition(self, hermitian=True):
return _LDLdecomposition(self, hermitian=hermitian)
def lower_triangular_solve(self, rhs):
return _lower_triangular_solve(self, rhs)
def upper_triangular_solve(self, rhs):
return _upper_triangular_solve(self, rhs)
cholesky.__doc__ = _cholesky.__doc__
LDLdecomposition.__doc__ = _LDLdecomposition.__doc__
lower_triangular_solve.__doc__ = _lower_triangular_solve.__doc__
upper_triangular_solve.__doc__ = _upper_triangular_solve.__doc__
def _force_mutable(x):
"""Return a matrix as a Matrix, otherwise return x."""
if getattr(x, 'is_Matrix', False):
return x.as_mutable()
elif isinstance(x, Basic):
return x
elif hasattr(x, '__array__'):
a = x.__array__()
if len(a.shape) == 0:
return sympify(a)
return Matrix(x)
return x
class MutableDenseMatrix(DenseMatrix, MutableRepMatrix):
def simplify(self, **kwargs):
"""Applies simplify to the elements of a matrix in place.
This is a shortcut for M.applyfunc(lambda x: simplify(x, ratio, measure))
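Examples
========
A small in-place example (note that ``simplify`` modifies ``M`` and
returns ``None``):
>>> from sympy import Matrix, sin, cos
>>> from sympy.abc import x
>>> M = Matrix([[sin(x)**2 + cos(x)**2]])
>>> M.simplify()
>>> M
Matrix([[1]])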
See Also
========
sympy.simplify.simplify.simplify
"""
for (i, j), element in self.todok().items():
self[i, j] = _simplify(element, **kwargs)
MutableMatrix = Matrix = MutableDenseMatrix
###########
# Numpy Utility Functions:
# list2numpy, matrix2numpy, symmarray, rot_axis[123]
###########
def list2numpy(l, dtype=object): # pragma: no cover
"""Converts Python list of SymPy expressions to a NumPy array.
See Also
========
matrix2numpy
"""
from numpy import empty
a = empty(len(l), dtype)
for i, s in enumerate(l):
a[i] = s
return a
def matrix2numpy(m, dtype=object): # pragma: no cover
"""Converts SymPy's matrix to a NumPy array.
See Also
========
list2numpy
"""
from numpy import empty
a = empty(m.shape, dtype)
for i in range(m.rows):
for j in range(m.cols):
a[i, j] = m[i, j]
return a
def rot_axis3(theta):
"""Returns a rotation matrix for a rotation of theta (in radians) about
the 3-axis.
Examples
========
>>> from sympy import pi
>>> from sympy.matrices import rot_axis3
A rotation of pi/3 (60 degrees):
>>> theta = pi/3
>>> rot_axis3(theta)
Matrix([
[ 1/2, sqrt(3)/2, 0],
[-sqrt(3)/2, 1/2, 0],
[ 0, 0, 1]])
If we rotate by pi/2 (90 degrees):
>>> rot_axis3(pi/2)
Matrix([
[ 0, 1, 0],
[-1, 0, 0],
[ 0, 0, 1]])
See Also
========
rot_axis1: Returns a rotation matrix for a rotation of theta (in radians)
about the 1-axis
rot_axis2: Returns a rotation matrix for a rotation of theta (in radians)
about the 2-axis
"""
ct = cos(theta)
st = sin(theta)
lil = ((ct, st, 0),
(-st, ct, 0),
(0, 0, 1))
return Matrix(lil)
def rot_axis2(theta):
"""Returns a rotation matrix for a rotation of theta (in radians) about
the 2-axis.
Examples
========
>>> from sympy import pi
>>> from sympy.matrices import rot_axis2
A rotation of pi/3 (60 degrees):
>>> theta = pi/3
>>> rot_axis2(theta)
Matrix([
[ 1/2, 0, -sqrt(3)/2],
[ 0, 1, 0],
[sqrt(3)/2, 0, 1/2]])
If we rotate by pi/2 (90 degrees):
>>> rot_axis2(pi/2)
Matrix([
[0, 0, -1],
[0, 1, 0],
[1, 0, 0]])
See Also
========
rot_axis1: Returns a rotation matrix for a rotation of theta (in radians)
about the 1-axis
rot_axis3: Returns a rotation matrix for a rotation of theta (in radians)
about the 3-axis
"""
ct = cos(theta)
st = sin(theta)
lil = ((ct, 0, -st),
(0, 1, 0),
(st, 0, ct))
return Matrix(lil)
def rot_axis1(theta):
"""Returns a rotation matrix for a rotation of theta (in radians) about
the 1-axis.
Examples
========
>>> from sympy import pi
>>> from sympy.matrices import rot_axis1
A rotation of pi/3 (60 degrees):
>>> theta = pi/3
>>> rot_axis1(theta)
Matrix([
[1, 0, 0],
[0, 1/2, sqrt(3)/2],
[0, -sqrt(3)/2, 1/2]])
If we rotate by pi/2 (90 degrees):
>>> rot_axis1(pi/2)
Matrix([
[1, 0, 0],
[0, 0, 1],
[0, -1, 0]])
See Also
========
rot_axis2: Returns a rotation matrix for a rotation of theta (in radians)
about the 2-axis
rot_axis3: Returns a rotation matrix for a rotation of theta (in radians)
about the 3-axis
"""
ct = cos(theta)
st = sin(theta)
lil = ((1, 0, 0),
(0, ct, st),
(0, -st, ct))
return Matrix(lil)
@doctest_depends_on(modules=('numpy',))
def symarray(prefix, shape, **kwargs): # pragma: no cover
r"""Create a numpy ndarray of symbols (as an object array).
The created symbols are named ``prefix_i1_i2_``... You should thus provide a
non-empty prefix if you want your symbols to be unique for different output
arrays, as SymPy symbols with identical names are the same object.
Parameters
----------
prefix : string
A prefix prepended to the name of every symbol.
shape : int or tuple
Shape of the created array. If an int, the array is one-dimensional; for
more than one dimension the shape must be a tuple.
\*\*kwargs : dict
keyword arguments passed on to Symbol
Examples
========
These doctests require numpy.
>>> from sympy import symarray
>>> symarray('', 3)
[_0 _1 _2]
If you want multiple symarrays to contain distinct symbols, you *must*
provide unique prefixes:
>>> a = symarray('', 3)
>>> b = symarray('', 3)
>>> a[0] == b[0]
True
>>> a = symarray('a', 3)
>>> b = symarray('b', 3)
>>> a[0] == b[0]
False
Creating symarrays with a prefix:
>>> symarray('a', 3)
[a_0 a_1 a_2]
For more than one dimension, the shape must be given as a tuple:
>>> symarray('a', (2, 3))
[[a_0_0 a_0_1 a_0_2]
[a_1_0 a_1_1 a_1_2]]
>>> symarray('a', (2, 3, 2))
[[[a_0_0_0 a_0_0_1]
[a_0_1_0 a_0_1_1]
[a_0_2_0 a_0_2_1]]
<BLANKLINE>
[[a_1_0_0 a_1_0_1]
[a_1_1_0 a_1_1_1]
[a_1_2_0 a_1_2_1]]]
For setting assumptions of the underlying Symbols:
>>> [s.is_real for s in symarray('a', 2, real=True)]
[True, True]
"""
from numpy import empty, ndindex
arr = empty(shape, dtype=object)
for index in ndindex(shape):
arr[index] = Symbol('%s_%s' % (prefix, '_'.join(map(str, index))),
**kwargs)
return arr
###############
# Functions
###############
def casoratian(seqs, n, zero=True):
"""Given linear difference operator L of order 'k' and homogeneous
equation Ly = 0 we want to compute kernel of L, which is a set
of 'k' sequences: a(n), b(n), ... z(n).
Solutions of L are linearly independent iff their Casoratian,
denoted as C(a, b, ..., z), does not vanish for n = 0.
Casoratian is defined by k x k determinant::
+ a(n) b(n) . . . z(n) +
| a(n+1) b(n+1) . . . z(n+1) |
| . . . . |
| . . . . |
| . . . . |
+ a(n+k-1) b(n+k-1) . . . z(n+k-1) +
It proves very useful in rsolve_hyper() where it is applied
to a generating set of a recurrence to factor out linearly
dependent solutions and return a basis:
>>> from sympy import Symbol, casoratian, factorial
>>> n = Symbol('n', integer=True)
Exponential and factorial are linearly independent:
>>> casoratian([2**n, factorial(n)], n) != 0
True
"""
seqs = list(map(sympify, seqs))
if not zero:
f = lambda i, j: seqs[j].subs(n, n + i)
else:
f = lambda i, j: seqs[j].subs(n, i)
k = len(seqs)
return Matrix(k, k, f).det()
def eye(*args, **kwargs):
"""Create square identity matrix n x n
See Also
========
diag
zeros
ones
"""
return Matrix.eye(*args, **kwargs)
def diag(*values, strict=True, unpack=False, **kwargs):
"""Returns a matrix with the provided values placed on the
diagonal. If non-square matrices are included, they will
produce a block-diagonal matrix.
Examples
========
This version of diag is a thin wrapper to Matrix.diag that differs
in that it treats all lists like matrices -- even when a single list
is given. If this is not desired, either put a `*` before the list or
set `unpack=True`.
>>> from sympy import diag
>>> diag([1, 2, 3], unpack=True) # = diag(1,2,3) or diag(*[1,2,3])
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> diag([1, 2, 3]) # a column vector
Matrix([
[1],
[2],
[3]])
See Also
========
.common.MatrixCommon.eye
.common.MatrixCommon.diagonal - to extract a diagonal
.common.MatrixCommon.diag
.expressions.blockmatrix.BlockMatrix
"""
return Matrix.diag(*values, strict=strict, unpack=unpack, **kwargs)
def GramSchmidt(vlist, orthonormal=False):
"""Apply the Gram-Schmidt process to a set of vectors.
Parameters
==========
vlist : List of Matrix
Vectors to be orthogonalized for.
orthonormal : Bool, optional
If true, return an orthonormal basis.
Returns
=======
vlist : List of Matrix
Orthogonalized vectors
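Examples
========
A minimal sketch with two vectors in the plane:
>>> from sympy import Matrix
>>> from sympy.matrices import GramSchmidt
>>> out = GramSchmidt([Matrix([3, 1]), Matrix([2, 2])])
>>> out[0]
Matrix([
[3],
[1]])
>>> out[1]
Matrix([
[-2/5],
[ 6/5]])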
Notes
=====
This routine is mostly duplicate from ``Matrix.orthogonalize``,
except for some difference that this always raises error when
linearly dependent vectors are found, and the keyword ``normalize``
has been named as ``orthonormal`` in this function.
See Also
========
.matrices.MatrixSubspaces.orthogonalize
References
==========
.. [1] https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process
"""
return MutableDenseMatrix.orthogonalize(
*vlist, normalize=orthonormal, rankcheck=True
)
def hessian(f, varlist, constraints=()):
"""Compute Hessian matrix for a function f wrt parameters in varlist
which may be given as a sequence or a row/column vector. A list of
constraints may optionally be given.
Examples
========
>>> from sympy import Function, hessian, pprint
>>> from sympy.abc import x, y
>>> f = Function('f')(x, y)
>>> g1 = Function('g')(x, y)
>>> g2 = x**2 + 3*y
>>> pprint(hessian(f, (x, y), [g1, g2]))
[ d d ]
[ 0 0 --(g(x, y)) --(g(x, y)) ]
[ dx dy ]
[ ]
[ 0 0 2*x 3 ]
[ ]
[ 2 2 ]
[d d d ]
[--(g(x, y)) 2*x ---(f(x, y)) -----(f(x, y))]
[dx 2 dy dx ]
[ dx ]
[ ]
[ 2 2 ]
[d d d ]
[--(g(x, y)) 3 -----(f(x, y)) ---(f(x, y)) ]
[dy dy dx 2 ]
[ dy ]
References
==========
.. [1] https://en.wikipedia.org/wiki/Hessian_matrix
See Also
========
sympy.matrices.matrices.MatrixCalculus.jacobian
wronskian
"""
# f is the expression representing a function f, return regular matrix
if isinstance(varlist, MatrixBase):
if 1 not in varlist.shape:
raise ShapeError("`varlist` must be a column or row vector.")
if varlist.cols == 1:
varlist = varlist.T
varlist = varlist.tolist()[0]
if is_sequence(varlist):
n = len(varlist)
if not n:
raise ShapeError("`len(varlist)` must not be zero.")
else:
raise ValueError("Improper variable list in hessian function")
if not getattr(f, 'diff'):
# check differentiability
raise ValueError("Function `f` (%s) is not differentiable" % f)
m = len(constraints)
N = m + n
out = zeros(N)
for k, g in enumerate(constraints):
if not getattr(g, 'diff'):
# check differentiability
raise ValueError("Function `f` (%s) is not differentiable" % f)
for i in range(n):
out[k, i + m] = g.diff(varlist[i])
for i in range(n):
for j in range(i, n):
out[i + m, j + m] = f.diff(varlist[i]).diff(varlist[j])
for i in range(N):
for j in range(i + 1, N):
out[j, i] = out[i, j]
return out
def jordan_cell(eigenval, n):
"""
Create a Jordan block:
Examples
========
>>> from sympy.matrices import jordan_cell
>>> from sympy.abc import x
>>> jordan_cell(x, 4)
Matrix([
[x, 1, 0, 0],
[0, x, 1, 0],
[0, 0, x, 1],
[0, 0, 0, x]])
"""
return Matrix.jordan_block(size=n, eigenvalue=eigenval)
def matrix_multiply_elementwise(A, B):
"""Return the Hadamard product (elementwise product) of A and B
>>> from sympy.matrices import matrix_multiply_elementwise
>>> from sympy.matrices import Matrix
>>> A = Matrix([[0, 1, 2], [3, 4, 5]])
>>> B = Matrix([[1, 10, 100], [100, 10, 1]])
>>> matrix_multiply_elementwise(A, B)
Matrix([
[ 0, 10, 200],
[300, 40, 5]])
See Also
========
sympy.matrices.common.MatrixCommon.__mul__
"""
return A.multiply_elementwise(B)
def ones(*args, **kwargs):
"""Returns a matrix of ones with ``rows`` rows and ``cols`` columns;
if ``cols`` is omitted a square matrix will be returned.
See Also
========
zeros
eye
diag
"""
if 'c' in kwargs:
kwargs['cols'] = kwargs.pop('c')
return Matrix.ones(*args, **kwargs)
def randMatrix(r, c=None, min=0, max=99, seed=None, symmetric=False,
percent=100, prng=None):
"""Create random matrix with dimensions ``r`` x ``c``. If ``c`` is omitted
the matrix will be square. If ``symmetric`` is True the matrix must be
square. If ``percent`` is less than 100 then only approximately the given
percentage of elements will be non-zero.
The pseudo-random number generator used to generate matrix is chosen in the
following way.
* If ``prng`` is supplied, it will be used as random number generator.
It should be an instance of ``random.Random``, or at least have
``randint`` and ``shuffle`` methods with same signatures.
* if ``prng`` is not supplied but ``seed`` is supplied, then new
``random.Random`` with given ``seed`` will be created;
* otherwise, a new ``random.Random`` with default seed will be used.
Examples
========
>>> from sympy.matrices import randMatrix
>>> randMatrix(3) # doctest:+SKIP
[25, 45, 27]
[44, 54, 9]
[23, 96, 46]
>>> randMatrix(3, 2) # doctest:+SKIP
[87, 29]
[23, 37]
[90, 26]
>>> randMatrix(3, 3, 0, 2) # doctest:+SKIP
[0, 2, 0]
[2, 0, 1]
[0, 0, 1]
>>> randMatrix(3, symmetric=True) # doctest:+SKIP
[85, 26, 29]
[26, 71, 43]
[29, 43, 57]
>>> A = randMatrix(3, seed=1)
>>> B = randMatrix(3, seed=2)
>>> A == B
False
>>> A == randMatrix(3, seed=1)
True
>>> randMatrix(3, symmetric=True, percent=50) # doctest:+SKIP
[77, 70, 0],
[70, 0, 0],
[ 0, 0, 88]
"""
# Note that ``Random()`` is equivalent to ``Random(None)``
prng = prng or random.Random(seed)
if c is None:
c = r
if symmetric and r != c:
raise ValueError('For symmetric matrices, r must equal c, but %i != %i' % (r, c))
ij = range(r * c)
if percent != 100:
ij = prng.sample(ij, int(len(ij)*percent // 100))
m = zeros(r, c)
if not symmetric:
for ijk in ij:
i, j = divmod(ijk, c)
m[i, j] = prng.randint(min, max)
else:
for ijk in ij:
i, j = divmod(ijk, c)
if i <= j:
m[i, j] = m[j, i] = prng.randint(min, max)
return m
def wronskian(functions, var, method='bareiss'):
"""
Compute the Wronskian of a list of functions
::
| f1 f2 ... fn |
| f1' f2' ... fn' |
| . . . . |
W(f1, ..., fn) = | . . . . |
| . . . . |
| (n) (n) (n) |
| D (f1) D (f2) ... D (fn) |
see: https://en.wikipedia.org/wiki/Wronskian
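Examples
========
A minimal example with two simple functions of ``x``:
>>> from sympy import wronskian
>>> from sympy.abc import x
>>> wronskian([x, x**2], x)
x**2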
See Also
========
sympy.matrices.matrices.MatrixCalculus.jacobian
hessian
"""
for index in range(0, len(functions)):
functions[index] = sympify(functions[index])
n = len(functions)
if n == 0:
return S.One
W = Matrix(n, n, lambda i, j: functions[i].diff(var, j))
return W.det(method)
def zeros(*args, **kwargs):
"""Returns a matrix of zeros with ``rows`` rows and ``cols`` columns;
if ``cols`` is omitted a square matrix will be returned.
See Also
========
ones
eye
diag
"""
if 'c' in kwargs:
kwargs['cols'] = kwargs.pop('c')
return Matrix.zeros(*args, **kwargs)
|
519c13dbd5f44434516d08992683996e854e451ef411169ee58061bb83c21abe | from sympy.core.function import expand_mul
from sympy.core.symbol import Dummy, uniquely_named_symbol, symbols
from sympy.utilities.iterables import numbered_symbols
from .common import ShapeError, NonSquareMatrixError, NonInvertibleMatrixError
from .eigen import _fuzzy_positive_definite
from .utilities import _get_intermediate_simp, _iszero
def _diagonal_solve(M, rhs):
"""Solves ``Ax = B`` efficiently, where A is a diagonal Matrix,
with non-zero diagonal entries.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> A = eye(2)*2
>>> B = Matrix([[1, 2], [3, 4]])
>>> A.diagonal_solve(B) == B/2
True
See Also
========
sympy.matrices.dense.DenseMatrix.lower_triangular_solve
sympy.matrices.dense.DenseMatrix.upper_triangular_solve
gauss_jordan_solve
cholesky_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
if not M.is_diagonal():
raise TypeError("Matrix should be diagonal")
if rhs.rows != M.rows:
raise TypeError("Size mis-match")
return M._new(
rhs.rows, rhs.cols, lambda i, j: rhs[i, j] / M[i, i])
def _lower_triangular_solve(M, rhs):
"""Solves ``Ax = B``, where A is a lower triangular matrix.
See Also
========
upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
from .dense import MutableDenseMatrix
if not M.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if rhs.rows != M.rows:
raise ShapeError("Matrices size mismatch.")
if not M.is_lower:
raise ValueError("Matrix must be lower triangular.")
dps = _get_intermediate_simp()
X = MutableDenseMatrix.zeros(M.rows, rhs.cols)
for j in range(rhs.cols):
for i in range(M.rows):
if M[i, i] == 0:
raise TypeError("Matrix must be non-singular.")
X[i, j] = dps((rhs[i, j] - sum(M[i, k]*X[k, j]
for k in range(i))) / M[i, i])
return M._new(X)
def _lower_triangular_solve_sparse(M, rhs):
"""Solves ``Ax = B``, where A is a lower triangular matrix.
See Also
========
upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
if not M.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if rhs.rows != M.rows:
raise ShapeError("Matrices size mismatch.")
if not M.is_lower:
raise ValueError("Matrix must be lower triangular.")
dps = _get_intermediate_simp()
rows = [[] for i in range(M.rows)]
for i, j, v in M.row_list():
if i > j:
rows[i].append((j, v))
X = rhs.as_mutable()
for j in range(rhs.cols):
for i in range(rhs.rows):
for u, v in rows[i]:
X[i, j] -= v*X[u, j]
X[i, j] = dps(X[i, j] / M[i, i])
return M._new(X)
def _upper_triangular_solve(M, rhs):
"""Solves ``Ax = B``, where A is an upper triangular matrix.
See Also
========
lower_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
from .dense import MutableDenseMatrix
if not M.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if rhs.rows != M.rows:
raise ShapeError("Matrix size mismatch.")
if not M.is_upper:
raise TypeError("Matrix is not upper triangular.")
dps = _get_intermediate_simp()
X = MutableDenseMatrix.zeros(M.rows, rhs.cols)
for j in range(rhs.cols):
for i in reversed(range(M.rows)):
if M[i, i] == 0:
raise ValueError("Matrix must be non-singular.")
X[i, j] = dps((rhs[i, j] - sum(M[i, k]*X[k, j]
for k in range(i + 1, M.rows))) / M[i, i])
return M._new(X)
def _upper_triangular_solve_sparse(M, rhs):
"""Solves ``Ax = B``, where A is an upper triangular matrix.
See Also
========
lower_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
if not M.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if rhs.rows != M.rows:
raise ShapeError("Matrix size mismatch.")
if not M.is_upper:
raise TypeError("Matrix is not upper triangular.")
dps = _get_intermediate_simp()
rows = [[] for i in range(M.rows)]
for i, j, v in M.row_list():
if i < j:
rows[i].append((j, v))
X = rhs.as_mutable()
for j in range(rhs.cols):
for i in reversed(range(rhs.rows)):
for u, v in reversed(rows[i]):
X[i, j] -= v*X[u, j]
X[i, j] = dps(X[i, j] / M[i, i])
return M._new(X)
def _cholesky_solve(M, rhs):
"""Solves ``Ax = B`` using Cholesky decomposition,
for a general square non-singular matrix.
For a non-square matrix with rows > cols,
the least squares solution is returned.
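Examples
========
A minimal example with a symmetric positive definite matrix:
>>> from sympy import Matrix
>>> A = Matrix([[2, 0], [0, 2]])
>>> B = Matrix([2, 4])
>>> A.cholesky_solve(B)
Matrix([
[1],
[2]])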
See Also
========
sympy.matrices.dense.DenseMatrix.lower_triangular_solve
sympy.matrices.dense.DenseMatrix.upper_triangular_solve
gauss_jordan_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
if M.rows < M.cols:
raise NotImplementedError(
'Under-determined System. Try M.gauss_jordan_solve(rhs)')
hermitian = True
reform = False
if M.is_symmetric():
hermitian = False
elif not M.is_hermitian:
reform = True
if reform or _fuzzy_positive_definite(M) is False:
H = M.H
M = H.multiply(M)
rhs = H.multiply(rhs)
hermitian = not M.is_symmetric()
L = M.cholesky(hermitian=hermitian)
Y = L.lower_triangular_solve(rhs)
if hermitian:
return (L.H).upper_triangular_solve(Y)
else:
return (L.T).upper_triangular_solve(Y)
def _LDLsolve(M, rhs):
"""Solves ``Ax = B`` using LDL decomposition,
for a general square and non-singular matrix.
For a non-square matrix with rows > cols,
the least squares solution is returned.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> A = eye(2)*2
>>> B = Matrix([[1, 2], [3, 4]])
>>> A.LDLsolve(B) == B/2
True
See Also
========
sympy.matrices.dense.DenseMatrix.LDLdecomposition
sympy.matrices.dense.DenseMatrix.lower_triangular_solve
sympy.matrices.dense.DenseMatrix.upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LUsolve
QRsolve
pinv_solve
"""
if M.rows < M.cols:
raise NotImplementedError(
'Under-determined System. Try M.gauss_jordan_solve(rhs)')
hermitian = True
reform = False
if M.is_symmetric():
hermitian = False
elif not M.is_hermitian:
reform = True
if reform or _fuzzy_positive_definite(M) is False:
H = M.H
M = H.multiply(M)
rhs = H.multiply(rhs)
hermitian = not M.is_symmetric()
L, D = M.LDLdecomposition(hermitian=hermitian)
Y = L.lower_triangular_solve(rhs)
Z = D.diagonal_solve(Y)
if hermitian:
return (L.H).upper_triangular_solve(Z)
else:
return (L.T).upper_triangular_solve(Z)
def _LUsolve(M, rhs, iszerofunc=_iszero):
"""Solve the linear system ``Ax = rhs`` for ``x`` where ``A = M``.
This is for symbolic matrices, for real or complex ones use
mpmath.lu_solve or mpmath.qr_solve.
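Examples
========
A minimal example for a 2 x 2 system:
>>> from sympy import Matrix
>>> A = Matrix([[2, 1], [1, 3]])
>>> b = Matrix([3, 4])
>>> A.LUsolve(b)
Matrix([
[1],
[1]])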
See Also
========
sympy.matrices.dense.DenseMatrix.lower_triangular_solve
sympy.matrices.dense.DenseMatrix.upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
QRsolve
pinv_solve
LUdecomposition
"""
if rhs.rows != M.rows:
raise ShapeError(
"``M`` and ``rhs`` must have the same number of rows.")
m = M.rows
n = M.cols
if m < n:
raise NotImplementedError("Underdetermined systems not supported.")
try:
A, perm = M.LUdecomposition_Simple(
iszerofunc=_iszero, rankcheck=True)
except ValueError:
raise NonInvertibleMatrixError("Matrix det == 0; not invertible.")
dps = _get_intermediate_simp()
b = rhs.permute_rows(perm).as_mutable()
# forward substitution, all diag entries are scaled to 1
for i in range(m):
for j in range(min(i, n)):
scale = A[i, j]
b.zip_row_op(i, j, lambda x, y: dps(x - y * scale))
# consistency check for overdetermined systems
if m > n:
for i in range(n, m):
for j in range(b.cols):
if not iszerofunc(b[i, j]):
raise ValueError("The system is inconsistent.")
b = b[0:n, :] # truncate zero rows if consistent
# backward substitution
for i in range(n - 1, -1, -1):
for j in range(i + 1, n):
scale = A[i, j]
b.zip_row_op(i, j, lambda x, y: dps(x - y * scale))
scale = A[i, i]
b.row_op(i, lambda x, _: dps(x / scale))
return rhs.__class__(b)
def _QRsolve(M, b):
"""Solve the linear system ``Ax = b``.
``M`` is the matrix ``A``, the method argument is the vector
``b``. The method returns the solution vector ``x``. If ``b`` is a
matrix, the system is solved for each column of ``b`` and the
return value is a matrix of the same shape as ``b``.
This method is slower (approximately by a factor of 2) but
more stable for floating-point arithmetic than the LUsolve method.
However, LUsolve usually uses exact arithmetic, so you do not need
to use QRsolve.
This is mainly for educational purposes and symbolic matrices, for real
(or complex) matrices use mpmath.qr_solve.
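Examples
========
A minimal example for a 2 x 2 system:
>>> from sympy import Matrix
>>> A = Matrix([[1, 1], [0, 1]])
>>> b = Matrix([3, 1])
>>> A.QRsolve(b)
Matrix([
[2],
[1]])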
See Also
========
sympy.matrices.dense.DenseMatrix.lower_triangular_solve
sympy.matrices.dense.DenseMatrix.upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
pinv_solve
QRdecomposition
"""
dps = _get_intermediate_simp(expand_mul, expand_mul)
Q, R = M.QRdecomposition()
y = Q.T * b
# back substitution to solve R*x = y:
# We build up the result "backwards" in the vector 'x' and reverse it
# only in the end.
x = []
n = R.rows
for j in range(n - 1, -1, -1):
tmp = y[j, :]
for k in range(j + 1, n):
tmp -= R[j, k] * x[n - 1 - k]
tmp = dps(tmp)
x.append(tmp / R[j, j])
return M.vstack(*x[::-1])
def _gauss_jordan_solve(M, B, freevar=False):
"""
Solves ``Ax = B`` using Gauss Jordan elimination.
There may be zero, one, or infinitely many solutions. If one solution
exists, it will be returned. If infinitely many solutions exist, they
will be returned parametrically. If no solutions exist, a
``ValueError`` will be raised.
Parameters
==========
B : Matrix
The right hand side of the equation to be solved for. Must have
the same number of rows as matrix A.
freevar : boolean, optional
Flag; when set to `True`, the indices of the free variables in the
solutions (column Matrix) are also returned for a system that is
underdetermined (e.g. A has more columns than rows), for which
infinitely many solutions are possible in terms of arbitrary
values of the free variables. Default is `False`.
Returns
=======
x : Matrix
The matrix that will satisfy ``Ax = B``. Will have as many rows as
matrix A has columns, and as many columns as matrix B.
params : Matrix
If the system is underdetermined (e.g. A has more columns than
rows), infinite solutions are possible, in terms of arbitrary
parameters. These arbitrary parameters are returned as params
Matrix.
free_var_index : List, optional
If the system is underdetermined (e.g. A has more columns than
rows), infinite solutions are possible, in terms of arbitrary
values of free variables. Then the indices of the free variables
in the solutions (column Matrix) are returned by free_var_index,
if the flag `freevar` is set to `True`.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix([[1, 2, 1, 1], [1, 2, 2, -1], [2, 4, 0, 6]])
>>> B = Matrix([7, 12, 4])
>>> sol, params = A.gauss_jordan_solve(B)
>>> sol
Matrix([
[-2*tau0 - 3*tau1 + 2],
[ tau0],
[ 2*tau1 + 5],
[ tau1]])
>>> params
Matrix([
[tau0],
[tau1]])
>>> taus_zeroes = { tau:0 for tau in params }
>>> sol_unique = sol.xreplace(taus_zeroes)
>>> sol_unique
Matrix([
[2],
[0],
[5],
[0]])
>>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
>>> B = Matrix([3, 6, 9])
>>> sol, params = A.gauss_jordan_solve(B)
>>> sol
Matrix([
[-1],
[ 2],
[ 0]])
>>> params
Matrix(0, 1, [])
>>> A = Matrix([[2, -7], [-1, 4]])
>>> B = Matrix([[-21, 3], [12, -2]])
>>> sol, params = A.gauss_jordan_solve(B)
>>> sol
Matrix([
[0, -2],
[3, -1]])
>>> params
Matrix(0, 2, [])
>>> from sympy import Matrix
>>> A = Matrix([[1, 2, 1, 1], [1, 2, 2, -1], [2, 4, 0, 6]])
>>> B = Matrix([7, 12, 4])
>>> sol, params, freevars = A.gauss_jordan_solve(B, freevar=True)
>>> sol
Matrix([
[-2*tau0 - 3*tau1 + 2],
[ tau0],
[ 2*tau1 + 5],
[ tau1]])
>>> params
Matrix([
[tau0],
[tau1]])
>>> freevars
[1, 3]
See Also
========
sympy.matrices.dense.DenseMatrix.lower_triangular_solve
sympy.matrices.dense.DenseMatrix.upper_triangular_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv
References
==========
.. [1] https://en.wikipedia.org/wiki/Gaussian_elimination
"""
from sympy.matrices import Matrix, zeros
cls = M.__class__
aug = M.hstack(M.copy(), B.copy())
B_cols = B.cols
row, col = aug[:, :-B_cols].shape
# solve by reduced row echelon form
A, pivots = aug.rref(simplify=True)
A, v = A[:, :-B_cols], A[:, -B_cols:]
pivots = list(filter(lambda p: p < col, pivots))
rank = len(pivots)
# Get index of free symbols (free parameters)
# non-pivots columns are free variables
free_var_index = [c for c in range(A.cols) if c not in pivots]
# Bring to block form
permutation = Matrix(pivots + free_var_index).T
# check for existence of solutions
# rank of aug Matrix should be equal to rank of coefficient matrix
if not v[rank:, :].is_zero_matrix:
raise ValueError("Linear system has no solution")
# Free parameters
# what are current unnumbered free symbol names?
name = uniquely_named_symbol('tau', aug,
compare=lambda i: str(i).rstrip('1234567890'),
modify=lambda s: '_' + s).name
gen = numbered_symbols(name)
tau = Matrix([next(gen) for k in range((col - rank)*B_cols)]).reshape(
col - rank, B_cols)
# Full parametric solution
V = A[:rank, free_var_index]
vt = v[:rank, :]
free_sol = tau.vstack(vt - V * tau, tau)
# Undo permutation
sol = zeros(col, B_cols)
for k in range(col):
sol[permutation[k], :] = free_sol[k,:]
sol, tau = cls(sol), cls(tau)
if freevar:
return sol, tau, free_var_index
else:
return sol, tau
def _pinv_solve(M, B, arbitrary_matrix=None):
"""Solve ``Ax = B`` using the Moore-Penrose pseudoinverse.
There may be zero, one, or infinite solutions. If one solution
exists, it will be returned. If infinite solutions exist, one will
be returned based on the value of arbitrary_matrix. If no solutions
exist, the least-squares solution is returned.
Parameters
==========
B : Matrix
The right hand side of the equation to be solved for. Must have
the same number of rows as matrix A.
arbitrary_matrix : Matrix
If the system is underdetermined (e.g. A has more columns than
rows), infinite solutions are possible, in terms of an arbitrary
matrix. This parameter may be set to a specific matrix to use
for that purpose; if so, it must be the same shape as x, with as
many rows as matrix A has columns, and as many columns as matrix
B. If left as None, an appropriate matrix containing dummy
symbols in the form of ``wn_m`` will be used, with n and m being
row and column position of each symbol.
Returns
=======
x : Matrix
The matrix that will satisfy ``Ax = B``. Will have as many rows as
matrix A has columns, and as many columns as matrix B.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix([[1, 2, 3], [4, 5, 6]])
>>> B = Matrix([7, 8])
>>> A.pinv_solve(B)
Matrix([
[ _w0_0/6 - _w1_0/3 + _w2_0/6 - 55/18],
[-_w0_0/3 + 2*_w1_0/3 - _w2_0/3 + 1/9],
[ _w0_0/6 - _w1_0/3 + _w2_0/6 + 59/18]])
>>> A.pinv_solve(B, arbitrary_matrix=Matrix([0, 0, 0]))
Matrix([
[-55/18],
[ 1/9],
[ 59/18]])
See Also
========
sympy.matrices.dense.DenseMatrix.lower_triangular_solve
sympy.matrices.dense.DenseMatrix.upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv
Notes
=====
This may return either exact solutions or least squares solutions.
To determine which, check ``A * A.pinv() * B == B``. It will be
True if exact solutions exist, and False if only a least-squares
solution exists. Be aware that the left hand side of that equation
may need to be simplified to correctly compare to the right hand
side.
References
==========
.. [1] https://en.wikipedia.org/wiki/Moore-Penrose_pseudoinverse#Obtaining_all_solutions_of_a_linear_system
"""
from sympy.matrices import eye
A = M
A_pinv = M.pinv()
if arbitrary_matrix is None:
rows, cols = A.cols, B.cols
w = symbols('w:{}_:{}'.format(rows, cols), cls=Dummy)
arbitrary_matrix = M.__class__(cols, rows, w).T
return A_pinv.multiply(B) + (eye(A.cols) -
A_pinv.multiply(A)).multiply(arbitrary_matrix)
def _solve(M, rhs, method='GJ'):
"""Solves linear equation where the unique solution exists.
Parameters
==========
rhs : Matrix
Vector representing the right hand side of the linear equation.
method : string, optional
If set to ``'GJ'`` or ``'GE'``, the Gauss-Jordan elimination will be
used, which is implemented in the routine ``gauss_jordan_solve``.
If set to ``'LU'``, ``LUsolve`` routine will be used.
If set to ``'QR'``, ``QRsolve`` routine will be used.
If set to ``'PINV'``, ``pinv_solve`` routine will be used.
It also supports the methods available for special linear systems
For positive definite systems:
If set to ``'CH'``, ``cholesky_solve`` routine will be used.
If set to ``'LDL'``, ``LDLsolve`` routine will be used.
To use a different method and to compute the solution via the
inverse, use a method defined in the .inv() docstring.
Returns
=======
solutions : Matrix
Vector representing the solution.
Raises
======
ValueError
If there is not a unique solution then a ``ValueError`` will be
raised.
If ``M`` is not square, a ``ValueError`` is raised and a different
routine for solving the system is suggested.
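Examples
========
A minimal example with the default Gauss-Jordan method:
>>> from sympy import Matrix
>>> A = Matrix([[1, 2], [3, 4]])
>>> b = Matrix([5, 6])
>>> A.solve(b)
Matrix([
[ -4],
[9/2]])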
"""
if method in ('GJ', 'GE'):
try:
soln, param = M.gauss_jordan_solve(rhs)
if param:
raise NonInvertibleMatrixError("Matrix det == 0; not invertible. "
"Try ``M.gauss_jordan_solve(rhs)`` to obtain a parametric solution.")
except ValueError:
raise NonInvertibleMatrixError("Matrix det == 0; not invertible.")
return soln
elif method == 'LU':
return M.LUsolve(rhs)
elif method == 'CH':
return M.cholesky_solve(rhs)
elif method == 'QR':
return M.QRsolve(rhs)
elif method == 'LDL':
return M.LDLsolve(rhs)
elif method == 'PINV':
return M.pinv_solve(rhs)
else:
return M.inv(method=method).multiply(rhs)
def _solve_least_squares(M, rhs, method='CH'):
"""Return the least-square fit to the data.
Parameters
==========
rhs : Matrix
Vector representing the right hand side of the linear equation.
method : string or boolean, optional
If set to ``'CH'``, ``cholesky_solve`` routine will be used.
If set to ``'LDL'``, ``LDLsolve`` routine will be used.
If set to ``'QR'``, ``QRsolve`` routine will be used.
If set to ``'PINV'``, ``pinv_solve`` routine will be used.
Otherwise, the conjugate of ``M`` will be used to create a system
of equations that is passed to ``solve`` along with the hint
defined by ``method``.
Returns
=======
solutions : Matrix
Vector representing the solution.
Examples
========
>>> from sympy.matrices import Matrix, ones
>>> A = Matrix([1, 2, 3])
>>> B = Matrix([2, 3, 4])
>>> S = Matrix(A.row_join(B))
>>> S
Matrix([
[1, 2],
[2, 3],
[3, 4]])
If each line of S represents the coefficients of Ax + By
and x and y are [2, 3] then S*xy is:
>>> r = S*Matrix([2, 3]); r
Matrix([
[ 8],
[13],
[18]])
But let's add 1 to the middle value and then solve for the
least-squares value of xy:
>>> xy = S.solve_least_squares(Matrix([8, 14, 18])); xy
Matrix([
[ 5/3],
[10/3]])
The error is given by S*xy - r:
>>> S*xy - r
Matrix([
[1/3],
[1/3],
[1/3]])
>>> _.norm().n(2)
0.58
If a different xy is used, the norm will be higher:
>>> xy += ones(2, 1)/10
>>> (S*xy - r).norm().n(2)
1.5
"""
if method == 'CH':
return M.cholesky_solve(rhs)
elif method == 'QR':
return M.QRsolve(rhs)
elif method == 'LDL':
return M.LDLsolve(rhs)
elif method == 'PINV':
return M.pinv_solve(rhs)
else:
t = M.H
return (t * M).solve(t * rhs, method=method)
|
d7e755df841be6569ebb1c9edceab4307ca62a9b10dc6802fbec15eb07c62b56 | from collections.abc import Callable
from sympy.core.containers import Dict
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import is_sequence
from sympy.utilities.misc import as_int
from .matrices import MatrixBase
from .repmatrix import MutableRepMatrix, RepMatrix
from .utilities import _iszero
from .decompositions import (
_liupc, _row_structure_symbolic_cholesky, _cholesky_sparse,
_LDLdecomposition_sparse)
from .solvers import (
_lower_triangular_solve_sparse, _upper_triangular_solve_sparse)
class SparseRepMatrix(RepMatrix):
"""
A sparse matrix (a matrix with a large number of zero elements).
Examples
========
>>> from sympy.matrices import SparseMatrix, ones
>>> SparseMatrix(2, 2, range(4))
Matrix([
[0, 1],
[2, 3]])
>>> SparseMatrix(2, 2, {(1, 1): 2})
Matrix([
[0, 0],
[0, 2]])
A SparseMatrix can be instantiated from a ragged list of lists:
>>> SparseMatrix([[1, 2, 3], [1, 2], [1]])
Matrix([
[1, 2, 3],
[1, 2, 0],
[1, 0, 0]])
For safety, one may include the expected size and then an error
will be raised if the indices of any element are out of range or
(for a flat list) if the total number of elements does not match
the expected shape:
>>> SparseMatrix(2, 2, [1, 2])
Traceback (most recent call last):
...
ValueError: List length (2) != rows*columns (4)
Here, an error is not raised because the list is not flat and no
element is out of range:
>>> SparseMatrix(2, 2, [[1, 2]])
Matrix([
[1, 2],
[0, 0]])
But adding another element to the first (and only) row will cause
an error to be raised:
>>> SparseMatrix(2, 2, [[1, 2, 3]])
Traceback (most recent call last):
...
ValueError: The location (0, 2) is out of designated range: (1, 1)
To autosize the matrix, pass None for rows:
>>> SparseMatrix(None, [[1, 2, 3]])
Matrix([[1, 2, 3]])
>>> SparseMatrix(None, {(1, 1): 1, (3, 3): 3})
Matrix([
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 3]])
Values that are themselves a Matrix are automatically expanded:
>>> SparseMatrix(4, 4, {(1, 1): ones(2)})
Matrix([
[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]])
A ValueError is raised if the expanding matrix tries to overwrite
a different element already present:
>>> SparseMatrix(3, 3, {(0, 0): ones(2), (1, 1): 2})
Traceback (most recent call last):
...
ValueError: collision at (1, 1)
See Also
========
DenseMatrix
MutableSparseMatrix
ImmutableSparseMatrix
"""
@classmethod
def _handle_creation_inputs(cls, *args, **kwargs):
if len(args) == 1 and isinstance(args[0], MatrixBase):
rows = args[0].rows
cols = args[0].cols
smat = args[0].todok()
return rows, cols, smat
smat = {}
# autosizing
if len(args) == 2 and args[0] is None:
args = [None, None, args[1]]
if len(args) == 3:
r, c = args[:2]
if r is c is None:
rows = cols = None
elif None in (r, c):
raise ValueError(
'Pass rows=None and no cols for autosizing.')
else:
rows, cols = as_int(args[0]), as_int(args[1])
if isinstance(args[2], Callable):
op = args[2]
if None in (rows, cols):
raise ValueError(
"{} and {} must be integers for this "
"specification.".format(rows, cols))
row_indices = [cls._sympify(i) for i in range(rows)]
col_indices = [cls._sympify(j) for j in range(cols)]
for i in row_indices:
for j in col_indices:
value = cls._sympify(op(i, j))
if value != cls.zero:
smat[i, j] = value
return rows, cols, smat
elif isinstance(args[2], (dict, Dict)):
def update(i, j, v):
# update smat and make sure there are no collisions
if v:
if (i, j) in smat and v != smat[i, j]:
raise ValueError(
"There is a collision at {} for {} and {}."
.format((i, j), v, smat[i, j])
)
smat[i, j] = v
# manual copy, copy.deepcopy() doesn't work
for (r, c), v in args[2].items():
if isinstance(v, MatrixBase):
for (i, j), vv in v.todok().items():
update(r + i, c + j, vv)
elif isinstance(v, (list, tuple)):
_, _, smat = cls._handle_creation_inputs(v, **kwargs)
for i, j in smat:
update(r + i, c + j, smat[i, j])
else:
v = cls._sympify(v)
update(r, c, cls._sympify(v))
elif is_sequence(args[2]):
flat = not any(is_sequence(i) for i in args[2])
if not flat:
_, _, smat = \
cls._handle_creation_inputs(args[2], **kwargs)
else:
flat_list = args[2]
if len(flat_list) != rows * cols:
raise ValueError(
"The length of the flat list ({}) does not "
"match the specified size ({} * {})."
.format(len(flat_list), rows, cols)
)
for i in range(rows):
for j in range(cols):
value = flat_list[i*cols + j]
value = cls._sympify(value)
if value != cls.zero:
smat[i, j] = value
if rows is None: # autosizing
keys = smat.keys()
rows = max([r for r, _ in keys]) + 1 if keys else 0
cols = max([c for _, c in keys]) + 1 if keys else 0
else:
for i, j in smat.keys():
if i and i >= rows or j and j >= cols:
raise ValueError(
"The location {} is out of the designated range"
"[{}, {}]x[{}, {}]"
.format((i, j), 0, rows - 1, 0, cols - 1)
)
return rows, cols, smat
elif len(args) == 1 and isinstance(args[0], (list, tuple)):
# list of values or lists
v = args[0]
c = 0
for i, row in enumerate(v):
if not isinstance(row, (list, tuple)):
row = [row]
for j, vv in enumerate(row):
if vv != cls.zero:
smat[i, j] = cls._sympify(vv)
c = max(c, len(row))
rows = len(v) if c else 0
cols = c
return rows, cols, smat
else:
# handle full matrix forms with _handle_creation_inputs
rows, cols, mat = super()._handle_creation_inputs(*args)
for i in range(rows):
for j in range(cols):
value = mat[cols*i + j]
if value != cls.zero:
smat[i, j] = value
return rows, cols, smat
@property
def _smat(self):
SymPyDeprecationWarning(
feature="The private _smat attribute of SparseMatrix",
useinstead="the .todok() method",
issue=21715,
deprecated_since_version="1.9").warn()
return self.todok()
def _eval_inverse(self, **kwargs):
return self.inv(method=kwargs.get('method', 'LDL'),
iszerofunc=kwargs.get('iszerofunc', _iszero),
try_block_diag=kwargs.get('try_block_diag', False))
def applyfunc(self, f):
"""Apply a function to each element of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> m = SparseMatrix(2, 2, lambda i, j: i*2+j)
>>> m
Matrix([
[0, 1],
[2, 3]])
>>> m.applyfunc(lambda i: 2*i)
Matrix([
[0, 2],
[4, 6]])
"""
if not callable(f):
raise TypeError("`f` must be callable.")
# XXX: This only applies the function to the nonzero elements of the
# matrix so is inconsistent with DenseMatrix.applyfunc e.g.
# zeros(2, 2).applyfunc(lambda x: x + 1)
dok = {}
for k, v in self.todok().items():
fv = f(v)
if fv != 0:
dok[k] = fv
return self._new(self.rows, self.cols, dok)
def as_immutable(self):
"""Returns an Immutable version of this Matrix."""
from .immutable import ImmutableSparseMatrix
return ImmutableSparseMatrix(self)
def as_mutable(self):
"""Returns a mutable version of this matrix.
Examples
========
>>> from sympy import ImmutableMatrix
>>> X = ImmutableMatrix([[1, 2], [3, 4]])
>>> Y = X.as_mutable()
>>> Y[1, 1] = 5 # Can set values in Y
>>> Y
Matrix([
[1, 2],
[3, 5]])
"""
return MutableSparseMatrix(self)
def col_list(self):
"""Returns a column-sorted list of non-zero elements of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a=SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.CL
[(0, 0, 1), (1, 0, 3), (0, 1, 2), (1, 1, 4)]
See Also
========
sympy.matrices.sparse.SparseMatrix.row_list
"""
return [tuple(k + (self[k],)) for k in sorted(list(self.todok().keys()), key=lambda k: list(reversed(k)))]
def nnz(self):
"""Returns the number of non-zero elements in Matrix."""
return len(self.todok())
def row_list(self):
"""Returns a row-sorted list of non-zero elements of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.RL
[(0, 0, 1), (0, 1, 2), (1, 0, 3), (1, 1, 4)]
See Also
========
sympy.matrices.sparse.SparseMatrix.col_list
"""
return [tuple(k + (self[k],)) for k in
sorted(self.todok().keys(), key=list)]
def scalar_multiply(self, scalar):
"Scalar element-wise multiplication"
return scalar * self
def solve_least_squares(self, rhs, method='LDL'):
"""Return the least-square fit to the data.
By default the LDL decomposition is used to invert the normal
equations (method='LDL'); other methods of matrix inversion can be
used. To find out which are available, see the docstring of the
.inv() method.
Examples
========
>>> from sympy.matrices import SparseMatrix, Matrix, ones
>>> A = Matrix([1, 2, 3])
>>> B = Matrix([2, 3, 4])
>>> S = SparseMatrix(A.row_join(B))
>>> S
Matrix([
[1, 2],
[2, 3],
[3, 4]])
If each line of S represents the coefficients of Ax + By
and x and y are [2, 3] then S*xy is:
>>> r = S*Matrix([2, 3]); r
Matrix([
[ 8],
[13],
[18]])
But let's add 1 to the middle value and then solve for the
least-squares value of xy:
>>> xy = S.solve_least_squares(Matrix([8, 14, 18])); xy
Matrix([
[ 5/3],
[10/3]])
The error is given by S*xy - r:
>>> S*xy - r
Matrix([
[1/3],
[1/3],
[1/3]])
>>> _.norm().n(2)
0.58
If a different xy is used, the norm will be higher:
>>> xy += ones(2, 1)/10
>>> (S*xy - r).norm().n(2)
1.5
"""
t = self.T
return (t*self).inv(method=method)*t*rhs
def solve(self, rhs, method='LDL'):
"""Return solution to self*soln = rhs using given inversion method.
For a list of possible inversion methods, see the .inv() docstring.
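Examples
========
A minimal example with a diagonal sparse matrix:
>>> from sympy import Matrix
>>> from sympy.matrices import SparseMatrix
>>> A = SparseMatrix([[2, 0], [0, 4]])
>>> A.solve(Matrix([2, 4]))
Matrix([
[1],
[1]])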
"""
if not self.is_square:
if self.rows < self.cols:
raise ValueError('Under-determined system.')
elif self.rows > self.cols:
raise ValueError('For over-determined system, M, having '
'more rows than columns, try M.solve_least_squares(rhs).')
else:
return self.inv(method=method).multiply(rhs)
RL = property(row_list, None, None, "Alternate faster representation")
CL = property(col_list, None, None, "Alternate faster representation")
def liupc(self):
return _liupc(self)
def row_structure_symbolic_cholesky(self):
return _row_structure_symbolic_cholesky(self)
def cholesky(self, hermitian=True):
return _cholesky_sparse(self, hermitian=hermitian)
def LDLdecomposition(self, hermitian=True):
return _LDLdecomposition_sparse(self, hermitian=hermitian)
def lower_triangular_solve(self, rhs):
return _lower_triangular_solve_sparse(self, rhs)
def upper_triangular_solve(self, rhs):
return _upper_triangular_solve_sparse(self, rhs)
liupc.__doc__ = _liupc.__doc__
row_structure_symbolic_cholesky.__doc__ = _row_structure_symbolic_cholesky.__doc__
cholesky.__doc__ = _cholesky_sparse.__doc__
LDLdecomposition.__doc__ = _LDLdecomposition_sparse.__doc__
lower_triangular_solve.__doc__ = _lower_triangular_solve_sparse.__doc__
upper_triangular_solve.__doc__ = _upper_triangular_solve_sparse.__doc__
class MutableSparseMatrix(SparseRepMatrix, MutableRepMatrix):
@classmethod
def _new(cls, *args, **kwargs):
rows, cols, smat = cls._handle_creation_inputs(*args, **kwargs)
rep = cls._smat_to_DomainMatrix(rows, cols, smat)
return cls._fromrep(rep)
SparseMatrix = MutableSparseMatrix
|
b28115ff5219375a0695fb9915bef1d365664f17f5ceff848db6d5c27577a531 | import mpmath as mp
from collections.abc import Callable
from sympy.core.add import Add
from sympy.core.basic import Basic
from sympy.core.function import diff
from sympy.core.expr import Expr
from sympy.core.kind import _NumberKind, UndefinedKind
from sympy.core.mul import Mul
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.core.symbol import Dummy, Symbol, uniquely_named_symbol
from sympy.core.sympify import sympify
from sympy.core.sympify import _sympify
from sympy.functions import factorial
from sympy.functions.elementary.complexes import re
from sympy.functions.elementary.exponential import exp, log
from sympy.functions.elementary.miscellaneous import Max, Min, sqrt
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.polys import cancel
from sympy.printing import sstr
from sympy.printing.defaults import Printable
from sympy.simplify import simplify as _simplify
from sympy.utilities.decorator import deprecated
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import flatten, NotIterable, is_sequence, reshape
from sympy.utilities.misc import as_int, filldedent
from .common import (
MatrixCommon, MatrixError, NonSquareMatrixError, NonInvertibleMatrixError,
ShapeError, MatrixKind)
from .utilities import _iszero, _is_zero_after_expand_mul
from .determinant import (
_find_reasonable_pivot, _find_reasonable_pivot_naive,
_adjugate, _charpoly, _cofactor, _cofactor_matrix, _per,
_det, _det_bareiss, _det_berkowitz, _det_LU, _minor, _minor_submatrix)
from .reductions import _is_echelon, _echelon_form, _rank, _rref
from .subspaces import _columnspace, _nullspace, _rowspace, _orthogonalize
from .eigen import (
_eigenvals, _eigenvects,
_bidiagonalize, _bidiagonal_decomposition,
_is_diagonalizable, _diagonalize,
_is_positive_definite, _is_positive_semidefinite,
_is_negative_definite, _is_negative_semidefinite, _is_indefinite,
_jordan_form, _left_eigenvects, _singular_values)
from .decompositions import (
_rank_decomposition, _cholesky, _LDLdecomposition,
_LUdecomposition, _LUdecomposition_Simple, _LUdecompositionFF,
_singular_value_decomposition, _QRdecomposition, _upper_hessenberg_decomposition)
from .graph import (
_connected_components, _connected_components_decomposition,
_strongly_connected_components, _strongly_connected_components_decomposition)
from .solvers import (
_diagonal_solve, _lower_triangular_solve, _upper_triangular_solve,
_cholesky_solve, _LDLsolve, _LUsolve, _QRsolve, _gauss_jordan_solve,
_pinv_solve, _solve, _solve_least_squares)
from .inverse import (
_pinv, _inv_mod, _inv_ADJ, _inv_GE, _inv_LU, _inv_CH, _inv_LDL, _inv_QR,
_inv, _inv_block)
class DeferredVector(Symbol, NotIterable):
"""A vector whose components are deferred (e.g. for use with lambdify)
Examples
========
>>> from sympy import DeferredVector, lambdify
>>> X = DeferredVector( 'X' )
>>> X
X
>>> expr = (X[0] + 2, X[2] + 3)
>>> func = lambdify( X, expr)
>>> func( [1, 2, 3] )
(3, 6)
"""
def __getitem__(self, i):
if i == -0:
i = 0
if i < 0:
raise IndexError('DeferredVector index out of range')
component_name = '%s[%d]' % (self.name, i)
return Symbol(component_name)
def __str__(self):
return sstr(self)
def __repr__(self):
return "DeferredVector('%s')" % self.name
class MatrixDeterminant(MatrixCommon):
"""Provides basic matrix determinant operations. Should not be instantiated
directly. See ``determinant.py`` for their implementations."""
def _eval_det_bareiss(self, iszerofunc=_is_zero_after_expand_mul):
return _det_bareiss(self, iszerofunc=iszerofunc)
def _eval_det_berkowitz(self):
return _det_berkowitz(self)
def _eval_det_lu(self, iszerofunc=_iszero, simpfunc=None):
return _det_LU(self, iszerofunc=iszerofunc, simpfunc=simpfunc)
def _eval_determinant(self): # for expressions.determinant.Determinant
return _det(self)
def adjugate(self, method="berkowitz"):
return _adjugate(self, method=method)
def charpoly(self, x='lambda', simplify=_simplify):
return _charpoly(self, x=x, simplify=simplify)
def cofactor(self, i, j, method="berkowitz"):
return _cofactor(self, i, j, method=method)
def cofactor_matrix(self, method="berkowitz"):
return _cofactor_matrix(self, method=method)
def det(self, method="bareiss", iszerofunc=None):
return _det(self, method=method, iszerofunc=iszerofunc)
def per(self):
return _per(self)
def minor(self, i, j, method="berkowitz"):
return _minor(self, i, j, method=method)
def minor_submatrix(self, i, j):
return _minor_submatrix(self, i, j)
_find_reasonable_pivot.__doc__ = _find_reasonable_pivot.__doc__
_find_reasonable_pivot_naive.__doc__ = _find_reasonable_pivot_naive.__doc__
_eval_det_bareiss.__doc__ = _det_bareiss.__doc__
_eval_det_berkowitz.__doc__ = _det_berkowitz.__doc__
_eval_det_lu.__doc__ = _det_LU.__doc__
_eval_determinant.__doc__ = _det.__doc__
adjugate.__doc__ = _adjugate.__doc__
charpoly.__doc__ = _charpoly.__doc__
cofactor.__doc__ = _cofactor.__doc__
cofactor_matrix.__doc__ = _cofactor_matrix.__doc__
det.__doc__ = _det.__doc__
per.__doc__ = _per.__doc__
minor.__doc__ = _minor.__doc__
minor_submatrix.__doc__ = _minor_submatrix.__doc__
class MatrixReductions(MatrixDeterminant):
"""Provides basic matrix row/column operations. Should not be instantiated
directly. See ``reductions.py`` for some of their implementations."""
def echelon_form(self, iszerofunc=_iszero, simplify=False, with_pivots=False):
return _echelon_form(self, iszerofunc=iszerofunc, simplify=simplify,
with_pivots=with_pivots)
@property
def is_echelon(self):
return _is_echelon(self)
def rank(self, iszerofunc=_iszero, simplify=False):
return _rank(self, iszerofunc=iszerofunc, simplify=simplify)
def rref(self, iszerofunc=_iszero, simplify=False, pivots=True,
normalize_last=True):
return _rref(self, iszerofunc=iszerofunc, simplify=simplify,
pivots=pivots, normalize_last=normalize_last)
echelon_form.__doc__ = _echelon_form.__doc__
is_echelon.__doc__ = _is_echelon.__doc__
rank.__doc__ = _rank.__doc__
rref.__doc__ = _rref.__doc__
def _normalize_op_args(self, op, col, k, col1, col2, error_str="col"):
"""Validate the arguments for a row/column operation. ``error_str``
can be one of "row" or "col" depending on the arguments being parsed."""
if op not in ["n->kn", "n<->m", "n->n+km"]:
raise ValueError("Unknown {} operation '{}'. Valid col operations "
"are 'n->kn', 'n<->m', 'n->n+km'".format(error_str, op))
# define self_col according to error_str
self_cols = self.cols if error_str == 'col' else self.rows
# normalize and validate the arguments
if op == "n->kn":
col = col if col is not None else col1
if col is None or k is None:
raise ValueError("For a {0} operation 'n->kn' you must provide the "
"kwargs `{0}` and `k`".format(error_str))
if not 0 <= col < self_cols:
raise ValueError("This matrix doesn't have a {} '{}'".format(error_str, col))
elif op == "n<->m":
# we need two cols to swap. It doesn't matter
# how they were specified, so gather them together and
# remove `None`
cols = {col, k, col1, col2}.difference([None])
if len(cols) > 2:
# maybe the user left `k` by mistake?
cols = {col, col1, col2}.difference([None])
if len(cols) != 2:
raise ValueError("For a {0} operation 'n<->m' you must provide the "
"kwargs `{0}1` and `{0}2`".format(error_str))
col1, col2 = cols
if not 0 <= col1 < self_cols:
raise ValueError("This matrix doesn't have a {} '{}'".format(error_str, col1))
if not 0 <= col2 < self_cols:
raise ValueError("This matrix doesn't have a {} '{}'".format(error_str, col2))
elif op == "n->n+km":
col = col1 if col is None else col
col2 = col1 if col2 is None else col2
if col is None or col2 is None or k is None:
raise ValueError("For a {0} operation 'n->n+km' you must provide the "
"kwargs `{0}`, `k`, and `{0}2`".format(error_str))
if col == col2:
raise ValueError("For a {0} operation 'n->n+km' `{0}` and `{0}2` must "
"be different.".format(error_str))
if not 0 <= col < self_cols:
raise ValueError("This matrix doesn't have a {} '{}'".format(error_str, col))
if not 0 <= col2 < self_cols:
raise ValueError("This matrix doesn't have a {} '{}'".format(error_str, col2))
else:
raise ValueError('invalid operation %s' % repr(op))
return op, col, k, col1, col2
def _eval_col_op_multiply_col_by_const(self, col, k):
def entry(i, j):
if j == col:
return k * self[i, j]
return self[i, j]
return self._new(self.rows, self.cols, entry)
def _eval_col_op_swap(self, col1, col2):
def entry(i, j):
if j == col1:
return self[i, col2]
elif j == col2:
return self[i, col1]
return self[i, j]
return self._new(self.rows, self.cols, entry)
def _eval_col_op_add_multiple_to_other_col(self, col, k, col2):
def entry(i, j):
if j == col:
return self[i, j] + k * self[i, col2]
return self[i, j]
return self._new(self.rows, self.cols, entry)
def _eval_row_op_swap(self, row1, row2):
def entry(i, j):
if i == row1:
return self[row2, j]
elif i == row2:
return self[row1, j]
return self[i, j]
return self._new(self.rows, self.cols, entry)
def _eval_row_op_multiply_row_by_const(self, row, k):
def entry(i, j):
if i == row:
return k * self[i, j]
return self[i, j]
return self._new(self.rows, self.cols, entry)
def _eval_row_op_add_multiple_to_other_row(self, row, k, row2):
def entry(i, j):
if i == row:
return self[i, j] + k * self[row2, j]
return self[i, j]
return self._new(self.rows, self.cols, entry)
def elementary_col_op(self, op="n->kn", col=None, k=None, col1=None, col2=None):
"""Performs the elementary column operation `op`.
`op` may be one of
* ``"n->kn"`` (column n goes to k*n)
* ``"n<->m"`` (swap column n and column m)
* ``"n->n+km"`` (column n goes to column n + k*column m)
Parameters
==========
        op : string; the elementary column operation
col : the column to apply the column operation
k : the multiple to apply in the column operation
col1 : one column of a column swap
col2 : second column of a column swap or column "m" in the column operation
"n->n+km"
"""
op, col, k, col1, col2 = self._normalize_op_args(op, col, k, col1, col2, "col")
# now that we've validated, we're all good to dispatch
if op == "n->kn":
return self._eval_col_op_multiply_col_by_const(col, k)
if op == "n<->m":
return self._eval_col_op_swap(col1, col2)
if op == "n->n+km":
return self._eval_col_op_add_multiple_to_other_col(col, k, col2)
def elementary_row_op(self, op="n->kn", row=None, k=None, row1=None, row2=None):
"""Performs the elementary row operation `op`.
`op` may be one of
* ``"n->kn"`` (row n goes to k*n)
* ``"n<->m"`` (swap row n and row m)
* ``"n->n+km"`` (row n goes to row n + k*row m)
Parameters
==========
op : string; the elementary row operation
row : the row to apply the row operation
k : the multiple to apply in the row operation
row1 : one row of a row swap
row2 : second row of a row swap or row "m" in the row operation
"n->n+km"
"""
op, row, k, row1, row2 = self._normalize_op_args(op, row, k, row1, row2, "row")
# now that we've validated, we're all good to dispatch
if op == "n->kn":
return self._eval_row_op_multiply_row_by_const(row, k)
if op == "n<->m":
return self._eval_row_op_swap(row1, row2)
if op == "n->n+km":
return self._eval_row_op_add_multiple_to_other_row(row, k, row2)
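    # Rough usage sketch (illustrative comment, not a doctest): the keyword
    # names follow the docstrings above, e.g. adding -3 times row 0 to row 1:
    #   >>> from sympy import Matrix
    #   >>> Matrix([[1, 2], [3, 4]]).elementary_row_op("n->n+km", row=1, k=-3, row2=0)
    #   Matrix([
    #   [1,  2],
    #   [0, -2]])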
class MatrixSubspaces(MatrixReductions):
"""Provides methods relating to the fundamental subspaces of a matrix.
Should not be instantiated directly. See ``subspaces.py`` for their
implementations."""
def columnspace(self, simplify=False):
return _columnspace(self, simplify=simplify)
def nullspace(self, simplify=False, iszerofunc=_iszero):
return _nullspace(self, simplify=simplify, iszerofunc=iszerofunc)
def rowspace(self, simplify=False):
return _rowspace(self, simplify=simplify)
# This is a classmethod but is converted to such later in order to allow
# assignment of __doc__ since that does not work for already wrapped
# classmethods in Python 3.6.
def orthogonalize(cls, *vecs, **kwargs):
return _orthogonalize(cls, *vecs, **kwargs)
columnspace.__doc__ = _columnspace.__doc__
nullspace.__doc__ = _nullspace.__doc__
rowspace.__doc__ = _rowspace.__doc__
orthogonalize.__doc__ = _orthogonalize.__doc__
orthogonalize = classmethod(orthogonalize) # type:ignore
class MatrixEigen(MatrixSubspaces):
"""Provides basic matrix eigenvalue/vector operations.
Should not be instantiated directly. See ``eigen.py`` for their
implementations."""
def eigenvals(self, error_when_incomplete=True, **flags):
return _eigenvals(self, error_when_incomplete=error_when_incomplete, **flags)
def eigenvects(self, error_when_incomplete=True, iszerofunc=_iszero, **flags):
return _eigenvects(self, error_when_incomplete=error_when_incomplete,
iszerofunc=iszerofunc, **flags)
def is_diagonalizable(self, reals_only=False, **kwargs):
return _is_diagonalizable(self, reals_only=reals_only, **kwargs)
def diagonalize(self, reals_only=False, sort=False, normalize=False):
return _diagonalize(self, reals_only=reals_only, sort=sort,
normalize=normalize)
def bidiagonalize(self, upper=True):
return _bidiagonalize(self, upper=upper)
def bidiagonal_decomposition(self, upper=True):
return _bidiagonal_decomposition(self, upper=upper)
@property
def is_positive_definite(self):
return _is_positive_definite(self)
@property
def is_positive_semidefinite(self):
return _is_positive_semidefinite(self)
@property
def is_negative_definite(self):
return _is_negative_definite(self)
@property
def is_negative_semidefinite(self):
return _is_negative_semidefinite(self)
@property
def is_indefinite(self):
return _is_indefinite(self)
def jordan_form(self, calc_transform=True, **kwargs):
return _jordan_form(self, calc_transform=calc_transform, **kwargs)
def left_eigenvects(self, **flags):
return _left_eigenvects(self, **flags)
def singular_values(self):
return _singular_values(self)
eigenvals.__doc__ = _eigenvals.__doc__
eigenvects.__doc__ = _eigenvects.__doc__
is_diagonalizable.__doc__ = _is_diagonalizable.__doc__
diagonalize.__doc__ = _diagonalize.__doc__
is_positive_definite.__doc__ = _is_positive_definite.__doc__
is_positive_semidefinite.__doc__ = _is_positive_semidefinite.__doc__
is_negative_definite.__doc__ = _is_negative_definite.__doc__
is_negative_semidefinite.__doc__ = _is_negative_semidefinite.__doc__
is_indefinite.__doc__ = _is_indefinite.__doc__
jordan_form.__doc__ = _jordan_form.__doc__
left_eigenvects.__doc__ = _left_eigenvects.__doc__
singular_values.__doc__ = _singular_values.__doc__
bidiagonalize.__doc__ = _bidiagonalize.__doc__
bidiagonal_decomposition.__doc__ = _bidiagonal_decomposition.__doc__
class MatrixCalculus(MatrixCommon):
"""Provides calculus-related matrix operations."""
def diff(self, *args, **kwargs):
"""Calculate the derivative of each element in the matrix.
        ``args`` will be passed to the ``diff`` function.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.diff(x)
Matrix([
[1, 0],
[0, 0]])
See Also
========
integrate
limit
"""
# XXX this should be handled here rather than in Derivative
from sympy.tensor.array.array_derivatives import ArrayDerivative
kwargs.setdefault('evaluate', True)
deriv = ArrayDerivative(self, *args, evaluate=True)
if not isinstance(self, Basic):
return deriv.as_mutable()
else:
return deriv
def _eval_derivative(self, arg):
return self.applyfunc(lambda x: x.diff(arg))
def integrate(self, *args, **kwargs):
"""Integrate each element of the matrix. ``args`` will
be passed to the ``integrate`` function.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.integrate((x, ))
Matrix([
[x**2/2, x*y],
[ x, 0]])
>>> M.integrate((x, 0, 2))
Matrix([
[2, 2*y],
[2, 0]])
See Also
========
limit
diff
"""
return self.applyfunc(lambda x: x.integrate(*args, **kwargs))
def jacobian(self, X):
"""Calculates the Jacobian matrix (derivative of a vector-valued function).
Parameters
==========
``self`` : vector of expressions representing functions f_i(x_1, ..., x_n).
X : set of x_i's in order, it can be a list or a Matrix
Both ``self`` and X can be a row or a column matrix in any order
(i.e., jacobian() should always work).
Examples
========
>>> from sympy import sin, cos, Matrix
>>> from sympy.abc import rho, phi
>>> X = Matrix([rho*cos(phi), rho*sin(phi), rho**2])
>>> Y = Matrix([rho, phi])
>>> X.jacobian(Y)
Matrix([
[cos(phi), -rho*sin(phi)],
[sin(phi), rho*cos(phi)],
[ 2*rho, 0]])
>>> X = Matrix([rho*cos(phi), rho*sin(phi)])
>>> X.jacobian(Y)
Matrix([
[cos(phi), -rho*sin(phi)],
[sin(phi), rho*cos(phi)]])
See Also
========
hessian
wronskian
"""
if not isinstance(X, MatrixBase):
X = self._new(X)
# Both X and ``self`` can be a row or a column matrix, so we need to make
# sure all valid combinations work, but everything else fails:
if self.shape[0] == 1:
m = self.shape[1]
elif self.shape[1] == 1:
m = self.shape[0]
else:
raise TypeError("``self`` must be a row or a column matrix")
if X.shape[0] == 1:
n = X.shape[1]
elif X.shape[1] == 1:
n = X.shape[0]
else:
raise TypeError("X must be a row or a column matrix")
# m is the number of functions and n is the number of variables
# computing the Jacobian is now easy:
return self._new(m, n, lambda j, i: self[j].diff(X[i]))
def limit(self, *args):
"""Calculate the limit of each element in the matrix.
``args`` will be passed to the ``limit`` function.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.limit(x, 2)
Matrix([
[2, y],
[1, 0]])
See Also
========
integrate
diff
"""
return self.applyfunc(lambda x: x.limit(*args))
# https://github.com/sympy/sympy/pull/12854
class MatrixDeprecated(MatrixCommon):
"""A class to house deprecated matrix methods."""
def _legacy_array_dot(self, b):
"""Compatibility function for deprecated behavior of ``matrix.dot(vector)``
"""
from .dense import Matrix
if not isinstance(b, MatrixBase):
if is_sequence(b):
if len(b) != self.cols and len(b) != self.rows:
raise ShapeError(
"Dimensions incorrect for dot product: %s, %s" % (
self.shape, len(b)))
return self.dot(Matrix(b))
else:
raise TypeError(
"`b` must be an ordered iterable or Matrix, not %s." %
type(b))
mat = self
if mat.cols == b.rows:
if b.cols != 1:
mat = mat.T
b = b.T
prod = flatten((mat * b).tolist())
return prod
if mat.cols == b.cols:
return mat.dot(b.T)
elif mat.rows == b.rows:
return mat.T.dot(b)
else:
raise ShapeError("Dimensions incorrect for dot product: %s, %s" % (
self.shape, b.shape))
def berkowitz_charpoly(self, x=Dummy('lambda'), simplify=_simplify):
return self.charpoly(x=x)
def berkowitz_det(self):
"""Computes determinant using Berkowitz method.
See Also
========
det
berkowitz
"""
return self.det(method='berkowitz')
def berkowitz_eigenvals(self, **flags):
"""Computes eigenvalues of a Matrix using Berkowitz method.
See Also
========
berkowitz
"""
return self.eigenvals(**flags)
def berkowitz_minors(self):
"""Computes principal minors using Berkowitz method.
See Also
========
berkowitz
"""
sign, minors = self.one, []
for poly in self.berkowitz():
minors.append(sign * poly[-1])
sign = -sign
return tuple(minors)
def berkowitz(self):
from sympy.matrices import zeros
berk = ((1,),)
if not self:
return berk
if not self.is_square:
raise NonSquareMatrixError()
A, N = self, self.rows
transforms = [0] * (N - 1)
for n in range(N, 1, -1):
T, k = zeros(n + 1, n), n - 1
R, C = -A[k, :k], A[:k, k]
A, a = A[:k, :k], -A[k, k]
items = [C]
for i in range(0, n - 2):
items.append(A * items[i])
for i, B in enumerate(items):
items[i] = (R * B)[0, 0]
items = [self.one, a] + items
for i in range(n):
T[i:, i] = items[:n - i + 1]
transforms[k - 1] = T
polys = [self._new([self.one, -A[0, 0]])]
for i, T in enumerate(transforms):
polys.append(T * polys[i])
return berk + tuple(map(tuple, polys))
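    # Rough usage sketch (illustrative comment, not a doctest): ``berkowitz``
    # returns tuples of characteristic-polynomial coefficients; the supported
    # route for the full matrix is ``charpoly``:
    #   >>> from sympy import Matrix
    #   >>> Matrix([[1, 2], [3, 4]]).charpoly().all_coeffs()
    #   [1, -5, -2]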
def cofactorMatrix(self, method="berkowitz"):
return self.cofactor_matrix(method=method)
def det_bareis(self):
return _det_bareiss(self)
def det_LU_decomposition(self):
"""Compute matrix determinant using LU decomposition
Note that this method fails if the LU decomposition itself
fails. In particular, if the matrix has no inverse this method
will fail.
TODO: Implement algorithm for sparse matrices (SFF),
http://www.eecis.udel.edu/~saunders/papers/sffge/it5.ps.
See Also
========
det
det_bareiss
berkowitz_det
"""
return self.det(method='lu')
def jordan_cell(self, eigenval, n):
return self.jordan_block(size=n, eigenvalue=eigenval)
def jordan_cells(self, calc_transformation=True):
P, J = self.jordan_form()
return P, J.get_diag_blocks()
def minorEntry(self, i, j, method="berkowitz"):
return self.minor(i, j, method=method)
def minorMatrix(self, i, j):
return self.minor_submatrix(i, j)
def permuteBkwd(self, perm):
"""Permute the rows of the matrix with the given permutation in reverse."""
return self.permute_rows(perm, direction='backward')
def permuteFwd(self, perm):
"""Permute the rows of the matrix with the given permutation."""
return self.permute_rows(perm, direction='forward')
@Mul._kind_dispatcher.register(_NumberKind, MatrixKind)
def num_mat_mul(k1, k2):
"""
Return MatrixKind. The element kind is selected by recursive dispatching.
Do not need to dispatch in reversed order because KindDispatcher
searches for this automatically.
"""
# Deal with Mul._kind_dispatcher's commutativity
# XXX: this function is called with either k1 or k2 as MatrixKind because
# the Mul kind dispatcher is commutative. Maybe it shouldn't be. Need to
# swap the args here because NumberKind doesn't have an element_kind
# attribute.
if not isinstance(k2, MatrixKind):
k1, k2 = k2, k1
elemk = Mul._kind_dispatcher(k1, k2.element_kind)
return MatrixKind(elemk)
@Mul._kind_dispatcher.register(MatrixKind, MatrixKind)
def mat_mat_mul(k1, k2):
"""
Return MatrixKind. The element kind is selected by recursive dispatching.
"""
elemk = Mul._kind_dispatcher(k1.element_kind, k2.element_kind)
return MatrixKind(elemk)
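# Rough usage sketch (illustrative comment, not a doctest): the dispatchers
# above give unevaluated products of a scalar and a matrix a matrix kind,
# e.g. roughly
#   >>> from sympy import MatrixSymbol
#   >>> (2*MatrixSymbol('X', 2, 2)).kind
#   MatrixKind(NumberKind)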
class MatrixBase(MatrixDeprecated,
MatrixCalculus,
MatrixEigen,
MatrixCommon,
Printable):
"""Base class for matrix objects."""
# Added just for numpy compatibility
__array_priority__ = 11
is_Matrix = True
_class_priority = 3
_sympify = staticmethod(sympify)
zero = S.Zero
one = S.One
@property
def kind(self) -> MatrixKind:
elem_kinds = set(e.kind for e in self.flat())
if len(elem_kinds) == 1:
elemkind, = elem_kinds
else:
elemkind = UndefinedKind
return MatrixKind(elemkind)
def flat(self):
return [self[i, j] for i in range(self.rows) for j in range(self.cols)]
def __array__(self, dtype=object):
from .dense import matrix2numpy
return matrix2numpy(self, dtype=dtype)
def __len__(self):
"""Return the number of elements of ``self``.
Implemented mainly so bool(Matrix()) == False.
"""
return self.rows * self.cols
def _matrix_pow_by_jordan_blocks(self, num):
from sympy.matrices import diag, MutableMatrix
from sympy.functions.combinatorial.factorials import binomial
def jordan_cell_power(jc, n):
N = jc.shape[0]
l = jc[0,0]
if l.is_zero:
if N == 1 and n.is_nonnegative:
jc[0,0] = l**n
elif not (n.is_integer and n.is_nonnegative):
raise NonInvertibleMatrixError("Non-invertible matrix can only be raised to a nonnegative integer")
else:
for i in range(N):
jc[0,i] = KroneckerDelta(i, n)
else:
for i in range(N):
bn = binomial(n, i)
if isinstance(bn, binomial):
bn = bn._eval_expand_func()
jc[0,i] = l**(n-i)*bn
for i in range(N):
for j in range(1, N-i):
                    jc[j,i+j] = jc[j-1,i+j-1]
P, J = self.jordan_form()
jordan_cells = J.get_diag_blocks()
# Make sure jordan_cells matrices are mutable:
jordan_cells = [MutableMatrix(j) for j in jordan_cells]
for j in jordan_cells:
jordan_cell_power(j, num)
return self._new(P.multiply(diag(*jordan_cells))
.multiply(P.inv()))
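    # Rough usage sketch (illustrative comment, not a doctest): this helper is
    # what makes symbolic exponents work when a Jordan form can be computed,
    # e.g. roughly
    #   >>> from sympy import Matrix, Symbol
    #   >>> Matrix([[1, 1], [0, 1]])**Symbol('n')
    #   Matrix([
    #   [1, n],
    #   [0, 1]])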
def __str__(self):
if S.Zero in self.shape:
return 'Matrix(%s, %s, [])' % (self.rows, self.cols)
return "Matrix(%s)" % str(self.tolist())
def _format_str(self, printer=None):
if not printer:
from sympy.printing.str import StrPrinter
printer = StrPrinter()
# Handle zero dimensions:
if S.Zero in self.shape:
return 'Matrix(%s, %s, [])' % (self.rows, self.cols)
if self.rows == 1:
return "Matrix([%s])" % self.table(printer, rowsep=',\n')
return "Matrix([\n%s])" % self.table(printer, rowsep=',\n')
@classmethod
def irregular(cls, ntop, *matrices, **kwargs):
"""Return a matrix filled by the given matrices which
are listed in order of appearance from left to right, top to
bottom as they first appear in the matrix. They must fill the
matrix completely.
Examples
========
>>> from sympy import ones, Matrix
>>> Matrix.irregular(3, ones(2,1), ones(3,3)*2, ones(2,2)*3,
... ones(1,1)*4, ones(2,2)*5, ones(1,2)*6, ones(1,2)*7)
Matrix([
[1, 2, 2, 2, 3, 3],
[1, 2, 2, 2, 3, 3],
[4, 2, 2, 2, 5, 5],
[6, 6, 7, 7, 5, 5]])
"""
ntop = as_int(ntop)
# make sure we are working with explicit matrices
b = [i.as_explicit() if hasattr(i, 'as_explicit') else i
for i in matrices]
q = list(range(len(b)))
dat = [i.rows for i in b]
active = [q.pop(0) for _ in range(ntop)]
cols = sum([b[i].cols for i in active])
rows = []
while any(dat):
r = []
for a, j in enumerate(active):
r.extend(b[j][-dat[j], :])
dat[j] -= 1
if dat[j] == 0 and q:
active[a] = q.pop(0)
if len(r) != cols:
raise ValueError(filldedent('''
Matrices provided do not appear to fill
the space completely.'''))
rows.append(r)
return cls._new(rows)
@classmethod
def _handle_ndarray(cls, arg):
# NumPy array or matrix or some other object that implements
# __array__. So let's first use this method to get a
# numpy.array() and then make a Python list out of it.
arr = arg.__array__()
if len(arr.shape) == 2:
rows, cols = arr.shape[0], arr.shape[1]
flat_list = [cls._sympify(i) for i in arr.ravel()]
return rows, cols, flat_list
elif len(arr.shape) == 1:
flat_list = [cls._sympify(i) for i in arr]
return arr.shape[0], 1, flat_list
else:
raise NotImplementedError(
"SymPy supports just 1D and 2D matrices")
@classmethod
def _handle_creation_inputs(cls, *args, **kwargs):
"""Return the number of rows, cols and flat matrix elements.
Examples
========
>>> from sympy import Matrix, I
Matrix can be constructed as follows:
* from a nested list of iterables
>>> Matrix( ((1, 2+I), (3, 4)) )
Matrix([
[1, 2 + I],
[3, 4]])
* from un-nested iterable (interpreted as a column)
>>> Matrix( [1, 2] )
Matrix([
[1],
[2]])
* from un-nested iterable with dimensions
>>> Matrix(1, 2, [1, 2] )
Matrix([[1, 2]])
* from no arguments (a 0 x 0 matrix)
>>> Matrix()
Matrix(0, 0, [])
* from a rule
>>> Matrix(2, 2, lambda i, j: i/(j + 1) )
Matrix([
[0, 0],
[1, 1/2]])
See Also
========
irregular - filling a matrix with irregular blocks
"""
from sympy.matrices import SparseMatrix
from sympy.matrices.expressions.matexpr import MatrixSymbol
from sympy.matrices.expressions.blockmatrix import BlockMatrix
flat_list = None
if len(args) == 1:
# Matrix(SparseMatrix(...))
if isinstance(args[0], SparseMatrix):
return args[0].rows, args[0].cols, flatten(args[0].tolist())
# Matrix(Matrix(...))
elif isinstance(args[0], MatrixBase):
return args[0].rows, args[0].cols, args[0].flat()
# Matrix(MatrixSymbol('X', 2, 2))
elif isinstance(args[0], Basic) and args[0].is_Matrix:
return args[0].rows, args[0].cols, args[0].as_explicit().flat()
elif isinstance(args[0], mp.matrix):
M = args[0]
flat_list = [cls._sympify(x) for x in M]
return M.rows, M.cols, flat_list
# Matrix(numpy.ones((2, 2)))
elif hasattr(args[0], "__array__"):
return cls._handle_ndarray(args[0])
# Matrix([1, 2, 3]) or Matrix([[1, 2], [3, 4]])
elif is_sequence(args[0]) \
and not isinstance(args[0], DeferredVector):
dat = list(args[0])
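                # The lambdas below only capture the *name* ``evaluate``; it
                # is assigned a few lines further down, before either lambda
                # is ever called, so the forward reference is safe.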
ismat = lambda i: isinstance(i, MatrixBase) and (
evaluate or
isinstance(i, BlockMatrix) or
isinstance(i, MatrixSymbol))
raw = lambda i: is_sequence(i) and not ismat(i)
evaluate = kwargs.get('evaluate', True)
if evaluate:
def make_explicit(x):
"""make Block and Symbol explicit"""
if isinstance(x, BlockMatrix):
return x.as_explicit()
elif isinstance(x, MatrixSymbol) and all(_.is_Integer for _ in x.shape):
return x.as_explicit()
else:
return x
def make_explicit_row(row):
# Could be list or could be list of lists
if isinstance(row, (list, tuple)):
return [make_explicit(x) for x in row]
else:
return make_explicit(row)
if isinstance(dat, (list, tuple)):
dat = [make_explicit_row(row) for row in dat]
if dat in ([], [[]]):
rows = cols = 0
flat_list = []
elif not any(raw(i) or ismat(i) for i in dat):
# a column as a list of values
flat_list = [cls._sympify(i) for i in dat]
rows = len(flat_list)
cols = 1 if rows else 0
elif evaluate and all(ismat(i) for i in dat):
# a column as a list of matrices
ncol = {i.cols for i in dat if any(i.shape)}
if ncol:
if len(ncol) != 1:
raise ValueError('mismatched dimensions')
flat_list = [_ for i in dat for r in i.tolist() for _ in r]
cols = ncol.pop()
rows = len(flat_list)//cols
else:
rows = cols = 0
flat_list = []
elif evaluate and any(ismat(i) for i in dat):
ncol = set()
flat_list = []
for i in dat:
if ismat(i):
flat_list.extend(
[k for j in i.tolist() for k in j])
if any(i.shape):
ncol.add(i.cols)
elif raw(i):
if i:
ncol.add(len(i))
flat_list.extend([cls._sympify(ij) for ij in i])
else:
ncol.add(1)
flat_list.append(i)
if len(ncol) > 1:
raise ValueError('mismatched dimensions')
cols = ncol.pop()
rows = len(flat_list)//cols
else:
# list of lists; each sublist is a logical row
# which might consist of many rows if the values in
# the row are matrices
flat_list = []
ncol = set()
rows = cols = 0
for row in dat:
if not is_sequence(row) and \
not getattr(row, 'is_Matrix', False):
raise ValueError('expecting list of lists')
if hasattr(row, '__array__'):
if 0 in row.shape:
continue
elif not row:
continue
if evaluate and all(ismat(i) for i in row):
r, c, flatT = cls._handle_creation_inputs(
[i.T for i in row])
T = reshape(flatT, [c])
flat = \
[T[i][j] for j in range(c) for i in range(r)]
r, c = c, r
else:
r = 1
if getattr(row, 'is_Matrix', False):
c = 1
flat = [row]
else:
c = len(row)
flat = [cls._sympify(i) for i in row]
ncol.add(c)
if len(ncol) > 1:
raise ValueError('mismatched dimensions')
flat_list.extend(flat)
rows += r
cols = ncol.pop() if ncol else 0
elif len(args) == 3:
rows = as_int(args[0])
cols = as_int(args[1])
if rows < 0 or cols < 0:
raise ValueError("Cannot create a {} x {} matrix. "
"Both dimensions must be positive".format(rows, cols))
# Matrix(2, 2, lambda i, j: i+j)
if len(args) == 3 and isinstance(args[2], Callable):
op = args[2]
flat_list = []
for i in range(rows):
flat_list.extend(
[cls._sympify(op(cls._sympify(i), cls._sympify(j)))
for j in range(cols)])
# Matrix(2, 2, [1, 2, 3, 4])
elif len(args) == 3 and is_sequence(args[2]):
flat_list = args[2]
if len(flat_list) != rows * cols:
raise ValueError(
'List length should be equal to rows*columns')
flat_list = [cls._sympify(i) for i in flat_list]
# Matrix()
elif len(args) == 0:
# Empty Matrix
rows = cols = 0
flat_list = []
if flat_list is None:
raise TypeError(filldedent('''
Data type not understood; expecting list of lists
or lists of values.'''))
return rows, cols, flat_list
def _setitem(self, key, value):
"""Helper to set value at location given by key.
Examples
========
>>> from sympy import Matrix, I, zeros, ones
>>> m = Matrix(((1, 2+I), (3, 4)))
>>> m
Matrix([
[1, 2 + I],
[3, 4]])
>>> m[1, 0] = 9
>>> m
Matrix([
[1, 2 + I],
[9, 4]])
>>> m[1, 0] = [[0, 1]]
To replace row r you assign to position r*m where m
is the number of columns:
>>> M = zeros(4)
>>> m = M.cols
>>> M[3*m] = ones(1, m)*2; M
Matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[2, 2, 2, 2]])
And to replace column c you can assign to position c:
>>> M[2] = ones(m, 1)*4; M
Matrix([
[0, 0, 4, 0],
[0, 0, 4, 0],
[0, 0, 4, 0],
[2, 2, 4, 2]])
"""
from .dense import Matrix
is_slice = isinstance(key, slice)
i, j = key = self.key2ij(key)
is_mat = isinstance(value, MatrixBase)
if isinstance(i, slice) or isinstance(j, slice):
if is_mat:
self.copyin_matrix(key, value)
return
if not isinstance(value, Expr) and is_sequence(value):
self.copyin_list(key, value)
return
raise ValueError('unexpected value: %s' % value)
else:
if (not is_mat and
not isinstance(value, Basic) and is_sequence(value)):
value = Matrix(value)
is_mat = True
if is_mat:
if is_slice:
key = (slice(*divmod(i, self.cols)),
slice(*divmod(j, self.cols)))
else:
key = (slice(i, i + value.rows),
slice(j, j + value.cols))
self.copyin_matrix(key, value)
else:
return i, j, self._sympify(value)
return
def add(self, b):
"""Return self + b """
return self + b
def condition_number(self):
"""Returns the condition number of a matrix.
This is the maximum singular value divided by the minimum singular value
Examples
========
>>> from sympy import Matrix, S
>>> A = Matrix([[1, 0, 0], [0, 10, 0], [0, 0, S.One/10]])
>>> A.condition_number()
100
See Also
========
singular_values
"""
if not self:
return self.zero
singularvalues = self.singular_values()
return Max(*singularvalues) / Min(*singularvalues)
def copy(self):
"""
Returns the copy of a matrix.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix(2, 2, [1, 2, 3, 4])
>>> A.copy()
Matrix([
[1, 2],
[3, 4]])
"""
return self._new(self.rows, self.cols, self.flat())
def cross(self, b):
r"""
Return the cross product of ``self`` and ``b`` relaxing the condition
of compatible dimensions: if each has 3 elements, a matrix of the
same type and shape as ``self`` will be returned. If ``b`` has the same
shape as ``self`` then common identities for the cross product (like
`a \times b = - b \times a`) will hold.
Parameters
==========
b : 3x1 or 1x3 Matrix
See Also
========
dot
multiply
multiply_elementwise
"""
from sympy.matrices.expressions.matexpr import MatrixExpr
if not isinstance(b, MatrixBase) and not isinstance(b, MatrixExpr):
raise TypeError(
"{} must be a Matrix, not {}.".format(b, type(b)))
if not (self.rows * self.cols == b.rows * b.cols == 3):
raise ShapeError("Dimensions incorrect for cross product: %s x %s" %
((self.rows, self.cols), (b.rows, b.cols)))
else:
return self._new(self.rows, self.cols, (
(self[1] * b[2] - self[2] * b[1]),
(self[2] * b[0] - self[0] * b[2]),
(self[0] * b[1] - self[1] * b[0])))
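    # Rough usage sketch (illustrative comment, not a doctest): the usual
    # right-handed identity for 3-vectors, e.g.
    #   >>> from sympy import Matrix
    #   >>> Matrix([1, 0, 0]).cross(Matrix([0, 1, 0]))
    #   Matrix([
    #   [0],
    #   [0],
    #   [1]])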
@property
def D(self):
"""Return Dirac conjugate (if ``self.rows == 4``).
Examples
========
>>> from sympy import Matrix, I, eye
>>> m = Matrix((0, 1 + I, 2, 3))
>>> m.D
Matrix([[0, 1 - I, -2, -3]])
>>> m = (eye(4) + I*eye(4))
>>> m[0, 3] = 2
>>> m.D
Matrix([
[1 - I, 0, 0, 0],
[ 0, 1 - I, 0, 0],
[ 0, 0, -1 + I, 0],
[ 2, 0, 0, -1 + I]])
If the matrix does not have 4 rows an AttributeError will be raised
because this property is only defined for matrices with 4 rows.
>>> Matrix(eye(2)).D
Traceback (most recent call last):
...
AttributeError: Matrix has no attribute D.
See Also
========
sympy.matrices.common.MatrixCommon.conjugate: By-element conjugation
sympy.matrices.common.MatrixCommon.H: Hermite conjugation
"""
from sympy.physics.matrices import mgamma
if self.rows != 4:
            # In Python 3.2+, properties can only raise an AttributeError
# so we can't raise a ShapeError -- see commit which added the
# first line of this inline comment. Also, there is no need
# for a message since MatrixBase will raise the AttributeError
raise AttributeError
return self.H * mgamma(0)
def dot(self, b, hermitian=None, conjugate_convention=None):
"""Return the dot or inner product of two vectors of equal length.
Here ``self`` must be a ``Matrix`` of size 1 x n or n x 1, and ``b``
must be either a matrix of size 1 x n, n x 1, or a list/tuple of length n.
A scalar is returned.
By default, ``dot`` does not conjugate ``self`` or ``b``, even if there are
complex entries. Set ``hermitian=True`` (and optionally a ``conjugate_convention``)
to compute the hermitian inner product.
Possible kwargs are ``hermitian`` and ``conjugate_convention``.
If ``conjugate_convention`` is ``"left"``, ``"math"`` or ``"maths"``,
the conjugate of the first vector (``self``) is used. If ``"right"``
or ``"physics"`` is specified, the conjugate of the second vector ``b`` is used.
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> v = Matrix([1, 1, 1])
>>> M.row(0).dot(v)
6
>>> M.col(0).dot(v)
12
>>> v = [3, 2, 1]
>>> M.row(0).dot(v)
10
>>> from sympy import I
>>> q = Matrix([1*I, 1*I, 1*I])
>>> q.dot(q, hermitian=False)
-3
>>> q.dot(q, hermitian=True)
3
>>> q1 = Matrix([1, 1, 1*I])
>>> q.dot(q1, hermitian=True, conjugate_convention="maths")
1 - 2*I
>>> q.dot(q1, hermitian=True, conjugate_convention="physics")
1 + 2*I
See Also
========
cross
multiply
multiply_elementwise
"""
from .dense import Matrix
if not isinstance(b, MatrixBase):
if is_sequence(b):
if len(b) != self.cols and len(b) != self.rows:
raise ShapeError(
"Dimensions incorrect for dot product: %s, %s" % (
self.shape, len(b)))
return self.dot(Matrix(b))
else:
raise TypeError(
"`b` must be an ordered iterable or Matrix, not %s." %
type(b))
mat = self
        if (1 not in mat.shape) or (1 not in b.shape):
SymPyDeprecationWarning(
feature="Dot product of non row/column vectors",
issue=13815,
deprecated_since_version="1.2",
useinstead="* to take matrix products").warn()
return mat._legacy_array_dot(b)
if len(mat) != len(b):
raise ShapeError("Dimensions incorrect for dot product: %s, %s" % (self.shape, b.shape))
n = len(mat)
if mat.shape != (1, n):
mat = mat.reshape(1, n)
if b.shape != (n, 1):
b = b.reshape(n, 1)
# Now ``mat`` is a row vector and ``b`` is a column vector.
        # If only conjugate_convention is passed, automatically set
        # hermitian to True. If hermitian is True but conjugate_convention
        # is not passed, automatically set it to ``"maths"``.
if conjugate_convention is not None and hermitian is None:
hermitian = True
if hermitian and conjugate_convention is None:
conjugate_convention = "maths"
if hermitian == True:
if conjugate_convention in ("maths", "left", "math"):
mat = mat.conjugate()
elif conjugate_convention in ("physics", "right"):
b = b.conjugate()
else:
raise ValueError("Unknown conjugate_convention was entered."
" conjugate_convention must be one of the"
" following: math, maths, left, physics or right.")
return (mat * b)[0]
def dual(self):
"""Returns the dual of a matrix, which is:
``(1/2)*levicivita(i, j, k, l)*M(k, l)`` summed over indices `k` and `l`
Since the levicivita method is anti_symmetric for any pairwise
exchange of indices, the dual of a symmetric matrix is the zero
matrix. Strictly speaking the dual defined here assumes that the
'matrix' `M` is a contravariant anti_symmetric second rank tensor,
so that the dual is a covariant second rank tensor.
"""
from sympy.functions.special.tensor_functions import LeviCivita
from sympy.matrices import zeros
M, n = self[:, :], self.rows
work = zeros(n)
if self.is_symmetric():
return work
for i in range(1, n):
for j in range(1, n):
acum = 0
for k in range(1, n):
acum += LeviCivita(i, j, 0, k) * M[0, k]
work[i, j] = acum
work[j, i] = -acum
for l in range(1, n):
acum = 0
for a in range(1, n):
for b in range(1, n):
acum += LeviCivita(0, l, a, b) * M[a, b]
acum /= 2
work[0, l] = -acum
work[l, 0] = acum
return work
def _eval_matrix_exp_jblock(self):
"""A helper function to compute an exponential of a Jordan block
matrix
Examples
========
>>> from sympy import Symbol, Matrix
>>> l = Symbol('lamda')
A trivial example of 1*1 Jordan block:
>>> m = Matrix.jordan_block(1, l)
>>> m._eval_matrix_exp_jblock()
Matrix([[exp(lamda)]])
An example of 3*3 Jordan block:
>>> m = Matrix.jordan_block(3, l)
>>> m._eval_matrix_exp_jblock()
Matrix([
[exp(lamda), exp(lamda), exp(lamda)/2],
[ 0, exp(lamda), exp(lamda)],
[ 0, 0, exp(lamda)]])
References
==========
.. [1] https://en.wikipedia.org/wiki/Matrix_function#Jordan_decomposition
"""
size = self.rows
l = self[0, 0]
exp_l = exp(l)
bands = {i: exp_l / factorial(i) for i in range(size)}
from .sparsetools import banded
return self.__class__(banded(size, bands))
def analytic_func(self, f, x):
"""
Computes f(A) where A is a Square Matrix
and f is an analytic function.
Examples
========
>>> from sympy import Symbol, Matrix, S, log
>>> x = Symbol('x')
>>> m = Matrix([[S(5)/4, S(3)/4], [S(3)/4, S(5)/4]])
>>> f = log(x)
>>> m.analytic_func(f, x)
Matrix([
[ 0, log(2)],
[log(2), 0]])
Parameters
==========
f : Expr
Analytic Function
x : Symbol
parameter of f
"""
f, x = _sympify(f), _sympify(x)
if not self.is_square:
raise NonSquareMatrixError
if not x.is_symbol:
raise ValueError("{} must be a symbol.".format(x))
if x not in f.free_symbols:
raise ValueError(
"{} must be a parameter of {}.".format(x, f))
if x in self.free_symbols:
raise ValueError(
"{} must not be a parameter of {}.".format(x, self))
eigen = self.eigenvals()
max_mul = max(eigen.values())
derivative = {}
dd = f
for i in range(max_mul - 1):
dd = diff(dd, x)
derivative[i + 1] = dd
n = self.shape[0]
r = self.zeros(n)
f_val = self.zeros(n, 1)
row = 0
for i in eigen:
mul = eigen[i]
f_val[row] = f.subs(x, i)
if f_val[row].is_number and not f_val[row].is_complex:
raise ValueError(
"Cannot evaluate the function because the "
"function {} is not analytic at the given "
"eigenvalue {}".format(f, f_val[row]))
val = 1
for a in range(n):
r[row, a] = val
val *= i
if mul > 1:
coe = [1 for ii in range(n)]
deri = 1
while mul > 1:
row = row + 1
mul -= 1
d_i = derivative[deri].subs(x, i)
if d_i.is_number and not d_i.is_complex:
raise ValueError(
"Cannot evaluate the function because the "
"derivative {} is not analytic at the given "
"eigenvalue {}".format(derivative[deri], d_i))
f_val[row] = d_i
for a in range(n):
if a - deri + 1 <= 0:
r[row, a] = 0
coe[a] = 0
continue
coe[a] = coe[a]*(a - deri + 1)
r[row, a] = coe[a]*pow(i, a - deri)
deri += 1
row += 1
c = r.solve(f_val)
ans = self.zeros(n)
pre = self.eye(n)
for i in range(n):
ans = ans + c[i]*pre
pre *= self
return ans
def exp(self):
"""Return the exponential of a square matrix
Examples
========
>>> from sympy import Symbol, Matrix
>>> t = Symbol('t')
>>> m = Matrix([[0, 1], [-1, 0]]) * t
>>> m.exp()
Matrix([
[ exp(I*t)/2 + exp(-I*t)/2, -I*exp(I*t)/2 + I*exp(-I*t)/2],
[I*exp(I*t)/2 - I*exp(-I*t)/2, exp(I*t)/2 + exp(-I*t)/2]])
"""
if not self.is_square:
raise NonSquareMatrixError(
"Exponentiation is valid only for square matrices")
try:
P, J = self.jordan_form()
cells = J.get_diag_blocks()
except MatrixError:
raise NotImplementedError(
"Exponentiation is implemented only for matrices for which the Jordan normal form can be computed")
blocks = [cell._eval_matrix_exp_jblock() for cell in cells]
from sympy.matrices import diag
eJ = diag(*blocks)
# n = self.rows
ret = P.multiply(eJ, dotprodsimp=None).multiply(P.inv(), dotprodsimp=None)
if all(value.is_real for value in self.values()):
return type(self)(re(ret))
else:
return type(self)(ret)
def _eval_matrix_log_jblock(self):
"""Helper function to compute logarithm of a jordan block.
Examples
========
>>> from sympy import Symbol, Matrix
>>> l = Symbol('lamda')
A trivial example of 1*1 Jordan block:
>>> m = Matrix.jordan_block(1, l)
>>> m._eval_matrix_log_jblock()
Matrix([[log(lamda)]])
An example of 3*3 Jordan block:
>>> m = Matrix.jordan_block(3, l)
>>> m._eval_matrix_log_jblock()
Matrix([
[log(lamda), 1/lamda, -1/(2*lamda**2)],
[ 0, log(lamda), 1/lamda],
[ 0, 0, log(lamda)]])
"""
size = self.rows
l = self[0, 0]
if l.is_zero:
raise MatrixError(
'Could not take logarithm or reciprocal for the given '
'eigenvalue {}'.format(l))
bands = {0: log(l)}
for i in range(1, size):
bands[i] = -((-l) ** -i) / i
from .sparsetools import banded
return self.__class__(banded(size, bands))
def log(self, simplify=cancel):
"""Return the logarithm of a square matrix
Parameters
==========
simplify : function, bool
The function to simplify the result with.
Default is ``cancel``, which is effective to reduce the
expression growing for taking reciprocals and inverses for
symbolic matrices.
Examples
========
>>> from sympy import S, Matrix
Examples for positive-definite matrices:
>>> m = Matrix([[1, 1], [0, 1]])
>>> m.log()
Matrix([
[0, 1],
[0, 0]])
>>> m = Matrix([[S(5)/4, S(3)/4], [S(3)/4, S(5)/4]])
>>> m.log()
Matrix([
[ 0, log(2)],
[log(2), 0]])
Examples for non positive-definite matrices:
>>> m = Matrix([[S(3)/4, S(5)/4], [S(5)/4, S(3)/4]])
>>> m.log()
Matrix([
[ I*pi/2, log(2) - I*pi/2],
[log(2) - I*pi/2, I*pi/2]])
>>> m = Matrix(
... [[0, 0, 0, 1],
... [0, 0, 1, 0],
... [0, 1, 0, 0],
... [1, 0, 0, 0]])
>>> m.log()
Matrix([
[ I*pi/2, 0, 0, -I*pi/2],
[ 0, I*pi/2, -I*pi/2, 0],
[ 0, -I*pi/2, I*pi/2, 0],
[-I*pi/2, 0, 0, I*pi/2]])
"""
if not self.is_square:
raise NonSquareMatrixError(
"Logarithm is valid only for square matrices")
try:
if simplify:
P, J = simplify(self).jordan_form()
else:
P, J = self.jordan_form()
cells = J.get_diag_blocks()
except MatrixError:
raise NotImplementedError(
"Logarithm is implemented only for matrices for which "
"the Jordan normal form can be computed")
blocks = [
cell._eval_matrix_log_jblock()
for cell in cells]
from sympy.matrices import diag
eJ = diag(*blocks)
if simplify:
ret = simplify(P * eJ * simplify(P.inv()))
ret = self.__class__(ret)
else:
ret = P * eJ * P.inv()
return ret
def is_nilpotent(self):
"""Checks if a matrix is nilpotent.
A matrix B is nilpotent if for some integer k, B**k is
a zero matrix.
Examples
========
>>> from sympy import Matrix
>>> a = Matrix([[0, 0, 0], [1, 0, 0], [1, 1, 0]])
>>> a.is_nilpotent()
True
>>> a = Matrix([[1, 0, 1], [1, 0, 0], [1, 1, 0]])
>>> a.is_nilpotent()
False
"""
if not self:
return True
if not self.is_square:
raise NonSquareMatrixError(
"Nilpotency is valid only for square matrices")
x = uniquely_named_symbol('x', self, modify=lambda s: '_' + s)
p = self.charpoly(x)
if p.args[0] == x ** self.rows:
return True
return False
def key2bounds(self, keys):
"""Converts a key with potentially mixed types of keys (integer and slice)
into a tuple of ranges and raises an error if any index is out of ``self``'s
range.
See Also
========
key2ij
"""
from sympy.matrices.common import a2idx as a2idx_ # Remove this line after deprecation of a2idx from matrices.py
islice, jslice = [isinstance(k, slice) for k in keys]
if islice:
if not self.rows:
rlo = rhi = 0
else:
rlo, rhi = keys[0].indices(self.rows)[:2]
else:
rlo = a2idx_(keys[0], self.rows)
rhi = rlo + 1
if jslice:
if not self.cols:
clo = chi = 0
else:
clo, chi = keys[1].indices(self.cols)[:2]
else:
clo = a2idx_(keys[1], self.cols)
chi = clo + 1
return rlo, rhi, clo, chi
def key2ij(self, key):
"""Converts key into canonical form, converting integers or indexable
items into valid integers for ``self``'s range or returning slices
unchanged.
See Also
========
key2bounds
"""
from sympy.matrices.common import a2idx as a2idx_ # Remove this line after deprecation of a2idx from matrices.py
if is_sequence(key):
if not len(key) == 2:
raise TypeError('key must be a sequence of length 2')
return [a2idx_(i, n) if not isinstance(i, slice) else i
for i, n in zip(key, self.shape)]
elif isinstance(key, slice):
return key.indices(len(self))[:2]
else:
return divmod(a2idx_(key, len(self)), self.cols)
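    # Rough usage sketch (illustrative comment, not a doctest): a flat integer
    # key is split with divmod, while a (row, col) pair is only normalized:
    #   >>> from sympy import Matrix
    #   >>> M = Matrix(2, 2, [1, 2, 3, 4])
    #   >>> M.key2ij(3)
    #   (1, 1)
    #   >>> M.key2ij((-1, 0))
    #   [1, 0]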
def normalized(self, iszerofunc=_iszero):
"""Return the normalized version of ``self``.
Parameters
==========
iszerofunc : Function, optional
A function to determine whether ``self`` is a zero vector.
The default ``_iszero`` tests to see if each element is
exactly zero.
Returns
=======
Matrix
Normalized vector form of ``self``.
It has the same length as a unit vector. However, a zero vector
will be returned for a vector with norm 0.
Raises
======
ShapeError
If the matrix is not in a vector form.
See Also
========
norm
"""
if self.rows != 1 and self.cols != 1:
raise ShapeError("A Matrix must be a vector to normalize.")
norm = self.norm()
if iszerofunc(norm):
out = self.zeros(self.rows, self.cols)
else:
out = self.applyfunc(lambda i: i / norm)
return out
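    # Rough usage sketch (illustrative comment, not a doctest):
    #   >>> from sympy import Matrix
    #   >>> Matrix([3, 4]).normalized()
    #   Matrix([
    #   [3/5],
    #   [4/5]])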
def norm(self, ord=None):
"""Return the Norm of a Matrix or Vector.
        In the simplest case this is the geometric size of the vector.
        Other norms can be specified by the ``ord`` parameter.
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm - does not exist
inf maximum row sum max(abs(x))
-inf -- min(abs(x))
1 maximum column sum as below
-1 -- as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other - does not exist sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
Examples
========
>>> from sympy import Matrix, Symbol, trigsimp, cos, sin, oo
>>> x = Symbol('x', real=True)
>>> v = Matrix([cos(x), sin(x)])
>>> trigsimp( v.norm() )
1
>>> v.norm(10)
(sin(x)**10 + cos(x)**10)**(1/10)
>>> A = Matrix([[1, 1], [1, 1]])
>>> A.norm(1) # maximum sum of absolute values of A is 2
2
>>> A.norm(2) # Spectral norm (max of |Ax|/|x| under 2-vector-norm)
2
>>> A.norm(-2) # Inverse spectral norm (smallest singular value)
0
>>> A.norm() # Frobenius Norm
2
>>> A.norm(oo) # Infinity Norm
2
>>> Matrix([1, -2]).norm(oo)
2
>>> Matrix([-1, 2]).norm(-oo)
1
See Also
========
normalized
"""
# Row or Column Vector Norms
vals = list(self.values()) or [0]
if S.One in self.shape:
if ord in (2, None): # Common case sqrt(<x, x>)
return sqrt(Add(*(abs(i) ** 2 for i in vals)))
elif ord == 1: # sum(abs(x))
return Add(*(abs(i) for i in vals))
elif ord is S.Infinity: # max(abs(x))
return Max(*[abs(i) for i in vals])
elif ord is S.NegativeInfinity: # min(abs(x))
return Min(*[abs(i) for i in vals])
# Otherwise generalize the 2-norm, Sum(x_i**ord)**(1/ord)
# Note that while useful this is not mathematically a norm
try:
return Pow(Add(*(abs(i) ** ord for i in vals)), S.One / ord)
except (NotImplementedError, TypeError):
raise ValueError("Expected order to be Number, Symbol, oo")
# Matrix Norms
else:
if ord == 1: # Maximum column sum
m = self.applyfunc(abs)
return Max(*[sum(m.col(i)) for i in range(m.cols)])
elif ord == 2: # Spectral Norm
# Maximum singular value
return Max(*self.singular_values())
elif ord == -2:
# Minimum singular value
return Min(*self.singular_values())
elif ord is S.Infinity: # Infinity Norm - Maximum row sum
m = self.applyfunc(abs)
return Max(*[sum(m.row(i)) for i in range(m.rows)])
elif (ord is None or isinstance(ord,
str) and ord.lower() in
['f', 'fro', 'frobenius', 'vector']):
# Reshape as vector and send back to norm function
return self.vec().norm(ord=2)
else:
raise NotImplementedError("Matrix Norms under development")
def print_nonzero(self, symb="X"):
"""Shows location of non-zero entries for fast shape lookup.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> m = Matrix(2, 3, lambda i, j: i*3+j)
>>> m
Matrix([
[0, 1, 2],
[3, 4, 5]])
>>> m.print_nonzero()
[ XX]
[XXX]
>>> m = eye(4)
>>> m.print_nonzero("x")
[x ]
[ x ]
[ x ]
[ x]
"""
s = []
for i in range(self.rows):
line = []
for j in range(self.cols):
if self[i, j] == 0:
line.append(" ")
else:
line.append(str(symb))
s.append("[%s]" % ''.join(line))
print('\n'.join(s))
def project(self, v):
"""Return the projection of ``self`` onto the line containing ``v``.
Examples
========
>>> from sympy import Matrix, S, sqrt
>>> V = Matrix([sqrt(3)/2, S.Half])
>>> x = Matrix([[1, 0]])
>>> V.project(x)
Matrix([[sqrt(3)/2, 0]])
>>> V.project(-x)
Matrix([[sqrt(3)/2, 0]])
"""
return v * (self.dot(v) / v.dot(v))
def table(self, printer, rowstart='[', rowend=']', rowsep='\n',
colsep=', ', align='right'):
r"""
String form of Matrix as a table.
        ``printer`` is the printer to use on the elements (generally
something like StrPrinter())
``rowstart`` is the string used to start each row (by default '[').
``rowend`` is the string used to end each row (by default ']').
``rowsep`` is the string used to separate rows (by default a newline).
``colsep`` is the string used to separate columns (by default ', ').
``align`` defines how the elements are aligned. Must be one of 'left',
'right', or 'center'. You can also use '<', '>', and '^' to mean the
same thing, respectively.
This is used by the string printer for Matrix.
Examples
========
>>> from sympy import Matrix
>>> from sympy.printing.str import StrPrinter
>>> M = Matrix([[1, 2], [-33, 4]])
>>> printer = StrPrinter()
>>> M.table(printer)
'[ 1, 2]\n[-33, 4]'
>>> print(M.table(printer))
[ 1, 2]
[-33, 4]
>>> print(M.table(printer, rowsep=',\n'))
[ 1, 2],
[-33, 4]
>>> print('[%s]' % M.table(printer, rowsep=',\n'))
[[ 1, 2],
[-33, 4]]
>>> print(M.table(printer, colsep=' '))
[ 1 2]
[-33 4]
>>> print(M.table(printer, align='center'))
[ 1 , 2]
[-33, 4]
>>> print(M.table(printer, rowstart='{', rowend='}'))
{ 1, 2}
{-33, 4}
"""
# Handle zero dimensions:
if S.Zero in self.shape:
return '[]'
# Build table of string representations of the elements
res = []
# Track per-column max lengths for pretty alignment
maxlen = [0] * self.cols
for i in range(self.rows):
res.append([])
for j in range(self.cols):
s = printer._print(self[i, j])
res[-1].append(s)
maxlen[j] = max(len(s), maxlen[j])
# Patch strings together
align = {
'left': 'ljust',
'right': 'rjust',
'center': 'center',
'<': 'ljust',
'>': 'rjust',
'^': 'center',
}[align]
for i, row in enumerate(res):
for j, elem in enumerate(row):
row[j] = getattr(elem, align)(maxlen[j])
res[i] = rowstart + colsep.join(row) + rowend
return rowsep.join(res)
def rank_decomposition(self, iszerofunc=_iszero, simplify=False):
return _rank_decomposition(self, iszerofunc=iszerofunc,
simplify=simplify)
def cholesky(self, hermitian=True):
raise NotImplementedError('This function is implemented in DenseMatrix or SparseMatrix')
def LDLdecomposition(self, hermitian=True):
raise NotImplementedError('This function is implemented in DenseMatrix or SparseMatrix')
def LUdecomposition(self, iszerofunc=_iszero, simpfunc=None,
rankcheck=False):
return _LUdecomposition(self, iszerofunc=iszerofunc, simpfunc=simpfunc,
rankcheck=rankcheck)
def LUdecomposition_Simple(self, iszerofunc=_iszero, simpfunc=None,
rankcheck=False):
return _LUdecomposition_Simple(self, iszerofunc=iszerofunc,
simpfunc=simpfunc, rankcheck=rankcheck)
def LUdecompositionFF(self):
return _LUdecompositionFF(self)
def singular_value_decomposition(self):
return _singular_value_decomposition(self)
def QRdecomposition(self):
return _QRdecomposition(self)
def upper_hessenberg_decomposition(self):
return _upper_hessenberg_decomposition(self)
def diagonal_solve(self, rhs):
return _diagonal_solve(self, rhs)
def lower_triangular_solve(self, rhs):
raise NotImplementedError('This function is implemented in DenseMatrix or SparseMatrix')
def upper_triangular_solve(self, rhs):
raise NotImplementedError('This function is implemented in DenseMatrix or SparseMatrix')
def cholesky_solve(self, rhs):
return _cholesky_solve(self, rhs)
def LDLsolve(self, rhs):
return _LDLsolve(self, rhs)
def LUsolve(self, rhs, iszerofunc=_iszero):
return _LUsolve(self, rhs, iszerofunc=iszerofunc)
def QRsolve(self, b):
return _QRsolve(self, b)
def gauss_jordan_solve(self, B, freevar=False):
return _gauss_jordan_solve(self, B, freevar=freevar)
def pinv_solve(self, B, arbitrary_matrix=None):
return _pinv_solve(self, B, arbitrary_matrix=arbitrary_matrix)
def solve(self, rhs, method='GJ'):
return _solve(self, rhs, method=method)
def solve_least_squares(self, rhs, method='CH'):
return _solve_least_squares(self, rhs, method=method)
def pinv(self, method='RD'):
return _pinv(self, method=method)
def inv_mod(self, m):
return _inv_mod(self, m)
def inverse_ADJ(self, iszerofunc=_iszero):
return _inv_ADJ(self, iszerofunc=iszerofunc)
def inverse_BLOCK(self, iszerofunc=_iszero):
return _inv_block(self, iszerofunc=iszerofunc)
def inverse_GE(self, iszerofunc=_iszero):
return _inv_GE(self, iszerofunc=iszerofunc)
def inverse_LU(self, iszerofunc=_iszero):
return _inv_LU(self, iszerofunc=iszerofunc)
def inverse_CH(self, iszerofunc=_iszero):
return _inv_CH(self, iszerofunc=iszerofunc)
def inverse_LDL(self, iszerofunc=_iszero):
return _inv_LDL(self, iszerofunc=iszerofunc)
def inverse_QR(self, iszerofunc=_iszero):
return _inv_QR(self, iszerofunc=iszerofunc)
def inv(self, method=None, iszerofunc=_iszero, try_block_diag=False):
return _inv(self, method=method, iszerofunc=iszerofunc,
try_block_diag=try_block_diag)
def connected_components(self):
return _connected_components(self)
def connected_components_decomposition(self):
return _connected_components_decomposition(self)
def strongly_connected_components(self):
return _strongly_connected_components(self)
def strongly_connected_components_decomposition(self, lower=True):
return _strongly_connected_components_decomposition(self, lower=lower)
_sage_ = Basic._sage_
rank_decomposition.__doc__ = _rank_decomposition.__doc__
cholesky.__doc__ = _cholesky.__doc__
LDLdecomposition.__doc__ = _LDLdecomposition.__doc__
LUdecomposition.__doc__ = _LUdecomposition.__doc__
LUdecomposition_Simple.__doc__ = _LUdecomposition_Simple.__doc__
LUdecompositionFF.__doc__ = _LUdecompositionFF.__doc__
singular_value_decomposition.__doc__ = _singular_value_decomposition.__doc__
QRdecomposition.__doc__ = _QRdecomposition.__doc__
upper_hessenberg_decomposition.__doc__ = _upper_hessenberg_decomposition.__doc__
diagonal_solve.__doc__ = _diagonal_solve.__doc__
lower_triangular_solve.__doc__ = _lower_triangular_solve.__doc__
upper_triangular_solve.__doc__ = _upper_triangular_solve.__doc__
cholesky_solve.__doc__ = _cholesky_solve.__doc__
LDLsolve.__doc__ = _LDLsolve.__doc__
LUsolve.__doc__ = _LUsolve.__doc__
QRsolve.__doc__ = _QRsolve.__doc__
gauss_jordan_solve.__doc__ = _gauss_jordan_solve.__doc__
pinv_solve.__doc__ = _pinv_solve.__doc__
solve.__doc__ = _solve.__doc__
solve_least_squares.__doc__ = _solve_least_squares.__doc__
pinv.__doc__ = _pinv.__doc__
inv_mod.__doc__ = _inv_mod.__doc__
inverse_ADJ.__doc__ = _inv_ADJ.__doc__
inverse_GE.__doc__ = _inv_GE.__doc__
inverse_LU.__doc__ = _inv_LU.__doc__
inverse_CH.__doc__ = _inv_CH.__doc__
inverse_LDL.__doc__ = _inv_LDL.__doc__
inverse_QR.__doc__ = _inv_QR.__doc__
inverse_BLOCK.__doc__ = _inv_block.__doc__
inv.__doc__ = _inv.__doc__
connected_components.__doc__ = _connected_components.__doc__
connected_components_decomposition.__doc__ = \
_connected_components_decomposition.__doc__
strongly_connected_components.__doc__ = \
_strongly_connected_components.__doc__
strongly_connected_components_decomposition.__doc__ = \
_strongly_connected_components_decomposition.__doc__
@deprecated(
issue=15109,
useinstead="from sympy.matrices.common import classof",
deprecated_since_version="1.3")
def classof(A, B):
from sympy.matrices.common import classof as classof_
return classof_(A, B)
@deprecated(
issue=15109,
deprecated_since_version="1.3",
useinstead="from sympy.matrices.common import a2idx")
def a2idx(j, n=None):
from sympy.matrices.common import a2idx as a2idx_
return a2idx_(j, n)
|
fc7e711293191614db437a2baa27a1e15d0bc52e46a1054205e62e78f0811f7b | from types import FunctionType
from collections import Counter
from mpmath import mp, workprec
from mpmath.libmp.libmpf import prec_to_dps
from sympy.core.sorting import default_sort_key
from sympy.core.evalf import DEFAULT_MAXPREC, PrecisionExhausted
from sympy.core.logic import fuzzy_and, fuzzy_or
from sympy.core.numbers import Float
from sympy.core.sympify import _sympify
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.polys import roots, CRootOf, ZZ, QQ, EX
from sympy.polys.matrices import DomainMatrix
from sympy.polys.matrices.eigen import dom_eigenvects, dom_eigenvects_to_sympy
from sympy.simplify import nsimplify, simplify as _simplify
from sympy.utilities.exceptions import SymPyDeprecationWarning
from .common import MatrixError, NonSquareMatrixError
from .determinant import _find_reasonable_pivot
from .utilities import _iszero
def _eigenvals_eigenvects_mpmath(M):
norm2 = lambda v: mp.sqrt(sum(i**2 for i in v))
v1 = None
prec = max([x._prec for x in M.atoms(Float)])
eps = 2**-prec
while prec < DEFAULT_MAXPREC:
with workprec(prec):
A = mp.matrix(M.evalf(n=prec_to_dps(prec)))
E, ER = mp.eig(A)
v2 = norm2([i for e in E for i in (mp.re(e), mp.im(e))])
if v1 is not None and mp.fabs(v1 - v2) < eps:
return E, ER
v1 = v2
prec *= 2
# we get here because the next step would have taken us
# past MAXPREC or because we never took a step; in case
# of the latter, we refuse to send back a solution since
# it would not have been verified; we also resist taking
# a small step to arrive exactly at MAXPREC since then
# the two calculations might be artificially close.
raise PrecisionExhausted
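# Illustrative sketch of the precision-doubling idea used above (assumes an
# mpmath installation; the matrix and precision are hypothetical examples):
#
#     >>> from mpmath import mp, workprec
#     >>> A = mp.matrix([[0.0, 1.0], [1.0, 0.0]])
#     >>> with workprec(106):     # twice the default 53-bit precision
#     ...     E, ER = mp.eig(A)   # eigenvalues approach -1 and 1
#
# The loop accepts ``E, ER`` only once the 2-norm of the eigenvalue vector
# agrees between two consecutive precisions to within ``eps``.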
def _eigenvals_mpmath(M, multiple=False):
"""Compute eigenvalues using mpmath"""
E, _ = _eigenvals_eigenvects_mpmath(M)
result = [_sympify(x) for x in E]
if multiple:
return result
return dict(Counter(result))
def _eigenvects_mpmath(M):
E, ER = _eigenvals_eigenvects_mpmath(M)
result = []
for i in range(M.rows):
eigenval = _sympify(E[i])
eigenvect = _sympify(ER[:, i])
result.append((eigenval, 1, [eigenvect]))
return result
# This function is a candidate for caching if it gets implemented for matrices.
def _eigenvals(
M, error_when_incomplete=True, *, simplify=False, multiple=False,
rational=False, **flags):
r"""Compute eigenvalues of the matrix.
Parameters
==========
error_when_incomplete : bool, optional
If it is set to ``True``, it will raise an error if not all
eigenvalues are computed. This is caused by ``roots`` not returning
a full list of eigenvalues.
simplify : bool or function, optional
If it is set to ``True``, it attempts to return the most
simplified form of expressions returned by applying the default
simplification method in every routine.
If it is set to ``False``, it will skip simplification in this
particular routine to save computation resources.
If a function is passed, it will attempt to apply
that function as the simplification method.
rational : bool, optional
If it is set to ``True``, every floating point number will be
replaced with a rational before computation. This can work around
some issues where the ``roots`` routine does not handle floats well.
multiple : bool, optional
If it is set to ``True``, the result will be in the form of a
list.
If it is set to ``False``, the result will be in the form of a
dictionary.
Returns
=======
eigs : list or dict
Eigenvalues of a matrix. The return format would be specified by
the key ``multiple``.
Raises
======
MatrixError
If not enough roots could be computed.
NonSquareMatrixError
If attempted to compute eigenvalues from a non-square matrix.
Examples
========
>>> from sympy.matrices import Matrix
>>> M = Matrix(3, 3, [0, 1, 1, 1, 0, 0, 1, 1, 1])
>>> M.eigenvals()
{-1: 1, 0: 1, 2: 1}
See Also
========
MatrixDeterminant.charpoly
eigenvects
Notes
=====
Eigenvalues of a matrix $A$ can be computed by solving a matrix
equation $\det(A - \lambda I) = 0$
It is not always possible to return radical solutions for the
eigenvalues of matrices larger than $4 \times 4$ because of the
Abel-Ruffini theorem.
If no radical solution is found for an eigenvalue, it may be
returned in the form of
:class:`sympy.polys.rootoftools.ComplexRootOf`.
"""
if not M:
if multiple:
return []
return {}
if not M.is_square:
raise NonSquareMatrixError("{} must be a square matrix.".format(M))
if M._rep.domain not in (ZZ, QQ):
# Skip this check for ZZ/QQ because it can be slow
if all(x.is_number for x in M) and M.has(Float):
return _eigenvals_mpmath(M, multiple=multiple)
if rational:
M = M.applyfunc(
lambda x: nsimplify(x, rational=True) if x.has(Float) else x)
if multiple:
return _eigenvals_list(
M, error_when_incomplete=error_when_incomplete, simplify=simplify,
**flags)
return _eigenvals_dict(
M, error_when_incomplete=error_when_incomplete, simplify=simplify,
**flags)
eigenvals_error_message = \
"It is not always possible to express the eigenvalues of a matrix " + \
"of size 5x5 or higher in radicals. " + \
"We have CRootOf, but domains other than the rationals are not " + \
"currently supported. " + \
"If there are no symbols in the matrix, " + \
"it should still be possible to compute numeric approximations " + \
"of the eigenvalues using " + \
"M.evalf().eigenvals() or M.charpoly().nroots()."
def _eigenvals_list(
M, error_when_incomplete=True, simplify=False, **flags):
iblocks = M.strongly_connected_components()
all_eigs = []
is_dom = M._rep.domain in (ZZ, QQ)
for b in iblocks:
# Fast path for a 1x1 block:
if is_dom and len(b) == 1:
index = b[0]
val = M[index, index]
all_eigs.append(val)
continue
block = M[b, b]
if isinstance(simplify, FunctionType):
charpoly = block.charpoly(simplify=simplify)
else:
charpoly = block.charpoly()
eigs = roots(charpoly, multiple=True, **flags)
if len(eigs) != block.rows:
degree = int(charpoly.degree())
f = charpoly.as_expr()
x = charpoly.gen
try:
eigs = [CRootOf(f, x, idx) for idx in range(degree)]
except NotImplementedError:
if error_when_incomplete:
raise MatrixError(eigenvals_error_message)
else:
eigs = []
all_eigs += eigs
if not simplify:
return all_eigs
if not isinstance(simplify, FunctionType):
simplify = _simplify
return [simplify(value) for value in all_eigs]
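# The block splitting above relies on the fact that the eigenvalues of a
# block-triangular matrix are just the eigenvalues of its diagonal blocks.
# A minimal sketch of the effect (assumes a working SymPy environment):
#
#     >>> from sympy import Matrix
#     >>> sorted(Matrix([[1, 5], [0, 2]]).eigenvals(multiple=True))
#     [1, 2]
#
# Here both strongly connected components are 1x1, so the fast path that
# reads the diagonal entries directly is taken.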
def _eigenvals_dict(
M, error_when_incomplete=True, simplify=False, **flags):
iblocks = M.strongly_connected_components()
all_eigs = {}
is_dom = M._rep.domain in (ZZ, QQ)
for b in iblocks:
# Fast path for a 1x1 block:
if is_dom and len(b) == 1:
index = b[0]
val = M[index, index]
all_eigs[val] = all_eigs.get(val, 0) + 1
continue
block = M[b, b]
if isinstance(simplify, FunctionType):
charpoly = block.charpoly(simplify=simplify)
else:
charpoly = block.charpoly()
eigs = roots(charpoly, multiple=False, **flags)
if sum(eigs.values()) != block.rows:
degree = int(charpoly.degree())
f = charpoly.as_expr()
x = charpoly.gen
try:
eigs = {CRootOf(f, x, idx): 1 for idx in range(degree)}
except NotImplementedError:
if error_when_incomplete:
raise MatrixError(eigenvals_error_message)
else:
eigs = {}
for k, v in eigs.items():
if k in all_eigs:
all_eigs[k] += v
else:
all_eigs[k] = v
if not simplify:
return all_eigs
if not isinstance(simplify, FunctionType):
simplify = _simplify
return {simplify(key): value for key, value in all_eigs.items()}
def _eigenspace(M, eigenval, iszerofunc=_iszero, simplify=False):
"""Get a basis for the eigenspace for a particular eigenvalue"""
m = M - M.eye(M.rows) * eigenval
ret = m.nullspace(iszerofunc=iszerofunc)
# The nullspace for a real eigenvalue should be non-trivial.
# If we didn't find an eigenvector, try once more a little harder
if len(ret) == 0 and simplify:
ret = m.nullspace(iszerofunc=iszerofunc, simplify=True)
if len(ret) == 0:
raise NotImplementedError(
"Can't evaluate eigenvector for eigenvalue {}".format(eigenval))
return ret
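# For example, the eigenspace of [[2, 1], [0, 2]] for the eigenvalue 2 is the
# nullspace of the shifted matrix (a minimal sketch; assumes SymPy):
#
#     >>> from sympy import Matrix, eye
#     >>> (Matrix([[2, 1], [0, 2]]) - 2*eye(2)).nullspace()
#     [Matrix([
#     [1],
#     [0]])]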
def _eigenvects_DOM(M, **kwargs):
DOM = DomainMatrix.from_Matrix(M, field=True, extension=True)
DOM = DOM.to_dense()
if DOM.domain != EX:
rational, algebraic = dom_eigenvects(DOM)
eigenvects = dom_eigenvects_to_sympy(
rational, algebraic, M.__class__, **kwargs)
eigenvects = sorted(eigenvects, key=lambda x: default_sort_key(x[0]))
return eigenvects
return None
def _eigenvects_sympy(M, iszerofunc, simplify=True, **flags):
eigenvals = M.eigenvals(rational=False, **flags)
# Make sure that we have all roots in radical form
for x in eigenvals:
if x.has(CRootOf):
raise MatrixError(
"Eigenvector computation is not implemented if the matrix have "
"eigenvalues in CRootOf form")
eigenvals = sorted(eigenvals.items(), key=default_sort_key)
ret = []
for val, mult in eigenvals:
vects = _eigenspace(M, val, iszerofunc=iszerofunc, simplify=simplify)
ret.append((val, mult, vects))
return ret
# This function is a candidate for caching if it gets implemented for matrices.
def _eigenvects(M, error_when_incomplete=True, iszerofunc=_iszero, *, chop=False, **flags):
"""Compute eigenvectors of the matrix.
Parameters
==========
error_when_incomplete : bool, optional
Raise an error when not all eigenvalues are computed. This is
caused by ``roots`` not returning a full list of eigenvalues.
iszerofunc : function, optional
Specifies a zero testing function to be used in ``rref``.
Default value is ``_iszero``, which uses SymPy's naive and fast
default assumption handler.
It can also accept any user-specified zero testing function, if it
is formatted as a function which accepts a single symbolic argument
and returns ``True`` if it is tested as zero and ``False`` if it
is tested as non-zero, and ``None`` if it is undecidable.
simplify : bool or function, optional
If ``True``, ``as_content_primitive()`` will be used to tidy up
normalization artifacts.
It will also be used by the ``nullspace`` routine.
chop : bool or positive number, optional
If the matrix contains any Floats, they will be changed to Rationals
for computation purposes, but the answers will be returned after
being evaluated with evalf. The ``chop`` flag is passed to ``evalf``.
When ``chop=True`` a default precision will be used; a number will
be interpreted as the desired level of precision.
Returns
=======
ret : [(eigenval, multiplicity, eigenspace), ...]
A ragged list containing tuples of data obtained by ``eigenvals``
and ``nullspace``.
``eigenspace`` is a list containing the ``eigenvector`` for each
eigenvalue.
``eigenvector`` is a vector in the form of a ``Matrix``. e.g.
a vector of length 3 is returned as ``Matrix([a_1, a_2, a_3])``.
Raises
======
NotImplementedError
If failed to compute nullspace.
Examples
========
>>> from sympy.matrices import Matrix
>>> M = Matrix(3, 3, [0, 1, 1, 1, 0, 0, 1, 1, 1])
>>> M.eigenvects()
[(-1, 1, [Matrix([
[-1],
[ 1],
[ 0]])]), (0, 1, [Matrix([
[ 0],
[-1],
[ 1]])]), (2, 1, [Matrix([
[2/3],
[1/3],
[ 1]])])]
See Also
========
eigenvals
MatrixSubspaces.nullspace
"""
simplify = flags.get('simplify', True)
primitive = flags.get('simplify', False)
flags.pop('simplify', None) # remove this if it's there
flags.pop('multiple', None) # remove this if it's there
if not isinstance(simplify, FunctionType):
simpfunc = _simplify if simplify else lambda x: x
has_floats = M.has(Float)
if has_floats:
if all(x.is_number for x in M):
return _eigenvects_mpmath(M)
M = M.applyfunc(lambda x: nsimplify(x, rational=True))
ret = _eigenvects_DOM(M)
if ret is None:
ret = _eigenvects_sympy(M, iszerofunc, simplify=simplify, **flags)
if primitive:
# if the primitive flag is set, get rid of any common
# integer denominators
def denom_clean(l):
from sympy.polys.polytools import gcd
return [(v / gcd(list(v))).applyfunc(simpfunc) for v in l]
ret = [(val, mult, denom_clean(es)) for val, mult, es in ret]
if has_floats:
# if we had floats to start with, turn the eigenvectors to floats
ret = [(val.evalf(chop=chop), mult, [v.evalf(chop=chop) for v in es])
for val, mult, es in ret]
return ret
def _is_diagonalizable_with_eigen(M, reals_only=False):
"""See _is_diagonalizable. This function returns the bool along with the
eigenvectors to avoid calculating them again in functions like
``diagonalize``."""
if not M.is_square:
return False, []
eigenvecs = M.eigenvects(simplify=True)
for val, mult, basis in eigenvecs:
if reals_only and not val.is_real: # if we have a complex eigenvalue
return False, eigenvecs
if mult != len(basis): # if the geometric multiplicity doesn't equal the algebraic
return False, eigenvecs
return True, eigenvecs
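# Example of the multiplicity check above: the shear matrix [[1, 1], [0, 1]]
# has eigenvalue 1 with algebraic multiplicity 2 but a one-dimensional
# eigenspace, so it is reported as non-diagonalizable (a minimal sketch;
# assumes SymPy):
#
#     >>> from sympy import Matrix
#     >>> Matrix([[1, 1], [0, 1]]).is_diagonalizable()
#     False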
def _is_diagonalizable(M, reals_only=False, **kwargs):
"""Returns ``True`` if a matrix is diagonalizable.
Parameters
==========
reals_only : bool, optional
If ``True``, it tests whether the matrix can be diagonalized
to contain only real numbers on the diagonal.
If ``False``, it tests whether the matrix can be diagonalized
at all, even with numbers that may not be real.
Examples
========
Example of a diagonalizable matrix:
>>> from sympy import Matrix
>>> M = Matrix([[1, 2, 0], [0, 3, 0], [2, -4, 2]])
>>> M.is_diagonalizable()
True
Example of a non-diagonalizable matrix:
>>> M = Matrix([[0, 1], [0, 0]])
>>> M.is_diagonalizable()
False
Example of a matrix that is diagonalized in terms of non-real entries:
>>> M = Matrix([[0, 1], [-1, 0]])
>>> M.is_diagonalizable(reals_only=False)
True
>>> M.is_diagonalizable(reals_only=True)
False
See Also
========
is_diagonal
diagonalize
"""
if 'clear_cache' in kwargs:
SymPyDeprecationWarning(
feature='clear_cache',
deprecated_since_version=1.4,
issue=15887
).warn()
if 'clear_subproducts' in kwargs:
SymPyDeprecationWarning(
feature='clear_subproducts',
deprecated_since_version=1.4,
issue=15887
).warn()
if not M.is_square:
return False
if all(e.is_real for e in M) and M.is_symmetric():
return True
if all(e.is_complex for e in M) and M.is_hermitian:
return True
return _is_diagonalizable_with_eigen(M, reals_only=reals_only)[0]
#G&VL, Matrix Computations, Algo 5.4.2
def _householder_vector(x):
if not x.cols == 1:
raise ValueError("Input must be a column matrix")
v = x.copy()
v_plus = x.copy()
v_minus = x.copy()
q = x[0, 0] / abs(x[0, 0])
norm_x = x.norm()
v_plus[0, 0] = x[0, 0] + q * norm_x
v_minus[0, 0] = x[0, 0] - q * norm_x
if x[1:, 0].norm() == 0:
bet = 0
v[0, 0] = 1
else:
if v_plus.norm() <= v_minus.norm():
v = v_plus
else:
v = v_minus
v = v / v[0]
bet = 2 / (v.norm() ** 2)
return v, bet
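# The reflector built from ``v, bet`` maps ``x`` onto a multiple of the first
# unit vector.  A small numeric sketch using this module's helper (assumes
# SymPy; the input vector is an arbitrary example):
#
#     >>> from sympy import Matrix, eye
#     >>> x = Matrix([3, 4])
#     >>> v, bet = _householder_vector(x)
#     >>> (eye(2) - bet*v*v.H) * x
#     Matrix([
#     [5],
#     [0]])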
def _bidiagonal_decmp_hholder(M):
m = M.rows
n = M.cols
A = M.as_mutable()
U, V = A.eye(m), A.eye(n)
for i in range(min(m, n)):
v, bet = _householder_vector(A[i:, i])
hh_mat = A.eye(m - i) - bet * v * v.H
A[i:, i:] = hh_mat * A[i:, i:]
temp = A.eye(m)
temp[i:, i:] = hh_mat
U = U * temp
if i + 1 <= n - 2:
v, bet = _householder_vector(A[i, i+1:].T)
hh_mat = A.eye(n - i - 1) - bet * v * v.H
A[i:, i+1:] = A[i:, i+1:] * hh_mat
temp = A.eye(n)
temp[i+1:, i+1:] = hh_mat
V = temp * V
return U, A, V
def _eval_bidiag_hholder(M):
m = M.rows
n = M.cols
A = M.as_mutable()
for i in range(min(m, n)):
v, bet = _householder_vector(A[i:, i])
hh_mat = A.eye(m-i) - bet * v * v.H
A[i:, i:] = hh_mat * A[i:, i:]
if i + 1 <= n - 2:
v, bet = _householder_vector(A[i, i+1:].T)
hh_mat = A.eye(n - i - 1) - bet * v * v.H
A[i:, i+1:] = A[i:, i+1:] * hh_mat
return A
def _bidiagonal_decomposition(M, upper=True):
"""
Returns $(U,B,V.H)$ for
$$A = UBV^{H}$$
where $A$ is the input matrix and $B$ is its bidiagonalized form.
Note: Bidiagonal computation can hang for symbolic matrices.
Parameters
==========
upper : bool. Whether to do upper bidiagonalization or lower.
True for upper and False for lower.
References
==========
.. [1] Algorithm 5.4.2, Matrix computations by Golub and Van Loan, 4th edition
.. [2] Complex Matrix Bidiagonalization, https://github.com/vslobody/Householder-Bidiagonalization
"""
if not isinstance(upper, bool):
raise ValueError("upper must be a boolean")
if upper:
return _bidiagonal_decmp_hholder(M)
X = _bidiagonal_decmp_hholder(M.H)
return X[2].H, X[1].H, X[0].H
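# Typical call pattern through the public wrapper (a minimal sketch; assumes
# the ``Matrix.bidiagonal_decomposition`` method that delegates to this
# function):
#
#     >>> from sympy import Matrix
#     >>> A = Matrix([[1, 2], [3, 4], [5, 6]])
#     >>> U, B, Vh = A.bidiagonal_decomposition()   # A == U*B*Vh, B bidiagonal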
def _bidiagonalize(M, upper=True):
"""
Returns $B$, the bidiagonalized form of the input matrix.
Note: Bidiagonal computation can hang for symbolic matrices.
Parameters
==========
upper : bool. Whether to do upper bidiagonalization or lower.
True for upper and False for lower.
References
==========
.. [1] Algorithm 5.4.2, Matrix computations by Golub and Van Loan, 4th edition
.. [2] Complex Matrix Bidiagonalization : https://github.com/vslobody/Householder-Bidiagonalization
"""
if not isinstance(upper, bool):
raise ValueError("upper must be a boolean")
if upper:
return _eval_bidiag_hholder(M)
return _eval_bidiag_hholder(M.H).H
def _diagonalize(M, reals_only=False, sort=False, normalize=False):
"""
Return (P, D), where D is diagonal and
D = P^-1 * M * P
where M is the current matrix.
Parameters
==========
reals_only : bool. Whether to throw an error if complex numbers are needed
to diagonalize. (Default: False)
sort : bool. Sort the eigenvalues along the diagonal. (Default: False)
normalize : bool. If True, normalize the columns of P. (Default: False)
Examples
========
>>> from sympy.matrices import Matrix
>>> M = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
>>> M
Matrix([
[1, 2, 0],
[0, 3, 0],
[2, -4, 2]])
>>> (P, D) = M.diagonalize()
>>> D
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> P
Matrix([
[-1, 0, -1],
[ 0, 0, -1],
[ 2, 1, 2]])
>>> P.inv() * M * P
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
See Also
========
is_diagonal
is_diagonalizable
"""
if not M.is_square:
raise NonSquareMatrixError()
is_diagonalizable, eigenvecs = _is_diagonalizable_with_eigen(M,
reals_only=reals_only)
if not is_diagonalizable:
raise MatrixError("Matrix is not diagonalizable")
if sort:
eigenvecs = sorted(eigenvecs, key=default_sort_key)
p_cols, diag = [], []
for val, mult, basis in eigenvecs:
diag += [val] * mult
p_cols += basis
if normalize:
p_cols = [v / v.norm() for v in p_cols]
return M.hstack(*p_cols), M.diag(*diag)
def _fuzzy_positive_definite(M):
positive_diagonals = M._has_positive_diagonals()
if positive_diagonals is False:
return False
if positive_diagonals and M.is_strongly_diagonally_dominant:
return True
return None
def _fuzzy_positive_semidefinite(M):
nonnegative_diagonals = M._has_nonnegative_diagonals()
if nonnegative_diagonals is False:
return False
if nonnegative_diagonals and M.is_weakly_diagonally_dominant:
return True
return None
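# For example, [[2, -1], [-1, 2]] has positive diagonal entries and is
# strictly diagonally dominant, so the fuzzy test alone is enough to conclude
# positive definiteness (a minimal sketch; assumes SymPy):
#
#     >>> from sympy import Matrix
#     >>> Matrix([[2, -1], [-1, 2]]).is_positive_definite
#     True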
def _is_positive_definite(M):
if not M.is_hermitian:
if not M.is_square:
return False
M = M + M.H
fuzzy = _fuzzy_positive_definite(M)
if fuzzy is not None:
return fuzzy
return _is_positive_definite_GE(M)
def _is_positive_semidefinite(M):
if not M.is_hermitian:
if not M.is_square:
return False
M = M + M.H
fuzzy = _fuzzy_positive_semidefinite(M)
if fuzzy is not None:
return fuzzy
return _is_positive_semidefinite_cholesky(M)
def _is_negative_definite(M):
return _is_positive_definite(-M)
def _is_negative_semidefinite(M):
return _is_positive_semidefinite(-M)
def _is_indefinite(M):
if M.is_hermitian:
eigen = M.eigenvals()
args1 = [x.is_positive for x in eigen.keys()]
any_positive = fuzzy_or(args1)
args2 = [x.is_negative for x in eigen.keys()]
any_negative = fuzzy_or(args2)
return fuzzy_and([any_positive, any_negative])
elif M.is_square:
return (M + M.H).is_indefinite
return False
def _is_positive_definite_GE(M):
"""A division-free gaussian elimination method for testing
positive-definiteness."""
M = M.as_mutable()
size = M.rows
for i in range(size):
is_positive = M[i, i].is_positive
if is_positive is not True:
return is_positive
for j in range(i+1, size):
M[j, i+1:] = M[i, i] * M[j, i+1:] - M[j, i] * M[i, i+1:]
return True
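# Worked example of one elimination step (a minimal sketch; assumes SymPy):
# for M = [[2, -3], [-3, 5]] the fuzzy tests are inconclusive, the first
# pivot is 2 > 0, and the division-free update replaces the trailing entry by
# 2*5 - (-3)*(-3) = 1 > 0, so the matrix is reported positive definite:
#
#     >>> from sympy import Matrix
#     >>> Matrix([[2, -3], [-3, 5]]).is_positive_definite
#     True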
def _is_positive_semidefinite_cholesky(M):
"""Uses Cholesky factorization with complete pivoting
References
==========
.. [1] http://eprints.ma.man.ac.uk/1199/1/covered/MIMS_ep2008_116.pdf
.. [2] https://www.value-at-risk.net/cholesky-factorization/
"""
M = M.as_mutable()
for k in range(M.rows):
diags = [M[i, i] for i in range(k, M.rows)]
pivot, pivot_val, nonzero, _ = _find_reasonable_pivot(diags)
if nonzero:
return None
if pivot is None:
for i in range(k+1, M.rows):
for j in range(k, M.cols):
iszero = M[i, j].is_zero
if iszero is None:
return None
elif iszero is False:
return False
return True
if M[k, k].is_negative or pivot_val.is_negative:
return False
elif not (M[k, k].is_nonnegative and pivot_val.is_nonnegative):
return None
if pivot > 0:
M.col_swap(k, k+pivot)
M.row_swap(k, k+pivot)
M[k, k] = sqrt(M[k, k])
M[k, k+1:] /= M[k, k]
M[k+1:, k+1:] -= M[k, k+1:].H * M[k, k+1:]
return M[-1, -1].is_nonnegative
_doc_positive_definite = \
r"""Finds out the definiteness of a matrix.
Explanation
===========
A square real matrix $A$ is:
- A positive definite matrix if $x^T A x > 0$
for all non-zero real vectors $x$.
- A positive semidefinite matrix if $x^T A x \geq 0$
for all non-zero real vectors $x$.
- A negative definite matrix if $x^T A x < 0$
for all non-zero real vectors $x$.
- A negative semidefinite matrix if $x^T A x \leq 0$
for all non-zero real vectors $x$.
- An indefinite matrix if there exists non-zero real vectors
$x, y$ with $x^T A x > 0 > y^T A y$.
A square complex matrix $A$ is:
- A positive definite matrix if $\text{re}(x^H A x) > 0$
for all non-zero complex vectors $x$.
- A positive semidefinite matrix if $\text{re}(x^H A x) \geq 0$
for all non-zero complex vectors $x$.
- A negative definite matrix if $\text{re}(x^H A x) < 0$
for all non-zero complex vectors $x$.
- A negative semidefinite matrix if $\text{re}(x^H A x) \leq 0$
for all non-zero complex vectors $x$.
- An indefinite matrix if there exists non-zero complex vectors
$x, y$ with $\text{re}(x^H A x) > 0 > \text{re}(y^H A y)$.
A matrix need not be symmetric or hermitian to be positive definite.
- A real non-symmetric matrix is positive definite if and only if
$\frac{A + A^T}{2}$ is positive definite.
- A complex non-hermitian matrix is positive definite if and only if
$\frac{A + A^H}{2}$ is positive definite.
This extension applies to all of the definitions above.
However, for complex cases, you can restrict the definition of
$\text{re}(x^H A x) > 0$ to $x^H A x > 0$ and require the matrix
to be hermitian.
We do not impose this restriction in the computation, however,
because you can check ``M.is_hermitian`` independently and use
the same procedure.
Examples
========
An example of symmetric positive definite matrix:
.. plot::
:context: reset
:format: doctest
:include-source: True
>>> from sympy import Matrix, symbols
>>> from sympy.plotting import plot3d
>>> a, b = symbols('a b')
>>> x = Matrix([a, b])
>>> A = Matrix([[1, 0], [0, 1]])
>>> A.is_positive_definite
True
>>> A.is_positive_semidefinite
True
>>> p = plot3d((x.T*A*x)[0, 0], (a, -1, 1), (b, -1, 1))
An example of symmetric positive semidefinite matrix:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> A = Matrix([[1, -1], [-1, 1]])
>>> A.is_positive_definite
False
>>> A.is_positive_semidefinite
True
>>> p = plot3d((x.T*A*x)[0, 0], (a, -1, 1), (b, -1, 1))
An example of symmetric negative definite matrix:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> A = Matrix([[-1, 0], [0, -1]])
>>> A.is_negative_definite
True
>>> A.is_negative_semidefinite
True
>>> A.is_indefinite
False
>>> p = plot3d((x.T*A*x)[0, 0], (a, -1, 1), (b, -1, 1))
An example of symmetric indefinite matrix:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> A = Matrix([[1, 2], [2, -1]])
>>> A.is_indefinite
True
>>> p = plot3d((x.T*A*x)[0, 0], (a, -1, 1), (b, -1, 1))
An example of non-symmetric positive definite matrix.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> A = Matrix([[1, 2], [-2, 1]])
>>> A.is_positive_definite
True
>>> A.is_positive_semidefinite
True
>>> p = plot3d((x.T*A*x)[0, 0], (a, -1, 1), (b, -1, 1))
Notes
=====
Although some texts restrict the definition of positive definite
matrices to symmetric or hermitian matrices, this restriction is too
narrow because it does not cover all matrices that satisfy the
definition $x^T A x > 0$ or
$\text{re}(x^H A x) > 0$.
For instance, ``Matrix([[1, 2], [-2, 1]])`` presented in
the example above is an example of real positive definite matrix
that is not symmetric.
However, since the following equivalence holds:
.. math::
\text{re}(x^H A x) > 0 \iff
\text{re}(x^H \frac{A + A^H}{2} x) > 0
We can classify all positive definite matrices that may or may not
be symmetric or hermitian by transforming the matrix to
$\frac{A + A^T}{2}$ or $\frac{A + A^H}{2}$
(which is guaranteed to be always real symmetric or complex
hermitian) and we can defer most of the studies to symmetric or
hermitian positive definite matrices.
The existence of a Cholesky decomposition is a different matter:
even though a non-symmetric or non-hermitian matrix can be positive
definite, it has no Cholesky or LDL decomposition because those
decompositions require the matrix to be symmetric or hermitian.
References
==========
.. [1] https://en.wikipedia.org/wiki/Definiteness_of_a_matrix#Eigenvalues
.. [2] http://mathworld.wolfram.com/PositiveDefiniteMatrix.html
.. [3] Johnson, C. R. "Positive Definite Matrices." Amer.
Math. Monthly 77, 259-264 1970.
"""
_is_positive_definite.__doc__ = _doc_positive_definite
_is_positive_semidefinite.__doc__ = _doc_positive_definite
_is_negative_definite.__doc__ = _doc_positive_definite
_is_negative_semidefinite.__doc__ = _doc_positive_definite
_is_indefinite.__doc__ = _doc_positive_definite
def _jordan_form(M, calc_transform=True, *, chop=False):
"""Return $(P, J)$ where $J$ is a Jordan block
matrix and $P$ is a matrix such that $M = P J P^{-1}$
Parameters
==========
calc_transform : bool
If ``False``, then only $J$ is returned.
chop : bool
All matrices are converted to exact types when computing
eigenvalues and eigenvectors. As a result, there may be
approximation errors. If ``chop==True``, these errors
will be truncated.
Examples
========
>>> from sympy.matrices import Matrix
>>> M = Matrix([[ 6, 5, -2, -3], [-3, -1, 3, 3], [ 2, 1, -2, -3], [-1, 1, 5, 5]])
>>> P, J = M.jordan_form()
>>> J
Matrix([
[2, 1, 0, 0],
[0, 2, 0, 0],
[0, 0, 2, 1],
[0, 0, 0, 2]])
See Also
========
jordan_block
"""
if not M.is_square:
raise NonSquareMatrixError("Only square matrices have Jordan forms")
mat = M
has_floats = M.has(Float)
if has_floats:
try:
max_prec = max(term._prec for term in M.values() if isinstance(term, Float))
except ValueError:
# if no term in the matrix is explicitly a Float, calling max()
# will throw an error, so set max_prec to the default value of 53
max_prec = 53
# setting minimum max_dps to 15 to prevent loss of precision in
# matrix containing non evaluated expressions
max_dps = max(prec_to_dps(max_prec), 15)
def restore_floats(*args):
"""If ``has_floats`` is `True`, cast all ``args`` as
matrices of floats."""
if has_floats:
args = [m.evalf(n=max_dps, chop=chop) for m in args]
if len(args) == 1:
return args[0]
return args
# cache calculations for some speedup
mat_cache = {}
def eig_mat(val, pow):
"""Cache computations of ``(M - val*I)**pow`` for quick
retrieval"""
if (val, pow) in mat_cache:
return mat_cache[(val, pow)]
if (val, pow - 1) in mat_cache:
mat_cache[(val, pow)] = mat_cache[(val, pow - 1)].multiply(
mat_cache[(val, 1)], dotprodsimp=None)
else:
mat_cache[(val, pow)] = (mat - val*M.eye(M.rows)).pow(pow)
return mat_cache[(val, pow)]
# helper functions
def nullity_chain(val, algebraic_multiplicity):
"""Calculate the sequence [0, nullity(E), nullity(E**2), ...]
until it is constant where ``E = M - val*I``"""
# mat.rank() is faster than computing the null space,
# so use the rank-nullity theorem
cols = M.cols
ret = [0]
nullity = cols - eig_mat(val, 1).rank()
i = 2
while nullity != ret[-1]:
ret.append(nullity)
if nullity == algebraic_multiplicity:
break
nullity = cols - eig_mat(val, i).rank()
i += 1
# Due to issues like #7146 and #15872, SymPy sometimes
# gives the wrong rank. In this case, raise an error
# instead of returning an incorrect matrix
if nullity < ret[-1] or nullity > algebraic_multiplicity:
raise MatrixError(
"SymPy had encountered an inconsistent "
"result while computing Jordan block: "
"{}".format(M))
return ret
def blocks_from_nullity_chain(d):
"""Return a list of the size of each Jordan block.
If d_n is the nullity of E**n, then the number
of Jordan blocks of size n is
2*d_n - d_(n-1) - d_(n+1)"""
# d[0] is always 0 (see ``nullity_chain``), so skip past it
mid = [2*d[n] - d[n - 1] - d[n + 1] for n in range(1, len(d) - 1)]
# d is assumed to plateau with "d[ len(d) ] == d[-1]", so
# 2*d_n - d_(n-1) - d_(n+1) == d_n - d_(n-1)
end = [d[-1] - d[-2]] if len(d) > 1 else [d[0]]
return mid + end
def pick_vec(small_basis, big_basis):
"""Picks a vector from big_basis that isn't in
the subspace spanned by small_basis"""
if len(small_basis) == 0:
return big_basis[0]
for v in big_basis:
_, pivots = M.hstack(*(small_basis + [v])).echelon_form(
with_pivots=True)
if pivots[-1] == len(small_basis):
return v
# roots doesn't like Floats, so replace them with Rationals
if has_floats:
mat = mat.applyfunc(lambda x: nsimplify(x, rational=True))
# first calculate the jordan block structure
eigs = mat.eigenvals()
# Make sure that we have all roots in radical form
for x in eigs:
if x.has(CRootOf):
raise MatrixError(
"Jordan normal form is not implemented if the matrix have "
"eigenvalues in CRootOf form")
# most matrices have distinct eigenvalues
# and so are diagonalizable. In this case, don't
# do extra work!
if len(eigs.keys()) == mat.cols:
blocks = list(sorted(eigs.keys(), key=default_sort_key))
jordan_mat = mat.diag(*blocks)
if not calc_transform:
return restore_floats(jordan_mat)
jordan_basis = [eig_mat(eig, 1).nullspace()[0]
for eig in blocks]
basis_mat = mat.hstack(*jordan_basis)
return restore_floats(basis_mat, jordan_mat)
block_structure = []
for eig in sorted(eigs.keys(), key=default_sort_key):
algebraic_multiplicity = eigs[eig]
chain = nullity_chain(eig, algebraic_multiplicity)
block_sizes = blocks_from_nullity_chain(chain)
# if block_sizes == [a, b, c, ...], then the number of
# Jordan blocks of size 1 is a, of size 2 is b, etc.
# create an array that has (eig, block_size) with one
# entry for each block
size_nums = [(i+1, num) for i, num in enumerate(block_sizes)]
# we expect larger Jordan blocks to come earlier
size_nums.reverse()
block_structure.extend(
[(eig, size) for size, num in size_nums for _ in range(num)])
jordan_form_size = sum(size for eig, size in block_structure)
if jordan_form_size != M.rows:
raise MatrixError(
"SymPy had encountered an inconsistent result while "
"computing Jordan block. : {}".format(M))
blocks = (mat.jordan_block(size=size, eigenvalue=eig) for eig, size in block_structure)
jordan_mat = mat.diag(*blocks)
if not calc_transform:
return restore_floats(jordan_mat)
# For each generalized eigenspace, calculate a basis.
# We start by looking for a vector in null( (A - eig*I)**n )
# which isn't in null( (A - eig*I)**(n-1) ) where n is
# the size of the Jordan block
#
# Ideally we'd just loop through block_structure and
# compute each generalized eigenspace. However, this
# causes a lot of unneeded computation. Instead, we
# go through the eigenvalues separately, since we know
# their generalized eigenspaces must have bases that
# are linearly independent.
jordan_basis = []
for eig in sorted(eigs.keys(), key=default_sort_key):
eig_basis = []
for block_eig, size in block_structure:
if block_eig != eig:
continue
null_big = (eig_mat(eig, size)).nullspace()
null_small = (eig_mat(eig, size - 1)).nullspace()
# we want to pick something that is in the big basis
# and not the small, but also something that is independent
# of any other generalized eigenvectors from a different
# generalized eigenspace sharing the same eigenvalue.
vec = pick_vec(null_small + eig_basis, null_big)
new_vecs = [eig_mat(eig, i).multiply(vec, dotprodsimp=None)
for i in range(size)]
eig_basis.extend(new_vecs)
jordan_basis.extend(reversed(new_vecs))
basis_mat = mat.hstack(*jordan_basis)
return restore_floats(basis_mat, jordan_mat)
def _left_eigenvects(M, **flags):
"""Returns left eigenvectors and eigenvalues.
This function returns the list of triples (eigenval, multiplicity,
basis) for the left eigenvectors. Options are the same as for
eigenvects(), i.e. the ``**flags`` arguments get passed directly to
eigenvects().
Examples
========
>>> from sympy.matrices import Matrix
>>> M = Matrix([[0, 1, 1], [1, 0, 0], [1, 1, 1]])
>>> M.eigenvects()
[(-1, 1, [Matrix([
[-1],
[ 1],
[ 0]])]), (0, 1, [Matrix([
[ 0],
[-1],
[ 1]])]), (2, 1, [Matrix([
[2/3],
[1/3],
[ 1]])])]
>>> M.left_eigenvects()
[(-1, 1, [Matrix([[-2, 1, 1]])]), (0, 1, [Matrix([[-1, -1, 1]])]), (2,
1, [Matrix([[1, 1, 1]])])]
"""
eigs = M.transpose().eigenvects(**flags)
return [(val, mult, [l.transpose() for l in basis]) for val, mult, basis in eigs]
def _singular_values(M):
"""Compute the singular values of a Matrix
Examples
========
>>> from sympy import Matrix, Symbol
>>> x = Symbol('x', real=True)
>>> M = Matrix([[0, 1, 0], [0, x, 0], [-1, 0, 0]])
>>> M.singular_values()
[sqrt(x**2 + 1), 1, 0]
See Also
========
condition_number
"""
if M.rows >= M.cols:
valmultpairs = M.H.multiply(M).eigenvals()
else:
valmultpairs = M.multiply(M.H).eigenvals()
# Expands result from eigenvals into a simple list
vals = []
for k, v in valmultpairs.items():
vals += [sqrt(k)] * v # dangerous! same k in several spots!
# Pad with zeros if singular values are computed in reverse way,
# to give consistent format.
if len(vals) < M.cols:
vals += [M.zero] * (M.cols - len(vals))
# sort them in descending order
vals.sort(reverse=True, key=default_sort_key)
return vals
|
311bf3b85222f25181051af2a8f02e492932b2d170416f30128c8dcff33962b7 | from collections import defaultdict
from operator import index as index_
from sympy.core.expr import Expr
from sympy.core.kind import Kind, NumberKind, UndefinedKind
from sympy.core.numbers import Integer, Rational
from sympy.core.sympify import _sympify, SympifyError
from sympy.core.singleton import S
from sympy.polys.domains import ZZ, QQ, EXRAW
from sympy.polys.matrices import DomainMatrix
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import is_sequence
from sympy.utilities.misc import filldedent
from .common import classof
from .matrices import MatrixBase, MatrixKind, ShapeError
class RepMatrix(MatrixBase):
"""Matrix implementation based on DomainMatrix as an internal representation.
The RepMatrix class is a superclass for Matrix, ImmutableMatrix,
SparseMatrix and ImmutableSparseMatrix which are the main usable matrix
classes in SymPy. Most methods on this class are simply forwarded to
DomainMatrix.
"""
#
# MatrixBase is the common superclass for all of the usable explicit matrix
# classes in SymPy. The idea is that MatrixBase is effectively an abstract
# class and that subclasses will implement the lower-level methods.
#
# RepMatrix is a subclass of MatrixBase that uses DomainMatrix as an
# internal representation and delegates lower-level methods to
# DomainMatrix. All of SymPy's standard explicit matrix classes subclass
# RepMatrix and so use DomainMatrix internally.
#
# A RepMatrix uses an internal DomainMatrix with the domain set to ZZ, QQ
# or EXRAW. The EXRAW domain is equivalent to the previous implementation
# of Matrix that used Expr for the elements. The ZZ and QQ domains are used
# when applicable just because they are compatible with the previous
# implementation but are much more efficient. Other domains such as QQ[x]
# are not used because they differ from Expr in some way (e.g. automatic
# expansion of powers and products).
#
_rep: DomainMatrix
def __eq__(self, other):
# Skip sympify for mutable matrices...
if not isinstance(other, RepMatrix):
try:
other = _sympify(other)
except SympifyError:
return NotImplemented
if not isinstance(other, RepMatrix):
return NotImplemented
return self._rep.unify_eq(other._rep)
@classmethod
def _unify_element_sympy(cls, rep, element):
domain = rep.domain
element = _sympify(element)
if domain != EXRAW:
# The domain can only be ZZ, QQ or EXRAW
if element.is_Integer:
new_domain = domain
elif element.is_Rational:
new_domain = QQ
else:
new_domain = EXRAW
# XXX: This converts the domain for all elements in the matrix
# which can be slow. This happens e.g. if __setitem__ changes one
# element to something that does not fit in the domain
if new_domain != domain:
rep = rep.convert_to(new_domain)
domain = new_domain
if domain != EXRAW:
element = new_domain.from_sympy(element)
if domain == EXRAW and not isinstance(element, Expr):
SymPyDeprecationWarning(
feature="non-Expr objects in a Matrix",
useinstead="list of lists, TableForm or some other data structure",
issue=21497,
deprecated_since_version="1.9"
).warn()
return rep, element
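# Example of the promotion performed above (a minimal sketch; assumes SymPy):
# assigning a Rational into an integer matrix converts the whole internal
# representation from ZZ to QQ.
#
#     >>> from sympy import Matrix, Rational
#     >>> M = Matrix([[1, 2], [3, 4]])   # internally a ZZ DomainMatrix
#     >>> M[0, 0] = Rational(1, 2)       # element does not fit in ZZ
#     >>> M._rep.domain
#     QQ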
@classmethod
def _dod_to_DomainMatrix(cls, rows, cols, dod, types):
if not all(issubclass(typ, Expr) for typ in types):
SymPyDeprecationWarning(
feature="non-Expr objects in a Matrix",
useinstead="list of lists, TableForm or some other data structure",
issue=21497,
deprecated_since_version="1.9"
).warn()
rep = DomainMatrix(dod, (rows, cols), EXRAW)
if all(issubclass(typ, Rational) for typ in types):
if all(issubclass(typ, Integer) for typ in types):
rep = rep.convert_to(ZZ)
else:
rep = rep.convert_to(QQ)
return rep
@classmethod
def _flat_list_to_DomainMatrix(cls, rows, cols, flat_list):
elements_dod = defaultdict(dict)
for n, element in enumerate(flat_list):
if element != 0:
i, j = divmod(n, cols)
elements_dod[i][j] = element
types = set(map(type, flat_list))
rep = cls._dod_to_DomainMatrix(rows, cols, elements_dod, types)
return rep
@classmethod
def _smat_to_DomainMatrix(cls, rows, cols, smat):
elements_dod = defaultdict(dict)
for (i, j), element in smat.items():
if element != 0:
elements_dod[i][j] = element
types = set(map(type, smat.values()))
rep = cls._dod_to_DomainMatrix(rows, cols, elements_dod, types)
return rep
def flat(self):
return self._rep.to_sympy().to_list_flat()
def _eval_tolist(self):
return self._rep.to_sympy().to_list()
def _eval_todok(self):
return self._rep.to_sympy().to_dok()
def _eval_values(self):
return list(self.todok().values())
def copy(self):
return self._fromrep(self._rep.copy())
@property
def kind(self) -> MatrixKind:
domain = self._rep.domain
element_kind: Kind
if domain in (ZZ, QQ):
element_kind = NumberKind
elif domain == EXRAW:
kinds = set(e.kind for e in self.values())
if len(kinds) == 1:
[element_kind] = kinds
else:
element_kind = UndefinedKind
else: # pragma: no cover
raise RuntimeError("Domain should only be ZZ, QQ or EXRAW")
return MatrixKind(element_kind)
def _eval_has(self, *patterns):
# if the matrix has any zeros, see if S.Zero
# has the pattern. If _smat is full length,
# the matrix has no zeros.
zhas = False
dok = self.todok()
if len(dok) != self.rows*self.cols:
zhas = S.Zero.has(*patterns)
return zhas or any(value.has(*patterns) for value in dok.values())
def _eval_is_Identity(self):
if not all(self[i, i] == 1 for i in range(self.rows)):
return False
return len(self.todok()) == self.rows
def _eval_is_symmetric(self, simpfunc):
diff = (self - self.T).applyfunc(simpfunc)
return len(diff.values()) == 0
def _eval_transpose(self):
"""Returns the transposed SparseMatrix of this SparseMatrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.T
Matrix([
[1, 3],
[2, 4]])
"""
return self._fromrep(self._rep.transpose())
def _eval_col_join(self, other):
return self._fromrep(self._rep.vstack(other._rep))
def _eval_row_join(self, other):
return self._fromrep(self._rep.hstack(other._rep))
def _eval_extract(self, rowsList, colsList):
return self._fromrep(self._rep.extract(rowsList, colsList))
def __getitem__(self, key):
return _getitem_RepMatrix(self, key)
@classmethod
def _eval_zeros(cls, rows, cols):
rep = DomainMatrix.zeros((rows, cols), ZZ)
return cls._fromrep(rep)
@classmethod
def _eval_eye(cls, rows, cols):
rep = DomainMatrix.eye((rows, cols), ZZ)
return cls._fromrep(rep)
def _eval_add(self, other):
return classof(self, other)._fromrep(self._rep + other._rep)
def _eval_matrix_mul(self, other):
return classof(self, other)._fromrep(self._rep * other._rep)
def _eval_matrix_mul_elementwise(self, other):
selfrep, otherrep = self._rep.unify(other._rep)
newrep = selfrep.mul_elementwise(otherrep)
return classof(self, other)._fromrep(newrep)
def _eval_scalar_mul(self, other):
rep, other = self._unify_element_sympy(self._rep, other)
return self._fromrep(rep.scalarmul(other))
def _eval_scalar_rmul(self, other):
rep, other = self._unify_element_sympy(self._rep, other)
return self._fromrep(rep.rscalarmul(other))
def _eval_Abs(self):
return self._fromrep(self._rep.applyfunc(abs))
def _eval_conjugate(self):
rep = self._rep
domain = rep.domain
if domain in (ZZ, QQ):
return self.copy()
else:
return self._fromrep(rep.applyfunc(lambda e: e.conjugate()))
def equals(self, other, failing_expression=False):
"""Applies ``equals`` to corresponding elements of the matrices,
trying to prove that the elements are equivalent, returning True
if they are, False if any pair is not, and None (or the first
failing expression if failing_expression is True) if it cannot
be decided if the expressions are equivalent or not. This is, in
general, an expensive operation.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x
>>> A = Matrix([x*(x - 1), 0])
>>> B = Matrix([x**2 - x, 0])
>>> A == B
False
>>> A.simplify() == B.simplify()
True
>>> A.equals(B)
True
>>> A.equals(2)
False
See Also
========
sympy.core.expr.Expr.equals
"""
if self.shape != getattr(other, 'shape', None):
return False
rv = True
for i in range(self.rows):
for j in range(self.cols):
ans = self[i, j].equals(other[i, j], failing_expression)
if ans is False:
return False
elif ans is not True and rv is True:
rv = ans
return rv
class MutableRepMatrix(RepMatrix):
"""Mutable matrix based on DomainMatrix as the internal representation"""
#
# MutableRepMatrix is a subclass of RepMatrix that adds/overrides methods
# to make the instances mutable. MutableRepMatrix is a superclass for both
# MutableDenseMatrix and MutableSparseMatrix.
#
is_zero = False
def __new__(cls, *args, **kwargs):
return cls._new(*args, **kwargs)
@classmethod
def _new(cls, *args, copy=True, **kwargs):
if copy is False:
# The input was rows, cols, [list].
# It should be used directly without creating a copy.
if len(args) != 3:
raise TypeError("'copy=False' requires a matrix be initialized as rows,cols,[list]")
rows, cols, flat_list = args
else:
rows, cols, flat_list = cls._handle_creation_inputs(*args, **kwargs)
flat_list = list(flat_list) # create a shallow copy
rep = cls._flat_list_to_DomainMatrix(rows, cols, flat_list)
return cls._fromrep(rep)
@classmethod
def _fromrep(cls, rep):
obj = super().__new__(cls)
obj.rows, obj.cols = rep.shape
obj._rep = rep
return obj
def copy(self):
return self._fromrep(self._rep.copy())
def as_mutable(self):
return self.copy()
def __setitem__(self, key, value):
"""
Examples
========
>>> from sympy import Matrix, I, zeros, ones
>>> m = Matrix(((1, 2+I), (3, 4)))
>>> m
Matrix([
[1, 2 + I],
[3, 4]])
>>> m[1, 0] = 9
>>> m
Matrix([
[1, 2 + I],
[9, 4]])
>>> m[1, 0] = [[0, 1]]
To replace row r you assign to position r*m where m
is the number of columns:
>>> M = zeros(4)
>>> m = M.cols
>>> M[3*m] = ones(1, m)*2; M
Matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[2, 2, 2, 2]])
And to replace column c you can assign to position c:
>>> M[2] = ones(m, 1)*4; M
Matrix([
[0, 0, 4, 0],
[0, 0, 4, 0],
[0, 0, 4, 0],
[2, 2, 4, 2]])
"""
rv = self._setitem(key, value)
if rv is not None:
i, j, value = rv
self._rep, value = self._unify_element_sympy(self._rep, value)
self._rep.rep.setitem(i, j, value)
def _eval_col_del(self, col):
self._rep = DomainMatrix.hstack(self._rep[:,:col], self._rep[:,col+1:])
self.cols -= 1
def _eval_row_del(self, row):
self._rep = DomainMatrix.vstack(self._rep[:row,:], self._rep[row+1:, :])
self.rows -= 1
def _eval_col_insert(self, col, other):
other = self._new(other)
return self.hstack(self[:,:col], other, self[:,col:])
def _eval_row_insert(self, row, other):
other = self._new(other)
return self.vstack(self[:row,:], other, self[row:,:])
def col_op(self, j, f):
"""In-place operation on col j using two-arg functor whose args are
interpreted as (self[i, j], i).
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.col_op(1, lambda v, i: v + 2*M[i, 0]); M
Matrix([
[1, 2, 0],
[0, 1, 0],
[0, 0, 1]])
See Also
========
col
row_op
"""
for i in range(self.rows):
self[i, j] = f(self[i, j], i)
def col_swap(self, i, j):
"""Swap the two given columns of the matrix in-place.
Examples
========
>>> from sympy.matrices import Matrix
>>> M = Matrix([[1, 0], [1, 0]])
>>> M
Matrix([
[1, 0],
[1, 0]])
>>> M.col_swap(0, 1)
>>> M
Matrix([
[0, 1],
[0, 1]])
See Also
========
col
row_swap
"""
for k in range(0, self.rows):
self[k, i], self[k, j] = self[k, j], self[k, i]
def row_op(self, i, f):
"""In-place operation on row ``i`` using two-arg functor whose args are
interpreted as ``(self[i, j], j)``.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.row_op(1, lambda v, j: v + 2*M[0, j]); M
Matrix([
[1, 0, 0],
[2, 1, 0],
[0, 0, 1]])
See Also
========
row
zip_row_op
col_op
"""
for j in range(self.cols):
self[i, j] = f(self[i, j], j)
def row_swap(self, i, j):
"""Swap the two given rows of the matrix in-place.
Examples
========
>>> from sympy.matrices import Matrix
>>> M = Matrix([[0, 1], [1, 0]])
>>> M
Matrix([
[0, 1],
[1, 0]])
>>> M.row_swap(0, 1)
>>> M
Matrix([
[1, 0],
[0, 1]])
See Also
========
row
col_swap
"""
for k in range(0, self.cols):
self[i, k], self[j, k] = self[j, k], self[i, k]
def zip_row_op(self, i, k, f):
"""In-place operation on row ``i`` using two-arg functor whose args are
interpreted as ``(self[i, j], self[k, j])``.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.zip_row_op(1, 0, lambda v, u: v + 2*u); M
Matrix([
[1, 0, 0],
[2, 1, 0],
[0, 0, 1]])
See Also
========
row
row_op
col_op
"""
for j in range(self.cols):
self[i, j] = f(self[i, j], self[k, j])
def copyin_list(self, key, value):
"""Copy in elements from a list.
Parameters
==========
key : slice
The section of this matrix to replace.
value : iterable
The iterable to copy values from.
Examples
========
>>> from sympy.matrices import eye
>>> I = eye(3)
>>> I[:2, 0] = [1, 2] # col
>>> I
Matrix([
[1, 0, 0],
[2, 1, 0],
[0, 0, 1]])
>>> I[1, :2] = [[3, 4]]
>>> I
Matrix([
[1, 0, 0],
[3, 4, 0],
[0, 0, 1]])
See Also
========
copyin_matrix
"""
if not is_sequence(value):
raise TypeError("`value` must be an ordered iterable, not %s." % type(value))
return self.copyin_matrix(key, type(self)(value))
def copyin_matrix(self, key, value):
"""Copy in values from a matrix into the given bounds.
Parameters
==========
key : slice
The section of this matrix to replace.
value : Matrix
The matrix to copy values from.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> M = Matrix([[0, 1], [2, 3], [4, 5]])
>>> I = eye(3)
>>> I[:3, :2] = M
>>> I
Matrix([
[0, 1, 0],
[2, 3, 0],
[4, 5, 1]])
>>> I[0, 1] = M
>>> I
Matrix([
[0, 0, 1],
[2, 2, 3],
[4, 4, 5]])
See Also
========
copyin_list
"""
rlo, rhi, clo, chi = self.key2bounds(key)
shape = value.shape
dr, dc = rhi - rlo, chi - clo
if shape != (dr, dc):
raise ShapeError(filldedent("The Matrix `value` doesn't have the "
"same dimensions "
"as the in sub-Matrix given by `key`."))
for i in range(value.rows):
for j in range(value.cols):
self[i + rlo, j + clo] = value[i, j]
def fill(self, value):
"""Fill self with the given value.
Notes
=====
Unless many values are going to be deleted (i.e. set to zero)
this will create a matrix that is slower than a dense matrix in
operations.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix.zeros(3); M
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> M.fill(1); M
Matrix([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
See Also
========
zeros
ones
"""
value = _sympify(value)
if not value:
self._rep = DomainMatrix.zeros(self.shape, EXRAW)
else:
elements_dod = {i: {j: value for j in range(self.cols)} for i in range(self.rows)}
self._rep = DomainMatrix(elements_dod, self.shape, EXRAW)
def _getitem_RepMatrix(self, key):
"""Return portion of self defined by key. If the key involves a slice
then a list will be returned (if key is a single slice) or a matrix
(if key was a tuple involving a slice).
Examples
========
>>> from sympy import Matrix, I
>>> m = Matrix([
... [1, 2 + I],
... [3, 4 ]])
If the key is a tuple that doesn't involve a slice then that element
is returned:
>>> m[1, 0]
3
When a tuple key involves a slice, a matrix is returned. Here, the
first column is selected (all rows, column 0):
>>> m[:, 0]
Matrix([
[1],
[3]])
If the slice is not a tuple then it selects from the underlying
list of elements that are arranged in row order and a list is
returned if a slice is involved:
>>> m[0]
1
>>> m[::2]
[1, 3]
"""
if isinstance(key, tuple):
i, j = key
try:
return self._rep.getitem_sympy(index_(i), index_(j))
except (TypeError, IndexError):
if (isinstance(i, Expr) and not i.is_number) or (isinstance(j, Expr) and not j.is_number):
if ((j < 0) is True) or ((j >= self.shape[1]) is True) or\
((i < 0) is True) or ((i >= self.shape[0]) is True):
raise ValueError("index out of boundary")
from sympy.matrices.expressions.matexpr import MatrixElement
return MatrixElement(self, i, j)
if isinstance(i, slice):
i = range(self.rows)[i]
elif is_sequence(i):
pass
else:
i = [i]
if isinstance(j, slice):
j = range(self.cols)[j]
elif is_sequence(j):
pass
else:
j = [j]
return self.extract(i, j)
else:
# Index/slice like a flattened list
rows, cols = self.shape
# Raise the appropriate exception:
if not rows * cols:
return [][key]
rep = self._rep.rep
domain = rep.domain
is_slice = isinstance(key, slice)
if is_slice:
values = [rep.getitem(*divmod(n, cols)) for n in range(rows * cols)[key]]
else:
values = [rep.getitem(*divmod(index_(key), cols))]
if domain != EXRAW:
to_sympy = domain.to_sympy
values = [to_sympy(val) for val in values]
if is_slice:
return values
else:
return values[0]
|
b73239e353fc416fefd5e54901780e968fca2b55e9eaed6079ac17fed7650716 | from sympy.core.containers import Dict
from sympy.core.symbol import Dummy
from sympy.utilities.iterables import is_sequence
from sympy.utilities.misc import as_int, filldedent
from .sparse import MutableSparseMatrix as SparseMatrix
def _doktocsr(dok):
"""Converts a sparse matrix to Compressed Sparse Row (CSR) format.
Returns
=======
A : contains non-zero elements sorted by key (row, column)
JA : JA[i] is the column corresponding to A[i]
IA : IA[i] contains the index in A for the first non-zero element
of row[i]. Thus IA[i+1] - IA[i] gives the number of non-zero
elements in row[i]. The length of IA is always 1 more than the
number of rows in the matrix.
Examples
========
>>> from sympy.matrices.sparsetools import _doktocsr
>>> from sympy import SparseMatrix, diag
>>> m = SparseMatrix(diag(1, 2, 3))
>>> m[2, 0] = -1
>>> _doktocsr(m)
[[1, 2, -1, 3], [0, 1, 0, 2], [0, 1, 2, 4], [3, 3]]
"""
row, JA, A = [list(i) for i in zip(*dok.row_list())]
IA = [0]*((row[0] if row else 0) + 1)
for i, r in enumerate(row):
IA.extend([i]*(r - row[i - 1])) # if i = 0 nothing is extended
IA.extend([len(A)]*(dok.rows - len(IA) + 1))
shape = [dok.rows, dok.cols]
return [A, JA, IA, shape]
def _csrtodok(csr):
"""Converts a CSR representation to DOK representation.
Examples
========
>>> from sympy.matrices.sparsetools import _csrtodok
>>> _csrtodok([[5, 8, 3, 6], [0, 1, 2, 1], [0, 0, 2, 3, 4], [4, 3]])
Matrix([
[0, 0, 0],
[5, 8, 0],
[0, 0, 3],
[0, 6, 0]])
"""
smat = {}
A, JA, IA, shape = csr
for i in range(len(IA) - 1):
indices = slice(IA[i], IA[i + 1])
for l, m in zip(A[indices], JA[indices]):
smat[i, m] = l
return SparseMatrix(*shape, smat)
def banded(*args, **kwargs):
"""Returns a SparseMatrix from the given dictionary describing
the diagonals of the matrix. The keys are positive for upper
diagonals and negative for those below the main diagonal. The
values may be:
* expressions or single-argument functions,
* lists or tuples of values,
* matrices
Unless dimensions are given, the size of the returned matrix will
be large enough to contain all of the values provided.
kwargs
======
rows : rows of the resulting matrix; computed if
not given.
cols : columns of the resulting matrix; computed if
not given.
Examples
========
>>> from sympy import banded, ones, Matrix
>>> from sympy.abc import x
If explicit values are given in tuples,
the matrix will autosize to contain all values, otherwise
a single value is filled onto the entire diagonal:
>>> banded({1: (1, 2, 3), -1: (4, 5, 6), 0: x})
Matrix([
[x, 1, 0, 0],
[4, x, 2, 0],
[0, 5, x, 3],
[0, 0, 6, x]])
A function accepting a single argument can be used to fill the
diagonal as a function of diagonal index (which starts at 0).
The size (or shape) of the matrix must be given to obtain more
than a 1x1 matrix:
>>> s = lambda d: (1 + d)**2
>>> banded(5, {0: s, 2: s, -2: 2})
Matrix([
[1, 0, 1, 0, 0],
[0, 4, 0, 4, 0],
[2, 0, 9, 0, 9],
[0, 2, 0, 16, 0],
[0, 0, 2, 0, 25]])
The diagonal of matrices placed on a diagonal will coincide
with the indicated diagonal:
>>> vert = Matrix([1, 2, 3])
>>> banded({0: vert}, cols=3)
Matrix([
[1, 0, 0],
[2, 1, 0],
[3, 2, 1],
[0, 3, 2],
[0, 0, 3]])
>>> banded(4, {0: ones(2)})
Matrix([
[1, 1, 0, 0],
[1, 1, 0, 0],
[0, 0, 1, 1],
[0, 0, 1, 1]])
Errors are raised if the designated size will not hold
all values an integral number of times. Here, the rows
are designated as odd (but an even number is required to
hold the off-diagonal 2x2 ones):
>>> banded({0: 2, 1: ones(2)}, rows=5)
Traceback (most recent call last):
...
ValueError:
sequence does not fit an integral number of times in the matrix
And here, an even number of rows is given...but the square
matrix has an even number of columns, too. As we saw
in the previous example, an odd number is required:
>>> banded(4, {0: 2, 1: ones(2)}) # trying to make 4x4 and cols must be odd
Traceback (most recent call last):
...
ValueError:
sequence does not fit an integral number of times in the matrix
A way around having to count rows is to enclose matrix elements
in a tuple and indicate the desired number of them to the right:
>>> banded({0: 2, 2: (ones(2),)*3})
Matrix([
[2, 0, 1, 1, 0, 0, 0, 0],
[0, 2, 1, 1, 0, 0, 0, 0],
[0, 0, 2, 0, 1, 1, 0, 0],
[0, 0, 0, 2, 1, 1, 0, 0],
[0, 0, 0, 0, 2, 0, 1, 1],
[0, 0, 0, 0, 0, 2, 1, 1]])
An error will be raised if more than one value
is written to a given entry. Here, the ones overlap
with the main diagonal if they are placed on the
first diagonal:
>>> banded({0: (2,)*5, 1: (ones(2),)*3})
Traceback (most recent call last):
...
ValueError: collision at (1, 1)
By placing a 0 at the bottom left of the 2x2 matrix of
ones, the collision is avoided:
>>> u2 = Matrix([
... [1, 1],
... [0, 1]])
>>> banded({0: [2]*5, 1: [u2]*3})
Matrix([
[2, 1, 1, 0, 0, 0, 0],
[0, 2, 1, 0, 0, 0, 0],
[0, 0, 2, 1, 1, 0, 0],
[0, 0, 0, 2, 1, 0, 0],
[0, 0, 0, 0, 2, 1, 1],
[0, 0, 0, 0, 0, 0, 1]])
"""
try:
if len(args) not in (1, 2, 3):
raise TypeError
if not isinstance(args[-1], (dict, Dict)):
raise TypeError
if len(args) == 1:
rows = kwargs.get('rows', None)
cols = kwargs.get('cols', None)
if rows is not None:
rows = as_int(rows)
if cols is not None:
cols = as_int(cols)
elif len(args) == 2:
rows = cols = as_int(args[0])
else:
rows, cols = map(as_int, args[:2])
# fails with ValueError if any keys are not ints
_ = all(as_int(k) for k in args[-1])
except (ValueError, TypeError):
raise TypeError(filldedent(
'''unrecognized input to banded:
expecting [[row,] col,] {int: value}'''))
def rc(d):
# return row,col coord of diagonal start
r = -d if d < 0 else 0
c = 0 if r else d
return r, c
smat = {}
undone = []
tba = Dummy()
# first handle objects with size
for d, v in args[-1].items():
r, c = rc(d)
# note: only list and tuple are recognized since this
# will allow other Basic objects like Tuple
# into the matrix if so desired
if isinstance(v, (list, tuple)):
extra = 0
for i, vi in enumerate(v):
i += extra
if is_sequence(vi):
vi = SparseMatrix(vi)
smat[r + i, c + i] = vi
extra += min(vi.shape) - 1
else:
smat[r + i, c + i] = vi
elif is_sequence(v):
v = SparseMatrix(v)
rv, cv = v.shape
if rows and cols:
nr, xr = divmod(rows - r, rv)
nc, xc = divmod(cols - c, cv)
x = xr or xc
do = min(nr, nc)
elif rows:
do, x = divmod(rows - r, rv)
elif cols:
do, x = divmod(cols - c, cv)
else:
do = 1
x = 0
if x:
raise ValueError(filldedent('''
sequence does not fit an integral number of times
in the matrix'''))
j = min(v.shape)
for i in range(do):
smat[r, c] = v
r += j
c += j
elif v:
smat[r, c] = tba
undone.append((d, v))
s = SparseMatrix(None, smat) # to expand matrices
smat = s.todok()
# check for dim errors here
if rows is not None and rows < s.rows:
raise ValueError('Designated rows %s < needed %s' % (rows, s.rows))
if cols is not None and cols < s.cols:
raise ValueError('Designated cols %s < needed %s' % (cols, s.cols))
if rows is cols is None:
rows = s.rows
cols = s.cols
elif rows is not None and cols is None:
cols = max(rows, s.cols)
elif cols is not None and rows is None:
rows = max(cols, s.rows)
def update(i, j, v):
# update smat and make sure there are
# no collisions
if v:
if (i, j) in smat and smat[i, j] not in (tba, v):
raise ValueError('collision at %s' % ((i, j),))
smat[i, j] = v
if undone:
for d, vi in undone:
r, c = rc(d)
v = vi if callable(vi) else lambda _: vi
i = 0
while r + i < rows and c + i < cols:
update(r + i, c + i, v(i))
i += 1
return SparseMatrix(rows, cols, smat)
|
58305128707cc96f0da889ec12889a58ba3e20a246c10c097013d86a225248ae | from sympy.core import Basic, Integer
import operator
class OmegaPower(Basic):
"""
    Represents ordinal exponential and multiplication terms, one of the
building blocks of the :class:`Ordinal` class.
In ``OmegaPower(a, b)``, ``a`` represents exponent and ``b`` represents multiplicity.
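    For example, the term ``w**2 * 3`` can be built and displayed by
    wrapping it in an :class:`Ordinal`:
    >>> from sympy.sets.ordinals import OmegaPower, Ordinal
    >>> Ordinal(OmegaPower(2, 3))
    w**2*3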
"""
def __new__(cls, a, b):
if isinstance(b, int):
b = Integer(b)
if not isinstance(b, Integer) or b <= 0:
raise TypeError("multiplicity must be a positive integer")
if not isinstance(a, Ordinal):
a = Ordinal.convert(a)
return Basic.__new__(cls, a, b)
@property
def exp(self):
return self.args[0]
@property
def mult(self):
return self.args[1]
def _compare_term(self, other, op):
if self.exp == other.exp:
return op(self.mult, other.mult)
else:
return op(self.exp, other.exp)
def __eq__(self, other):
if not isinstance(other, OmegaPower):
try:
other = OmegaPower(0, other)
except TypeError:
return NotImplemented
return self.args == other.args
def __hash__(self):
return Basic.__hash__(self)
def __lt__(self, other):
if not isinstance(other, OmegaPower):
try:
other = OmegaPower(0, other)
except TypeError:
return NotImplemented
return self._compare_term(other, operator.lt)
class Ordinal(Basic):
"""
Represents ordinals in Cantor normal form.
Internally, this class is just a list of instances of OmegaPower.
Examples
========
>>> from sympy.sets import Ordinal, OmegaPower
>>> from sympy.sets.ordinals import omega
>>> w = omega
>>> w.is_limit_ordinal
True
>>> Ordinal(OmegaPower(w + 1, 1), OmegaPower(3, 2))
w**(w + 1) + w**3*2
>>> 3 + w
w
>>> (w + 1) * w
w**2
References
==========
.. [1] https://en.wikipedia.org/wiki/Ordinal_arithmetic
"""
def __new__(cls, *terms):
obj = super().__new__(cls, *terms)
powers = [i.exp for i in obj.args]
if not all(powers[i] >= powers[i+1] for i in range(len(powers) - 1)):
raise ValueError("powers must be in decreasing order")
return obj
@property
def terms(self):
return self.args
@property
def leading_term(self):
if self == ord0:
raise ValueError("ordinal zero has no leading term")
return self.terms[0]
@property
def trailing_term(self):
if self == ord0:
raise ValueError("ordinal zero has no trailing term")
return self.terms[-1]
@property
def is_successor_ordinal(self):
try:
return self.trailing_term.exp == ord0
except ValueError:
return False
@property
def is_limit_ordinal(self):
try:
return not self.trailing_term.exp == ord0
except ValueError:
return False
@property
def degree(self):
return self.leading_term.exp
@classmethod
def convert(cls, integer_value):
if integer_value == 0:
return ord0
return Ordinal(OmegaPower(0, integer_value))
def __eq__(self, other):
if not isinstance(other, Ordinal):
try:
other = Ordinal.convert(other)
except TypeError:
return NotImplemented
return self.terms == other.terms
def __hash__(self):
return hash(self.args)
def __lt__(self, other):
if not isinstance(other, Ordinal):
try:
other = Ordinal.convert(other)
except TypeError:
return NotImplemented
for term_self, term_other in zip(self.terms, other.terms):
if term_self != term_other:
return term_self < term_other
return len(self.terms) < len(other.terms)
def __le__(self, other):
return (self == other or self < other)
def __gt__(self, other):
return not self <= other
def __ge__(self, other):
return not self < other
def __str__(self):
net_str = ""
plus_count = 0
if self == ord0:
return 'ord0'
for i in self.terms:
if plus_count:
net_str += " + "
if i.exp == ord0:
net_str += str(i.mult)
elif i.exp == 1:
net_str += 'w'
elif len(i.exp.terms) > 1 or i.exp.is_limit_ordinal:
net_str += 'w**(%s)'%i.exp
else:
net_str += 'w**%s'%i.exp
if not i.mult == 1 and not i.exp == ord0:
net_str += '*%s'%i.mult
plus_count += 1
        return net_str
__repr__ = __str__
def __add__(self, other):
if not isinstance(other, Ordinal):
try:
other = Ordinal.convert(other)
except TypeError:
return NotImplemented
if other == ord0:
return self
a_terms = list(self.terms)
b_terms = list(other.terms)
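        # terms of self whose exponent is smaller than other's leading
        # exponent are absorbed; a term with equal exponent is merged by
        # adding the multiplicities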
r = len(a_terms) - 1
b_exp = other.degree
while r >= 0 and a_terms[r].exp < b_exp:
r -= 1
if r < 0:
terms = b_terms
elif a_terms[r].exp == b_exp:
sum_term = OmegaPower(b_exp, a_terms[r].mult + other.leading_term.mult)
terms = a_terms[:r] + [sum_term] + b_terms[1:]
else:
terms = a_terms[:r+1] + b_terms
return Ordinal(*terms)
def __radd__(self, other):
if not isinstance(other, Ordinal):
try:
other = Ordinal.convert(other)
except TypeError:
return NotImplemented
return other + self
def __mul__(self, other):
if not isinstance(other, Ordinal):
try:
other = Ordinal.convert(other)
except TypeError:
return NotImplemented
if ord0 in (self, other):
return ord0
a_exp = self.degree
a_mult = self.leading_term.mult
summation = []
if other.is_limit_ordinal:
for arg in other.terms:
summation.append(OmegaPower(a_exp + arg.exp, arg.mult))
else:
for arg in other.terms[:-1]:
summation.append(OmegaPower(a_exp + arg.exp, arg.mult))
b_mult = other.trailing_term.mult
summation.append(OmegaPower(a_exp, a_mult*b_mult))
summation += list(self.terms[1:])
return Ordinal(*summation)
def __rmul__(self, other):
if not isinstance(other, Ordinal):
try:
other = Ordinal.convert(other)
except TypeError:
return NotImplemented
return other * self
def __pow__(self, other):
if not self == omega:
return NotImplemented
return Ordinal(OmegaPower(other, 1))
class OrdinalZero(Ordinal):
"""The ordinal zero.
OrdinalZero can be imported as ``ord0``.
"""
pass
class OrdinalOmega(Ordinal):
"""The ordinal omega which forms the base of all ordinals in cantor normal form.
OrdinalOmega can be imported as ``omega``.
Examples
========
>>> from sympy.sets.ordinals import omega
>>> omega + omega
w*2
"""
def __new__(cls):
return Ordinal.__new__(cls)
@property
def terms(self):
return (OmegaPower(1, 1),)
ord0 = OrdinalZero()
omega = OrdinalOmega()
|
def8c29b8bf67ac6e9b153d8d113b04d73044f19ecffae9b1b814220567dea83 | from .sets import (Set, Interval, Union, FiniteSet, ProductSet,
Intersection, imageset, Complement, SymmetricDifference,
DisjointUnion)
from .fancysets import ImageSet, Range, ComplexRegion
from .contains import Contains
from .conditionset import ConditionSet
from .ordinals import Ordinal, OmegaPower, ord0
from .powerset import PowerSet
from ..core.singleton import S
from .handlers.comparison import _eval_is_eq # noqa:F401
Complexes = S.Complexes
EmptySet = S.EmptySet
Integers = S.Integers
Naturals = S.Naturals
Naturals0 = S.Naturals0
Rationals = S.Rationals
Reals = S.Reals
UniversalSet = S.UniversalSet
__all__ = [
'Set', 'Interval', 'Union', 'EmptySet', 'FiniteSet', 'ProductSet',
'Intersection', 'imageset', 'Complement', 'SymmetricDifference', 'DisjointUnion',
'ImageSet', 'Range', 'ComplexRegion', 'Reals',
'Contains',
'ConditionSet',
'Ordinal', 'OmegaPower', 'ord0',
'PowerSet',
'Reals', 'Naturals', 'Naturals0', 'UniversalSet', 'Integers', 'Rationals',
]
|
8758b340b7be6206b03eed03bbf962c517835313d637bccb27b24b67717477c2 | from functools import reduce
from itertools import product
from sympy.core.basic import Basic
from sympy.core.containers import Tuple
from sympy.core.expr import Expr
from sympy.core.function import Lambda
from sympy.core.logic import fuzzy_not, fuzzy_or, fuzzy_and
from sympy.core.mod import Mod
from sympy.core.numbers import oo, igcd, Rational
from sympy.core.relational import Eq, is_eq
from sympy.core.singleton import Singleton, S
from sympy.core.symbol import Dummy, symbols, Symbol
from sympy.core.sympify import _sympify, sympify, converter
from sympy.logic.boolalg import And, Or
from sympy.sets.sets import (Set, Interval, Union, FiniteSet,
ProductSet)
from sympy.utilities.misc import filldedent
class Rationals(Set, metaclass=Singleton):
"""
Represents the rational numbers. This set is also available as
the singleton ``S.Rationals``.
Examples
========
>>> from sympy import S
>>> S.Half in S.Rationals
True
>>> iterable = iter(S.Rationals)
>>> [next(iterable) for i in range(12)]
[0, 1, -1, 1/2, 2, -1/2, -2, 1/3, 3, -1/3, -3, 2/3]
"""
is_iterable = True
_inf = S.NegativeInfinity
_sup = S.Infinity
is_empty = False
is_finite_set = False
def _contains(self, other):
if not isinstance(other, Expr):
return False
return other.is_rational
def __iter__(self):
yield S.Zero
yield S.One
yield S.NegativeOne
d = 2
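        # for each denominator d, yield the fractions n/d and d/n in
        # lowest terms together with their negatives, then move to d + 1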
while True:
for n in range(d):
if igcd(n, d) == 1:
yield Rational(n, d)
yield Rational(d, n)
yield Rational(-n, d)
yield Rational(-d, n)
d += 1
@property
def _boundary(self):
return S.Reals
class Naturals(Set, metaclass=Singleton):
"""
Represents the natural numbers (or counting numbers) which are all
positive integers starting from 1. This set is also available as
the singleton ``S.Naturals``.
Examples
========
>>> from sympy import S, Interval, pprint
>>> 5 in S.Naturals
True
>>> iterable = iter(S.Naturals)
>>> next(iterable)
1
>>> next(iterable)
2
>>> next(iterable)
3
>>> pprint(S.Naturals.intersect(Interval(0, 10)))
{1, 2, ..., 10}
See Also
========
Naturals0 : non-negative integers (i.e. includes 0, too)
Integers : also includes negative integers
"""
is_iterable = True
_inf = S.One
_sup = S.Infinity
is_empty = False
is_finite_set = False
def _contains(self, other):
if not isinstance(other, Expr):
return False
elif other.is_positive and other.is_integer:
return True
elif other.is_integer is False or other.is_positive is False:
return False
def _eval_is_subset(self, other):
return Range(1, oo).is_subset(other)
def _eval_is_superset(self, other):
return Range(1, oo).is_superset(other)
def __iter__(self):
i = self._inf
while True:
yield i
i = i + 1
@property
def _boundary(self):
return self
def as_relational(self, x):
from sympy.functions.elementary.integers import floor
return And(Eq(floor(x), x), x >= self.inf, x < oo)
class Naturals0(Naturals):
"""Represents the whole numbers which are all the non-negative integers,
inclusive of zero.
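    Examples
    ========
    Membership can be checked as for other SymPy sets:
    >>> from sympy import S
    >>> 0 in S.Naturals0
    True
    >>> -1 in S.Naturals0
    False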
See Also
========
Naturals : positive integers; does not include 0
Integers : also includes the negative integers
"""
_inf = S.Zero
def _contains(self, other):
if not isinstance(other, Expr):
return S.false
elif other.is_integer and other.is_nonnegative:
return S.true
elif other.is_integer is False or other.is_nonnegative is False:
return S.false
def _eval_is_subset(self, other):
return Range(oo).is_subset(other)
def _eval_is_superset(self, other):
return Range(oo).is_superset(other)
class Integers(Set, metaclass=Singleton):
"""
Represents all integers: positive, negative and zero. This set is also
available as the singleton ``S.Integers``.
Examples
========
>>> from sympy import S, Interval, pprint
    >>> 5 in S.Integers
True
>>> iterable = iter(S.Integers)
>>> next(iterable)
0
>>> next(iterable)
1
>>> next(iterable)
-1
>>> next(iterable)
2
>>> pprint(S.Integers.intersect(Interval(-4, 4)))
{-4, -3, ..., 4}
See Also
========
Naturals0 : non-negative integers
Integers : positive and negative integers and zero
"""
is_iterable = True
is_empty = False
is_finite_set = False
def _contains(self, other):
if not isinstance(other, Expr):
return S.false
return other.is_integer
def __iter__(self):
yield S.Zero
i = S.One
while True:
yield i
yield -i
i = i + 1
@property
def _inf(self):
return S.NegativeInfinity
@property
def _sup(self):
return S.Infinity
@property
def _boundary(self):
return self
def as_relational(self, x):
from sympy.functions.elementary.integers import floor
return And(Eq(floor(x), x), -oo < x, x < oo)
def _eval_is_subset(self, other):
return Range(-oo, oo).is_subset(other)
def _eval_is_superset(self, other):
return Range(-oo, oo).is_superset(other)
class Reals(Interval, metaclass=Singleton):
"""
Represents all real numbers
from negative infinity to positive infinity,
including all integer, rational and irrational numbers.
This set is also available as the singleton ``S.Reals``.
Examples
========
>>> from sympy import S, Rational, pi, I
>>> 5 in S.Reals
True
>>> Rational(-1, 2) in S.Reals
True
>>> pi in S.Reals
True
>>> 3*I in S.Reals
False
>>> S.Reals.contains(pi)
True
See Also
========
ComplexRegion
"""
@property
def start(self):
return S.NegativeInfinity
@property
def end(self):
return S.Infinity
@property
def left_open(self):
return True
@property
def right_open(self):
return True
def __eq__(self, other):
return other == Interval(S.NegativeInfinity, S.Infinity)
def __hash__(self):
return hash(Interval(S.NegativeInfinity, S.Infinity))
class ImageSet(Set):
"""
Image of a set under a mathematical function. The transformation
must be given as a Lambda function which has as many arguments
as the elements of the set upon which it operates, e.g. 1 argument
when acting on the set of integers or 2 arguments when acting on
a complex region.
This function is not normally called directly, but is called
from ``imageset``.
Examples
========
>>> from sympy import Symbol, S, pi, Dummy, Lambda
>>> from sympy.sets.sets import FiniteSet, Interval
>>> from sympy.sets.fancysets import ImageSet
>>> x = Symbol('x')
>>> N = S.Naturals
>>> squares = ImageSet(Lambda(x, x**2), N) # {x**2 for x in N}
>>> 4 in squares
True
>>> 5 in squares
False
>>> FiniteSet(0, 1, 2, 3, 4, 5, 6, 7, 9, 10).intersect(squares)
{1, 4, 9}
>>> square_iterable = iter(squares)
>>> for i in range(4):
... next(square_iterable)
1
4
9
16
    To get the value of the transformation for a particular `x`, e.g.
    `x` = 2 or 1/2, call the Lambda directly (check first whether the
    value is in ``base_set``):
>>> squares.lamda(2)
4
>>> squares.lamda(S(1)/2)
1/4
>>> n = Dummy('n')
>>> solutions = ImageSet(Lambda(n, n*pi), S.Integers) # solutions of sin(x) = 0
>>> dom = Interval(-1, 1)
>>> dom.intersect(solutions)
{0}
See Also
========
sympy.sets.sets.imageset
"""
def __new__(cls, flambda, *sets):
if not isinstance(flambda, Lambda):
raise ValueError('First argument must be a Lambda')
signature = flambda.signature
if len(signature) != len(sets):
raise ValueError('Incompatible signature')
sets = [_sympify(s) for s in sets]
if not all(isinstance(s, Set) for s in sets):
raise TypeError("Set arguments to ImageSet should of type Set")
if not all(cls._check_sig(sg, st) for sg, st in zip(signature, sets)):
raise ValueError("Signature %s does not match sets %s" % (signature, sets))
if flambda is S.IdentityFunction and len(sets) == 1:
return sets[0]
if not set(flambda.variables) & flambda.expr.free_symbols:
is_empty = fuzzy_or(s.is_empty for s in sets)
if is_empty == True:
return S.EmptySet
elif is_empty == False:
return FiniteSet(flambda.expr)
return Basic.__new__(cls, flambda, *sets)
lamda = property(lambda self: self.args[0])
base_sets = property(lambda self: self.args[1:])
@property
def base_set(self):
# XXX: Maybe deprecate this? It is poorly defined in handling
# the multivariate case...
sets = self.base_sets
if len(sets) == 1:
return sets[0]
else:
return ProductSet(*sets).flatten()
@property
def base_pset(self):
return ProductSet(*self.base_sets)
@classmethod
def _check_sig(cls, sig_i, set_i):
if sig_i.is_symbol:
return True
elif isinstance(set_i, ProductSet):
sets = set_i.sets
if len(sig_i) != len(sets):
return False
# Recurse through the signature for nested tuples:
return all(cls._check_sig(ts, ps) for ts, ps in zip(sig_i, sets))
else:
# XXX: Need a better way of checking whether a set is a set of
# Tuples or not. For example a FiniteSet can contain Tuples
# but so can an ImageSet or a ConditionSet. Others like
# Integers, Reals etc can not contain Tuples. We could just
# list the possibilities here... Current code for e.g.
# _contains probably only works for ProductSet.
return True # Give the benefit of the doubt
def __iter__(self):
already_seen = set()
for i in self.base_pset:
val = self.lamda(*i)
if val in already_seen:
continue
else:
already_seen.add(val)
yield val
def _is_multivariate(self):
return len(self.lamda.variables) > 1
def _contains(self, other):
from sympy.solvers.solveset import _solveset_multi
def get_symsetmap(signature, base_sets):
'''Attempt to get a map of symbols to base_sets'''
queue = list(zip(signature, base_sets))
symsetmap = {}
for sig, base_set in queue:
if sig.is_symbol:
symsetmap[sig] = base_set
elif base_set.is_ProductSet:
sets = base_set.sets
if len(sig) != len(sets):
raise ValueError("Incompatible signature")
# Recurse
queue.extend(zip(sig, sets))
else:
# If we get here then we have something like sig = (x, y) and
# base_set = {(1, 2), (3, 4)}. For now we give up.
return None
return symsetmap
def get_equations(expr, candidate):
'''Find the equations relating symbols in expr and candidate.'''
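            # e.g. expr = (x, x + y) and candidate = (1, 3) yields
            # Eq(x, 1) and Eq(x + y, 3)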
queue = [(expr, candidate)]
for e, c in queue:
if not isinstance(e, Tuple):
yield Eq(e, c)
elif not isinstance(c, Tuple) or len(e) != len(c):
yield False
return
else:
queue.extend(zip(e, c))
# Get the basic objects together:
other = _sympify(other)
expr = self.lamda.expr
sig = self.lamda.signature
variables = self.lamda.variables
base_sets = self.base_sets
# Use dummy symbols for ImageSet parameters so they don't match
# anything in other
rep = {v: Dummy(v.name) for v in variables}
variables = [v.subs(rep) for v in variables]
sig = sig.subs(rep)
expr = expr.subs(rep)
# Map the parts of other to those in the Lambda expr
equations = []
for eq in get_equations(expr, other):
# Unsatisfiable equation?
if eq is False:
return False
equations.append(eq)
# Map the symbols in the signature to the corresponding domains
symsetmap = get_symsetmap(sig, base_sets)
if symsetmap is None:
# Can't factor the base sets to a ProductSet
return None
# Which of the variables in the Lambda signature need to be solved for?
symss = (eq.free_symbols for eq in equations)
variables = set(variables) & reduce(set.union, symss, set())
# Use internal multivariate solveset
variables = tuple(variables)
base_sets = [symsetmap[v] for v in variables]
solnset = _solveset_multi(equations, variables, base_sets)
if solnset is None:
return None
return fuzzy_not(solnset.is_empty)
@property
def is_iterable(self):
return all(s.is_iterable for s in self.base_sets)
def doit(self, **kwargs):
from sympy.sets.setexpr import SetExpr
f = self.lamda
sig = f.signature
if len(sig) == 1 and sig[0].is_symbol and isinstance(f.expr, Expr):
base_set = self.base_sets[0]
return SetExpr(base_set)._eval_func(f).set
if all(s.is_FiniteSet for s in self.base_sets):
return FiniteSet(*(f(*a) for a in product(*self.base_sets)))
return self
class Range(Set):
"""
Represents a range of integers. Can be called as ``Range(stop)``,
``Range(start, stop)``, or ``Range(start, stop, step)``; when ``step`` is
not given it defaults to 1.
``Range(stop)`` is the same as ``Range(0, stop, 1)`` and the stop value
(just as for Python ranges) is not included in the Range values.
>>> from sympy import Range
>>> list(Range(3))
[0, 1, 2]
The step can also be negative:
>>> list(Range(10, 0, -2))
[10, 8, 6, 4, 2]
The stop value is made canonical so equivalent ranges always
have the same args:
>>> Range(0, 10, 3)
Range(0, 12, 3)
Infinite ranges are allowed. ``oo`` and ``-oo`` are never included in the
set (``Range`` is always a subset of ``Integers``). If the starting point
is infinite, then the final value is ``stop - step``. To iterate such a
range, it needs to be reversed:
>>> from sympy import oo
>>> r = Range(-oo, 1)
>>> r[-1]
0
>>> next(iter(r))
Traceback (most recent call last):
...
TypeError: Cannot iterate over Range with infinite start
>>> next(iter(r.reversed))
0
Although ``Range`` is a :class:`Set` (and supports the normal set
operations) it maintains the order of the elements and can
be used in contexts where ``range`` would be used.
>>> from sympy import Interval
>>> Range(0, 10, 2).intersect(Interval(3, 7))
Range(4, 8, 2)
>>> list(_)
[4, 6]
Although slicing of a Range will always return a Range -- possibly
empty -- an empty set will be returned from any intersection that
is empty:
>>> Range(3)[:0]
Range(0, 0, 1)
>>> Range(3).intersect(Interval(4, oo))
EmptySet
>>> Range(3).intersect(Range(4, oo))
EmptySet
Range will accept symbolic arguments but has very limited support
for doing anything other than displaying the Range:
>>> from sympy import Symbol, pprint
>>> from sympy.abc import i, j, k
>>> Range(i, j, k).start
i
>>> Range(i, j, k).inf
Traceback (most recent call last):
...
ValueError: invalid method for symbolic range
Better success will be had when using integer symbols:
>>> n = Symbol('n', integer=True)
>>> r = Range(n, n + 20, 3)
>>> r.inf
n
>>> pprint(r)
{n, n + 3, ..., n + 18}
"""
def __new__(cls, *args):
from sympy.functions.elementary.integers import ceiling
if len(args) == 1:
if isinstance(args[0], range):
raise TypeError(
'use sympify(%s) to convert range to Range' % args[0])
# expand range
slc = slice(*args)
if slc.step == 0:
raise ValueError("step cannot be 0")
start, stop, step = slc.start or 0, slc.stop, slc.step or 1
try:
ok = []
for w in (start, stop, step):
w = sympify(w)
if w in [S.NegativeInfinity, S.Infinity] or (
w.has(Symbol) and w.is_integer != False):
ok.append(w)
elif not w.is_Integer:
if w.is_infinite:
raise ValueError('infinite symbols not allowed')
raise ValueError
else:
ok.append(w)
except ValueError:
raise ValueError(filldedent('''
Finite arguments to Range must be integers; `imageset` can define
other cases, e.g. use `imageset(i, i/10, Range(3))` to give
[0, 1/10, 1/5].'''))
start, stop, step = ok
null = False
if any(i.has(Symbol) for i in (start, stop, step)):
dif = stop - start
n = dif/step
if n.is_Rational:
from sympy.functions.elementary.integers import floor
if dif == 0:
null = True
else: # (x, x + 5, 2) or (x, 3*x, x)
n = floor(n)
end = start + n*step
if dif.is_Rational: # (x, x + 5, 2)
if (end - stop).is_negative:
end += step
else: # (x, 3*x, x)
if (end/stop - 1).is_negative:
end += step
elif n.is_extended_negative:
null = True
else:
end = stop # other methods like sup and reversed must fail
elif start.is_infinite:
span = step*(stop - start)
if span is S.NaN or span <= 0:
null = True
elif step.is_Integer and stop.is_infinite and abs(step) != 1:
raise ValueError(filldedent('''
Step size must be %s in this case.''' % (1 if step > 0 else -1)))
else:
end = stop
else:
oostep = step.is_infinite
if oostep:
step = S.One if step > 0 else S.NegativeOne
n = ceiling((stop - start)/step)
if n <= 0:
null = True
elif oostep:
step = S.One # make it canonical
end = start + step
else:
end = start + n*step
if null:
start = end = S.Zero
step = S.One
return Basic.__new__(cls, start, end, step)
start = property(lambda self: self.args[0])
stop = property(lambda self: self.args[1])
step = property(lambda self: self.args[2])
@property
def reversed(self):
"""Return an equivalent Range in the opposite order.
Examples
========
>>> from sympy import Range
>>> Range(10).reversed
Range(9, -1, -1)
"""
if self.has(Symbol):
n = (self.stop - self.start)/self.step
if not n.is_extended_positive or not all(
i.is_integer or i.is_infinite for i in self.args):
raise ValueError('invalid method for symbolic range')
if self.start == self.stop:
return self
return self.func(
self.stop - self.step, self.start - self.step, -self.step)
def _contains(self, other):
if self.start == self.stop:
return S.false
if other.is_infinite:
return S.false
if not other.is_integer:
return other.is_integer
if self.has(Symbol):
n = (self.stop - self.start)/self.step
if not n.is_extended_positive or not all(
i.is_integer or i.is_infinite for i in self.args):
return
else:
n = self.size
if self.start.is_finite:
ref = self.start
elif self.stop.is_finite:
ref = self.stop
else: # both infinite; step is +/- 1 (enforced by __new__)
return S.true
if n == 1:
return Eq(other, self[0])
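        # other lies on the sequence iff it differs from a reference
        # endpoint by a multiple of step; it must also lie between
        # inf and sup to be in the Range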
res = (ref - other) % self.step
if res == S.Zero:
if self.has(Symbol):
d = Dummy('i')
return self.as_relational(d).subs(d, other)
return And(other >= self.inf, other <= self.sup)
elif res.is_Integer: # off sequence
return S.false
else: # symbolic/unsimplified residue modulo step
return None
def __iter__(self):
n = self.size # validate
if not (n.has(S.Infinity) or n.has(S.NegativeInfinity) or n.is_Integer):
raise TypeError("Cannot iterate over symbolic Range")
if self.start in [S.NegativeInfinity, S.Infinity]:
raise TypeError("Cannot iterate over Range with infinite start")
elif self.start != self.stop:
i = self.start
if n.is_infinite:
while True:
yield i
i += self.step
else:
for _ in range(n):
yield i
i += self.step
@property
def is_iterable(self):
# Check that size can be determined, used by __iter__
dif = self.stop - self.start
n = dif/self.step
if not (n.has(S.Infinity) or n.has(S.NegativeInfinity) or n.is_Integer):
return False
if self.start in [S.NegativeInfinity, S.Infinity]:
return False
if not (n.is_extended_nonnegative and all(i.is_integer for i in self.args)):
return False
return True
def __len__(self):
rv = self.size
if rv is S.Infinity:
raise ValueError('Use .size to get the length of an infinite Range')
return int(rv)
@property
def size(self):
if self.start == self.stop:
return S.Zero
dif = self.stop - self.start
n = dif/self.step
if n.is_infinite:
return S.Infinity
if n.is_extended_nonnegative and all(i.is_integer for i in self.args):
from sympy.functions.elementary.integers import floor
return abs(floor(n))
raise ValueError('Invalid method for symbolic Range')
@property
def is_finite_set(self):
if self.start.is_integer and self.stop.is_integer:
return True
return self.size.is_finite
@property
def is_empty(self):
try:
return self.size.is_zero
except ValueError:
return None
def __bool__(self):
# this only distinguishes between definite null range
# and non-null/unknown null; getting True doesn't mean
# that it actually is not null
b = is_eq(self.start, self.stop)
if b is None:
raise ValueError('cannot tell if Range is null or not')
return not bool(b)
def __getitem__(self, i):
from sympy.functions.elementary.integers import ceiling
ooslice = "cannot slice from the end with an infinite value"
zerostep = "slice step cannot be zero"
infinite = "slicing not possible on range with infinite start"
# if we had to take every other element in the following
# oo, ..., 6, 4, 2, 0
# we might get oo, ..., 4, 0 or oo, ..., 6, 2
ambiguous = "cannot unambiguously re-stride from the end " + \
"with an infinite value"
if isinstance(i, slice):
if self.size.is_finite: # validates, too
if self.start == self.stop:
return Range(0)
start, stop, step = i.indices(self.size)
n = ceiling((stop - start)/step)
if n <= 0:
return Range(0)
canonical_stop = start + n*step
end = canonical_stop - step
ss = step*self.step
return Range(self[start], self[end] + ss, ss)
else: # infinite Range
start = i.start
stop = i.stop
if i.step == 0:
raise ValueError(zerostep)
step = i.step or 1
ss = step*self.step
#---------------------
# handle infinite Range
# i.e. Range(-oo, oo) or Range(oo, -oo, -1)
# --------------------
if self.start.is_infinite and self.stop.is_infinite:
raise ValueError(infinite)
#---------------------
# handle infinite on right
# e.g. Range(0, oo) or Range(0, -oo, -1)
# --------------------
if self.stop.is_infinite:
# start and stop are not interdependent --
# they only depend on step --so we use the
# equivalent reversed values
return self.reversed[
stop if stop is None else -stop + 1:
start if start is None else -start:
step].reversed
#---------------------
# handle infinite on the left
# e.g. Range(oo, 0, -1) or Range(-oo, 0)
# --------------------
# consider combinations of
# start/stop {== None, < 0, == 0, > 0} and
# step {< 0, > 0}
if start is None:
if stop is None:
if step < 0:
return Range(self[-1], self.start, ss)
elif step > 1:
raise ValueError(ambiguous)
else: # == 1
return self
elif stop < 0:
if step < 0:
return Range(self[-1], self[stop], ss)
else: # > 0
return Range(self.start, self[stop], ss)
elif stop == 0:
if step > 0:
return Range(0)
else: # < 0
raise ValueError(ooslice)
elif stop == 1:
if step > 0:
raise ValueError(ooslice) # infinite singleton
else: # < 0
raise ValueError(ooslice)
else: # > 1
raise ValueError(ooslice)
elif start < 0:
if stop is None:
if step < 0:
return Range(self[start], self.start, ss)
else: # > 0
return Range(self[start], self.stop, ss)
elif stop < 0:
return Range(self[start], self[stop], ss)
elif stop == 0:
if step < 0:
raise ValueError(ooslice)
else: # > 0
return Range(0)
elif stop > 0:
raise ValueError(ooslice)
elif start == 0:
if stop is None:
if step < 0:
raise ValueError(ooslice) # infinite singleton
elif step > 1:
raise ValueError(ambiguous)
else: # == 1
return self
elif stop < 0:
if step > 1:
raise ValueError(ambiguous)
elif step == 1:
return Range(self.start, self[stop], ss)
else: # < 0
return Range(0)
else: # >= 0
raise ValueError(ooslice)
elif start > 0:
raise ValueError(ooslice)
else:
if self.start == self.stop:
raise IndexError('Range index out of range')
if not (all(i.is_integer or i.is_infinite
for i in self.args) and ((self.stop - self.start)/
self.step).is_extended_positive):
raise ValueError('Invalid method for symbolic Range')
if i == 0:
if self.start.is_infinite:
raise ValueError(ooslice)
return self.start
if i == -1:
if self.stop.is_infinite:
raise ValueError(ooslice)
return self.stop - self.step
n = self.size # must be known for any other index
rv = (self.stop if i < 0 else self.start) + i*self.step
if rv.is_infinite:
raise ValueError(ooslice)
val = (rv - self.start)/self.step
rel = fuzzy_or([val.is_infinite,
fuzzy_and([val.is_nonnegative, (n-val).is_nonnegative])])
if rel:
return rv
if rel is None:
raise ValueError('Invalid method for symbolic Range')
raise IndexError("Range index out of range")
@property
def _inf(self):
if not self:
return S.EmptySet.inf
if self.has(Symbol):
if all(i.is_integer or i.is_infinite for i in self.args):
dif = self.stop - self.start
if self.step.is_positive and dif.is_positive:
return self.start
elif self.step.is_negative and dif.is_negative:
return self.stop - self.step
raise ValueError('invalid method for symbolic range')
if self.step > 0:
return self.start
else:
return self.stop - self.step
@property
def _sup(self):
if not self:
return S.EmptySet.sup
if self.has(Symbol):
if all(i.is_integer or i.is_infinite for i in self.args):
dif = self.stop - self.start
if self.step.is_positive and dif.is_positive:
return self.stop - self.step
elif self.step.is_negative and dif.is_negative:
return self.start
raise ValueError('invalid method for symbolic range')
if self.step > 0:
return self.stop - self.step
else:
return self.start
@property
def _boundary(self):
return self
def as_relational(self, x):
"""Rewrite a Range in terms of equalities and logic operators. """
if self.start.is_infinite:
assert not self.stop.is_infinite # by instantiation
a = self.reversed.start
else:
a = self.start
step = self.step
in_seq = Eq(Mod(x - a, step), 0)
ints = And(Eq(Mod(a, 1), 0), Eq(Mod(step, 1), 0))
n = (self.stop - self.start)/self.step
if n == 0:
return S.EmptySet.as_relational(x)
if n == 1:
return And(Eq(x, a), ints)
try:
a, b = self.inf, self.sup
except ValueError:
a = None
if a is not None:
range_cond = And(
x > a if a.is_infinite else x >= a,
x < b if b.is_infinite else x <= b)
else:
a, b = self.start, self.stop - self.step
range_cond = Or(
And(self.step >= 1, x > a if a.is_infinite else x >= a,
x < b if b.is_infinite else x <= b),
And(self.step <= -1, x < a if a.is_infinite else x <= a,
x > b if b.is_infinite else x >= b))
return And(in_seq, ints, range_cond)
converter[range] = lambda r: Range(r.start, r.stop, r.step)
def normalize_theta_set(theta):
r"""
    Normalize a real set ``theta`` to the interval $[0, 2\pi)$. It returns
    the normalized value of theta in the set. For an Interval, at most
    one cycle $[0, 2\pi)$ is returned, i.e. for theta equal to $[0, 10\pi]$
    the returned normalized value is $[0, 2\pi)$. As of now, intervals
    with endpoints that are not multiples of ``pi`` are not supported.
Raises
======
NotImplementedError
The algorithms for Normalizing theta Set are not yet
implemented.
ValueError
The input is not valid, i.e. the input is not a real set.
RuntimeError
It is a bug, please report to the github issue tracker.
Examples
========
>>> from sympy.sets.fancysets import normalize_theta_set
>>> from sympy import Interval, FiniteSet, pi
>>> normalize_theta_set(Interval(9*pi/2, 5*pi))
Interval(pi/2, pi)
>>> normalize_theta_set(Interval(-3*pi/2, pi/2))
Interval.Ropen(0, 2*pi)
>>> normalize_theta_set(Interval(-pi/2, pi/2))
Union(Interval(0, pi/2), Interval.Ropen(3*pi/2, 2*pi))
>>> normalize_theta_set(Interval(-4*pi, 3*pi))
Interval.Ropen(0, 2*pi)
>>> normalize_theta_set(Interval(-3*pi/2, -pi/2))
Interval(pi/2, 3*pi/2)
>>> normalize_theta_set(FiniteSet(0, pi, 3*pi))
{0, pi}
"""
from sympy.functions.elementary.trigonometric import _pi_coeff as coeff
if theta.is_Interval:
interval_len = theta.measure
# one complete circle
if interval_len >= 2*S.Pi:
if interval_len == 2*S.Pi and theta.left_open and theta.right_open:
k = coeff(theta.start)
return Union(Interval(0, k*S.Pi, False, True),
Interval(k*S.Pi, 2*S.Pi, True, True))
return Interval(0, 2*S.Pi, False, True)
k_start, k_end = coeff(theta.start), coeff(theta.end)
if k_start is None or k_end is None:
raise NotImplementedError("Normalizing theta without pi as coefficient is "
"not yet implemented")
new_start = k_start*S.Pi
new_end = k_end*S.Pi
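        # if the normalized start exceeds the normalized end, the interval
        # wraps past 2*pi and is split into two pieces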
if new_start > new_end:
return Union(Interval(S.Zero, new_end, False, theta.right_open),
Interval(new_start, 2*S.Pi, theta.left_open, True))
else:
return Interval(new_start, new_end, theta.left_open, theta.right_open)
elif theta.is_FiniteSet:
new_theta = []
for element in theta:
k = coeff(element)
if k is None:
raise NotImplementedError('Normalizing theta without pi as '
'coefficient, is not Implemented.')
else:
new_theta.append(k*S.Pi)
return FiniteSet(*new_theta)
elif theta.is_Union:
return Union(*[normalize_theta_set(interval) for interval in theta.args])
elif theta.is_subset(S.Reals):
raise NotImplementedError("Normalizing theta when, it is of type %s is not "
"implemented" % type(theta))
else:
raise ValueError(" %s is not a real set" % (theta))
class ComplexRegion(Set):
r"""
Represents the Set of all Complex Numbers. It can represent a
region of Complex Plane in both the standard forms Polar and
Rectangular coordinates.
* Polar Form
Input is in the form of the ProductSet or Union of ProductSets
of the intervals of ``r`` and ``theta``, and use the flag ``polar=True``.
.. math:: Z = \{z \in \mathbb{C} \mid z = r\times (\cos(\theta) + I\sin(\theta)), r \in [\texttt{r}], \theta \in [\texttt{theta}]\}
* Rectangular Form
Input is in the form of the ProductSet or Union of ProductSets
of interval of x and y, the real and imaginary parts of the Complex numbers in a plane.
Default input type is in rectangular form.
.. math:: Z = \{z \in \mathbb{C} \mid z = x + Iy, x \in [\operatorname{re}(z)], y \in [\operatorname{im}(z)]\}
Examples
========
>>> from sympy.sets.fancysets import ComplexRegion
>>> from sympy.sets import Interval
>>> from sympy import S, I, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 6)
>>> c1 = ComplexRegion(a*b) # Rectangular Form
>>> c1
CartesianComplexRegion(ProductSet(Interval(2, 3), Interval(4, 6)))
    * c1 represents the rectangular region in the complex plane
      bounded by the four vertices (2, 4), (3, 4), (3, 6) and
      (2, 6).
>>> c = Interval(1, 8)
>>> c2 = ComplexRegion(Union(a*b, b*c))
>>> c2
CartesianComplexRegion(Union(ProductSet(Interval(2, 3), Interval(4, 6)), ProductSet(Interval(4, 6), Interval(1, 8))))
    * c2 represents the union of two rectangular regions in the
      complex plane. One of them is bounded by the vertices of c1
      and the other is bounded by the vertices (4, 1), (6, 1),
      (6, 8) and (4, 8).
>>> 2.5 + 4.5*I in c1
True
>>> 2.5 + 6.5*I in c1
False
>>> r = Interval(0, 1)
>>> theta = Interval(0, 2*S.Pi)
>>> c2 = ComplexRegion(r*theta, polar=True) # Polar Form
>>> c2 # unit Disk
PolarComplexRegion(ProductSet(Interval(0, 1), Interval.Ropen(0, 2*pi)))
* c2 represents the region in complex plane inside the
Unit Disk centered at the origin.
>>> 0.5 + 0.5*I in c2
True
>>> 1 + 2*I in c2
False
>>> unit_disk = ComplexRegion(Interval(0, 1)*Interval(0, 2*S.Pi), polar=True)
>>> upper_half_unit_disk = ComplexRegion(Interval(0, 1)*Interval(0, S.Pi), polar=True)
>>> intersection = unit_disk.intersect(upper_half_unit_disk)
>>> intersection
PolarComplexRegion(ProductSet(Interval(0, 1), Interval(0, pi)))
>>> intersection == upper_half_unit_disk
True
See Also
========
CartesianComplexRegion
PolarComplexRegion
Complexes
"""
is_ComplexRegion = True
def __new__(cls, sets, polar=False):
if polar is False:
return CartesianComplexRegion(sets)
elif polar is True:
return PolarComplexRegion(sets)
else:
raise ValueError("polar should be either True or False")
@property
def sets(self):
"""
        Return the raw input sets of ``self``.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.sets
ProductSet(Interval(2, 3), Interval(4, 5))
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.sets
Union(ProductSet(Interval(2, 3), Interval(4, 5)), ProductSet(Interval(4, 5), Interval(1, 7)))
"""
return self.args[0]
@property
def psets(self):
"""
        Return a tuple of the input ProductSets of ``self``.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.psets
(ProductSet(Interval(2, 3), Interval(4, 5)),)
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.psets
(ProductSet(Interval(2, 3), Interval(4, 5)), ProductSet(Interval(4, 5), Interval(1, 7)))
"""
if self.sets.is_ProductSet:
            psets = (self.sets, )
else:
psets = self.sets.args
return psets
@property
def a_interval(self):
"""
        Return the union of intervals of `x` when self is in
rectangular form, or the union of intervals of `r` when
self is in polar form.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.a_interval
Interval(2, 3)
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.a_interval
Union(Interval(2, 3), Interval(4, 5))
"""
a_interval = []
for element in self.psets:
a_interval.append(element.args[0])
a_interval = Union(*a_interval)
return a_interval
@property
def b_interval(self):
"""
        Return the union of intervals of `y` when self is in
rectangular form, or the union of intervals of `theta`
when self is in polar form.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.b_interval
Interval(4, 5)
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.b_interval
Interval(1, 7)
"""
b_interval = []
for element in self.psets:
b_interval.append(element.args[1])
b_interval = Union(*b_interval)
return b_interval
@property
def _measure(self):
"""
The measure of self.sets.
Examples
========
>>> from sympy import Interval, ComplexRegion, S
>>> a, b = Interval(2, 5), Interval(4, 8)
>>> c = Interval(0, 2*S.Pi)
>>> c1 = ComplexRegion(a*b)
>>> c1.measure
12
>>> c2 = ComplexRegion(a*c, polar=True)
>>> c2.measure
6*pi
"""
return self.sets._measure
@classmethod
def from_real(cls, sets):
"""
        Converts a given subset of real numbers to a complex region.
Examples
========
>>> from sympy import Interval, ComplexRegion
>>> unit = Interval(0,1)
>>> ComplexRegion.from_real(unit)
CartesianComplexRegion(ProductSet(Interval(0, 1), {0}))
"""
if not sets.is_subset(S.Reals):
raise ValueError("sets must be a subset of the real line")
return CartesianComplexRegion(sets * FiniteSet(0))
def _contains(self, other):
from sympy.functions import arg, Abs
other = sympify(other)
isTuple = isinstance(other, Tuple)
if isTuple and len(other) != 2:
raise ValueError('expecting Tuple of length 2')
# If the other is not an Expression, and neither a Tuple
if not isinstance(other, Expr) and not isinstance(other, Tuple):
return S.false
# self in rectangular form
if not self.polar:
re, im = other if isTuple else other.as_real_imag()
return fuzzy_or(fuzzy_and([
pset.args[0]._contains(re),
pset.args[1]._contains(im)])
for pset in self.psets)
# self in polar form
elif self.polar:
if other.is_zero:
# ignore undefined complex argument
return fuzzy_or(pset.args[0]._contains(S.Zero)
for pset in self.psets)
if isTuple:
r, theta = other
else:
r, theta = Abs(other), arg(other)
if theta.is_real and theta.is_number:
# angles in psets are normalized to [0, 2pi)
theta %= 2*S.Pi
return fuzzy_or(fuzzy_and([
pset.args[0]._contains(r),
pset.args[1]._contains(theta)])
for pset in self.psets)
class CartesianComplexRegion(ComplexRegion):
r"""
Set representing a square region of the complex plane.
.. math:: Z = \{z \in \mathbb{C} \mid z = x + Iy, x \in [\operatorname{re}(z)], y \in [\operatorname{im}(z)]\}
Examples
========
>>> from sympy.sets.fancysets import ComplexRegion
>>> from sympy.sets.sets import Interval
>>> from sympy import I
>>> region = ComplexRegion(Interval(1, 3) * Interval(4, 6))
>>> 2 + 5*I in region
True
>>> 5*I in region
False
See also
========
ComplexRegion
PolarComplexRegion
Complexes
"""
polar = False
variables = symbols('x, y', cls=Dummy)
def __new__(cls, sets):
if sets == S.Reals*S.Reals:
return S.Complexes
if all(_a.is_FiniteSet for _a in sets.args) and (len(sets.args) == 2):
# ** ProductSet of FiniteSets in the Complex Plane. **
# For Cases like ComplexRegion({2, 4}*{3}), It
# would return {2 + 3*I, 4 + 3*I}
# FIXME: This should probably be handled with something like:
# return ImageSet(Lambda((x, y), x+I*y), sets).rewrite(FiniteSet)
complex_num = []
for x in sets.args[0]:
for y in sets.args[1]:
complex_num.append(x + S.ImaginaryUnit*y)
return FiniteSet(*complex_num)
else:
return Set.__new__(cls, sets)
@property
def expr(self):
x, y = self.variables
return x + S.ImaginaryUnit*y
class PolarComplexRegion(ComplexRegion):
r"""
Set representing a polar region of the complex plane.
.. math:: Z = \{z \in \mathbb{C} \mid z = r\times (\cos(\theta) + I\sin(\theta)), r \in [\texttt{r}], \theta \in [\texttt{theta}]\}
Examples
========
>>> from sympy.sets.fancysets import ComplexRegion, Interval
>>> from sympy import oo, pi, I
>>> rset = Interval(0, oo)
>>> thetaset = Interval(0, pi)
>>> upper_half_plane = ComplexRegion(rset * thetaset, polar=True)
>>> 1 + I in upper_half_plane
True
>>> 1 - I in upper_half_plane
False
See also
========
ComplexRegion
CartesianComplexRegion
Complexes
"""
polar = True
variables = symbols('r, theta', cls=Dummy)
def __new__(cls, sets):
new_sets = []
# sets is Union of ProductSets
if not sets.is_ProductSet:
for k in sets.args:
new_sets.append(k)
# sets is ProductSets
else:
new_sets.append(sets)
# Normalize input theta
for k, v in enumerate(new_sets):
new_sets[k] = ProductSet(v.args[0],
normalize_theta_set(v.args[1]))
sets = Union(*new_sets)
return Set.__new__(cls, sets)
@property
def expr(self):
from sympy.functions.elementary.trigonometric import sin, cos
r, theta = self.variables
return r*(cos(theta) + S.ImaginaryUnit*sin(theta))
class Complexes(CartesianComplexRegion, metaclass=Singleton):
"""
The :class:`Set` of all complex numbers
Examples
========
>>> from sympy import S, I
>>> S.Complexes
Complexes
>>> 1 + I in S.Complexes
True
See also
========
Reals
ComplexRegion
"""
is_empty = False
is_finite_set = False
# Override property from superclass since Complexes has no args
@property
def sets(self):
return ProductSet(S.Reals, S.Reals)
def __new__(cls):
return Set.__new__(cls)
|
9353ece78fa0813fd3c960d4f59cb66b758d8c3dabd72e9cb93c46daf93b1b28 | from typing import Optional
from functools import reduce
from collections import defaultdict
import inspect
from sympy.core.basic import Basic
from sympy.core.containers import Tuple
from sympy.core.decorators import sympify_method_args, sympify_return
from sympy.core.evalf import EvalfMixin
from sympy.core.expr import Expr
from sympy.core.function import Lambda
from sympy.core.logic import (FuzzyBool, fuzzy_bool, fuzzy_or, fuzzy_and,
fuzzy_not)
from sympy.core.numbers import Float, Integer
from sympy.core.operations import LatticeOp
from sympy.core.parameters import global_parameters
from sympy.core.relational import Eq, Ne, is_lt
from sympy.core.singleton import Singleton, S
from sympy.core.sorting import ordered
from sympy.core.symbol import symbols, Symbol, Dummy, uniquely_named_symbol
from sympy.core.sympify import _sympify, sympify, converter
from sympy.logic.boolalg import And, Or, Not, Xor, true, false
from sympy.sets.contains import Contains
from sympy.utilities.decorator import deprecated
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import (iproduct, sift, roundrobin, iterable,
subsets)
from sympy.utilities.misc import func_name, filldedent
from mpmath import mpi, mpf
from mpmath.libmp.libmpf import prec_to_dps
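# map fuzzy/Python booleans to the corresponding SymPy Booleans;
# anything else (e.g. None) maps to None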
tfn = defaultdict(lambda: None, {
True: S.true,
S.true: S.true,
False: S.false,
S.false: S.false})
@sympify_method_args
class Set(Basic, EvalfMixin):
"""
The base class for any kind of set.
Explanation
===========
This is not meant to be used directly as a container of items. It does not
behave like the builtin ``set``; see :class:`FiniteSet` for that.
Real intervals are represented by the :class:`Interval` class and unions of
sets by the :class:`Union` class. The empty set is represented by the
:class:`EmptySet` class and available as a singleton as ``S.EmptySet``.
"""
is_number = False
is_iterable = False
is_interval = False
is_FiniteSet = False
is_Interval = False
is_ProductSet = False
is_Union = False
is_Intersection = None # type: Optional[bool]
is_UniversalSet = None # type: Optional[bool]
is_Complement = None # type: Optional[bool]
is_ComplexRegion = False
is_empty = None # type: FuzzyBool
is_finite_set = None # type: FuzzyBool
@property # type: ignore
@deprecated(useinstead="is S.EmptySet or is_empty",
issue=16946, deprecated_since_version="1.5")
def is_EmptySet(self):
return None
@staticmethod
def _infimum_key(expr):
"""
Return infimum (if possible) else S.Infinity.
"""
try:
infimum = expr.inf
assert infimum.is_comparable
infimum = infimum.evalf() # issue #18505
except (NotImplementedError,
AttributeError, AssertionError, ValueError):
infimum = S.Infinity
return infimum
def union(self, other):
"""
Returns the union of ``self`` and ``other``.
Examples
========
As a shortcut it is possible to use the ``+`` operator:
>>> from sympy import Interval, FiniteSet
>>> Interval(0, 1).union(Interval(2, 3))
Union(Interval(0, 1), Interval(2, 3))
>>> Interval(0, 1) + Interval(2, 3)
Union(Interval(0, 1), Interval(2, 3))
>>> Interval(1, 2, True, True) + FiniteSet(2, 3)
Union({3}, Interval.Lopen(1, 2))
Similarly it is possible to use the ``-`` operator for set differences:
>>> Interval(0, 2) - Interval(0, 1)
Interval.Lopen(1, 2)
>>> Interval(1, 3) - FiniteSet(2)
Union(Interval.Ropen(1, 2), Interval.Lopen(2, 3))
"""
return Union(self, other)
def intersect(self, other):
"""
Returns the intersection of 'self' and 'other'.
Examples
========
>>> from sympy import Interval
>>> Interval(1, 3).intersect(Interval(1, 2))
Interval(1, 2)
>>> from sympy import imageset, Lambda, symbols, S
>>> n, m = symbols('n m')
>>> a = imageset(Lambda(n, 2*n), S.Integers)
>>> a.intersect(imageset(Lambda(m, 2*m + 1), S.Integers))
EmptySet
"""
return Intersection(self, other)
def intersection(self, other):
"""
Alias for :meth:`intersect()`
"""
return self.intersect(other)
def is_disjoint(self, other):
"""
Returns True if ``self`` and ``other`` are disjoint.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 2).is_disjoint(Interval(1, 2))
False
>>> Interval(0, 2).is_disjoint(Interval(3, 4))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Disjoint_sets
"""
return self.intersect(other) == S.EmptySet
def isdisjoint(self, other):
"""
Alias for :meth:`is_disjoint()`
"""
return self.is_disjoint(other)
def complement(self, universe):
r"""
The complement of 'self' w.r.t the given universe.
Examples
========
>>> from sympy import Interval, S
>>> Interval(0, 1).complement(S.Reals)
Union(Interval.open(-oo, 0), Interval.open(1, oo))
>>> Interval(0, 1).complement(S.UniversalSet)
Complement(UniversalSet, Interval(0, 1))
"""
return Complement(universe, self)
def _complement(self, other):
# this behaves as other - self
if isinstance(self, ProductSet) and isinstance(other, ProductSet):
# If self and other are disjoint then other - self == self
if len(self.sets) != len(other.sets):
return other
# There can be other ways to represent this but this gives:
# (A x B) - (C x D) = ((A - C) x B) U (A x (B - D))
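            # e.g. ([0, 2] x [0, 2]) - ([1, 3] x [1, 3])
            #    = (([0, 2] - [1, 3]) x [0, 2]) U ([0, 2] x ([0, 2] - [1, 3]))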
overlaps = []
pairs = list(zip(self.sets, other.sets))
for n in range(len(pairs)):
sets = (o if i != n else o-s for i, (s, o) in enumerate(pairs))
overlaps.append(ProductSet(*sets))
return Union(*overlaps)
elif isinstance(other, Interval):
if isinstance(self, (Interval, FiniteSet)):
return Intersection(other, self.complement(S.Reals))
elif isinstance(other, Union):
return Union(*(o - self for o in other.args))
elif isinstance(other, Complement):
return Complement(other.args[0], Union(other.args[1], self), evaluate=False)
elif other is S.EmptySet:
return S.EmptySet
elif isinstance(other, FiniteSet):
sifted = sift(other, lambda x: fuzzy_bool(self.contains(x)))
# ignore those that are contained in self
return Union(FiniteSet(*(sifted[False])),
Complement(FiniteSet(*(sifted[None])), self, evaluate=False)
if sifted[None] else S.EmptySet)
def symmetric_difference(self, other):
"""
Returns symmetric difference of ``self`` and ``other``.
Examples
========
>>> from sympy import Interval, S
>>> Interval(1, 3).symmetric_difference(S.Reals)
Union(Interval.open(-oo, 1), Interval.open(3, oo))
>>> Interval(1, 10).symmetric_difference(S.Reals)
Union(Interval.open(-oo, 1), Interval.open(10, oo))
>>> from sympy import S, EmptySet
>>> S.Reals.symmetric_difference(EmptySet)
Reals
References
==========
.. [1] https://en.wikipedia.org/wiki/Symmetric_difference
"""
return SymmetricDifference(self, other)
def _symmetric_difference(self, other):
return Union(Complement(self, other), Complement(other, self))
@property
def inf(self):
"""
The infimum of ``self``.
Examples
========
>>> from sympy import Interval, Union
>>> Interval(0, 1).inf
0
>>> Union(Interval(0, 1), Interval(2, 3)).inf
0
"""
return self._inf
@property
def _inf(self):
raise NotImplementedError("(%s)._inf" % self)
@property
def sup(self):
"""
The supremum of ``self``.
Examples
========
>>> from sympy import Interval, Union
>>> Interval(0, 1).sup
1
>>> Union(Interval(0, 1), Interval(2, 3)).sup
3
"""
return self._sup
@property
def _sup(self):
raise NotImplementedError("(%s)._sup" % self)
def contains(self, other):
"""
Returns a SymPy value indicating whether ``other`` is contained
in ``self``: ``true`` if it is, ``false`` if it isn't, else
an unevaluated ``Contains`` expression (or, as in the case of
ConditionSet and a union of FiniteSet/Intervals, an expression
indicating the conditions for containment).
Examples
========
>>> from sympy import Interval, S
>>> from sympy.abc import x
>>> Interval(0, 1).contains(0.5)
True
As a shortcut it is possible to use the ``in`` operator, but that
        will raise an error unless an affirmative true or false is
obtained.
>>> Interval(0, 1).contains(x)
(0 <= x) & (x <= 1)
>>> x in Interval(0, 1)
Traceback (most recent call last):
...
TypeError: did not evaluate to a bool: None
The result of 'in' is a bool, not a SymPy value
>>> 1 in Interval(0, 2)
True
>>> _ is S.true
False
"""
other = sympify(other, strict=True)
c = self._contains(other)
if isinstance(c, Contains):
return c
if c is None:
return Contains(other, self, evaluate=False)
b = tfn[c]
if b is None:
return c
return b
def _contains(self, other):
raise NotImplementedError(filldedent('''
(%s)._contains(%s) is not defined. This method, when
defined, will receive a sympified object. The method
should return True, False, None or something that
expresses what must be true for the containment of that
object in self to be evaluated. If None is returned
then a generic Contains object will be returned
by the ``contains`` method.''' % (self, other)))
def is_subset(self, other):
"""
Returns True if ``self`` is a subset of ``other``.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 0.5).is_subset(Interval(0, 1))
True
>>> Interval(0, 1).is_subset(Interval(0, 1, left_open=True))
False
"""
if not isinstance(other, Set):
raise ValueError("Unknown argument '%s'" % other)
# Handle the trivial cases
if self == other:
return True
is_empty = self.is_empty
if is_empty is True:
return True
elif fuzzy_not(is_empty) and other.is_empty:
return False
if self.is_finite_set is False and other.is_finite_set:
return False
# Dispatch on subclass rules
ret = self._eval_is_subset(other)
if ret is not None:
return ret
ret = other._eval_is_superset(self)
if ret is not None:
return ret
# Use pairwise rules from multiple dispatch
from sympy.sets.handlers.issubset import is_subset_sets
ret = is_subset_sets(self, other)
if ret is not None:
return ret
# Fall back on computing the intersection
# XXX: We shouldn't do this. A query like this should be handled
# without evaluating new Set objects. It should be the other way round
# so that the intersect method uses is_subset for evaluation.
if self.intersect(other) == self:
return True
def _eval_is_subset(self, other):
'''Returns a fuzzy bool for whether self is a subset of other.'''
return None
def _eval_is_superset(self, other):
        '''Returns a fuzzy bool for whether self is a superset of other.'''
return None
# This should be deprecated:
def issubset(self, other):
"""
Alias for :meth:`is_subset()`
"""
return self.is_subset(other)
def is_proper_subset(self, other):
"""
Returns True if ``self`` is a proper subset of ``other``.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 0.5).is_proper_subset(Interval(0, 1))
True
>>> Interval(0, 1).is_proper_subset(Interval(0, 1))
False
"""
if isinstance(other, Set):
return self != other and self.is_subset(other)
else:
raise ValueError("Unknown argument '%s'" % other)
def is_superset(self, other):
"""
Returns True if ``self`` is a superset of ``other``.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 0.5).is_superset(Interval(0, 1))
False
>>> Interval(0, 1).is_superset(Interval(0, 1, left_open=True))
True
"""
if isinstance(other, Set):
return other.is_subset(self)
else:
raise ValueError("Unknown argument '%s'" % other)
# This should be deprecated:
def issuperset(self, other):
"""
Alias for :meth:`is_superset()`
"""
return self.is_superset(other)
def is_proper_superset(self, other):
"""
Returns True if ``self`` is a proper superset of ``other``.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).is_proper_superset(Interval(0, 0.5))
True
>>> Interval(0, 1).is_proper_superset(Interval(0, 1))
False
"""
if isinstance(other, Set):
return self != other and self.is_superset(other)
else:
raise ValueError("Unknown argument '%s'" % other)
def _eval_powerset(self):
from .powerset import PowerSet
return PowerSet(self)
def powerset(self):
"""
Find the Power set of ``self``.
Examples
========
>>> from sympy import EmptySet, FiniteSet, Interval
A power set of an empty set:
>>> A = EmptySet
>>> A.powerset()
{EmptySet}
A power set of a finite set:
>>> A = FiniteSet(1, 2)
>>> a, b, c = FiniteSet(1), FiniteSet(2), FiniteSet(1, 2)
>>> A.powerset() == FiniteSet(a, b, c, EmptySet)
True
A power set of an interval:
>>> Interval(1, 2).powerset()
PowerSet(Interval(1, 2))
References
==========
.. [1] https://en.wikipedia.org/wiki/Power_set
"""
return self._eval_powerset()
@property
def measure(self):
"""
The (Lebesgue) measure of ``self``.
Examples
========
>>> from sympy import Interval, Union
>>> Interval(0, 1).measure
1
>>> Union(Interval(0, 1), Interval(2, 3)).measure
2
"""
return self._measure
@property
def boundary(self):
"""
The boundary or frontier of a set.
Explanation
===========
A point x is on the boundary of a set S if
1. x is in the closure of S.
I.e. Every neighborhood of x contains a point in S.
2. x is not in the interior of S.
I.e. There does not exist an open set centered on x contained
entirely within S.
These are the points on the outer rim of S. If S is open then these
points need not actually be contained within S.
For example, the boundary of an interval is its start and end points.
This is true regardless of whether or not the interval is open.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).boundary
{0, 1}
>>> Interval(0, 1, True, False).boundary
{0, 1}
"""
return self._boundary
@property
def is_open(self):
"""
Property method to check whether a set is open.
Explanation
===========
A set is open if and only if it has an empty intersection with its
boundary. In particular, a subset A of the reals is open if and only
if each one of its points is contained in an open interval that is a
subset of A.
Examples
========
>>> from sympy import S
>>> S.Reals.is_open
True
>>> S.Rationals.is_open
False
"""
return Intersection(self, self.boundary).is_empty
@property
def is_closed(self):
"""
A property method to check whether a set is closed.
Explanation
===========
A set is closed if its complement is an open set. The closedness of a
subset of the reals is determined with respect to R and its standard
topology.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).is_closed
True
"""
return self.boundary.is_subset(self)
@property
def closure(self):
"""
Property method which returns the closure of a set.
The closure is defined as the union of the set itself and its
boundary.
Examples
========
>>> from sympy import S, Interval
>>> S.Reals.closure
Reals
>>> Interval(0, 1).closure
Interval(0, 1)
"""
return self + self.boundary
@property
def interior(self):
"""
Property method which returns the interior of a set.
The interior of a set S consists of all points of S that do not
belong to the boundary of S.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).interior
Interval.open(0, 1)
>>> Interval(0, 1).boundary.interior
EmptySet
"""
return self - self.boundary
@property
def _boundary(self):
raise NotImplementedError()
@property
def _measure(self):
raise NotImplementedError("(%s)._measure" % self)
def _eval_evalf(self, prec):
dps = prec_to_dps(prec)
return self.func(*[arg.evalf(n=dps) for arg in self.args])
@sympify_return([('other', 'Set')], NotImplemented)
def __add__(self, other):
return self.union(other)
@sympify_return([('other', 'Set')], NotImplemented)
def __or__(self, other):
return self.union(other)
@sympify_return([('other', 'Set')], NotImplemented)
def __and__(self, other):
return self.intersect(other)
@sympify_return([('other', 'Set')], NotImplemented)
def __mul__(self, other):
return ProductSet(self, other)
@sympify_return([('other', 'Set')], NotImplemented)
def __xor__(self, other):
return SymmetricDifference(self, other)
@sympify_return([('exp', Expr)], NotImplemented)
def __pow__(self, exp):
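# A non-negative Integer exponent n gives the n-fold Cartesian product of self with itself;
# e.g. Interval(0, 1)**2 should give ProductSet(Interval(0, 1), Interval(0, 1)), the unit square.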
if not (exp.is_Integer and exp >= 0):
raise ValueError("%s: Exponent must be a positive Integer" % exp)
return ProductSet(*[self]*exp)
@sympify_return([('other', 'Set')], NotImplemented)
def __sub__(self, other):
return Complement(self, other)
def __contains__(self, other):
other = _sympify(other)
c = self._contains(other)
b = tfn[c]
if b is None:
# x in y must evaluate to T or F; to entertain a None
# result with Set use y.contains(x)
raise TypeError('did not evaluate to a bool: %r' % c)
return b
class ProductSet(Set):
"""
Represents a Cartesian Product of Sets.
Explanation
===========
Returns a Cartesian product given several sets as either an iterable
or individual arguments.
Can use ``*`` operator on any sets for convenient shorthand.
Examples
========
>>> from sympy import Interval, FiniteSet, ProductSet
>>> I = Interval(0, 5); S = FiniteSet(1, 2, 3)
>>> ProductSet(I, S)
ProductSet(Interval(0, 5), {1, 2, 3})
>>> (2, 2) in ProductSet(I, S)
True
>>> Interval(0, 1) * Interval(0, 1) # The unit square
ProductSet(Interval(0, 1), Interval(0, 1))
>>> coin = FiniteSet('H', 'T')
>>> set(coin**2)
{(H, H), (H, T), (T, H), (T, T)}
The Cartesian product is not commutative or associative e.g.:
>>> I*S == S*I
False
>>> (I*I)*I == I*(I*I)
False
Notes
=====
- Passes most operations down to the argument sets
References
==========
.. [1] https://en.wikipedia.org/wiki/Cartesian_product
"""
is_ProductSet = True
def __new__(cls, *sets, **assumptions):
if len(sets) == 1 and iterable(sets[0]) and not isinstance(sets[0], (Set, set)):
SymPyDeprecationWarning(
feature="ProductSet(iterable)",
useinstead="ProductSet(*iterable)",
issue=17557,
deprecated_since_version="1.5"
).warn()
sets = tuple(sets[0])
sets = [sympify(s) for s in sets]
if not all(isinstance(s, Set) for s in sets):
raise TypeError("Arguments to ProductSet should be of type Set")
# Nullary product of sets is *not* the empty set
if len(sets) == 0:
return FiniteSet(())
if S.EmptySet in sets:
return S.EmptySet
return Basic.__new__(cls, *sets, **assumptions)
@property
def sets(self):
return self.args
def flatten(self):
def _flatten(sets):
for s in sets:
if s.is_ProductSet:
yield from _flatten(s.sets)
else:
yield s
return ProductSet(*_flatten(self.sets))
def _contains(self, element):
"""
``in`` operator for ProductSets.
Examples
========
>>> from sympy import Interval
>>> (2, 3) in Interval(0, 5) * Interval(0, 5)
True
>>> (10, 10) in Interval(0, 5) * Interval(0, 5)
False
Passes operation on to constituent sets
"""
if element.is_Symbol:
return None
if not isinstance(element, Tuple) or len(element) != len(self.sets):
return False
return fuzzy_and(s._contains(e) for s, e in zip(self.sets, element))
def as_relational(self, *symbols):
symbols = [_sympify(s) for s in symbols]
if len(symbols) != len(self.sets) or not all(
i.is_Symbol for i in symbols):
raise ValueError(
'number of symbols must match the number of sets')
return And(*[s.as_relational(i) for s, i in zip(self.sets, symbols)])
@property
def _boundary(self):
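# The boundary of a product is the union, over each factor i, of the products in which
# factor i is replaced by its boundary and every other factor by its closure (b + b.boundary).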
return Union(*(ProductSet(*(b + b.boundary if i != j else b.boundary
for j, b in enumerate(self.sets)))
for i, a in enumerate(self.sets)))
@property
def is_iterable(self):
"""
A property method which tests whether a set is iterable or not.
Returns True if set is iterable, otherwise returns False.
Examples
========
>>> from sympy import FiniteSet, Interval
>>> I = Interval(0, 1)
>>> A = FiniteSet(1, 2, 3, 4, 5)
>>> I.is_iterable
False
>>> A.is_iterable
True
"""
return all(set.is_iterable for set in self.sets)
def __iter__(self):
"""
Iterate over the elements of the Cartesian product.
The product is generated lazily with ``iproduct``; iteration only
succeeds if the constituent sets (see ``is_iterable``) are themselves iterable.
"""
return iproduct(*self.sets)
@property
def is_empty(self):
return fuzzy_or(s.is_empty for s in self.sets)
@property
def is_finite_set(self):
all_finite = fuzzy_and(s.is_finite_set for s in self.sets)
return fuzzy_or([self.is_empty, all_finite])
@property
def _measure(self):
measure = 1
for s in self.sets:
measure *= s.measure
return measure
def __len__(self):
return reduce(lambda a, b: a*b, (len(s) for s in self.args))
def __bool__(self):
return all(self.sets)
class Interval(Set):
"""
Represents a real interval as a Set.
Usage:
Returns an interval with end points ``start`` and ``end``.
For ``left_open=True`` (default ``left_open`` is ``False``) the interval
will be open on the left. Similarly, for ``right_open=True`` the interval
will be open on the right.
Examples
========
>>> from sympy import Symbol, Interval
>>> Interval(0, 1)
Interval(0, 1)
>>> Interval.Ropen(0, 1)
Interval.Ropen(0, 1)
>>> Interval.Lopen(0, 1)
Interval.Lopen(0, 1)
>>> Interval.open(0, 1)
Interval.open(0, 1)
>>> a = Symbol('a', real=True)
>>> Interval(0, a)
Interval(0, a)
Notes
=====
- Only real end points are supported
- ``Interval(a, b)`` with $a > b$ will return the empty set
- Use the ``evalf()`` method to turn an Interval into an mpmath
``mpi`` interval instance
References
==========
.. [1] https://en.wikipedia.org/wiki/Interval_%28mathematics%29
"""
is_Interval = True
def __new__(cls, start, end, left_open=False, right_open=False):
start = _sympify(start)
end = _sympify(end)
left_open = _sympify(left_open)
right_open = _sympify(right_open)
if not all(isinstance(a, (type(true), type(false)))
for a in [left_open, right_open]):
raise NotImplementedError(
"left_open and right_open can have only true/false values, "
"got %s and %s" % (left_open, right_open))
# Only allow real intervals
if fuzzy_not(fuzzy_and(i.is_extended_real for i in (start, end, end-start))):
raise ValueError("Non-real intervals are not supported")
# evaluate if possible
if is_lt(end, start):
return S.EmptySet
elif (end - start).is_negative:
return S.EmptySet
if end == start and (left_open or right_open):
return S.EmptySet
if end == start and not (left_open or right_open):
if start is S.Infinity or start is S.NegativeInfinity:
return S.EmptySet
return FiniteSet(end)
# Make sure infinite interval end points are open.
if start is S.NegativeInfinity:
left_open = true
if end is S.Infinity:
right_open = true
if start == S.Infinity or end == S.NegativeInfinity:
return S.EmptySet
return Basic.__new__(cls, start, end, left_open, right_open)
@property
def start(self):
"""
The left end point of the interval.
This property takes the same value as the ``inf`` property.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).start
0
"""
return self._args[0]
@property
def end(self):
"""
The right end point of the interval.
This property takes the same value as the ``sup`` property.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).end
1
"""
return self._args[1]
@property
def left_open(self):
"""
True if interval is left-open.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1, left_open=True).left_open
True
>>> Interval(0, 1, left_open=False).left_open
False
"""
return self._args[2]
@property
def right_open(self):
"""
True if interval is right-open.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1, right_open=True).right_open
True
>>> Interval(0, 1, right_open=False).right_open
False
"""
return self._args[3]
@classmethod
def open(cls, a, b):
"""Return an interval including neither boundary."""
return cls(a, b, True, True)
@classmethod
def Lopen(cls, a, b):
"""Return an interval not including the left boundary."""
return cls(a, b, True, False)
@classmethod
def Ropen(cls, a, b):
"""Return an interval not including the right boundary."""
return cls(a, b, False, True)
@property
def _inf(self):
return self.start
@property
def _sup(self):
return self.end
@property
def left(self):
return self.start
@property
def right(self):
return self.end
@property
def is_empty(self):
if self.left_open or self.right_open:
cond = self.start >= self.end # One/both bounds open
else:
cond = self.start > self.end # Both bounds closed
return fuzzy_bool(cond)
@property
def is_finite_set(self):
return self.measure.is_zero
def _complement(self, other):
if other == S.Reals:
a = Interval(S.NegativeInfinity, self.start,
True, not self.left_open)
b = Interval(self.end, S.Infinity, not self.right_open, True)
return Union(a, b)
if isinstance(other, FiniteSet):
nums = [m for m in other.args if m.is_number]
if nums == []:
return None
return Set._complement(self, other)
@property
def _boundary(self):
finite_points = [p for p in (self.start, self.end)
if abs(p) != S.Infinity]
return FiniteSet(*finite_points)
def _contains(self, other):
if (not isinstance(other, Expr) or other is S.NaN
or other.is_real is False or other.has(S.ComplexInfinity)):
# if an expression has zoo it will be zoo or nan
# and neither of those is real
return false
if self.start is S.NegativeInfinity and self.end is S.Infinity:
if other.is_real is not None:
return other.is_real
d = Dummy()
return self.as_relational(d).subs(d, other)
def as_relational(self, x):
"""Rewrite an interval in terms of inequalities and logic operators."""
x = sympify(x)
if self.right_open:
right = x < self.end
else:
right = x <= self.end
if self.left_open:
left = self.start < x
else:
left = self.start <= x
return And(left, right)
@property
def _measure(self):
return self.end - self.start
def to_mpi(self, prec=53):
return mpi(mpf(self.start._eval_evalf(prec)),
mpf(self.end._eval_evalf(prec)))
def _eval_evalf(self, prec):
return Interval(self.left._evalf(prec), self.right._evalf(prec),
left_open=self.left_open, right_open=self.right_open)
def _is_comparable(self, other):
is_comparable = self.start.is_comparable
is_comparable &= self.end.is_comparable
is_comparable &= other.start.is_comparable
is_comparable &= other.end.is_comparable
return is_comparable
@property
def is_left_unbounded(self):
"""Return ``True`` if the left endpoint is negative infinity. """
return self.left is S.NegativeInfinity or self.left == Float("-inf")
@property
def is_right_unbounded(self):
"""Return ``True`` if the right endpoint is positive infinity. """
return self.right is S.Infinity or self.right == Float("+inf")
def _eval_Eq(self, other):
if not isinstance(other, Interval):
if isinstance(other, FiniteSet):
return false
elif isinstance(other, Set):
return None
return false
class Union(Set, LatticeOp):
"""
Represents a union of sets as a :class:`Set`.
Examples
========
>>> from sympy import Union, Interval
>>> Union(Interval(1, 2), Interval(3, 4))
Union(Interval(1, 2), Interval(3, 4))
The Union constructor will always try to merge overlapping intervals,
if possible. For example:
>>> Union(Interval(1, 2), Interval(2, 3))
Interval(1, 3)
See Also
========
Intersection
References
==========
.. [1] https://en.wikipedia.org/wiki/Union_%28set_theory%29
"""
is_Union = True
@property
def identity(self):
return S.EmptySet
@property
def zero(self):
return S.UniversalSet
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', global_parameters.evaluate)
# flatten inputs to merge intersections and iterables
args = _sympify(args)
# Reduce sets using known rules
if evaluate:
args = list(cls._new_args_filter(args))
return simplify_union(args)
args = list(ordered(args, Set._infimum_key))
obj = Basic.__new__(cls, *args)
obj._argset = frozenset(args)
return obj
@property
def args(self):
return self._args
def _complement(self, universe):
# DeMorgan's Law
return Intersection(s.complement(universe) for s in self.args)
@property
def _inf(self):
# We use Min so that inf is meaningful in combination with symbolic
# interval end points.
from sympy.functions.elementary.miscellaneous import Min
return Min(*[set.inf for set in self.args])
@property
def _sup(self):
# We use Max so that sup is meaningful in combination with symbolic
# end points.
from sympy.functions.elementary.miscellaneous import Max
return Max(*[set.sup for set in self.args])
@property
def is_empty(self):
return fuzzy_and(set.is_empty for set in self.args)
@property
def is_finite_set(self):
return fuzzy_and(set.is_finite_set for set in self.args)
@property
def _measure(self):
# Measure of a union is the sum of the measures of the sets minus
# the sum of their pairwise intersections plus the sum of their
# triple-wise intersections minus ... etc...
# Sets is a collection of intersections and a set of elementary
# sets which made up those intersections (called "sos" for set of sets)
# An example element of this list might be:
# ( {A,B,C}, A.intersect(B).intersect(C) )
# Start with just elementary sets ( ({A}, A), ({B}, B), ... )
# Then get and subtract ( ({A,B}, (A int B), ... ) while non-zero
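# Illustrative example: for Union(Interval(0, 2), Interval(1, 3)) this computes
# |A| + |B| - |A n B| = 2 + 2 - 1 = 3, the measure of Interval(0, 3).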
sets = [(FiniteSet(s), s) for s in self.args]
measure = 0
parity = 1
while sets:
# Add up the measure of these sets and add or subtract it to total
measure += parity * sum(inter.measure for sos, inter in sets)
# For each intersection in sets, compute the intersection with every
# other set not already part of the intersection.
sets = ((sos + FiniteSet(newset), newset.intersect(intersection))
for sos, intersection in sets for newset in self.args
if newset not in sos)
# Clear out sets with no measure
sets = [(sos, inter) for sos, inter in sets if inter.measure != 0]
# Clear out duplicates
sos_list = []
sets_list = []
for _set in sets:
if _set[0] in sos_list:
continue
else:
sos_list.append(_set[0])
sets_list.append(_set)
sets = sets_list
# Flip Parity - next time subtract/add if we added/subtracted here
parity *= -1
return measure
@property
def _boundary(self):
def boundary_of_set(i):
""" The boundary of set i minus interior of all other sets """
b = self.args[i].boundary
for j, a in enumerate(self.args):
if j != i:
b = b - a.interior
return b
return Union(*map(boundary_of_set, range(len(self.args))))
def _contains(self, other):
return Or(*[s.contains(other) for s in self.args])
def is_subset(self, other):
return fuzzy_and(s.is_subset(other) for s in self.args)
def as_relational(self, symbol):
"""Rewrite a Union in terms of equalities and logic operators. """
if (len(self.args) == 2 and
all(isinstance(i, Interval) for i in self.args)):
# optimization to give 3 args as (x > 1) & (x < 5) & Ne(x, 3)
# instead of as 4, ((1 <= x) & (x < 3)) | ((x <= 5) & (3 < x))
a, b = self.args
if (a.sup == b.inf and
not any(a.sup in i for i in self.args)):
return And(Ne(symbol, a.sup), symbol < b.sup, symbol > a.inf)
return Or(*[i.as_relational(symbol) for i in self.args])
@property
def is_iterable(self):
return all(arg.is_iterable for arg in self.args)
def __iter__(self):
return roundrobin(*(iter(arg) for arg in self.args))
class Intersection(Set, LatticeOp):
"""
Represents an intersection of sets as a :class:`Set`.
Examples
========
>>> from sympy import Intersection, Interval
>>> Intersection(Interval(1, 3), Interval(2, 4))
Interval(2, 3)
We often use the .intersect method
>>> Interval(1,3).intersect(Interval(2,4))
Interval(2, 3)
See Also
========
Union
References
==========
.. [1] https://en.wikipedia.org/wiki/Intersection_%28set_theory%29
"""
is_Intersection = True
@property
def identity(self):
return S.UniversalSet
@property
def zero(self):
return S.EmptySet
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', global_parameters.evaluate)
# flatten inputs to merge intersections and iterables
args = list(ordered(set(_sympify(args))))
# Reduce sets using known rules
if evaluate:
args = list(cls._new_args_filter(args))
return simplify_intersection(args)
args = list(ordered(args, Set._infimum_key))
obj = Basic.__new__(cls, *args)
obj._argset = frozenset(args)
return obj
@property
def args(self):
return self._args
@property
def is_iterable(self):
return any(arg.is_iterable for arg in self.args)
@property
def is_finite_set(self):
if fuzzy_or(arg.is_finite_set for arg in self.args):
return True
@property
def _inf(self):
raise NotImplementedError()
@property
def _sup(self):
raise NotImplementedError()
def _contains(self, other):
return And(*[set.contains(other) for set in self.args])
def __iter__(self):
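# Strategy (sketch): iterate over the iterable constituents, preferring the shortest finite
# ones, and yield each element that is also a member of the intersection of the other sets.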
sets_sift = sift(self.args, lambda x: x.is_iterable)
completed = False
candidates = sets_sift[True] + sets_sift[None]
finite_candidates, others = [], []
for candidate in candidates:
length = None
try:
length = len(candidate)
except TypeError:
others.append(candidate)
if length is not None:
finite_candidates.append(candidate)
finite_candidates.sort(key=len)
for s in finite_candidates + others:
other_sets = set(self.args) - {s}
other = Intersection(*other_sets, evaluate=False)
completed = True
for x in s:
try:
if x in other:
yield x
except TypeError:
completed = False
if completed:
return
if not completed:
if not candidates:
raise TypeError("None of the constituent sets are iterable")
raise TypeError(
"The computation had not completed because of the "
"undecidable set membership is found in every candidates.")
@staticmethod
def _handle_finite_sets(args):
'''Simplify intersection of one or more FiniteSets and other sets'''
# First separate the FiniteSets from the others
fs_args, others = sift(args, lambda x: x.is_FiniteSet, binary=True)
# Let the caller handle intersection of non-FiniteSets
if not fs_args:
return
# Convert to Python sets and build the set of all elements
fs_sets = [set(fs) for fs in fs_args]
all_elements = reduce(lambda a, b: a | b, fs_sets, set())
# Extract elements that are definitely in or definitely not in the
# intersection. Here we check contains for all of args.
definite = set()
for e in all_elements:
inall = fuzzy_and(s.contains(e) for s in args)
if inall is True:
definite.add(e)
if inall is not None:
for s in fs_sets:
s.discard(e)
# At this point all elements in all of fs_sets are possibly in the
# intersection. In some cases this is because they are definitely in
# the intersection of the finite sets but it's not clear if they are
# members of others. We might have {m, n}, {m}, and Reals where we
# don't know if m or n is real. We want to remove n here but it is
# possibly in the intersection because it might be equal to m. So what we do now is
# extract the elements that are definitely in the remaining finite
# sets iteratively until we end up with {n}, {}. At that point if we
# get any empty set all remaining elements are discarded.
fs_elements = reduce(lambda a, b: a | b, fs_sets, set())
# Need fuzzy containment testing
fs_symsets = [FiniteSet(*s) for s in fs_sets]
while fs_elements:
for e in fs_elements:
infs = fuzzy_and(s.contains(e) for s in fs_symsets)
if infs is True:
definite.add(e)
if infs is not None:
for n, s in enumerate(fs_sets):
# Update Python set and FiniteSet
if e in s:
s.remove(e)
fs_symsets[n] = FiniteSet(*s)
fs_elements.remove(e)
break
# If we completed the for loop without removing anything we are
# done so quit the outer while loop
else:
break
# If any of the sets of remainder elements is empty then we discard
# all of them for the intersection.
if not all(fs_sets):
fs_sets = [set()]
# Here we fold back the definitely included elements into each fs.
# Since they are definitely included they must have been members of
# each FiniteSet to begin with. We could instead fold these in with a
# Union at the end to get e.g. {3}|({x}&{y}) rather than {3,x}&{3,y}.
if definite:
fs_sets = [fs | definite for fs in fs_sets]
if fs_sets == [set()]:
return S.EmptySet
sets = [FiniteSet(*s) for s in fs_sets]
# Any set in others is redundant if it contains all the elements that
# are in the finite sets so we don't need it in the Intersection
all_elements = reduce(lambda a, b: a | b, fs_sets, set())
is_redundant = lambda o: all(fuzzy_bool(o.contains(e)) for e in all_elements)
others = [o for o in others if not is_redundant(o)]
if others:
rest = Intersection(*others)
# XXX: Maybe this shortcut should be at the beginning. For large
# FiniteSets it could be much more efficient to process the other
# sets first...
if rest is S.EmptySet:
return S.EmptySet
# Flatten the Intersection
if rest.is_Intersection:
sets.extend(rest.args)
else:
sets.append(rest)
if len(sets) == 1:
return sets[0]
else:
return Intersection(*sets, evaluate=False)
def as_relational(self, symbol):
"""Rewrite an Intersection in terms of equalities and logic operators"""
return And(*[set.as_relational(symbol) for set in self.args])
class Complement(Set):
r"""Represents the set difference or relative complement of a set with
another set.
$$A - B = \{x \in A \mid x \notin B\}$$
Examples
========
>>> from sympy import Complement, FiniteSet
>>> Complement(FiniteSet(0, 1, 2), FiniteSet(1))
{0, 2}
See Also
=========
Intersection, Union
References
==========
.. [1] https://mathworld.wolfram.com/ComplementSet.html
"""
is_Complement = True
def __new__(cls, a, b, evaluate=True):
a, b = map(_sympify, (a, b))
if evaluate:
return Complement.reduce(a, b)
return Basic.__new__(cls, a, b)
@staticmethod
def reduce(A, B):
"""
Simplify a :class:`Complement`.
"""
if B == S.UniversalSet or A.is_subset(B):
return S.EmptySet
if isinstance(B, Union):
return Intersection(*(s.complement(A) for s in B.args))
result = B._complement(A)
if result is not None:
return result
else:
return Complement(A, B, evaluate=False)
def _contains(self, other):
A = self.args[0]
B = self.args[1]
return And(A.contains(other), Not(B.contains(other)))
def as_relational(self, symbol):
"""Rewrite a complement in terms of equalities and logic
operators"""
A, B = self.args
A_rel = A.as_relational(symbol)
B_rel = Not(B.as_relational(symbol))
return And(A_rel, B_rel)
@property
def is_iterable(self):
if self.args[0].is_iterable:
return True
@property
def is_finite_set(self):
A, B = self.args
a_finite = A.is_finite_set
if a_finite is True:
return True
elif a_finite is False and B.is_finite_set:
return False
def __iter__(self):
A, B = self.args
for a in A:
if a not in B:
yield a
else:
continue
class EmptySet(Set, metaclass=Singleton):
"""
Represents the empty set. The empty set is available as a singleton
as ``S.EmptySet``.
Examples
========
>>> from sympy import S, Interval
>>> S.EmptySet
EmptySet
>>> Interval(1, 2).intersect(S.EmptySet)
EmptySet
See Also
========
UniversalSet
References
==========
.. [1] https://en.wikipedia.org/wiki/Empty_set
"""
is_empty = True
is_finite_set = True
is_FiniteSet = True
@property # type: ignore
@deprecated(useinstead="is S.EmptySet or is_empty",
issue=16946, deprecated_since_version="1.5")
def is_EmptySet(self):
return True
@property
def _measure(self):
return 0
def _contains(self, other):
return false
def as_relational(self, symbol):
return false
def __len__(self):
return 0
def __iter__(self):
return iter([])
def _eval_powerset(self):
return FiniteSet(self)
@property
def _boundary(self):
return self
def _complement(self, other):
return other
def _symmetric_difference(self, other):
return other
class UniversalSet(Set, metaclass=Singleton):
"""
Represents the set of all things.
The universal set is available as a singleton as ``S.UniversalSet``.
Examples
========
>>> from sympy import S, Interval
>>> S.UniversalSet
UniversalSet
>>> Interval(1, 2).intersect(S.UniversalSet)
Interval(1, 2)
See Also
========
EmptySet
References
==========
.. [1] https://en.wikipedia.org/wiki/Universal_set
"""
is_UniversalSet = True
is_empty = False
is_finite_set = False
def _complement(self, other):
return S.EmptySet
def _symmetric_difference(self, other):
return other
@property
def _measure(self):
return S.Infinity
def _contains(self, other):
return true
def as_relational(self, symbol):
return true
@property
def _boundary(self):
return S.EmptySet
class FiniteSet(Set):
"""
Represents a finite set of discrete numbers.
Examples
========
>>> from sympy import FiniteSet
>>> FiniteSet(1, 2, 3, 4)
{1, 2, 3, 4}
>>> 3 in FiniteSet(1, 2, 3, 4)
True
>>> members = [1, 2, 3, 4]
>>> f = FiniteSet(*members)
>>> f
{1, 2, 3, 4}
>>> f - FiniteSet(2)
{1, 3, 4}
>>> f + FiniteSet(2, 5)
{1, 2, 3, 4, 5}
References
==========
.. [1] https://en.wikipedia.org/wiki/Finite_set
"""
is_FiniteSet = True
is_iterable = True
is_empty = False
is_finite_set = True
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', global_parameters.evaluate)
if evaluate:
args = list(map(sympify, args))
if len(args) == 0:
return S.EmptySet
else:
args = list(map(sympify, args))
# keep the form of the first canonical arg
dargs = {}
for i in reversed(list(ordered(args))):
if i.is_Symbol:
dargs[i] = i
else:
try:
dargs[i.as_dummy()] = i
except TypeError:
# e.g. i = class without args like `Interval`
dargs[i] = i
_args_set = set(dargs.values())
args = list(ordered(_args_set, Set._infimum_key))
obj = Basic.__new__(cls, *args)
obj._args_set = _args_set
return obj
def __iter__(self):
return iter(self.args)
def _complement(self, other):
if isinstance(other, Interval):
# Splitting in sub-intervals is only done for S.Reals;
# other cases that need splitting will first pass through
# Set._complement().
nums, syms = [], []
for m in self.args:
if m.is_number and m.is_real:
nums.append(m)
elif m.is_real == False:
pass # drop non-reals
else:
syms.append(m) # various symbolic expressions
if other == S.Reals and nums != []:
nums.sort()
intervals = [] # Build up a list of intervals between the elements
intervals += [Interval(S.NegativeInfinity, nums[0], True, True)]
for a, b in zip(nums[:-1], nums[1:]):
intervals.append(Interval(a, b, True, True)) # both open
intervals.append(Interval(nums[-1], S.Infinity, True, True))
if syms != []:
return Complement(Union(*intervals, evaluate=False),
FiniteSet(*syms), evaluate=False)
else:
return Union(*intervals, evaluate=False)
elif nums == []: # no splitting necessary or possible:
if syms:
return Complement(other, FiniteSet(*syms), evaluate=False)
else:
return other
elif isinstance(other, FiniteSet):
unk = []
for i in self:
c = sympify(other.contains(i))
if c is not S.true and c is not S.false:
unk.append(i)
unk = FiniteSet(*unk)
if unk == self:
return
not_true = []
for i in other:
c = sympify(self.contains(i))
if c is not S.true:
not_true.append(i)
return Complement(FiniteSet(*not_true), unk)
return Set._complement(self, other)
def _contains(self, other):
"""
Tests whether an element, other, is in the set.
Explanation
===========
The actual test is for mathematical equality (as opposed to
syntactical equality). In the worst case all elements of the
set must be checked.
Examples
========
>>> from sympy import FiniteSet
>>> 1 in FiniteSet(1, 2)
True
>>> 5 in FiniteSet(1, 2)
False
"""
if other in self._args_set:
return True
else:
# evaluate=True is needed to override evaluate=False context;
# we need Eq to do the evaluation
return fuzzy_or(fuzzy_bool(Eq(e, other, evaluate=True))
for e in self.args)
def _eval_is_subset(self, other):
return fuzzy_and(other._contains(e) for e in self.args)
@property
def _boundary(self):
return self
@property
def _inf(self):
from sympy.functions.elementary.miscellaneous import Min
return Min(*self)
@property
def _sup(self):
from sympy.functions.elementary.miscellaneous import Max
return Max(*self)
@property
def measure(self):
return 0
def __len__(self):
return len(self.args)
def as_relational(self, symbol):
"""Rewrite a FiniteSet in terms of equalities and logic operators. """
return Or(*[Eq(symbol, elem) for elem in self])
def compare(self, other):
return (hash(self) - hash(other))
def _eval_evalf(self, prec):
dps = prec_to_dps(prec)
return FiniteSet(*[elem.evalf(n=dps) for elem in self])
def _eval_simplify(self, **kwargs):
from sympy.simplify import simplify
return FiniteSet(*[simplify(elem, **kwargs) for elem in self])
@property
def _sorted_args(self):
return self.args
def _eval_powerset(self):
return self.func(*[self.func(*s) for s in subsets(self.args)])
def _eval_rewrite_as_PowerSet(self, *args, **kwargs):
"""Rewriting method for a finite set to a power set."""
from .powerset import PowerSet
is2pow = lambda n: bool(n and not n & (n - 1))
if not is2pow(len(self)):
return None
fs_test = lambda arg: isinstance(arg, Set) and arg.is_FiniteSet
if not all(fs_test(arg) for arg in args):
return None
biggest = max(args, key=len)
for arg in subsets(biggest.args):
arg_set = FiniteSet(*arg)
if arg_set not in args:
return None
return PowerSet(biggest)
def __ge__(self, other):
if not isinstance(other, Set):
raise TypeError("Invalid comparison of set with %s" % func_name(other))
return other.is_subset(self)
def __gt__(self, other):
if not isinstance(other, Set):
raise TypeError("Invalid comparison of set with %s" % func_name(other))
return self.is_proper_superset(other)
def __le__(self, other):
if not isinstance(other, Set):
raise TypeError("Invalid comparison of set with %s" % func_name(other))
return self.is_subset(other)
def __lt__(self, other):
if not isinstance(other, Set):
raise TypeError("Invalid comparison of set with %s" % func_name(other))
return self.is_proper_subset(other)
converter[set] = lambda x: FiniteSet(*x)
converter[frozenset] = lambda x: FiniteSet(*x)
class SymmetricDifference(Set):
"""Represents the set of elements which are in either of the
sets and not in their intersection.
Examples
========
>>> from sympy import SymmetricDifference, FiniteSet
>>> SymmetricDifference(FiniteSet(1, 2, 3), FiniteSet(3, 4, 5))
{1, 2, 4, 5}
See Also
========
Complement, Union
References
==========
.. [1] https://en.wikipedia.org/wiki/Symmetric_difference
"""
is_SymmetricDifference = True
def __new__(cls, a, b, evaluate=True):
if evaluate:
return SymmetricDifference.reduce(a, b)
return Basic.__new__(cls, a, b)
@staticmethod
def reduce(A, B):
result = B._symmetric_difference(A)
if result is not None:
return result
else:
return SymmetricDifference(A, B, evaluate=False)
def as_relational(self, symbol):
"""Rewrite a symmetric_difference in terms of equalities and
logic operators"""
A, B = self.args
A_rel = A.as_relational(symbol)
B_rel = B.as_relational(symbol)
return Xor(A_rel, B_rel)
@property
def is_iterable(self):
if all(arg.is_iterable for arg in self.args):
return True
def __iter__(self):
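# An element belongs to the symmetric difference iff it occurs in an odd number of the
# argument sets, hence the parity check on ``count`` below.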
args = self.args
union = roundrobin(*(iter(arg) for arg in args))
for item in union:
count = 0
for s in args:
if item in s:
count += 1
if count % 2 == 1:
yield item
class DisjointUnion(Set):
""" Represents the disjoint union (also known as the external disjoint union)
of a finite number of sets.
Examples
========
>>> from sympy import DisjointUnion, FiniteSet, Interval, Union, Symbol
>>> A = FiniteSet(1, 2, 3)
>>> B = Interval(0, 5)
>>> DisjointUnion(A, B)
DisjointUnion({1, 2, 3}, Interval(0, 5))
>>> DisjointUnion(A, B).rewrite(Union)
Union(ProductSet({1, 2, 3}, {0}), ProductSet(Interval(0, 5), {1}))
>>> C = FiniteSet(Symbol('x'), Symbol('y'), Symbol('z'))
>>> DisjointUnion(C, C)
DisjointUnion({x, y, z}, {x, y, z})
>>> DisjointUnion(C, C).rewrite(Union)
ProductSet({x, y, z}, {0, 1})
References
==========
.. [1] https://en.wikipedia.org/wiki/Disjoint_union
"""
def __new__(cls, *sets):
dj_collection = []
for set_i in sets:
if isinstance(set_i, Set):
dj_collection.append(set_i)
else:
raise TypeError("Invalid input: '%s', input args \
to DisjointUnion must be Sets" % set_i)
obj = Basic.__new__(cls, *dj_collection)
return obj
@property
def sets(self):
return self.args
@property
def is_empty(self):
return fuzzy_and(s.is_empty for s in self.sets)
@property
def is_finite_set(self):
all_finite = fuzzy_and(s.is_finite_set for s in self.sets)
return fuzzy_or([self.is_empty, all_finite])
@property
def is_iterable(self):
if self.is_empty:
return False
iter_flag = True
for set_i in self.sets:
if not set_i.is_empty:
iter_flag = iter_flag and set_i.is_iterable
return iter_flag
def _eval_rewrite_as_Union(self, *sets):
"""
Rewrites the disjoint union as the union of the products ``set_i x {i}``,
where ``set_i`` is the set at index ``i`` in ``sets``.
"""
dj_union = S.EmptySet
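# e.g. DisjointUnion(A, B).rewrite(Union) should give Union(ProductSet(A, {0}), ProductSet(B, {1})),
# matching the example in the class docstring above.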
index = 0
for set_i in sets:
if isinstance(set_i, Set):
cross = ProductSet(set_i, FiniteSet(index))
dj_union = Union(dj_union, cross)
index = index + 1
return dj_union
def _contains(self, element):
"""
``in`` operator for DisjointUnion
Examples
========
>>> from sympy import Interval, DisjointUnion
>>> D = DisjointUnion(Interval(0, 1), Interval(0, 2))
>>> (0.5, 0) in D
True
>>> (0.5, 1) in D
True
>>> (1.5, 0) in D
False
>>> (1.5, 1) in D
True
Passes operation on to constituent sets
"""
if not isinstance(element, Tuple) or len(element) != 2:
return False
if not element[1].is_Integer:
return False
if element[1] >= len(self.sets) or element[1] < 0:
return False
return element[0] in self.sets[element[1]]
def __iter__(self):
if self.is_iterable:
iters = []
for i, s in enumerate(self.sets):
iters.append(iproduct(s, {Integer(i)}))
return iter(roundrobin(*iters))
else:
raise ValueError("'%s' is not iterable." % self)
def __len__(self):
"""
Returns the length of the disjoint union, i.e., the number of elements in the set.
Examples
========
>>> from sympy import FiniteSet, DisjointUnion, EmptySet
>>> D1 = DisjointUnion(FiniteSet(1, 2, 3, 4), EmptySet, FiniteSet(3, 4, 5))
>>> len(D1)
7
>>> D2 = DisjointUnion(FiniteSet(3, 5, 7), EmptySet, FiniteSet(3, 5, 7))
>>> len(D2)
6
>>> D3 = DisjointUnion(EmptySet, EmptySet)
>>> len(D3)
0
Adds up the lengths of the constituent sets.
"""
if self.is_finite_set:
size = 0
for set in self.sets:
size += len(set)
return size
else:
raise ValueError("'%s' is not a finite set." % self)
def imageset(*args):
r"""
Return an image of the set under transformation ``f``.
Explanation
===========
If this function cannot compute the image, it returns an
unevaluated ImageSet object.
.. math::
\{ f(x) \mid x \in \mathrm{self} \}
Examples
========
>>> from sympy import S, Interval, imageset, sin, Lambda
>>> from sympy.abc import x
>>> imageset(x, 2*x, Interval(0, 2))
Interval(0, 4)
>>> imageset(lambda x: 2*x, Interval(0, 2))
Interval(0, 4)
>>> imageset(Lambda(x, sin(x)), Interval(-2, 1))
ImageSet(Lambda(x, sin(x)), Interval(-2, 1))
>>> imageset(sin, Interval(-2, 1))
ImageSet(Lambda(x, sin(x)), Interval(-2, 1))
>>> imageset(lambda y: x + y, Interval(-2, 1))
ImageSet(Lambda(y, x + y), Interval(-2, 1))
Expressions applied to the set of Integers are simplified
to show as few negatives as possible and linear expressions
are converted to a canonical form. If this is not desirable
then the unevaluated ImageSet should be used.
>>> imageset(x, -2*x + 5, S.Integers)
ImageSet(Lambda(x, 2*x + 1), Integers)
See Also
========
sympy.sets.fancysets.ImageSet
"""
from sympy.sets.fancysets import ImageSet
from sympy.sets.setexpr import set_function
if len(args) < 2:
raise ValueError('imageset expects at least 2 args, got: %s' % len(args))
if isinstance(args[0], (Symbol, tuple)) and len(args) > 2:
f = Lambda(args[0], args[1])
set_list = args[2:]
else:
f = args[0]
set_list = args[1:]
if isinstance(f, Lambda):
pass
elif callable(f):
nargs = getattr(f, 'nargs', {})
if nargs:
if len(nargs) != 1:
raise NotImplementedError(filldedent('''
This function can take more than 1 arg
but the potentially complicated set input
has not been analyzed at this point to
know its dimensions. TODO
'''))
N = nargs.args[0]
if N == 1:
s = 'x'
else:
s = [Symbol('x%i' % i) for i in range(1, N + 1)]
else:
s = inspect.signature(f).parameters
dexpr = _sympify(f(*[Dummy() for i in s]))
var = tuple(uniquely_named_symbol(
Symbol(i), dexpr) for i in s)
f = Lambda(var, f(*var))
else:
raise TypeError(filldedent('''
expecting lambda, Lambda, or FunctionClass,
not \'%s\'.''' % func_name(f)))
if any(not isinstance(s, Set) for s in set_list):
name = [func_name(s) for s in set_list]
raise ValueError(
'arguments after mapping should be sets, not %s' % name)
if len(set_list) == 1:
set = set_list[0]
try:
# TypeError if arg count != set dimensions
r = set_function(f, set)
if r is None:
raise TypeError
if not r:
return r
except TypeError:
r = ImageSet(f, set)
if isinstance(r, ImageSet):
f, set = r.args
if f.variables[0] == f.expr:
return set
if isinstance(set, ImageSet):
# XXX: Maybe this should just be:
# f2 = set.lambda
# fun = Lambda(f2.signature, f(*f2.expr))
# return imageset(fun, *set.base_sets)
if len(set.lamda.variables) == 1 and len(f.variables) == 1:
x = set.lamda.variables[0]
y = f.variables[0]
return imageset(
Lambda(x, f.expr.subs(y, set.lamda.expr)), *set.base_sets)
if r is not None:
return r
return ImageSet(f, *set_list)
def is_function_invertible_in_set(func, setv):
"""
Checks whether function ``func`` is invertible when the domain is
restricted to set ``setv``.
"""
from sympy.functions.elementary.exponential import (exp, log)
# Functions known to always be invertible:
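# (Illustrative: is_function_invertible_in_set(exp, Interval(0, 1)) returns True via this check.)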
if func in (exp, log):
return True
u = Dummy("u")
fdiff = func(u).diff(u)
# monotonic functions:
# TODO: check subsets (`func` in `setv`)
if (fdiff > 0) == True or (fdiff < 0) == True:
return True
# TODO: support more
return None
def simplify_union(args):
"""
Simplify a :class:`Union` using known rules.
Explanation
===========
We first start with global rules like 'Merge all FiniteSets'
Then we iterate through all pairs and ask the constituent sets if they
can simplify themselves with any other constituent. This process depends
on ``union_sets(a, b)`` functions.
"""
from sympy.sets.handlers.union import union_sets
# ===== Global Rules =====
if not args:
return S.EmptySet
for arg in args:
if not isinstance(arg, Set):
raise TypeError("Input args to Union must be Sets")
# Merge all finite sets
finite_sets = [x for x in args if x.is_FiniteSet]
if len(finite_sets) > 1:
a = (x for set in finite_sets for x in set)
finite_set = FiniteSet(*a)
args = [finite_set] + [x for x in args if not x.is_FiniteSet]
# ===== Pair-wise Rules =====
# Here we depend on rules built into the constituent sets
args = set(args)
new_args = True
while new_args:
for s in args:
new_args = False
for t in args - {s}:
new_set = union_sets(s, t)
# union_sets returns None if s does not know how to unite
# with t. Returns the newly united set otherwise
if new_set is not None:
if not isinstance(new_set, set):
new_set = {new_set}
new_args = (args - {s, t}).union(new_set)
break
if new_args:
args = new_args
break
if len(args) == 1:
return args.pop()
else:
return Union(*args, evaluate=False)
def simplify_intersection(args):
"""
Simplify an intersection using known rules.
Explanation
===========
We first start with global rules like
'if any empty sets return empty set' and 'distribute any unions'
Then we iterate through all pairs and ask the constituent sets if they
can simplify themselves with any other constituent
"""
# ===== Global Rules =====
if not args:
return S.UniversalSet
for arg in args:
if not isinstance(arg, Set):
raise TypeError("Input args to Union must be Sets")
# If any EmptySets return EmptySet
if S.EmptySet in args:
return S.EmptySet
# Handle Finite sets
rv = Intersection._handle_finite_sets(args)
if rv is not None:
return rv
# If any of the sets are unions, return a Union of Intersections
for s in args:
if s.is_Union:
other_sets = set(args) - {s}
if len(other_sets) > 0:
other = Intersection(*other_sets)
return Union(*(Intersection(arg, other) for arg in s.args))
else:
return Union(*[arg for arg in s.args])
for s in args:
if s.is_Complement:
args.remove(s)
other_sets = args + [s.args[0]]
return Complement(Intersection(*other_sets), s.args[1])
from sympy.sets.handlers.intersection import intersection_sets
# At this stage we are guaranteed not to have any
# EmptySets, FiniteSets, or Unions in the intersection
# ===== Pair-wise Rules =====
# Here we depend on rules built into the constituent sets
args = set(args)
new_args = True
while new_args:
for s in args:
new_args = False
for t in args - {s}:
new_set = intersection_sets(s, t)
# This returns None if s does not know how to intersect
# with t. Returns the newly intersected set otherwise
if new_set is not None:
new_args = (args - {s, t}).union({new_set})
break
if new_args:
args = new_args
break
if len(args) == 1:
return args.pop()
else:
return Intersection(*args, evaluate=False)
def _handle_finite_sets(op, x, y, commutative):
# Handle finite sets:
fs_args, other = sift([x, y], lambda x: isinstance(x, FiniteSet), binary=True)
if len(fs_args) == 2:
return FiniteSet(*[op(i, j) for i in fs_args[0] for j in fs_args[1]])
elif len(fs_args) == 1:
sets = [_apply_operation(op, other[0], i, commutative) for i in fs_args[0]]
return Union(*sets)
else:
return None
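# Sketch of the behaviour above: with two FiniteSets the operation is applied pairwise,
# e.g. applying op to {1, 2} and {10} gives FiniteSet(op(1, 10), op(2, 10)).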
def _apply_operation(op, x, y, commutative):
from sympy.sets import ImageSet
d = Dummy('d')
out = _handle_finite_sets(op, x, y, commutative)
if out is None:
out = op(x, y)
if out is None and commutative:
out = op(y, x)
if out is None:
_x, _y = symbols("x y")
if isinstance(x, Set) and not isinstance(y, Set):
out = ImageSet(Lambda(d, op(d, y)), x).doit()
elif not isinstance(x, Set) and isinstance(y, Set):
out = ImageSet(Lambda(d, op(x, d)), y).doit()
else:
out = ImageSet(Lambda((_x, _y), op(_x, _y)), x, y)
return out
def set_add(x, y):
from sympy.sets.handlers.add import _set_add
return _apply_operation(_set_add, x, y, commutative=True)
def set_sub(x, y):
from sympy.sets.handlers.add import _set_sub
return _apply_operation(_set_sub, x, y, commutative=False)
def set_mul(x, y):
from sympy.sets.handlers.mul import _set_mul
return _apply_operation(_set_mul, x, y, commutative=True)
def set_div(x, y):
from sympy.sets.handlers.mul import _set_div
return _apply_operation(_set_div, x, y, commutative=False)
def set_pow(x, y):
from sympy.sets.handlers.power import _set_pow
return _apply_operation(_set_pow, x, y, commutative=False)
def set_function(f, x):
from sympy.sets.handlers.functions import _set_function
return _set_function(f, x)
from sympy.core.singleton import S
from sympy.core.basic import Basic
from sympy.core.containers import Tuple
from sympy.core.function import Lambda, BadSignatureError
from sympy.core.logic import fuzzy_bool
from sympy.core.relational import Eq
from sympy.core.symbol import Dummy
from sympy.core.sympify import _sympify
from sympy.logic.boolalg import And, as_Boolean
from sympy.utilities.iterables import sift, flatten, has_dups
from sympy.utilities.exceptions import SymPyDeprecationWarning
from .contains import Contains
from .sets import Set, Union, FiniteSet
adummy = Dummy('conditionset')
class ConditionSet(Set):
r"""
Set of elements which satisfies a given condition.
.. math:: \{x \mid \textrm{condition}(x) = \texttt{True}, x \in S\}
Examples
========
>>> from sympy import Symbol, S, ConditionSet, pi, Eq, sin, Interval
>>> from sympy.abc import x, y, z
>>> sin_sols = ConditionSet(x, Eq(sin(x), 0), Interval(0, 2*pi))
>>> 2*pi in sin_sols
True
>>> pi/2 in sin_sols
False
>>> 3*pi in sin_sols
False
>>> 5 in ConditionSet(x, x**2 > 4, S.Reals)
True
If the value is not in the base set, the result is false:
>>> 5 in ConditionSet(x, x**2 > 4, Interval(2, 4))
False
Notes
=====
Symbols with assumptions should be avoided or else the
condition may evaluate without consideration of the set:
>>> n = Symbol('n', negative=True)
>>> cond = (n > 0); cond
False
>>> ConditionSet(n, cond, S.Integers)
EmptySet
Only free symbols can be changed by using `subs`:
>>> c = ConditionSet(x, x < 1, {x, z})
>>> c.subs(x, y)
ConditionSet(x, x < 1, {y, z})
To check if ``pi`` is in ``c`` use:
>>> pi in c
False
If no base set is specified, the universal set is implied:
>>> ConditionSet(x, x < 1).base_set
UniversalSet
Only symbols or symbol-like expressions can be used:
>>> ConditionSet(x + 1, x + 1 < 1, S.Integers)
Traceback (most recent call last):
...
ValueError: non-symbol dummy not recognized in condition
When the base set is a ConditionSet, the symbols will be
unified if possible with preference for the outermost symbols:
>>> ConditionSet(x, x < y, ConditionSet(z, z + y < 2, S.Integers))
ConditionSet(x, (x < y) & (x + y < 2), Integers)
"""
def __new__(cls, sym, condition, base_set=S.UniversalSet):
sym = _sympify(sym)
flat = flatten([sym])
if has_dups(flat):
raise BadSignatureError("Duplicate symbols detected")
base_set = _sympify(base_set)
if not isinstance(base_set, Set):
raise TypeError(
'base set should be a Set object, not %s' % base_set)
condition = _sympify(condition)
if isinstance(condition, FiniteSet):
condition_orig = condition
temp = (Eq(lhs, 0) for lhs in condition)
condition = And(*temp)
SymPyDeprecationWarning(
feature="Using {} for condition".format(condition_orig),
issue=17651,
deprecated_since_version='1.5',
useinstead="{} for condition".format(condition)
).warn()
condition = as_Boolean(condition)
if condition is S.true:
return base_set
if condition is S.false:
return S.EmptySet
if base_set is S.EmptySet:
return S.EmptySet
# no simple answers, so now check syms
for i in flat:
if not getattr(i, '_diff_wrt', False):
raise ValueError('`%s` is not symbol-like' % i)
if base_set.contains(sym) is S.false:
raise TypeError('sym `%s` is not in base_set `%s`' % (sym, base_set))
know = None
if isinstance(base_set, FiniteSet):
sifted = sift(
base_set, lambda _: fuzzy_bool(condition.subs(sym, _)))
if sifted[None]:
know = FiniteSet(*sifted[True])
base_set = FiniteSet(*sifted[None])
else:
return FiniteSet(*sifted[True])
if isinstance(base_set, cls):
s, c, b = base_set.args
def sig(s):
return cls(s, Eq(adummy, 0)).as_dummy().sym
sa, sb = map(sig, (sym, s))
if sa != sb:
raise BadSignatureError('sym does not match sym of base set')
reps = dict(zip(flatten([sym]), flatten([s])))
if s == sym:
condition = And(condition, c)
base_set = b
elif not c.free_symbols & sym.free_symbols:
reps = {v: k for k, v in reps.items()}
condition = And(condition, c.xreplace(reps))
base_set = b
elif not condition.free_symbols & s.free_symbols:
sym = sym.xreplace(reps)
condition = And(condition.xreplace(reps), c)
base_set = b
# flatten ConditionSet(Contains(ConditionSet())) expressions
if isinstance(condition, Contains) and (sym == condition.args[0]):
if isinstance(condition.args[1], Set):
return condition.args[1].intersect(base_set)
rv = Basic.__new__(cls, sym, condition, base_set)
return rv if know is None else Union(know, rv)
sym = property(lambda self: self.args[0])
condition = property(lambda self: self.args[1])
base_set = property(lambda self: self.args[2])
@property
def free_symbols(self):
cond_syms = self.condition.free_symbols - self.sym.free_symbols
return cond_syms | self.base_set.free_symbols
@property
def bound_symbols(self):
return flatten([self.sym])
def _contains(self, other):
def ok_sig(a, b):
tuples = [isinstance(i, Tuple) for i in (a, b)]
c = tuples.count(True)
if c == 1:
return False
if c == 0:
return True
return len(a) == len(b) and all(
ok_sig(i, j) for i, j in zip(a, b))
if not ok_sig(self.sym, other):
return S.false
# try doing base_cond first and return
# False immediately if it is False
base_cond = Contains(other, self.base_set)
if base_cond is S.false:
return S.false
# Substitute other into condition. This could raise e.g. for
# ConditionSet(x, 1/x >= 0, Reals).contains(0)
lamda = Lambda((self.sym,), self.condition)
try:
lambda_cond = lamda(other)
except TypeError:
return Contains(other, self, evaluate=False)
else:
return And(base_cond, lambda_cond)
def as_relational(self, other):
f = Lambda(self.sym, self.condition)
if isinstance(self.sym, Tuple):
f = f(*other)
else:
f = f(other)
return And(f, self.base_set.contains(other))
def _eval_subs(self, old, new):
sym, cond, base = self.args
dsym = sym.subs(old, adummy)
insym = dsym.has(adummy)
# prioritize changing a symbol in the base
newbase = base.subs(old, new)
if newbase != base:
if not insym:
cond = cond.subs(old, new)
return self.func(sym, cond, newbase)
if insym:
pass # no change of bound symbols via subs
elif getattr(new, '_diff_wrt', False):
cond = cond.subs(old, new)
else:
pass # let error about the symbol raise from __new__
return self.func(sym, cond, base)
"""Implicit plotting module for SymPy.
Explanation
===========
The module implements a data series called ImplicitSeries which is used by
``Plot`` class to plot implicit plots for different backends. The module,
by default, implements plotting using interval arithmetic. It switches to a
fallback algorithm if the expression cannot be plotted using interval arithmetic.
It is also possible to request the fallback algorithm for all plots. Boolean
combinations of expressions cannot be plotted by the fallback algorithm.
See Also
========
sympy.plotting.plot
References
==========
.. [1] Jeffrey Allen Tupper. Reliable Two-Dimensional Graphing Methods for
Mathematical Formulae with Two Free Variables.
.. [2] Jeffrey Allen Tupper. Graphing Equations with Generalized Interval
Arithmetic. Master's thesis. University of Toronto, 1996
"""
from .plot import BaseSeries, Plot
from .experimental_lambdify import experimental_lambdify, vectorized_lambdify
from .intervalmath import interval
from sympy.core.relational import (Equality, GreaterThan, LessThan,
Relational, StrictLessThan, StrictGreaterThan)
from sympy.core.containers import Tuple
from sympy.core.relational import Eq
from sympy.core.symbol import (Dummy, Symbol)
from sympy.core.sympify import sympify
from sympy.external import import_module
from sympy.logic.boolalg import BooleanFunction
from sympy.polys.polyutils import _sort_gens
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import flatten
import warnings
class ImplicitSeries(BaseSeries):
""" Representation for Implicit plot """
is_implicit = True
def __init__(self, expr, var_start_end_x, var_start_end_y,
has_equality, use_interval_math, depth, nb_of_points,
line_color):
super().__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_raster
self.has_equality = has_equality  # If the expression has equality, i.e.
# Eq, GreaterThan, LessThan.
self.nb_of_points = nb_of_points
self.use_interval_math = use_interval_math
self.depth = 4 + depth
self.line_color = line_color
def __str__(self):
return ('Implicit equation: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_raster(self):
func = experimental_lambdify((self.var_x, self.var_y), self.expr,
use_interval=True)
xinterval = interval(self.start_x, self.end_x)
yinterval = interval(self.start_y, self.end_y)
try:
func(xinterval, yinterval)
except AttributeError:
# XXX: AttributeError("'list' object has no attribute 'is_real'")
# That needs fixing somehow - we shouldn't be catching
# AttributeError here.
if self.use_interval_math:
warnings.warn("Adaptive meshing could not be applied to the"
" expression. Using uniform meshing.")
self.use_interval_math = False
if self.use_interval_math:
return self._get_raster_interval(func)
else:
return self._get_meshes_grid()
def _get_raster_interval(self, func):
""" Uses interval math to adaptively mesh and obtain the plot"""
k = self.depth
interval_list = []
#Create initial 32 divisions
np = import_module('numpy')
xsample = np.linspace(self.start_x, self.end_x, 33)
ysample = np.linspace(self.start_y, self.end_y, 33)
#Add a small jitter so that there are no false positives for equality.
# Ex: y==x becomes True for x interval(1, 2) and y interval(1, 2)
#which will draw a rectangle.
jitterx = (np.random.rand(
len(xsample)) * 2 - 1) * (self.end_x - self.start_x) / 2**20
jittery = (np.random.rand(
len(ysample)) * 2 - 1) * (self.end_y - self.start_y) / 2**20
xsample += jitterx
ysample += jittery
xinter = [interval(x1, x2) for x1, x2 in zip(xsample[:-1],
xsample[1:])]
yinter = [interval(y1, y2) for y1, y2 in zip(ysample[:-1],
ysample[1:])]
interval_list = [[x, y] for x in xinter for y in yinter]
plot_list = []
# Recursively call refine_pixels, which subdivides the intervals that are
# neither True nor False according to the expression.
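# Each undecided cell is split into four quadrants (a quadtree-style refinement); the number
# of refinement passes is bounded by ``self.depth`` in the while loop below.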
def refine_pixels(interval_list):
""" Evaluates the intervals and subdivides the interval if the
expression is partially satisfied."""
temp_interval_list = []
plot_list = []
for intervals in interval_list:
#Convert the array indices to x and y values
intervalx = intervals[0]
intervaly = intervals[1]
func_eval = func(intervalx, intervaly)
#The expression is valid in the interval. Change the contour
#array values to 1.
if func_eval[1] is False or func_eval[0] is False:
pass
elif func_eval == (True, True):
plot_list.append([intervalx, intervaly])
elif func_eval[1] is None or func_eval[0] is None:
#Subdivide
avgx = intervalx.mid
avgy = intervaly.mid
a = interval(intervalx.start, avgx)
b = interval(avgx, intervalx.end)
c = interval(intervaly.start, avgy)
d = interval(avgy, intervaly.end)
temp_interval_list.append([a, c])
temp_interval_list.append([a, d])
temp_interval_list.append([b, c])
temp_interval_list.append([b, d])
return temp_interval_list, plot_list
while k >= 0 and len(interval_list):
interval_list, plot_list_temp = refine_pixels(interval_list)
plot_list.extend(plot_list_temp)
k = k - 1
#Check whether the expression represents an equality
#If it represents an equality, then none of the intervals
#would have satisfied the expression due to floating point
#differences. Add all the undecided values to the plot.
if self.has_equality:
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
func_eval = func(intervalx, intervaly)
if func_eval[1] and func_eval[0] is not False:
plot_list.append([intervalx, intervaly])
return plot_list, 'fill'
def _get_meshes_grid(self):
"""Generates the mesh for generating a contour.
In the case of equality, ``contour`` function of matplotlib can
be used. In other cases, matplotlib's ``contourf`` is used.
"""
equal = False
if isinstance(self.expr, Equality):
expr = self.expr.lhs - self.expr.rhs
equal = True
elif isinstance(self.expr, (GreaterThan, StrictGreaterThan)):
expr = self.expr.lhs - self.expr.rhs
elif isinstance(self.expr, (LessThan, StrictLessThan)):
expr = self.expr.rhs - self.expr.lhs
else:
raise NotImplementedError("The expression is not supported for "
"plotting in uniform meshed plot.")
np = import_module('numpy')
xarray = np.linspace(self.start_x, self.end_x, self.nb_of_points)
yarray = np.linspace(self.start_y, self.end_y, self.nb_of_points)
x_grid, y_grid = np.meshgrid(xarray, yarray)
func = vectorized_lambdify((self.var_x, self.var_y), expr)
z_grid = func(x_grid, y_grid)
z_grid[np.ma.where(z_grid < 0)] = -1
z_grid[np.ma.where(z_grid > 0)] = 1
if equal:
return xarray, yarray, z_grid, 'contour'
else:
return xarray, yarray, z_grid, 'contourf'
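# A minimal, self-contained sketch (not part of the plotting API) of the
# sign-grid idea used by _get_meshes_grid above: evaluate lhs - rhs on a
# uniform grid, clamp the sign to -1/+1, and let matplotlib draw either the
# zero contour (for an equality) or the filled region (for an inequality).
# The function name and the hard-coded expression are illustrative only.
def _sign_grid_sketch(equality=False):
    import numpy as np
    import matplotlib.pyplot as plt
    x, y = np.meshgrid(np.linspace(-3, 3, 300), np.linspace(-3, 3, 300))
    z = x**2 + y**2 - 4          # lhs - rhs of x**2 + y**2 = 4 (or < 4)
    z = np.where(z < 0, -1, 1)   # only the sign of the difference matters
    if equality:
        plt.contour(x, y, z)     # draws the boundary curve
    else:
        plt.contourf(x, y, z)    # fills the two sign regions
    plt.show()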
@doctest_depends_on(modules=('matplotlib',))
def plot_implicit(expr, x_var=None, y_var=None, adaptive=True, depth=0,
points=300, line_color="blue", show=True, **kwargs):
"""A plot function to plot implicit equations / inequalities.
Arguments
=========
- ``expr`` : The equation / inequality that is to be plotted.
- ``x_var`` (optional) : symbol to plot on x-axis or tuple giving symbol
and range as ``(symbol, xmin, xmax)``
- ``y_var`` (optional) : symbol to plot on y-axis or tuple giving symbol
and range as ``(symbol, ymin, ymax)``
If neither ``x_var`` nor ``y_var`` is given then the free symbols in the
expression will be assigned in the order they are sorted.
The following keyword arguments can also be used:
- ``adaptive`` Boolean. The default value is set to True. It has to be
set to False if you want to use a mesh grid.
- ``depth`` integer. The depth of recursion for adaptive mesh grid.
Default value is 0. Takes value in the range (0, 4).
- ``points`` integer. The number of points if adaptive mesh grid is not
used. Default value is 300.
- ``show`` Boolean. Default value is True. If set to False, the plot will
not be shown. See ``Plot`` for further information.
- ``title`` string. The title for the plot.
- ``xlabel`` string. The label for the x-axis
- ``ylabel`` string. The label for the y-axis
Aesthetics options:
- ``line_color``: float or string. Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
Default value is "blue".
plot_implicit, by default, uses interval arithmetic to plot functions. If
the expression cannot be plotted using interval arithmetic, it falls back to
generating a contour using a mesh grid with a fixed number of points. By
setting adaptive to False, you can force plot_implicit to use the mesh
grid. The mesh grid method can be effective when adaptive plotting using
interval arithmetic fails to plot with a small line width.
Examples
========
Plot expressions:
.. plot::
:context: reset
:format: doctest
:include-source: True
>>> from sympy import plot_implicit, symbols, Eq, And
>>> x, y = symbols('x y')
Without any ranges for the symbols in the expression:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> p1 = plot_implicit(Eq(x**2 + y**2, 5))
With the range for the symbols:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> p2 = plot_implicit(
... Eq(x**2 + y**2, 3), (x, -3, 3), (y, -3, 3))
With depth of recursion as argument:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> p3 = plot_implicit(
... Eq(x**2 + y**2, 5), (x, -4, 4), (y, -4, 4), depth = 2)
Using mesh grid and not using adaptive meshing:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> p4 = plot_implicit(
... Eq(x**2 + y**2, 5), (x, -5, 5), (y, -2, 2),
... adaptive=False)
Using mesh grid without using adaptive meshing with number of points
specified:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> p5 = plot_implicit(
... Eq(x**2 + y**2, 5), (x, -5, 5), (y, -2, 2),
... adaptive=False, points=400)
Plotting regions:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> p6 = plot_implicit(y > x**2)
Plotting Using boolean conjunctions:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> p7 = plot_implicit(And(y > x, y > -x))
When plotting an expression with a single variable (y - 1, for example),
specify the x or the y variable explicitly:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> p8 = plot_implicit(y - 1, y_var=y)
>>> p9 = plot_implicit(x - 1, x_var=x)
"""
has_equality = False # Represents whether the expression contains an Equality,
#GreaterThan or LessThan
def arg_expand(bool_expr):
"""
Recursively expands the arguments of a Boolean function
"""
for arg in bool_expr.args:
if isinstance(arg, BooleanFunction):
arg_expand(arg)
elif isinstance(arg, Relational):
arg_list.append(arg)
arg_list = []
if isinstance(expr, BooleanFunction):
arg_expand(expr)
#Check whether there is an equality in the expression provided.
if any(isinstance(e, (Equality, GreaterThan, LessThan))
for e in arg_list):
has_equality = True
elif not isinstance(expr, Relational):
expr = Eq(expr, 0)
has_equality = True
elif isinstance(expr, (Equality, GreaterThan, LessThan)):
has_equality = True
xyvar = [i for i in (x_var, y_var) if i is not None]
free_symbols = expr.free_symbols
range_symbols = Tuple(*flatten(xyvar)).free_symbols
undeclared = free_symbols - range_symbols
if len(free_symbols & range_symbols) > 2:
raise NotImplementedError("Implicit plotting is not implemented for "
"more than 2 variables")
#Create default ranges if the range is not provided.
default_range = Tuple(-5, 5)
def _range_tuple(s):
if isinstance(s, Symbol):
return Tuple(s) + default_range
if len(s) == 3:
return Tuple(*s)
raise ValueError('symbol or `(symbol, min, max)` expected but got %s' % s)
if len(xyvar) == 0:
xyvar = list(_sort_gens(free_symbols))
var_start_end_x = _range_tuple(xyvar[0])
x = var_start_end_x[0]
if len(xyvar) != 2:
if x in undeclared or not undeclared:
xyvar.append(Dummy('f(%s)' % x.name))
else:
xyvar.append(undeclared.pop())
var_start_end_y = _range_tuple(xyvar[1])
#Check whether the depth is greater than 4 or less than 0.
if depth > 4:
depth = 4
elif depth < 0:
depth = 0
series_argument = ImplicitSeries(expr, var_start_end_x, var_start_end_y,
has_equality, adaptive, depth,
points, line_color)
#set the x and y limits
kwargs['xlim'] = tuple(float(x) for x in var_start_end_x[1:])
kwargs['ylim'] = tuple(float(y) for y in var_start_end_y[1:])
# set the x and y labels
kwargs.setdefault('xlabel', var_start_end_x[0].name)
kwargs.setdefault('ylabel', var_start_end_y[0].name)
p = Plot(series_argument, **kwargs)
if show:
p.show()
return p
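# A minimal, self-contained sketch (with an illustrative name) of the
# relational-flattening step performed by ``arg_expand`` above: walk a
# boolean combination of relationals and collect the leaf relations, which
# is what plot_implicit uses to detect whether an equality is present.
def _collect_relationals_sketch(expr):
    from sympy.core.relational import Relational
    from sympy.logic.boolalg import BooleanFunction
    found = []

    def walk(e):
        for arg in e.args:
            if isinstance(arg, BooleanFunction):
                walk(arg)
            elif isinstance(arg, Relational):
                found.append(arg)

    if isinstance(expr, BooleanFunction):
        walk(expr)
    elif isinstance(expr, Relational):
        found.append(expr)
    return found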
|
483785a3c295aacceb010fe5c20416270a56fe01085d2bf3c5538c154dcf3b4c | """Plotting module for SymPy.
A plot is represented by the ``Plot`` class that contains a reference to the
backend and a list of the data series to be plotted. The data series are
instances of classes meant to simplify getting points and meshes from SymPy
expressions. ``plot_backends`` is a dictionary with all the backends.
This module gives only the essential. For all the fancy stuff use directly
the backend. You can get the backend wrapper for every plot from the
``_backend`` attribute. Moreover the data series classes have various useful
methods like ``get_points``, ``get_meshes``, etc, that may
be useful if you wish to use another plotting library.
Especially if you need publication-ready graphs and this module is not enough
for you, just get the ``_backend`` attribute and add whatever you want
directly to it. In the case of matplotlib (the common way to graph data in
Python), just copy ``_backend.fig``, which is the figure, and ``_backend.ax``,
which is the axis, and work on them as you would on any other matplotlib object.
Simplicity of code takes much greater importance than performance. Don't use it
if you care at all about performance. A new backend instance is initialized
every time you call ``show()`` and the old one is left to the garbage collector.
"""
from collections.abc import Callable
from sympy.core.containers import Tuple
from sympy.core.expr import Expr
from sympy.core.symbol import (Dummy, Symbol)
from sympy.core.sympify import sympify
from sympy.external import import_module
from sympy.core.function import arity
from sympy.utilities.iterables import is_sequence
from .experimental_lambdify import (vectorized_lambdify, lambdify)
from sympy.utilities.exceptions import SymPyDeprecationWarning
# N.B.
# When changing the minimum module version for matplotlib, please change
# the same in the ``SymPyDocTestFinder`` in ``sympy/testing/runtests.py``
# Backend specific imports - textplot
from sympy.plotting.textplot import textplot
# Global variable
# Set to False when running tests / doctests so that the plots don't show.
_show = True
def unset_show():
"""
Disable show(). For use in the tests.
"""
global _show
_show = False
##############################################################################
# The public interface
##############################################################################
class Plot:
"""The central class of the plotting module.
Explanation
===========
For interactive work the function ``plot`` is better suited.
This class permits the plotting of SymPy expressions using numerous
backends (matplotlib, textplot, the old pyglet module for sympy, Google
charts api, etc).
The figure can contain an arbitrary number of plots of SymPy expressions,
lists of coordinates of points, etc. Plot has a private attribute _series that
contains all data series to be plotted (expressions for lines or surfaces,
lists of points, etc (all subclasses of BaseSeries)). Those data series are
instances of classes not imported by ``from sympy import *``.
The customization of the figure is on two levels. Global options that
concern the figure as a whole (eg title, xlabel, scale, etc) and
per-data series options (eg name) and aesthetics (eg. color, point shape,
line type, etc.).
The difference between options and aesthetics is that an aesthetic can be
a function of the coordinates (or parameters in a parametric plot). The
supported values for an aesthetic are:
- None (the backend uses default values)
- a constant
- a function of one variable (the first coordinate or parameter)
- a function of two variables (the first and second coordinate or
parameters)
- a function of three variables (only in nonparametric 3D plots)
Their implementation depends on the backend so they may not work in some
backends.
If the plot is parametric and the arity of the aesthetic function permits
it the aesthetic is calculated over parameters and not over coordinates.
If the arity does not permit calculation over parameters the calculation is
done over coordinates.
Only cartesian coordinates are supported for the moment, but you can use
the parametric plots to plot in polar, spherical and cylindrical
coordinates.
The arguments for the constructor Plot must be subclasses of BaseSeries.
Any global option can be specified as a keyword argument.
The global options for a figure are:
- title : str
- xlabel : str
- ylabel : str
- zlabel : str
- legend : bool
- xscale : {'linear', 'log'}
- yscale : {'linear', 'log'}
- axis : bool
- axis_center : tuple of two floats or {'center', 'auto'}
- xlim : tuple of two floats
- ylim : tuple of two floats
- aspect_ratio : tuple of two floats or {'auto'}
- autoscale : bool
- margin : float in [0, 1]
- backend : {'default', 'matplotlib', 'text'} or a subclass of BaseBackend
- size : optional tuple of two floats, (width, height); default: None
The per data series options and aesthetics are:
There are none in the base series. See below for options for subclasses.
Some data series support additional aesthetics or options:
ListSeries, LineOver1DRangeSeries, Parametric2DLineSeries,
Parametric3DLineSeries support the following:
Aesthetics:
- line_color : string, or float, or function, optional
Specifies the color for the plot, which depends on the backend being
used.
For example, if ``MatplotlibBackend`` is being used, then
Matplotlib string colors are acceptable ("red", "r", "cyan", "c", ...).
Alternatively, we can use a float number `0 < color < 1` wrapped in a
string (for example, `line_color="0.5"`) to specify grayscale colors.
Alternatively, we can specify a function returning a single
float value: this will be used to apply a color-loop (for example,
`line_color=lambda x: math.cos(x)`).
Note that by setting ``line_color``, it will be applied simultaneously
to all the series.
Options:
- label : str
- steps : bool
- integers_only : bool
SurfaceOver2DRangeSeries, ParametricSurfaceSeries support the following:
Aesthetics:
- surface_color : function which returns a float.
"""
def __init__(self, *args,
title=None, xlabel=None, ylabel=None, zlabel=None, aspect_ratio='auto',
xlim=None, ylim=None, axis_center='auto', axis=True,
xscale='linear', yscale='linear', legend=False, autoscale=True,
margin=0, annotations=None, markers=None, rectangles=None,
fill=None, backend='default', size=None, **kwargs):
super().__init__()
# Options for the graph as a whole.
# The possible values for each option are described in the docstring of
# Plot. They are based purely on convention, no checking is done.
self.title = title
self.xlabel = xlabel
self.ylabel = ylabel
self.zlabel = zlabel
self.aspect_ratio = aspect_ratio
self.axis_center = axis_center
self.axis = axis
self.xscale = xscale
self.yscale = yscale
self.legend = legend
self.autoscale = autoscale
self.margin = margin
self.annotations = annotations
self.markers = markers
self.rectangles = rectangles
self.fill = fill
# Contains the data objects to be plotted. The backend should be smart
# enough to iterate over this list.
self._series = []
self._series.extend(args)
# The backend type. On every show() a new backend instance is created
# in self._backend which is tightly coupled to the Plot instance
# (thanks to the parent attribute of the backend).
if isinstance(backend, str):
self.backend = plot_backends[backend]
elif (type(backend) == type) and issubclass(backend, BaseBackend):
self.backend = backend
else:
raise TypeError(
"backend must be either a string or a subclass of BaseBackend")
is_real = \
lambda lim: all(getattr(i, 'is_real', True) for i in lim)
is_finite = \
lambda lim: all(getattr(i, 'is_finite', True) for i in lim)
# reduce code repetition
def check_and_set(t_name, t):
if t:
if not is_real(t):
raise ValueError(
"All numbers from {}={} must be real".format(t_name, t))
if not is_finite(t):
raise ValueError(
"All numbers from {}={} must be finite".format(t_name, t))
setattr(self, t_name, (float(t[0]), float(t[1])))
self.xlim = None
check_and_set("xlim", xlim)
self.ylim = None
check_and_set("ylim", ylim)
self.size = None
check_and_set("size", size)
def show(self):
# TODO move this to the backend (also for save)
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.show()
def save(self, path):
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.save(path)
def __str__(self):
series_strs = [('[%d]: ' % i) + str(s)
for i, s in enumerate(self._series)]
return 'Plot object containing:\n' + '\n'.join(series_strs)
def __getitem__(self, index):
return self._series[index]
def __setitem__(self, index, *args):
if len(args) == 1 and isinstance(args[0], BaseSeries):
self._series[index] = args[0]
def __delitem__(self, index):
del self._series[index]
def append(self, arg):
"""Adds an element from a plot's series to an existing plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot's first series object to the first, use the
``append`` method, like so:
.. plot::
:format: doctest
:include-source: True
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x, show=False)
>>> p2 = plot(x, show=False)
>>> p1.append(p2[0])
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
>>> p1.show()
See Also
========
extend
"""
if isinstance(arg, BaseSeries):
self._series.append(arg)
else:
raise TypeError('Must specify element of plot to append.')
def extend(self, arg):
"""Adds all series from another plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot to the first, use the ``extend`` method, like so:
.. plot::
:format: doctest
:include-source: True
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x**2, show=False)
>>> p2 = plot(x, -x, show=False)
>>> p1.extend(p2)
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
[2]: cartesian line: -x for x over (-10.0, 10.0)
>>> p1.show()
"""
if isinstance(arg, Plot):
self._series.extend(arg._series)
elif is_sequence(arg):
self._series.extend(arg)
else:
raise TypeError('Expecting Plot or sequence of BaseSeries')
class PlotGrid:
"""This class helps to plot subplots from already created SymPy plots
in a single figure.
Examples
========
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> from sympy import symbols
>>> from sympy.plotting import plot, plot3d, PlotGrid
>>> x, y = symbols('x, y')
>>> p1 = plot(x, x**2, x**3, (x, -5, 5))
>>> p2 = plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))
>>> p3 = plot(x**3, (x, -5, 5))
>>> p4 = plot3d(x*y, (x, -5, 5), (y, -5, 5))
Plotting vertically in a single line:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> PlotGrid(2, 1, p1, p2)
PlotGrid object containing:
Plot[0]:Plot object containing:
[0]: cartesian line: x for x over (-5.0, 5.0)
[1]: cartesian line: x**2 for x over (-5.0, 5.0)
[2]: cartesian line: x**3 for x over (-5.0, 5.0)
Plot[1]:Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
Plotting horizontally in a single line:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> PlotGrid(1, 3, p2, p3, p4)
PlotGrid object containing:
Plot[0]:Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
Plot[1]:Plot object containing:
[0]: cartesian line: x**3 for x over (-5.0, 5.0)
Plot[2]:Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Plotting in a grid form:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> PlotGrid(2, 2, p1, p2, p3, p4)
PlotGrid object containing:
Plot[0]:Plot object containing:
[0]: cartesian line: x for x over (-5.0, 5.0)
[1]: cartesian line: x**2 for x over (-5.0, 5.0)
[2]: cartesian line: x**3 for x over (-5.0, 5.0)
Plot[1]:Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
Plot[2]:Plot object containing:
[0]: cartesian line: x**3 for x over (-5.0, 5.0)
Plot[3]:Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
"""
def __init__(self, nrows, ncolumns, *args, show=True, size=None, **kwargs):
"""
Parameters
==========
nrows :
The number of rows that should be in the grid of the
required subplot.
ncolumns :
The number of columns that should be in the grid
of the required subplot.
nrows and ncolumns together define the required grid.
Arguments
=========
A list of predefined plot objects entered in a row-wise sequence
i.e. plot objects which are to be in the top row of the required
grid are written first, then the second row objects and so on
Keyword arguments
=================
show : Boolean
The default value is set to ``True``. Set show to ``False`` and
the function will not display the subplot. The returned instance
of the ``PlotGrid`` class can then be used to save or display the
plot by calling the ``save()`` and ``show()`` methods
respectively.
size : (float, float), optional
A tuple in the form (width, height) in inches to specify the size of
the overall figure. The default value is set to ``None``, meaning
the size will be set by the default backend.
"""
self.nrows = nrows
self.ncolumns = ncolumns
self._series = []
self.args = args
for arg in args:
self._series.append(arg._series)
self.backend = DefaultBackend
self.size = size
if show:
self.show()
def show(self):
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.show()
def save(self, path):
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.save(path)
def __str__(self):
plot_strs = [('Plot[%d]:' % i) + str(plot)
for i, plot in enumerate(self.args)]
return 'PlotGrid object containing:\n' + '\n'.join(plot_strs)
##############################################################################
# Data Series
##############################################################################
#TODO more general way to calculate aesthetics (see get_color_array)
### The base class for all series
class BaseSeries:
"""Base class for the data objects containing stuff to be plotted.
Explanation
===========
The backend should check if it supports the data series that it's given.
(eg TextBackend supports only LineOver1DRange).
It's the backend responsibility to know how to use the class of
data series that it's given.
Some data series classes are grouped (using a class attribute like is_2Dline)
according to the api they present (based only on convention). The backend is
not obliged to use that api (eg. The LineOver1DRange belongs to the
is_2Dline group and presents the get_points method, but the
TextBackend does not use the get_points method).
"""
# Some flags follow. The rationale for using flags instead of checking base
# classes is that setting multiple flags is simpler than multiple
# inheritance.
is_2Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y, list_z
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dsurface = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_contour = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_implicit = False
# Some of the backends expect:
# - get_meshes returning mesh_x (1D array), mesh_y (1D array),
# mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
# Different from is_contour as the colormap in backend will be
# different
is_parametric = False
# The calculation of aesthetics expects:
# - get_parameter_points returning one or two np.arrays (1D or 2D)
# used for calculation aesthetics
def __init__(self):
super().__init__()
@property
def is_3D(self):
flags3D = [
self.is_3Dline,
self.is_3Dsurface
]
return any(flags3D)
@property
def is_line(self):
flagslines = [
self.is_2Dline,
self.is_3Dline
]
return any(flagslines)
### 2D lines
class Line2DBaseSeries(BaseSeries):
"""A base class for 2D lines.
- adding the label, steps and only_integers options
- making is_2Dline true
- defining get_segments and get_color_array
"""
is_2Dline = True
_dim = 2
def __init__(self):
super().__init__()
self.label = None
self.steps = False
self.only_integers = False
self.line_color = None
def get_data(self):
""" Return lists of coordinates for plotting the line.
Returns
=======
x: list
List of x-coordinates
y: list
List of y-coordinates
z: list
List of z-coordinates in case of Parametric3DLineSeries
"""
np = import_module('numpy')
points = self.get_points()
if self.steps is True:
if len(points) == 2:
x = np.array((points[0], points[0])).T.flatten()[1:]
y = np.array((points[1], points[1])).T.flatten()[:-1]
points = (x, y)
else:
x = np.repeat(points[0], 3)[2:]
y = np.repeat(points[1], 3)[:-2]
z = np.repeat(points[2], 3)[1:-1]
points = (x, y, z)
return points
def get_segments(self):
SymPyDeprecationWarning(
feature="get_segments",
issue=21329,
deprecated_since_version="1.9",
useinstead="MatplotlibBackend.get_segments").warn()
np = import_module('numpy')
points = type(self).get_data(self)
points = np.ma.array(points).T.reshape(-1, 1, self._dim)
return np.ma.concatenate([points[:-1], points[1:]], axis=1)
def get_color_array(self):
np = import_module('numpy')
c = self.line_color
if hasattr(c, '__call__'):
f = np.vectorize(c)
nargs = arity(c)
if nargs == 1 and self.is_parametric:
x = self.get_parameter_points()
return f(centers_of_segments(x))
else:
variables = list(map(centers_of_segments, self.get_points()))
if nargs == 1:
return f(variables[0])
elif nargs == 2:
return f(*variables[:2])
else: # only if the line is 3D (otherwise raises an error)
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
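# A small, self-contained sketch (illustrative name) of the "steps"
# transformation performed in Line2DBaseSeries.get_data above: each point is
# duplicated and the x/y copies are shifted against each other by one sample,
# so matplotlib draws a staircase instead of straight segments.
def _steps_sketch():
    import numpy as np
    x = np.array([0.0, 1.0, 2.0])
    y = np.array([0.0, 1.0, 4.0])
    step_x = np.array((x, x)).T.flatten()[1:]   # [0., 1., 1., 2., 2.]
    step_y = np.array((y, y)).T.flatten()[:-1]  # [0., 0., 1., 1., 4.]
    return step_x, step_y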
class List2DSeries(Line2DBaseSeries):
"""Representation for a line consisting of list of points."""
def __init__(self, list_x, list_y):
np = import_module('numpy')
super().__init__()
self.list_x = np.array(list_x)
self.list_y = np.array(list_y)
self.label = 'list'
def __str__(self):
return 'list plot'
def get_points(self):
return (self.list_x, self.list_y)
class LineOver1DRangeSeries(Line2DBaseSeries):
"""Representation for a line consisting of a SymPy expression over a range."""
def __init__(self, expr, var_start_end, **kwargs):
super().__init__()
self.expr = sympify(expr)
self.label = kwargs.get('label', None) or str(self.expr)
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
self.xscale = kwargs.get('xscale', 'linear')
def __str__(self):
return 'cartesian line: %s for %s over %s' % (
str(self.expr), str(self.var), str((self.start, self.end)))
def get_points(self):
""" Return lists of coordinates for plotting. Depending on the
`adaptive` option, this function will either use an adaptive algorithm
or it will uniformly sample the expression over the provided range.
Returns
=======
x: list
List of x-coordinates
y: list
List of y-coordinates
Explanation
===========
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
.. [1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if self.only_integers or not self.adaptive:
return self._uniform_sampling()
else:
f = lambdify([self.var], self.expr)
x_coords = []
y_coords = []
np = import_module('numpy')
def sample(p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
# Randomly sample to avoid aliasing.
random = 0.45 + np.random.rand() * 0.1
if self.xscale == 'log':
xnew = 10**(np.log10(p[0]) + random * (np.log10(q[0]) -
np.log10(p[0])))
else:
xnew = p[0] + random * (q[0] - p[0])
ynew = f(xnew)
new_point = np.array([xnew, ynew])
# Maximum depth
if depth > self.depth:
x_coords.append(q[0])
y_coords.append(q[1])
# Sample irrespective of whether the line is flat up to the
# depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
# Sample ten points if complex values are encountered
# at both ends. If there is a real value in between, then
# sample those points further.
elif p[1] is None and q[1] is None:
if self.xscale == 'log':
xarray = np.logspace(p[0], q[0], 10)
else:
xarray = np.linspace(p[0], q[0], 10)
yarray = list(map(f, xarray))
if not all(y is None for y in yarray):
for i in range(len(yarray) - 1):
if not (yarray[i] is None and yarray[i + 1] is None):
sample([xarray[i], yarray[i]],
[xarray[i + 1], yarray[i + 1]], depth + 1)
# Sample further if one of the end points is None (i.e. a
# complex value) or the three points are not almost collinear.
elif (p[1] is None or q[1] is None or new_point[1] is None
or not flat(p, new_point, q)):
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
else:
x_coords.append(q[0])
y_coords.append(q[1])
f_start = f(self.start)
f_end = f(self.end)
x_coords.append(self.start)
y_coords.append(f_start)
sample(np.array([self.start, f_start]),
np.array([self.end, f_end]), 0)
return (x_coords, y_coords)
def _uniform_sampling(self):
np = import_module('numpy')
if self.only_integers is True:
if self.xscale == 'log':
list_x = np.logspace(int(self.start), int(self.end),
num=int(self.end) - int(self.start) + 1)
else:
list_x = np.linspace(int(self.start), int(self.end),
num=int(self.end) - int(self.start) + 1)
else:
if self.xscale == 'log':
list_x = np.logspace(self.start, self.end, num=self.nb_of_points)
else:
list_x = np.linspace(self.start, self.end, num=self.nb_of_points)
f = vectorized_lambdify([self.var], self.expr)
list_y = f(list_x)
return (list_x, list_y)
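# A minimal, self-contained sketch (illustrative names, fixed tolerance) of
# the adaptive sampling strategy used by get_points above: pick a randomly
# jittered midpoint, keep subdividing while the three points are not almost
# collinear, and stop once a maximum depth is reached.  The real
# implementation additionally handles complex values and log-scaled axes.
def _adaptive_sample_sketch(f, start, end, max_depth=12, eps=1e-3):
    import numpy as np
    xs, ys = [start], [f(start)]

    def almost_collinear(p, mid, q):
        a = np.array(p) - np.array(mid)
        b = np.array(q) - np.array(mid)
        denom = np.linalg.norm(a) * np.linalg.norm(b)
        if denom == 0:
            return True
        return abs(np.dot(a, b) / denom + 1) < eps

    def sample(p, q, depth):
        t = 0.45 + np.random.rand() * 0.1        # jitter to avoid aliasing
        xnew = p[0] + t * (q[0] - p[0])
        mid = (xnew, f(xnew))
        if depth > max_depth or almost_collinear(p, mid, q):
            xs.append(q[0])
            ys.append(q[1])
        else:
            sample(p, mid, depth + 1)
            sample(mid, q, depth + 1)

    sample((start, f(start)), (end, f(end)), 0)
    return xs, ys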
class Parametric2DLineSeries(Line2DBaseSeries):
"""Representation for a line consisting of two parametric SymPy expressions
over a range."""
is_parametric = True
def __init__(self, expr_x, expr_y, var_start_end, **kwargs):
super().__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.label = kwargs.get('label', None) or \
"(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'parametric cartesian line: (%s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.var),
str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def _uniform_sampling(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
list_x = fx(param)
list_y = fy(param)
return (list_x, list_y)
def get_points(self):
""" Return lists of coordinates for plotting. Depending on the
`adaptive` option, this function will either use an adaptive algorithm
or it will uniformly sample the expression over the provided range.
Returns
=======
x: list
List of x-coordinates
y: list
List of y-coordinates
Explanation
===========
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
.. [1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if not self.adaptive:
return self._uniform_sampling()
f_x = lambdify([self.var], self.expr_x)
f_y = lambdify([self.var], self.expr_y)
x_coords = []
y_coords = []
def sample(param_p, param_q, p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
# Randomly sample to avoid aliasing.
np = import_module('numpy')
random = 0.45 + np.random.rand() * 0.1
param_new = param_p + random * (param_q - param_p)
xnew = f_x(param_new)
ynew = f_y(param_new)
new_point = np.array([xnew, ynew])
# Maximum depth
if depth > self.depth:
x_coords.append(q[0])
y_coords.append(q[1])
# Sample irrespective of whether the line is flat up to the
# depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
# Sample ten points if complex values are encountered
# at both ends. If there is a real value in between, then
# sample those points further.
elif ((p[0] is None and q[0] is None) or
(p[1] is None and q[1] is None)):
param_array = np.linspace(param_p, param_q, 10)
x_array = list(map(f_x, param_array))
y_array = list(map(f_y, param_array))
if not all(x is None and y is None
for x, y in zip(x_array, y_array)):
for i in range(len(y_array) - 1):
if ((x_array[i] is not None and y_array[i] is not None) or
(x_array[i + 1] is not None and y_array[i + 1] is not None)):
point_a = [x_array[i], y_array[i]]
point_b = [x_array[i + 1], y_array[i + 1]]
sample(param_array[i], param_array[i + 1], point_a,
point_b, depth + 1)
# Sample further if one of the end points is None (i.e. a complex
# value) or the three points are not almost collinear.
elif (p[0] is None or p[1] is None
or q[1] is None or q[0] is None
or not flat(p, new_point, q)):
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
else:
x_coords.append(q[0])
y_coords.append(q[1])
f_start_x = f_x(self.start)
f_start_y = f_y(self.start)
start = [f_start_x, f_start_y]
f_end_x = f_x(self.end)
f_end_y = f_y(self.end)
end = [f_end_x, f_end_y]
x_coords.append(f_start_x)
y_coords.append(f_start_y)
sample(self.start, self.end, start, end, 0)
return x_coords, y_coords
### 3D lines
class Line3DBaseSeries(Line2DBaseSeries):
"""A base class for 3D lines.
Most of the stuff is derived from Line2DBaseSeries."""
is_2Dline = False
is_3Dline = True
_dim = 3
def __init__(self):
super().__init__()
class Parametric3DLineSeries(Line3DBaseSeries):
"""Representation for a 3D line consisting of three parametric SymPy
expressions and a range."""
is_parametric = True
def __init__(self, expr_x, expr_y, expr_z, var_start_end, **kwargs):
super().__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.label = kwargs.get('label', None) or \
"(%s, %s, %s)" % (str(self.expr_x), str(self.expr_y), str(self.expr_z))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.line_color = kwargs.get('line_color', None)
self._xlim = None
self._ylim = None
self._zlim = None
def __str__(self):
return '3D parametric cartesian line: (%s, %s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.expr_z),
str(self.var), str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
np = import_module('numpy')
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
fz = vectorized_lambdify([self.var], self.expr_z)
list_x = fx(param)
list_y = fy(param)
list_z = fz(param)
list_x = np.array(list_x, dtype=np.float64)
list_y = np.array(list_y, dtype=np.float64)
list_z = np.array(list_z, dtype=np.float64)
list_x = np.ma.masked_invalid(list_x)
list_y = np.ma.masked_invalid(list_y)
list_z = np.ma.masked_invalid(list_z)
self._xlim = (np.amin(list_x), np.amax(list_x))
self._ylim = (np.amin(list_y), np.amax(list_y))
self._zlim = (np.amin(list_z), np.amax(list_z))
return list_x, list_y, list_z
### Surfaces
class SurfaceBaseSeries(BaseSeries):
"""A base class for 3D surfaces."""
is_3Dsurface = True
def __init__(self):
super().__init__()
self.surface_color = None
def get_color_array(self):
np = import_module('numpy')
c = self.surface_color
if isinstance(c, Callable):
f = np.vectorize(c)
nargs = arity(c)
if self.is_parametric:
variables = list(map(centers_of_faces, self.get_parameter_meshes()))
if nargs == 1:
return f(variables[0])
elif nargs == 2:
return f(*variables)
variables = list(map(centers_of_faces, self.get_meshes()))
if nargs == 1:
return f(variables[0])
elif nargs == 2:
return f(*variables[:2])
else:
return f(*variables)
else:
if isinstance(self, SurfaceOver2DRangeSeries):
return c*np.ones(min(self.nb_of_points_x, self.nb_of_points_y))
else:
return c*np.ones(min(self.nb_of_points_u, self.nb_of_points_v))
class SurfaceOver2DRangeSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of a SymPy expression and 2D
range."""
def __init__(self, expr, var_start_end_x, var_start_end_y, **kwargs):
super().__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.nb_of_points_x = kwargs.get('nb_of_points_x', 50)
self.nb_of_points_y = kwargs.get('nb_of_points_y', 50)
self.surface_color = kwargs.get('surface_color', None)
self._xlim = (self.start_x, self.end_x)
self._ylim = (self.start_y, self.end_y)
def __str__(self):
return ('cartesian surface: %s for'
' %s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
mesh_z = f(mesh_x, mesh_y)
mesh_z = np.array(mesh_z, dtype=np.float64)
mesh_z = np.ma.masked_invalid(mesh_z)
self._zlim = (np.amin(mesh_z), np.amax(mesh_z))
return mesh_x, mesh_y, mesh_z
class ParametricSurfaceSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of three parametric SymPy
expressions and a range."""
is_parametric = True
def __init__(
self, expr_x, expr_y, expr_z, var_start_end_u, var_start_end_v,
**kwargs):
super().__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.var_u = sympify(var_start_end_u[0])
self.start_u = float(var_start_end_u[1])
self.end_u = float(var_start_end_u[2])
self.var_v = sympify(var_start_end_v[0])
self.start_v = float(var_start_end_v[1])
self.end_v = float(var_start_end_v[2])
self.nb_of_points_u = kwargs.get('nb_of_points_u', 50)
self.nb_of_points_v = kwargs.get('nb_of_points_v', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('parametric cartesian surface: (%s, %s, %s) for'
' %s over %s and %s over %s') % (
str(self.expr_x),
str(self.expr_y),
str(self.expr_z),
str(self.var_u),
str((self.start_u, self.end_u)),
str(self.var_v),
str((self.start_v, self.end_v)))
def get_parameter_meshes(self):
np = import_module('numpy')
return np.meshgrid(np.linspace(self.start_u, self.end_u,
num=self.nb_of_points_u),
np.linspace(self.start_v, self.end_v,
num=self.nb_of_points_v))
def get_meshes(self):
np = import_module('numpy')
mesh_u, mesh_v = self.get_parameter_meshes()
fx = vectorized_lambdify((self.var_u, self.var_v), self.expr_x)
fy = vectorized_lambdify((self.var_u, self.var_v), self.expr_y)
fz = vectorized_lambdify((self.var_u, self.var_v), self.expr_z)
mesh_x = fx(mesh_u, mesh_v)
mesh_y = fy(mesh_u, mesh_v)
mesh_z = fz(mesh_u, mesh_v)
mesh_x = np.array(mesh_x, dtype=np.float64)
mesh_y = np.array(mesh_y, dtype=np.float64)
mesh_z = np.array(mesh_z, dtype=np.float64)
mesh_x = np.ma.masked_invalid(mesh_x)
mesh_y = np.ma.masked_invalid(mesh_y)
mesh_z = np.ma.masked_invalid(mesh_z)
self._xlim = (np.amin(mesh_x), np.amax(mesh_x))
self._ylim = (np.amin(mesh_y), np.amax(mesh_y))
self._zlim = (np.amin(mesh_z), np.amax(mesh_z))
return mesh_x, mesh_y, mesh_z
### Contours
class ContourSeries(BaseSeries):
"""Representation for a contour plot."""
# The code is mostly repetition of SurfaceOver2DRange.
# Presently used in contour_plot function
is_contour = True
def __init__(self, expr, var_start_end_x, var_start_end_y):
super().__init__()
self.nb_of_points_x = 50
self.nb_of_points_y = 50
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_meshes
self._xlim = (self.start_x, self.end_x)
self._ylim = (self.start_y, self.end_y)
def __str__(self):
return ('contour: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
##############################################################################
# Backends
##############################################################################
class BaseBackend:
"""Base class for all backends. A backend represents the plotting library,
which implements the necessary functionalities in order to use SymPy
plotting functions.
How the plotting module works:
1. Whenever a plotting function is called, the provided expressions are
processed and a list of instances of the `BaseSeries` class is created,
containing the necessary information to plot the expressions (eg the
expression, ranges, series name, ...). Eventually, these objects will
generate the numerical data to be plotted.
2. A Plot object is instantiated, which stores the list of series and the
main attributes of the plot (eg axis labels, title, ...).
3. When the "show" command is executed, a new backend is instantiated,
which loops through each series object to generate and plot the
numerical data. The backend is also going to set the axis labels, title,
..., according to the values stored in the Plot instance.
The backend should check if it supports the data series that it's given
(eg TextBackend supports only LineOver1DRange).
It's the backend responsibility to know how to use the class of data series
that it's given. Note that the current implementation of the `*Series`
classes is "matplotlib-centric": the numerical data returned by the
`get_points` and `get_meshes` methods is meant to be used directly by
Matplotlib. Therefore, the new backend will have to pre-process the
numerical data to make it compatible with the chosen plotting library.
Keep in mind that future SymPy versions may improve the `*Series` classes in
order to return numerical data in a "non-matplotlib-centric" way, hence if you
code a new backend you have the responsibility to check that it is working on
each SymPy release.
Please, explore the `MatplotlibBackend` source code to understand how a
backend should be coded.
Methods
=======
In order to be used by SymPy plotting functions, a backend must implement
the following methods:
* `show(self)`: used to loop over the data series, generate the numerical
data, plot it and set the axis labels, title, ...
* save(self, path): used to save the current plot to the specified file
path.
* close(self): used to close the current plot backend (note: some plotting
libraries don't support this functionality; in that case, just raise a
warning).
See also
========
MatplotlibBackend
"""
def __init__(self, parent):
super().__init__()
self.parent = parent
def show(self):
raise NotImplementedError
def save(self, path):
raise NotImplementedError
def close(self):
raise NotImplementedError
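# A minimal, self-contained sketch of a custom backend that implements only
# the interface described in the BaseBackend docstring above (show, save and
# close, plus access to the parent Plot through ``self.parent``).  The class
# name is illustrative; it prints a textual summary of each series instead of
# drawing anything.  Usage would look like
# ``Plot(some_series, backend=_SummaryBackendSketch).show()``.
class _SummaryBackendSketch(BaseBackend):
    def show(self):
        for i, series in enumerate(self.parent._series):
            print('[%d]: %s' % (i, series))

    def save(self, path):
        with open(path, 'w') as f:
            for i, series in enumerate(self.parent._series):
                f.write('[%d]: %s\n' % (i, series))

    def close(self):
        pass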
# Don't have to check for the success of importing matplotlib in each case;
# we will only be using this backend if we can successfully import matplotlib
class MatplotlibBackend(BaseBackend):
""" This class implements the functionalities to use Matplotlib with SymPy
plotting functions.
"""
def __init__(self, parent):
super().__init__(parent)
self.matplotlib = import_module('matplotlib',
import_kwargs={'fromlist': ['pyplot', 'cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
self.plt = self.matplotlib.pyplot
self.cm = self.matplotlib.cm
self.LineCollection = self.matplotlib.collections.LineCollection
aspect = getattr(self.parent, 'aspect_ratio', 'auto')
if aspect != 'auto':
aspect = float(aspect[1]) / aspect[0]
if isinstance(self.parent, Plot):
nrows, ncolumns = 1, 1
series_list = [self.parent._series]
elif isinstance(self.parent, PlotGrid):
nrows, ncolumns = self.parent.nrows, self.parent.ncolumns
series_list = self.parent._series
self.ax = []
self.fig = self.plt.figure(figsize=parent.size)
for i, series in enumerate(series_list):
are_3D = [s.is_3D for s in series]
if any(are_3D) and not all(are_3D):
raise ValueError('The matplotlib backend cannot mix 2D and 3D.')
elif all(are_3D):
# mpl_toolkits.mplot3d is necessary for
# projection='3d'
mpl_toolkits = import_module('mpl_toolkits', # noqa
import_kwargs={'fromlist': ['mplot3d']})
self.ax.append(self.fig.add_subplot(nrows, ncolumns, i + 1, projection='3d', aspect=aspect))
elif not any(are_3D):
self.ax.append(self.fig.add_subplot(nrows, ncolumns, i + 1, aspect=aspect))
self.ax[i].spines['left'].set_position('zero')
self.ax[i].spines['right'].set_color('none')
self.ax[i].spines['bottom'].set_position('zero')
self.ax[i].spines['top'].set_color('none')
self.ax[i].xaxis.set_ticks_position('bottom')
self.ax[i].yaxis.set_ticks_position('left')
@staticmethod
def get_segments(x, y, z=None):
""" Convert two list of coordinates to a list of segments to be used
with Matplotlib's LineCollection.
Parameters
==========
x: list
List of x-coordinates
y: list
List of y-coordinates
z: list
List of z-coordinates for a 3D line.
"""
np = import_module('numpy')
if z is not None:
dim = 3
points = (x, y, z)
else:
dim = 2
points = (x, y)
points = np.ma.array(points).T.reshape(-1, 1, dim)
return np.ma.concatenate([points[:-1], points[1:]], axis=1)
def _process_series(self, series, ax, parent):
np = import_module('numpy')
mpl_toolkits = import_module(
'mpl_toolkits', import_kwargs={'fromlist': ['mplot3d']})
# XXX Workaround for matplotlib issue
# https://github.com/matplotlib/matplotlib/issues/17130
xlims, ylims, zlims = [], [], []
for s in series:
# Create the collections
if s.is_2Dline:
x, y = s.get_data()
if (isinstance(s.line_color, (int, float)) or
callable(s.line_color)):
segments = self.get_segments(x, y)
collection = self.LineCollection(segments)
collection.set_array(s.get_color_array())
ax.add_collection(collection)
else:
line, = ax.plot(x, y, label=s.label, color=s.line_color)
elif s.is_contour:
ax.contour(*s.get_meshes())
elif s.is_3Dline:
x, y, z = s.get_data()
if (isinstance(s.line_color, (int, float)) or
callable(s.line_color)):
art3d = mpl_toolkits.mplot3d.art3d
segments = self.get_segments(x, y, z)
collection = art3d.Line3DCollection(segments)
collection.set_array(s.get_color_array())
ax.add_collection(collection)
else:
ax.plot(x, y, z, label=s.label,
color=s.line_color)
xlims.append(s._xlim)
ylims.append(s._ylim)
zlims.append(s._zlim)
elif s.is_3Dsurface:
x, y, z = s.get_meshes()
collection = ax.plot_surface(x, y, z,
cmap=getattr(self.cm, 'viridis', self.cm.jet),
rstride=1, cstride=1, linewidth=0.1)
if isinstance(s.surface_color, (float, int, Callable)):
color_array = s.get_color_array()
color_array = color_array.reshape(color_array.size)
collection.set_array(color_array)
else:
collection.set_color(s.surface_color)
xlims.append(s._xlim)
ylims.append(s._ylim)
zlims.append(s._zlim)
elif s.is_implicit:
points = s.get_raster()
if len(points) == 2:
# interval math plotting
x, y = _matplotlib_list(points[0])
ax.fill(x, y, facecolor=s.line_color, edgecolor='None')
else:
# use contourf or contour depending on whether it is
# an inequality or equality.
# XXX: ``contour`` plots multiple lines. Should be fixed.
ListedColormap = self.matplotlib.colors.ListedColormap
colormap = ListedColormap(["white", s.line_color])
xarray, yarray, zarray, plot_type = points
if plot_type == 'contour':
ax.contour(xarray, yarray, zarray, cmap=colormap)
else:
ax.contourf(xarray, yarray, zarray, cmap=colormap)
else:
raise NotImplementedError(
'{} is not supported in the SymPy plotting module '
'with matplotlib backend. Please report this issue.'
.format(s))
Axes3D = mpl_toolkits.mplot3d.Axes3D
if not isinstance(ax, Axes3D):
ax.autoscale_view(
scalex=ax.get_autoscalex_on(),
scaley=ax.get_autoscaley_on())
else:
# XXX Workaround for matplotlib issue
# https://github.com/matplotlib/matplotlib/issues/17130
if xlims:
xlims = np.array(xlims)
xlim = (np.amin(xlims[:, 0]), np.amax(xlims[:, 1]))
ax.set_xlim(xlim)
else:
ax.set_xlim([0, 1])
if ylims:
ylims = np.array(ylims)
ylim = (np.amin(ylims[:, 0]), np.amax(ylims[:, 1]))
ax.set_ylim(ylim)
else:
ax.set_ylim([0, 1])
if zlims:
zlims = np.array(zlims)
zlim = (np.amin(zlims[:, 0]), np.amax(zlims[:, 1]))
ax.set_zlim(zlim)
else:
ax.set_zlim([0, 1])
# Set global options.
# TODO The 3D stuff
# XXX The order of those is important.
if parent.xscale and not isinstance(ax, Axes3D):
ax.set_xscale(parent.xscale)
if parent.yscale and not isinstance(ax, Axes3D):
ax.set_yscale(parent.yscale)
if not isinstance(ax, Axes3D) or self.matplotlib.__version__ >= '1.2.0': # XXX in the distant future remove this check
ax.set_autoscale_on(parent.autoscale)
if parent.axis_center:
val = parent.axis_center
if isinstance(ax, Axes3D):
pass
elif val == 'center':
ax.spines['left'].set_position('center')
ax.spines['bottom'].set_position('center')
elif val == 'auto':
xl, xh = ax.get_xlim()
yl, yh = ax.get_ylim()
pos_left = ('data', 0) if xl*xh <= 0 else 'center'
pos_bottom = ('data', 0) if yl*yh <= 0 else 'center'
ax.spines['left'].set_position(pos_left)
ax.spines['bottom'].set_position(pos_bottom)
else:
ax.spines['left'].set_position(('data', val[0]))
ax.spines['bottom'].set_position(('data', val[1]))
if not parent.axis:
ax.set_axis_off()
if parent.legend:
if ax.legend():
ax.legend_.set_visible(parent.legend)
if parent.margin:
ax.set_xmargin(parent.margin)
ax.set_ymargin(parent.margin)
if parent.title:
ax.set_title(parent.title)
if parent.xlabel:
ax.set_xlabel(parent.xlabel, position=(1, 0))
if parent.ylabel:
ax.set_ylabel(parent.ylabel, position=(0, 1))
if isinstance(ax, Axes3D) and parent.zlabel:
ax.set_zlabel(parent.zlabel, position=(0, 1))
if parent.annotations:
for a in parent.annotations:
ax.annotate(**a)
if parent.markers:
for marker in parent.markers:
# make a copy of the marker dictionary
# so that it doesn't get altered
m = marker.copy()
args = m.pop('args')
ax.plot(*args, **m)
if parent.rectangles:
for r in parent.rectangles:
rect = self.matplotlib.patches.Rectangle(**r)
ax.add_patch(rect)
if parent.fill:
ax.fill_between(**parent.fill)
# xlim and ylim should always be set last so that plot limits
# don't get altered during the process.
if parent.xlim:
ax.set_xlim(parent.xlim)
if parent.ylim:
ax.set_ylim(parent.ylim)
def process_series(self):
"""
Iterates over every ``Plot`` object and further calls
_process_series()
"""
parent = self.parent
if isinstance(parent, Plot):
series_list = [parent._series]
else:
series_list = parent._series
for i, (series, ax) in enumerate(zip(series_list, self.ax)):
if isinstance(self.parent, PlotGrid):
parent = self.parent.args[i]
self._process_series(series, ax, parent)
def show(self):
self.process_series()
#TODO after fixing https://github.com/ipython/ipython/issues/1255
# you can uncomment the next line and remove the pyplot.show() call
#self.fig.show()
if _show:
self.fig.tight_layout()
self.plt.show()
else:
self.close()
def save(self, path):
self.process_series()
self.fig.savefig(path)
def close(self):
self.plt.close(self.fig)
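# A small, self-contained sketch (illustrative name) of the segment
# construction performed by MatplotlibBackend.get_segments above: consecutive
# points are paired so that matplotlib's LineCollection can color each
# segment individually, e.g. from the values returned by get_color_array.
def _segments_sketch():
    import numpy as np
    x = np.array([0.0, 1.0, 2.0, 3.0])
    y = x**2
    points = np.ma.array((x, y)).T.reshape(-1, 1, 2)
    # shape (3, 2, 2): three segments, each given by its two (x, y) endpoints
    return np.ma.concatenate([points[:-1], points[1:]], axis=1)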
class TextBackend(BaseBackend):
def __init__(self, parent):
super().__init__(parent)
def show(self):
if not _show:
return
if len(self.parent._series) != 1:
raise ValueError(
'The TextBackend supports only one graph per Plot.')
elif not isinstance(self.parent._series[0], LineOver1DRangeSeries):
raise ValueError(
'The TextBackend supports only expressions over a 1D range')
else:
ser = self.parent._series[0]
textplot(ser.expr, ser.start, ser.end)
def close(self):
pass
class DefaultBackend(BaseBackend):
def __new__(cls, parent):
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
return MatplotlibBackend(parent)
else:
return TextBackend(parent)
plot_backends = {
'matplotlib': MatplotlibBackend,
'text': TextBackend,
'default': DefaultBackend
}
##############################################################################
# Finding the centers of line segments or mesh faces
##############################################################################
def centers_of_segments(array):
np = import_module('numpy')
return np.mean(np.vstack((array[:-1], array[1:])), 0)
def centers_of_faces(array):
np = import_module('numpy')
return np.mean(np.dstack((array[:-1, :-1],
array[1:, :-1],
array[:-1, 1:],
array[1:, 1:],
)), 2)
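# A tiny, self-contained sketch (illustrative name) of what the two helpers
# above compute: midpoints of consecutive samples along a line, and one value
# per cell of a 2D mesh, both used when evaluating aesthetic functions.
def _centers_sketch():
    import numpy as np
    line = np.array([0.0, 1.0, 3.0])
    segment_centers = centers_of_segments(line)      # [0.5, 2.0]
    grid = np.arange(9.0).reshape(3, 3)
    face_centers = centers_of_faces(grid)            # 2x2 array, one per cell
    return segment_centers, face_centers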
def flat(x, y, z, eps=1e-3):
"""Checks whether three points are almost collinear"""
np = import_module('numpy')
# Workaround plotting piecewise (#8577):
# workaround for `lambdify` in `.experimental_lambdify` fails
# to return numerical values in some cases. Lower-level fix
# in `lambdify` is possible.
vector_a = (x - y).astype(np.float64)
vector_b = (z - y).astype(np.float64)
dot_product = np.dot(vector_a, vector_b)
vector_a_norm = np.linalg.norm(vector_a)
vector_b_norm = np.linalg.norm(vector_b)
cos_theta = dot_product / (vector_a_norm * vector_b_norm)
return abs(cos_theta + 1) < eps
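# A quick, self-contained sketch (illustrative name) of the collinearity test
# above: for three points on a straight line the two vectors from the middle
# point have cos(theta) close to -1, so ``flat`` returns True, while a bent
# configuration returns False.
def _flat_sketch():
    import numpy as np
    p = np.array([0.0, 0.0])
    mid = np.array([1.0, 1.0])
    q = np.array([2.0, 2.0])
    bent = np.array([2.0, 0.0])
    return flat(p, mid, q), flat(p, mid, bent)   # (True, False)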
def _matplotlib_list(interval_list):
"""
Returns lists for matplotlib ``fill`` command from a list of bounding
rectangular intervals
"""
xlist = []
ylist = []
if len(interval_list):
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
xlist.extend([intervalx.start, intervalx.start,
intervalx.end, intervalx.end, None])
ylist.extend([intervaly.start, intervaly.end,
intervaly.end, intervaly.start, None])
else:
#XXX Ugly hack. Matplotlib does not accept empty lists for ``fill``
xlist.extend((None, None, None, None))
ylist.extend((None, None, None, None))
return xlist, ylist
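# A small, self-contained sketch of how _matplotlib_list above unrolls a list
# of bounding rectangles into the flat x/y lists expected by matplotlib's
# ``fill``.  A namedtuple stands in for the interval objects produced by the
# interval-arithmetic plotting code; names are illustrative only.
def _matplotlib_list_sketch():
    from collections import namedtuple
    Interval = namedtuple('Interval', 'start end')
    rectangles = [[Interval(0, 1), Interval(0, 2)],
                  [Interval(1, 2), Interval(0, 1)]]
    # Each rectangle contributes its four corners followed by None, which
    # matplotlib's ``fill`` treats as a break between polygons.
    return _matplotlib_list(rectangles)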
####New API for plotting module ####
# TODO: Add color arrays for plots.
# TODO: Add more plotting options for 3d plots.
# TODO: Adaptive sampling for 3D plots.
def plot(*args, show=True, **kwargs):
"""Plots a function of a single variable as a curve.
Parameters
==========
args :
The first argument is the expression representing the function
of single variable to be plotted.
The last argument is a 3-tuple denoting the range of the free
variable. e.g. ``(x, 0, 5)``
Typical usage examples are the following:
- Plotting a single expression with a single range.
``plot(expr, range, **kwargs)``
- Plotting a single expression with the default range (-10, 10).
``plot(expr, **kwargs)``
- Plotting multiple expressions with a single range.
``plot(expr1, expr2, ..., range, **kwargs)``
- Plotting multiple expressions with multiple ranges.
``plot((expr1, range1), (expr2, range2), ..., **kwargs)``
It is best practice to specify range explicitly because default
range may change in the future if a more advanced default range
detection algorithm is implemented.
show : bool, optional
The default value is set to ``True``. Set show to ``False`` and
the function will not display the plot. The returned instance of
the ``Plot`` class can then be used to save or display the plot
by calling the ``save()`` and ``show()`` methods respectively.
line_color : string, or float, or function, optional
Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
Note that by setting ``line_color``, it would be applied simultaneously
to all the series.
title : str, optional
Title of the plot. It is set to the latex representation of
the expression, if the plot has only one expression.
label : str, optional
The label of the expression in the plot. It will be used when
called with ``legend``. Default is the name of the expression.
e.g. ``sin(x)``
xlabel : str, optional
Label for the x-axis.
ylabel : str, optional
Label for the y-axis.
xscale : 'linear' or 'log', optional
Sets the scaling of the x-axis.
yscale : 'linear' or 'log', optional
Sets the scaling of the y-axis.
axis_center : (float, float), optional
Tuple of two floats denoting the coordinates of the center or
{'center', 'auto'}
xlim : (float, float), optional
Denotes the x-axis limits, ``(min, max)``.
ylim : (float, float), optional
Denotes the y-axis limits, ``(min, max)``.
annotations : list, optional
A list of dictionaries specifying the type of annotation
required. The keys in the dictionary should be equivalent
to the arguments of the matplotlib's annotate() function.
markers : list, optional
A list of dictionaries specifying the type of markers required.
The keys in the dictionary should be equivalent to the arguments
of matplotlib's plot() function along with the marker-related
keyword arguments.
rectangles : list, optional
A list of dictionaries specifying the dimensions of the
rectangles to be plotted. The keys in the dictionary should be
equivalent to the arguments of the matplotlib's
patches.Rectangle class.
fill : dict, optional
A dictionary specifying the type of color filling required in
the plot. The keys in the dictionary should be equivalent to the
arguments of the matplotlib's fill_between() function.
adaptive : bool, optional
The default value is set to ``True``. Set adaptive to ``False``
and specify ``nb_of_points`` if uniform sampling is required.
The plotting uses an adaptive algorithm which samples
recursively in order to plot accurately. The adaptive algorithm
uses a random point near the midpoint of two points that have to
be further sampled. Hence, the same plot can appear slightly
different between runs.
depth : int, optional
Recursion depth of the adaptive algorithm. A depth of value
``n`` samples a maximum of `2^{n}` points.
If the ``adaptive`` flag is set to ``False``, this will be
ignored.
nb_of_points : int, optional
Used when the ``adaptive`` is set to ``False``. The function
is uniformly sampled at ``nb_of_points`` number of points.
If the ``adaptive`` flag is set to ``True``, this will be
ignored.
size : (float, float), optional
A tuple in the form (width, height) in inches to specify the size of
the overall figure. The default value is set to ``None``, meaning
the size will be set by the default backend.
Examples
========
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
Single Plot
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot(x**2, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x**2 for x over (-5.0, 5.0)
Multiple plots with single range.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot(x, x**2, x**3, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x for x over (-5.0, 5.0)
[1]: cartesian line: x**2 for x over (-5.0, 5.0)
[2]: cartesian line: x**3 for x over (-5.0, 5.0)
Multiple plots with different ranges.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))
Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
No adaptive sampling.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot(x**2, adaptive=False, nb_of_points=400)
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
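The ``markers``, ``annotations``, ``rectangles`` and ``fill`` keywords
described under Parameters are forwarded to matplotlib. A minimal sketch
of ``markers`` (hypothetical values; per the parameter description, every
key other than ``'args'`` is assumed to pass straight through to
matplotlib's ``plot()``):
>>> p = plot(x**2, (x, -2, 2), show=False,
...     markers=[{'args': [[0, 1], [0, 1]], 'marker': 'o', 'ls': 'none'}])  # doctest: +SKIP
>>> p.show()  # doctest: +SKIP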
See Also
========
Plot, LineOver1DRangeSeries
"""
args = list(map(sympify, args))
free = set()
for a in args:
if isinstance(a, Expr):
free |= a.free_symbols
if len(free) > 1:
raise ValueError(
'The same variable should be used in all '
'univariate expressions being plotted.')
x = free.pop() if free else Symbol('x')
kwargs.setdefault('xlabel', x.name)
kwargs.setdefault('ylabel', 'f(%s)' % x.name)
series = []
plot_expr = check_arguments(args, 1, 1)
series = [LineOver1DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def plot_parametric(*args, show=True, **kwargs):
"""
Plots a 2D parametric curve.
Parameters
==========
args
Common specifications are:
- Plotting a single parametric curve with a range
``plot_parametric((expr_x, expr_y), range)``
- Plotting multiple parametric curves with the same range
``plot_parametric((expr_x, expr_y), ..., range)``
- Plotting multiple parametric curves with different ranges
``plot_parametric((expr_x, expr_y, range), ...)``
``expr_x`` is the expression representing $x$ component of the
parametric function.
``expr_y`` is the expression representing $y$ component of the
parametric function.
``range`` is a 3-tuple denoting the parameter symbol, start and
stop. For example, ``(u, 0, 5)``.
If the range is not specified, then a default range of (-10, 10)
is used.
However, if the arguments are specified as
``(expr_x, expr_y, range), ...``, you must specify the range
for each expression manually.
Default range may change in the future if a more advanced
algorithm is implemented.
adaptive : bool, optional
Specifies whether to use the adaptive sampling or not.
The default value is set to ``True``. Set adaptive to ``False``
and specify ``nb_of_points`` if uniform sampling is required.
depth : int, optional
The recursion depth of the adaptive algorithm. A depth of
value $n$ samples a maximum of $2^n$ points.
nb_of_points : int, optional
Used when the ``adaptive`` flag is set to ``False``.
Specifies the number of the points used for the uniform
sampling.
line_color : string, or float, or function, optional
Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
Note that ``line_color`` is applied to all the series
simultaneously.
label : str, optional
The label of the expression in the plot. It will be used when
called with ``legend``. Default is the name of the expression.
e.g. ``sin(x)``
xlabel : str, optional
Label for the x-axis.
ylabel : str, optional
Label for the y-axis.
xscale : 'linear' or 'log', optional
Sets the scaling of the x-axis.
yscale : 'linear' or 'log', optional
Sets the scaling of the y-axis.
axis_center : (float, float), optional
Tuple of two floats denoting the coordinates of the center or
{'center', 'auto'}
xlim : (float, float), optional
Denotes the x-axis limits, ``(min, max)``.
ylim : (float, float), optional
Denotes the y-axis limits, ``(min, max)``.
size : (float, float), optional
A tuple in the form (width, height) in inches to specify the size of
the overall figure. The default value is set to ``None``, meaning
the size will be set by the default backend.
Examples
========
.. plot::
:context: reset
:format: doctest
:include-source: True
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot_parametric
>>> u = symbols('u')
A parametric plot with a single expression:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot_parametric((cos(u), sin(u)), (u, -5, 5))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
A parametric plot with multiple expressions with the same range:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot_parametric((cos(u), sin(u)), (u, cos(u)), (u, -10, 10))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-10.0, 10.0)
[1]: parametric cartesian line: (u, cos(u)) for u over (-10.0, 10.0)
A parametric plot with multiple expressions with different ranges
for each curve:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot_parametric((cos(u), sin(u), (u, -5, 5)),
... (cos(u), u, (u, -5, 5)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
[1]: parametric cartesian line: (cos(u), u) for u over (-5.0, 5.0)
Notes
=====
The plotting uses an adaptive algorithm which samples recursively to
accurately plot the curve. The adaptive algorithm uses a random point
near the midpoint of two points that have to be further sampled.
Hence, repeating the same plot command can give slightly different
results because of the random sampling.
If there are multiple plots, then the same optional arguments are
applied to all the plots drawn in the same canvas. If you want to
set these options separately, you can index the returned ``Plot``
object and set it.
For example, if you specify ``line_color`` once, it is applied
to both series simultaneously.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> from sympy import pi
>>> expr1 = (u, cos(2*pi*u)/2 + 1/2)
>>> expr2 = (u, sin(2*pi*u)/2 + 1/2)
>>> p = plot_parametric(expr1, expr2, (u, 0, 1), line_color='blue')
If you want to specify the line color for the specific series, you
should index each item and apply the property manually.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> p[0].line_color = 'red'
>>> p.show()
See Also
========
Plot, Parametric2DLineSeries
"""
args = list(map(sympify, args))
series = []
plot_expr = check_arguments(args, 2, 1)
series = [Parametric2DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def plot3d_parametric_line(*args, show=True, **kwargs):
"""
Plots a 3D parametric line plot.
Usage
=====
Single plot:
``plot3d_parametric_line(expr_x, expr_y, expr_z, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_line((expr_x, expr_y, expr_z, range), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``expr_z`` : Expression representing the function along z.
``range``: ``(u, 0, 5)``, A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric3DLineSeries`` class.
``nb_of_points``: The range is uniformly sampled at ``nb_of_points``
number of points.
Aesthetics:
``line_color``: string, or float, or function, optional
Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
Note that ``line_color`` is applied to all the series
simultaneously.
``label``: str
The label for the plot. It will be used when called with ``legend=True``
to denote the function with the given label in the plot.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class.
``title`` : str. Title of the plot.
``size`` : (float, float), optional
A tuple in the form (width, height) in inches to specify the size of
the overall figure. The default value is set to ``None``, meaning
the size will be set by the default backend.
Examples
========
.. plot::
:context: reset
:format: doctest
:include-source: True
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_line
>>> u = symbols('u')
Single plot.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot3d_parametric_line(cos(u), sin(u), u, (u, -5, 5))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
Multiple plots.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot3d_parametric_line((cos(u), sin(u), u, (u, -5, 5)),
... (sin(u), u**2, u, (u, -5, 5)))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
[1]: 3D parametric cartesian line: (sin(u), u**2, u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric3DLineSeries
"""
args = list(map(sympify, args))
series = []
plot_expr = check_arguments(args, 3, 1)
series = [Parametric3DLineSeries(*arg, **kwargs) for arg in plot_expr]
kwargs.setdefault("xlabel", "x")
kwargs.setdefault("ylabel", "y")
kwargs.setdefault("zlabel", "z")
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def plot3d(*args, show=True, **kwargs):
"""
Plots a 3D surface plot.
Usage
=====
Single plot
``plot3d(expr, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plot with the same range.
``plot3d(expr1, expr2, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot3d((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the surface, i.e. a function of ``x`` and ``y``.
``range_x``: (x, 0, 5), A 3-tuple denoting the range of the x
variable.
``range_y``: (y, 0, 5), A 3-tuple denoting the range of the y
variable.
Keyword Arguments
=================
Arguments for ``SurfaceOver2DRangeSeries`` class:
``nb_of_points_x``: int. The x range is sampled uniformly at
``nb_of_points_x`` points.
``nb_of_points_y``: int. The y range is sampled uniformly at
``nb_of_points_y`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
``size`` : (float, float), optional
A tuple in the form (width, height) in inches to specify the size of the
overall figure. The default value is set to ``None``, meaning the size will
be set by the default backend.
Examples
========
.. plot::
:context: reset
:format: doctest
:include-source: True
>>> from sympy import symbols
>>> from sympy.plotting import plot3d
>>> x, y = symbols('x y')
Single plot
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot3d(x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with same range
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot3d(x*y, -x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: -x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with different ranges.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot3d((x**2 + y**2, (x, -5, 5), (y, -5, 5)),
... (x*y, (x, -3, 3), (y, -3, 3)))
Plot object containing:
[0]: cartesian surface: x**2 + y**2 for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: x*y for x over (-3.0, 3.0) and y over (-3.0, 3.0)
See Also
========
Plot, SurfaceOver2DRangeSeries
"""
args = list(map(sympify, args))
series = []
plot_expr = check_arguments(args, 1, 2)
series = [SurfaceOver2DRangeSeries(*arg, **kwargs) for arg in plot_expr]
xlabel = series[0].var_x.name
ylabel = series[0].var_y.name
kwargs.setdefault("xlabel", xlabel)
kwargs.setdefault("ylabel", ylabel)
kwargs.setdefault("zlabel", "f(%s, %s)" % (xlabel, ylabel))
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def plot3d_parametric_surface(*args, show=True, **kwargs):
"""
Plots a 3D parametric surface plot.
Explanation
===========
Single plot.
``plot3d_parametric_surface(expr_x, expr_y, expr_z, range_u, range_v, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_surface((expr_x, expr_y, expr_z, range_u, range_v), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x``: Expression representing the function along ``x``.
``expr_y``: Expression representing the function along ``y``.
``expr_z``: Expression representing the function along ``z``.
``range_u``: ``(u, 0, 5)``, A 3-tuple denoting the range of the ``u``
variable.
``range_v``: ``(v, 0, 5)``, A 3-tuple denoting the range of the ``v``
variable.
Keyword Arguments
=================
Arguments for ``ParametricSurfaceSeries`` class:
``nb_of_points_u``: int. The ``u`` range is sampled uniformly at
``nb_of_points_u`` points.
``nb_of_points_v``: int. The ``v`` range is sampled uniformly at
``nb_of_points_v`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied for
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
``size`` : (float, float), optional
A tuple in the form (width, height) in inches to specify the size of the
overall figure. The default value is set to ``None``, meaning the size will
be set by the default backend.
Examples
========
.. plot::
:context: reset
:format: doctest
:include-source: True
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_surface
>>> u, v = symbols('u v')
Single plot.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot3d_parametric_surface(cos(u + v), sin(u - v), u - v,
... (u, -5, 5), (v, -5, 5))
Plot object containing:
[0]: parametric cartesian surface: (cos(u + v), sin(u - v), u - v) for u over (-5.0, 5.0) and v over (-5.0, 5.0)
See Also
========
Plot, ParametricSurfaceSeries
"""
args = list(map(sympify, args))
series = []
plot_expr = check_arguments(args, 3, 2)
series = [ParametricSurfaceSeries(*arg, **kwargs) for arg in plot_expr]
kwargs.setdefault("xlabel", "x")
kwargs.setdefault("ylabel", "y")
kwargs.setdefault("zlabel", "z")
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def plot_contour(*args, show=True, **kwargs):
"""
Draws a contour plot of a function.
Usage
=====
Single plot
``plot_contour(expr, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plot with the same range.
``plot_contour(expr1, expr2, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot_contour((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function to be contoured, i.e. a function of ``x`` and ``y``.
``range_x``: (x, 0, 5), A 3-tuple denoting the range of the x
variable.
``range_y``: (y, 0, 5), A 3-tuple denoting the range of the y
variable.
Keyword Arguments
=================
Arguments for ``ContourSeries`` class:
``nb_of_points_x``: int. The x range is sampled uniformly at
``nb_of_points_x`` points.
``nb_of_points_y``: int. The y range is sampled uniformly at
``nb_of_points_y`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
``size`` : (float, float), optional
A tuple in the form (width, height) in inches to specify the size of
the overall figure. The default value is set to ``None``, meaning
the size will be set by the default backend.
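Examples
========
A minimal usage sketch (illustrative only; ``show=False`` keeps the
figure from being rendered until ``show()`` is called):
>>> from sympy import symbols
>>> from sympy.plotting import plot_contour
>>> x, y = symbols('x y')
>>> p = plot_contour(x**2 + y**2, (x, -5, 5), (y, -5, 5), show=False)
The returned ``Plot`` can later be displayed with ``p.show()`` or written
to disk with ``p.save()``.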
See Also
========
Plot, ContourSeries
"""
args = list(map(sympify, args))
plot_expr = check_arguments(args, 1, 2)
series = [ContourSeries(*arg) for arg in plot_expr]
plot_contours = Plot(*series, **kwargs)
if len(plot_expr[0].free_symbols) > 2:
raise ValueError('Contour plots cannot be drawn for more than two variables.')
if show:
plot_contours.show()
return plot_contours
def check_arguments(args, expr_len, nb_of_free_symbols):
"""
Checks the arguments and converts into tuples of the
form (exprs, ranges).
Examples
========
.. plot::
:context: reset
:format: doctest
:include-source: True
>>> from sympy import cos, sin, symbols
>>> from sympy.plotting.plot import check_arguments
>>> x = symbols('x')
>>> check_arguments([cos(x), sin(x)], 2, 1)
[(cos(x), sin(x), (x, -10, 10))]
>>> check_arguments([x, x**2], 1, 1)
[(x, (x, -10, 10)), (x**2, (x, -10, 10))]
"""
if not args:
return []
if expr_len > 1 and isinstance(args[0], Expr):
# Multiple expressions same range.
# The arguments are tuples when the expression length is
# greater than 1.
if len(args) < expr_len:
raise ValueError("len(args) should not be less than expr_len")
for i in range(len(args)):
if isinstance(args[i], Tuple):
break
else:
i = len(args) + 1
exprs = Tuple(*args[:i])
free_symbols = list(set().union(*[e.free_symbols for e in exprs]))
if len(args) == expr_len + nb_of_free_symbols:
#Ranges given
plots = [exprs + Tuple(*args[expr_len:])]
else:
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
plots = [exprs + Tuple(*ranges)]
return plots
if isinstance(args[0], Expr) or (isinstance(args[0], Tuple) and
len(args[0]) == expr_len and
expr_len != 3):
# Cannot handle the case where the number of expressions is 3: it is
# not possible to differentiate between expressions and ranges.
#Series of plots with same range
for i in range(len(args)):
if isinstance(args[i], Tuple) and len(args[i]) != expr_len:
break
if not isinstance(args[i], Tuple):
args[i] = Tuple(args[i])
else:
i = len(args) + 1
exprs = args[:i]
assert all(isinstance(e, Expr) for expr in exprs for e in expr)
free_symbols = list(set().union(*[e.free_symbols for expr in exprs
for e in expr]))
if len(free_symbols) > nb_of_free_symbols:
raise ValueError("The number of free_symbols in the expression "
"is greater than %d" % nb_of_free_symbols)
if len(args) == i + nb_of_free_symbols and isinstance(args[i], Tuple):
ranges = Tuple(*[range_expr for range_expr in args[
i:i + nb_of_free_symbols]])
plots = [expr + ranges for expr in exprs]
return plots
else:
# Use default ranges.
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(nb_of_free_symbols - len(free_symbols)):
ranges.append(Tuple(Dummy()) + default_range)
ranges = Tuple(*ranges)
plots = [expr + ranges for expr in exprs]
return plots
elif isinstance(args[0], Tuple) and len(args[0]) == expr_len + nb_of_free_symbols:
# Multiple plots with different ranges.
for arg in args:
for i in range(expr_len):
if not isinstance(arg[i], Expr):
raise ValueError("Expected an expression, given %s" %
str(arg[i]))
for i in range(nb_of_free_symbols):
if not len(arg[i + expr_len]) == 3:
raise ValueError("The ranges should be a tuple of "
"length 3, got %s" % str(arg[i + expr_len]))
return args
|
e2ded6ebf7ba275145c4003ffbd241b35ff48c69c0df2c8b0fba7b9039be262d | from sympy.core.numbers import Float
from sympy.core.symbol import Dummy
from sympy.utilities.lambdify import lambdify
import math
def is_valid(x):
"""Check if a floating point number is valid"""
if x is None:
return False
if isinstance(x, complex):
return False
return not math.isinf(x) and not math.isnan(x)
def rescale(y, W, H, mi, ma):
"""Rescale the given array `y` to fit into the integer values
between `0` and `H-1` for the values between ``mi`` and ``ma``.
"""
y_new = list()
norm = ma - mi
offset = (ma + mi) / 2
for x in range(W):
if is_valid(y[x]):
normalized = (y[x] - offset) / norm
if not is_valid(normalized):
y_new.append(None)
else:
rescaled = Float((normalized*H + H/2) * (H-1)/H).round()
rescaled = int(rescaled)
y_new.append(rescaled)
else:
y_new.append(None)
return y_new
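# A worked sketch of the mapping above (the values follow directly from the
# formula; not part of any public API):
#
#     >>> rescale([0, 0.5, 1], 3, 21, 0, 1)
#     [0, 10, 20]
#
# i.e. the minimum maps to row 0 and the maximum to row H - 1 = 20.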
def linspace(start, stop, num):
return [start + (stop - start) * x / (num-1) for x in range(num)]
def textplot_str(expr, a, b, W=55, H=21):
"""Generator for the lines of the plot"""
free = expr.free_symbols
if len(free) > 1:
raise ValueError(
"The expression must have a single variable. (Got {})"
.format(free))
x = free.pop() if free else Dummy()
f = lambdify([x], expr)
a = float(a)
b = float(b)
# Calculate function values
x = linspace(a, b, W)
y = list()
for val in x:
try:
y.append(f(val))
# Not sure what exceptions to catch here or why...
except (ValueError, TypeError, ZeroDivisionError):
y.append(None)
# Normalize height to screen space
y_valid = list(filter(is_valid, y))
if y_valid:
ma = max(y_valid)
mi = min(y_valid)
if ma == mi:
if ma:
mi, ma = sorted([0, 2*ma])
else:
mi, ma = -1, 1
else:
mi, ma = -1, 1
y_range = ma - mi
precision = math.floor(math.log(y_range, 10)) - 1
precision *= -1
mi = round(mi, precision)
ma = round(ma, precision)
y = rescale(y, W, H, mi, ma)
y_bins = linspace(mi, ma, H)
# Draw plot
margin = 7
for h in range(H - 1, -1, -1):
s = [' '] * W
for i in range(W):
if y[i] == h:
if (i == 0 or y[i - 1] == h - 1) and (i == W - 1 or y[i + 1] == h + 1):
s[i] = '/'
elif (i == 0 or y[i - 1] == h + 1) and (i == W - 1 or y[i + 1] == h - 1):
s[i] = '\\'
else:
s[i] = '.'
if h == 0:
for i in range(W):
s[i] = '_'
# Print y values
if h in (0, H//2, H - 1):
prefix = ("%g" % y_bins[h]).rjust(margin)[:margin]
else:
prefix = " "*margin
s = "".join(s)
if h == H//2:
s = s.replace(" ", "-")
yield prefix + " |" + s
# Print x values
bottom = " " * (margin + 2)
bottom += ("%g" % x[0]).ljust(W//2)
if W % 2 == 1:
bottom += ("%g" % x[W//2]).ljust(W//2)
else:
bottom += ("%g" % x[W//2]).ljust(W//2-1)
bottom += "%g" % x[-1]
yield bottom
def textplot(expr, a, b, W=55, H=21):
r"""
Print a crude ASCII art plot of the SymPy expression ``expr`` (which
should contain a single free symbol, e.g. ``x``) over the
interval [a, b].
Examples
========
>>> from sympy import Symbol, sin
>>> from sympy.plotting import textplot
>>> t = Symbol('t')
>>> textplot(sin(t)*t, 0, 15)
14 | ...
| .
| .
| .
| .
| ...
| / . .
| /
| / .
| . . .
1.5 |----.......--------------------------------------------
|.... \ . .
| \ / .
| .. / .
| \ / .
| ....
| .
| . .
|
| . .
-11 |_______________________________________________________
0 7.5 15
"""
for line in textplot_str(expr, a, b, W, H):
print(line)
|
1a838a7b92e8d5b1a96bce83a8178f2b074618b0f827fd592149d1e415cfb4fc | """ rewrite of lambdify - This stuff is not stable at all.
It is for internal use in the new plotting module.
It may (will! see the Q'n'A in the source) be rewritten.
It's completely self-contained. In particular, it does not use lambdarepr.
It does not aim to replace the current lambdify. Most importantly, it will
never support anything other than SymPy expressions (no Matrices, dictionaries
and so on).
"""
import re
from sympy.core.numbers import (I, NumberSymbol, oo, zoo)
from sympy.core.symbol import Symbol
from sympy.utilities.iterables import numbered_symbols
# We parse the expression string into a tree that identifies functions. Then
# we translate the names of the functions and we also translate some strings
# that are not names of functions (all this according to translation
# dictionaries).
# If the translation goes to another module (like numpy) the
# module is imported and 'func' is translated to 'module.func'.
# If a function cannot be translated, the inner nodes of that part of the
# tree are not translated. So if we have Integral(sqrt(x)), sqrt is not
# translated to np.sqrt and the Integral does not crash.
# A namespace for all this is generated by crawling the (func, args) tree of
# the expression. The creation of this namespace involves many ugly
# workarounds.
# The namespace consists of all the names needed for the SymPy expression and
# all the names of the modules used for translation. Those modules are imported only
# as a name (import numpy as np) in order to keep the namespace small and
# manageable.
# Please, if there is a bug, do not try to fix it here! Rewrite this by using
# the method proposed in the last Q'n'A below. That way the new function will
# work just as well, be just as simple, but it won't need any new workarounds.
# If you insist on fixing it here, look at the workarounds in the function
# sympy_expression_namespace and in lambdify.
# Q: Why are you not using Python abstract syntax tree?
# A: Because it is more complicated and not much more powerful in this case.
# Q: What if I have Symbol('sin') or g=Function('f')?
# A: You will break the algorithm. We should use srepr to defend against this?
# The problem with Symbol('sin') is that it will be printed as 'sin'. The
# parser will distinguish it from the function 'sin' because functions are
# detected thanks to the opening parenthesis, but the lambda expression won't
# understand the difference if we have also the sin function.
# The solution (complicated) is to use srepr and maybe ast.
# The problem with the g=Function('f') is that it will be printed as 'f' but in
# the global namespace we have only 'g'. But as the same printer is used in the
# constructor of the namespace there will be no problem.
# Q: What if some of the printers are not printing as expected?
# A: The algorithm won't work. You must use srepr for those cases. But even
# srepr may not print well. All problems with printers should be considered
# bugs.
# Q: What about _imp_ functions?
# A: Those are taken care of by evalf. A special case treatment will work
# faster but it's not worth the code complexity.
# Q: Will ast fix all possible problems?
# A: No. You will always have to use some printer. Even srepr may not work in
# some cases. But if the printer does not work, that should be considered a
# bug.
# Q: Is there some way to fix all possible problems?
# A: Probably by constructing our strings ourselves by traversing the (func,
# args) tree and creating the namespace at the same time. That actually sounds
# good.
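# A minimal usage sketch (hypothetical session; ``experimental_lambdify``
# and ``Lambdifier`` are defined further below in this module):
#
#     >>> from sympy.abc import x
#     >>> from sympy import sin
#     >>> f = experimental_lambdify([x], sin(x), use_python_math=True)
#     >>> f(0.0)
#     0.0
#
# Here ``sin`` is translated to ``math.sin`` and the resulting lambda is
# evaluated with plain Python floats.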
from sympy.external import import_module
import warnings
#TODO debugging output
class vectorized_lambdify:
""" Return a sufficiently smart, vectorized and lambdified function.
Returns only reals.
Explanation
===========
This function uses experimental_lambdify to create a lambdified
expression ready to be used with numpy. Many of the functions in SymPy
are not implemented in numpy so in some cases we resort to Python cmath or
even to evalf.
The following translations are tried:
only numpy complex
- on errors raised by SymPy trying to work with ndarray:
only Python cmath and then vectorize complex128
When using Python cmath there is no need for evalf or float/complex
because Python cmath calls those.
This function never tries to mix numpy directly with evalf because numpy
does not understand SymPy Float. If this is needed one can use the
float_wrap_evalf/complex_wrap_evalf options of experimental_lambdify or
better one can be explicit about the dtypes that numpy works with.
Check numpy bug http://projects.scipy.org/numpy/ticket/1013 to know what
types of errors to expect.
"""
def __init__(self, args, expr):
self.args = args
self.expr = expr
self.np = import_module('numpy')
self.lambda_func_1 = experimental_lambdify(
args, expr, use_np=True)
self.vector_func_1 = self.lambda_func_1
self.lambda_func_2 = experimental_lambdify(
args, expr, use_python_cmath=True)
self.vector_func_2 = self.np.vectorize(
self.lambda_func_2, otypes=[complex])
self.vector_func = self.vector_func_1
self.failure = False
def __call__(self, *args):
np = self.np
try:
temp_args = (np.array(a, dtype=complex) for a in args)
results = self.vector_func(*temp_args)
results = np.ma.masked_where(
np.abs(results.imag) > 1e-7 * np.abs(results),
results.real, copy=False)
return results
except ValueError:
if self.failure:
raise
self.failure = True
self.vector_func = self.vector_func_2
warnings.warn(
'The evaluation of the expression is problematic. '
'We are trying a fallback method that may still work. '
'Please report this as a bug.')
return self.__call__(*args)
class lambdify:
"""Returns the lambdified function.
Explanation
===========
This function uses experimental_lambdify to create a lambdified
expression. It uses cmath to lambdify the expression. If a function
is not implemented in Python cmath, it falls back to evaluating that
function with ``evalf``.
"""
def __init__(self, args, expr):
self.args = args
self.expr = expr
self.lambda_func_1 = experimental_lambdify(
args, expr, use_python_cmath=True, use_evalf=True)
self.lambda_func_2 = experimental_lambdify(
args, expr, use_python_math=True, use_evalf=True)
self.lambda_func_3 = experimental_lambdify(
args, expr, use_evalf=True, complex_wrap_evalf=True)
self.lambda_func = self.lambda_func_1
self.failure = False
def __call__(self, args):
try:
#The result can be sympy.Float. Hence wrap it with complex type.
result = complex(self.lambda_func(args))
if abs(result.imag) > 1e-7 * abs(result):
return None
return result.real
except (ZeroDivisionError, OverflowError):
return None
except TypeError as e:
if self.failure:
raise e
if self.lambda_func == self.lambda_func_1:
self.lambda_func = self.lambda_func_2
return self.__call__(args)
self.failure = True
self.lambda_func = self.lambda_func_3
warnings.warn(
'The evaluation of the expression is problematic. '
'We are trying a fallback method that may still work. '
'Please report this as a bug.')
return self.__call__(args)
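# A minimal sketch of the scalar ``lambdify`` class above (hypothetical
# session; the plotting code normally constructs it internally):
#
#     >>> from sympy.abc import x
#     >>> from sympy import sqrt
#     >>> f = lambdify([x], sqrt(x))
#     >>> f(4.0)
#     2.0
#     >>> f(-4.0) is None    # essentially complex results are filtered out
#     True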
def experimental_lambdify(*args, **kwargs):
l = Lambdifier(*args, **kwargs)
return l
class Lambdifier:
def __init__(self, args, expr, print_lambda=False, use_evalf=False,
float_wrap_evalf=False, complex_wrap_evalf=False,
use_np=False, use_python_math=False, use_python_cmath=False,
use_interval=False):
self.print_lambda = print_lambda
self.use_evalf = use_evalf
self.float_wrap_evalf = float_wrap_evalf
self.complex_wrap_evalf = complex_wrap_evalf
self.use_np = use_np
self.use_python_math = use_python_math
self.use_python_cmath = use_python_cmath
self.use_interval = use_interval
# Constructing the argument string
# - check
if not all(isinstance(a, Symbol) for a in args):
raise ValueError('The arguments must be Symbols.')
# - use numbered symbols
syms = numbered_symbols(exclude=expr.free_symbols)
newargs = [next(syms) for _ in args]
expr = expr.xreplace(dict(zip(args, newargs)))
argstr = ', '.join([str(a) for a in newargs])
del syms, newargs, args
# Constructing the translation dictionaries and making the translation
self.dict_str = self.get_dict_str()
self.dict_fun = self.get_dict_fun()
exprstr = str(expr)
newexpr = self.tree2str_translate(self.str2tree(exprstr))
# Constructing the namespaces
namespace = {}
namespace.update(self.sympy_atoms_namespace(expr))
namespace.update(self.sympy_expression_namespace(expr))
# XXX Workaround
# Ugly workaround because Pow(a,Half) prints as sqrt(a)
# and sympy_expression_namespace can not catch it.
from sympy.functions.elementary.miscellaneous import sqrt
namespace.update({'sqrt': sqrt})
namespace.update({'Eq': lambda x, y: x == y})
namespace.update({'Ne': lambda x, y: x != y})
# End workaround.
if use_python_math:
namespace.update({'math': __import__('math')})
if use_python_cmath:
namespace.update({'cmath': __import__('cmath')})
if use_np:
try:
namespace.update({'np': __import__('numpy')})
except ImportError:
raise ImportError(
'experimental_lambdify failed to import numpy.')
if use_interval:
namespace.update({'imath': __import__(
'sympy.plotting.intervalmath', fromlist=['intervalmath'])})
namespace.update({'math': __import__('math')})
# Construct the lambda
if self.print_lambda:
print(newexpr)
eval_str = 'lambda %s : ( %s )' % (argstr, newexpr)
self.eval_str = eval_str
exec("from __future__ import division; MYNEWLAMBDA = %s" % eval_str, namespace)
self.lambda_func = namespace['MYNEWLAMBDA']
def __call__(self, *args, **kwargs):
return self.lambda_func(*args, **kwargs)
##############################################################################
# Dicts for translating from SymPy to other modules
##############################################################################
###
# builtins
###
# Functions with different names in builtins
builtin_functions_different = {
'Min': 'min',
'Max': 'max',
'Abs': 'abs',
}
# Strings that should be translated
builtin_not_functions = {
'I': '1j',
# 'oo': '1e400',
}
###
# numpy
###
# Functions that are the same in numpy
numpy_functions_same = [
'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'exp', 'log',
'sqrt', 'floor', 'conjugate',
]
# Functions with different names in numpy
numpy_functions_different = {
"acos": "arccos",
"acosh": "arccosh",
"arg": "angle",
"asin": "arcsin",
"asinh": "arcsinh",
"atan": "arctan",
"atan2": "arctan2",
"atanh": "arctanh",
"ceiling": "ceil",
"im": "imag",
"ln": "log",
"Max": "amax",
"Min": "amin",
"re": "real",
"Abs": "abs",
}
# Strings that should be translated
numpy_not_functions = {
'pi': 'np.pi',
'oo': 'np.inf',
'E': 'np.e',
}
###
# Python math
###
# Functions that are the same in math
math_functions_same = [
'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'atan2',
'sinh', 'cosh', 'tanh', 'asinh', 'acosh', 'atanh',
'exp', 'log', 'erf', 'sqrt', 'floor', 'factorial', 'gamma',
]
# Functions with different names in math
math_functions_different = {
'ceiling': 'ceil',
'ln': 'log',
'loggamma': 'lgamma'
}
# Strings that should be translated
math_not_functions = {
'pi': 'math.pi',
'E': 'math.e',
}
###
# Python cmath
###
# Functions that are the same in cmath
cmath_functions_same = [
'sin', 'cos', 'tan', 'asin', 'acos', 'atan',
'sinh', 'cosh', 'tanh', 'asinh', 'acosh', 'atanh',
'exp', 'log', 'sqrt',
]
# Functions with different names in cmath
cmath_functions_different = {
'ln': 'log',
'arg': 'phase',
}
# Strings that should be translated
cmath_not_functions = {
'pi': 'cmath.pi',
'E': 'cmath.e',
}
###
# intervalmath
###
interval_not_functions = {
'pi': 'math.pi',
'E': 'math.e'
}
interval_functions_same = [
'sin', 'cos', 'exp', 'tan', 'atan', 'log',
'sqrt', 'cosh', 'sinh', 'tanh', 'floor',
'acos', 'asin', 'acosh', 'asinh', 'atanh',
'Abs', 'And', 'Or'
]
interval_functions_different = {
'Min': 'imin',
'Max': 'imax',
'ceiling': 'ceil',
}
###
# mpmath, etc
###
#TODO
###
# Create the final ordered tuples of dictionaries
###
# For strings
def get_dict_str(self):
dict_str = dict(self.builtin_not_functions)
if self.use_np:
dict_str.update(self.numpy_not_functions)
if self.use_python_math:
dict_str.update(self.math_not_functions)
if self.use_python_cmath:
dict_str.update(self.cmath_not_functions)
if self.use_interval:
dict_str.update(self.interval_not_functions)
return dict_str
# For functions
def get_dict_fun(self):
dict_fun = dict(self.builtin_functions_different)
if self.use_np:
for s in self.numpy_functions_same:
dict_fun[s] = 'np.' + s
for k, v in self.numpy_functions_different.items():
dict_fun[k] = 'np.' + v
if self.use_python_math:
for s in self.math_functions_same:
dict_fun[s] = 'math.' + s
for k, v in self.math_functions_different.items():
dict_fun[k] = 'math.' + v
if self.use_python_cmath:
for s in self.cmath_functions_same:
dict_fun[s] = 'cmath.' + s
for k, v in self.cmath_functions_different.items():
dict_fun[k] = 'cmath.' + v
if self.use_interval:
for s in self.interval_functions_same:
dict_fun[s] = 'imath.' + s
for k, v in self.interval_functions_different.items():
dict_fun[k] = 'imath.' + v
return dict_fun
##############################################################################
# The translator functions, tree parsers, etc.
##############################################################################
def str2tree(self, exprstr):
"""Converts an expression string to a tree.
Explanation
===========
Functions are represented by ('func_name(', tree_of_arguments).
Other expressions are (head_string, mid_tree, tail_str).
Expressions that do not contain functions are directly returned.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy import Integral, sin
>>> from sympy.plotting.experimental_lambdify import Lambdifier
>>> str2tree = Lambdifier([x], x).str2tree
>>> str2tree(str(Integral(x, (x, 1, y))))
('', ('Integral(', 'x, (x, 1, y)'), ')')
>>> str2tree(str(x+y))
'x + y'
>>> str2tree(str(x+y*sin(z)+1))
('x + y*', ('sin(', 'z'), ') + 1')
>>> str2tree('sin(y*(y + 1.1) + (sin(y)))')
('', ('sin(', ('y*(y + 1.1) + (', ('sin(', 'y'), '))')), ')')
"""
#matches the first 'function_name('
first_par = re.search(r'(\w+\()', exprstr)
if first_par is None:
return exprstr
else:
start = first_par.start()
end = first_par.end()
head = exprstr[:start]
func = exprstr[start:end]
tail = exprstr[end:]
count = 0
for i, c in enumerate(tail):
if c == '(':
count += 1
elif c == ')':
count -= 1
if count == -1:
break
func_tail = self.str2tree(tail[:i])
tail = self.str2tree(tail[i:])
return (head, (func, func_tail), tail)
@classmethod
def tree2str(cls, tree):
"""Converts a tree to string without translations.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy import sin
>>> from sympy.plotting.experimental_lambdify import Lambdifier
>>> str2tree = Lambdifier([x], x).str2tree
>>> tree2str = Lambdifier([x], x).tree2str
>>> tree2str(str2tree(str(x+y*sin(z)+1)))
'x + y*sin(z) + 1'
"""
if isinstance(tree, str):
return tree
else:
return ''.join(map(cls.tree2str, tree))
def tree2str_translate(self, tree):
"""Converts a tree to string with translations.
Explanation
===========
Function names are translated by translate_func.
Other strings are translated by translate_str.
"""
if isinstance(tree, str):
return self.translate_str(tree)
elif isinstance(tree, tuple) and len(tree) == 2:
return self.translate_func(tree[0][:-1], tree[1])
else:
return ''.join([self.tree2str_translate(t) for t in tree])
def translate_str(self, estr):
"""Translate substrings of estr using in order the dictionaries in
dict_tuple_str."""
for pattern, repl in self.dict_str.items():
estr = re.sub(pattern, repl, estr)
return estr
def translate_func(self, func_name, argtree):
"""Translate function names and the tree of arguments.
Explanation
===========
If the function name is not in the dictionary ``dict_fun`` then the
function is surrounded by a float((...).evalf()).
The use of float is necessary as np.<function>(sympy.Float(..)) raises an
error."""
if func_name in self.dict_fun:
new_name = self.dict_fun[func_name]
argstr = self.tree2str_translate(argtree)
return new_name + '(' + argstr
elif func_name in ['Eq', 'Ne']:
op = {'Eq': '==', 'Ne': '!='}
return "(lambda x, y: x {} y)({}".format(op[func_name], self.tree2str_translate(argtree))
else:
template = '(%s(%s)).evalf(' if self.use_evalf else '%s(%s'
if self.float_wrap_evalf:
template = 'float(%s)' % template
elif self.complex_wrap_evalf:
template = 'complex(%s)' % template
# Wrapping should only happen on the outermost expression, which
# is the only thing we know will be a number.
float_wrap_evalf = self.float_wrap_evalf
complex_wrap_evalf = self.complex_wrap_evalf
self.float_wrap_evalf = False
self.complex_wrap_evalf = False
ret = template % (func_name, self.tree2str_translate(argtree))
self.float_wrap_evalf = float_wrap_evalf
self.complex_wrap_evalf = complex_wrap_evalf
return ret
##############################################################################
# The namespace constructors
##############################################################################
@classmethod
def sympy_expression_namespace(cls, expr):
"""Traverses the (func, args) tree of an expression and creates a SymPy
namespace. All other modules are imported only as a module name. That way
the namespace is not polluted and stays quite small. It probably causes many
more variable lookups and so takes more time, but there are no benchmarks on
that for the moment."""
if expr is None:
return {}
else:
funcname = str(expr.func)
# XXX Workaround
# Here we add an ugly workaround because str(func(x))
# is not always the same as str(func). Eg
# >>> str(Integral(x))
# "Integral(x)"
# >>> str(Integral)
# "<class 'sympy.integrals.integrals.Integral'>"
# >>> str(sqrt(x))
# "sqrt(x)"
# >>> str(sqrt)
# "<function sqrt at 0x3d92de8>"
# >>> str(sin(x))
# "sin(x)"
# >>> str(sin)
# "sin"
# Either one of those can be used but not all at the same time.
# The code considers the sin example as the right one.
regexlist = [
r'<class \'sympy[\w.]*?.([\w]*)\'>$',
# the example Integral
r'<function ([\w]*) at 0x[\w]*>$', # the example sqrt
]
for r in regexlist:
m = re.match(r, funcname)
if m is not None:
funcname = m.groups()[0]
# End of the workaround
# XXX debug: print funcname
args_dict = {}
for a in expr.args:
if (isinstance(a, Symbol) or
isinstance(a, NumberSymbol) or
a in [I, zoo, oo]):
continue
else:
args_dict.update(cls.sympy_expression_namespace(a))
args_dict.update({funcname: expr.func})
return args_dict
@staticmethod
def sympy_atoms_namespace(expr):
"""For no real reason this function is separated from
sympy_expression_namespace. It can be moved to it."""
atoms = expr.atoms(Symbol, NumberSymbol, I, zoo, oo)
d = {}
for a in atoms:
# XXX debug: print 'atom:' + str(a)
d[str(a)] = a
return d
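# A minimal sketch of ``sympy_atoms_namespace`` (hypothetical session; the
# ordering of the resulting dict may vary):
#
#     >>> from sympy.abc import x
#     >>> from sympy import sin, pi
#     >>> Lambdifier.sympy_atoms_namespace(sin(x) + pi)  # doctest: +SKIP
#     {'x': x, 'pi': pi}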
|
418aa5ee391014fe0957b224720650d483e996627d682e472388cc5356def83d | from sympy.concrete.summations import Sum
from sympy.core.basic import Basic
from sympy.core.containers import Tuple
from sympy.core.function import Lambda
from sympy.core.numbers import (Rational, nan, oo, pi)
from sympy.core.relational import Eq
from sympy.core.singleton import S
from sympy.core.symbol import (Symbol, symbols)
from sympy.functions.combinatorial.factorials import (FallingFactorial, binomial)
from sympy.functions.elementary.exponential import (exp, log)
from sympy.functions.elementary.trigonometric import (cos, sin)
from sympy.functions.special.delta_functions import DiracDelta
from sympy.integrals.integrals import integrate
from sympy.logic.boolalg import (And, Or)
from sympy.matrices.dense import Matrix
from sympy.sets.sets import Interval
from sympy.tensor.indexed import Indexed
from sympy.stats import (Die, Normal, Exponential, FiniteRV, P, E, H, variance,
density, given, independent, dependent, where, pspace, GaussianUnitaryEnsemble,
random_symbols, sample, Geometric, factorial_moment, Binomial, Hypergeometric,
DiscreteUniform, Poisson, characteristic_function, moment_generating_function,
BernoulliProcess, Variance, Expectation, Probability, Covariance, covariance, cmoment,
moment, median)
from sympy.stats.rv import (IndependentProductPSpace, rs_swap, Density, NamedArgsMixin,
RandomSymbol, sample_iter, PSpace, is_random, RandomIndexedSymbol, RandomMatrixSymbol)
from sympy.testing.pytest import raises, skip, XFAIL
from sympy.external import import_module
from sympy.core.numbers import comp
from sympy.stats.frv_types import BernoulliDistribution
from sympy.core.symbol import Dummy
from sympy.functions.elementary.piecewise import Piecewise
def test_where():
X, Y = Die('X'), Die('Y')
Z = Normal('Z', 0, 1)
assert where(Z**2 <= 1).set == Interval(-1, 1)
assert where(Z**2 <= 1).as_boolean() == Interval(-1, 1).as_relational(Z.symbol)
assert where(And(X > Y, Y > 4)).as_boolean() == And(
Eq(X.symbol, 6), Eq(Y.symbol, 5))
assert len(where(X < 3).set) == 2
assert 1 in where(X < 3).set
X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
assert where(And(X**2 <= 1, X >= 0)).set == Interval(0, 1)
XX = given(X, And(X**2 <= 1, X >= 0))
assert XX.pspace.domain.set == Interval(0, 1)
assert XX.pspace.domain.as_boolean() == \
And(0 <= X.symbol, X.symbol**2 <= 1, -oo < X.symbol, X.symbol < oo)
with raises(TypeError):
XX = given(X, X + 3)
def test_random_symbols():
X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
assert set(random_symbols(2*X + 1)) == {X}
assert set(random_symbols(2*X + Y)) == {X, Y}
assert set(random_symbols(2*X + Y.symbol)) == {X}
assert set(random_symbols(2)) == set()
def test_characteristic_function():
# Imports I from sympy
from sympy.core.numbers import I
X = Normal('X',0,1)
Y = DiscreteUniform('Y', [1,2,7])
Z = Poisson('Z', 2)
t = symbols('_t')
P = Lambda(t, exp(-t**2/2))
Q = Lambda(t, exp(7*t*I)/3 + exp(2*t*I)/3 + exp(t*I)/3)
R = Lambda(t, exp(2 * exp(t*I) - 2))
assert characteristic_function(X).dummy_eq(P)
assert characteristic_function(Y).dummy_eq(Q)
assert characteristic_function(Z).dummy_eq(R)
def test_moment_generating_function():
X = Normal('X',0,1)
Y = DiscreteUniform('Y', [1,2,7])
Z = Poisson('Z', 2)
t = symbols('_t')
P = Lambda(t, exp(t**2/2))
Q = Lambda(t, (exp(7*t)/3 + exp(2*t)/3 + exp(t)/3))
R = Lambda(t, exp(2 * exp(t) - 2))
assert moment_generating_function(X).dummy_eq(P)
assert moment_generating_function(Y).dummy_eq(Q)
assert moment_generating_function(Z).dummy_eq(R)
def test_sample_iter():
X = Normal('X',0,1)
Y = DiscreteUniform('Y', [1, 2, 7])
Z = Poisson('Z', 2)
scipy = import_module('scipy')
if not scipy:
skip('Scipy is not installed. Abort tests')
expr = X**2 + 3
iterator = sample_iter(expr)
expr2 = Y**2 + 5*Y + 4
iterator2 = sample_iter(expr2)
expr3 = Z**3 + 4
iterator3 = sample_iter(expr3)
def is_iterator(obj):
if (
hasattr(obj, '__iter__') and
(hasattr(obj, 'next') or
hasattr(obj, '__next__')) and
callable(obj.__iter__) and
obj.__iter__() is obj
):
return True
else:
return False
assert is_iterator(iterator)
assert is_iterator(iterator2)
assert is_iterator(iterator3)
def test_pspace():
X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
x = Symbol('x')
raises(ValueError, lambda: pspace(5 + 3))
raises(ValueError, lambda: pspace(x < 1))
assert pspace(X) == X.pspace
assert pspace(2*X + 1) == X.pspace
assert pspace(2*X + Y) == IndependentProductPSpace(Y.pspace, X.pspace)
def test_rs_swap():
X = Normal('x', 0, 1)
Y = Exponential('y', 1)
XX = Normal('x', 0, 2)
YY = Normal('y', 0, 3)
expr = 2*X + Y
assert expr.subs(rs_swap((X, Y), (YY, XX))) == 2*XX + YY
def test_RandomSymbol():
X = Normal('x', 0, 1)
Y = Normal('x', 0, 2)
assert X.symbol == Y.symbol
assert X != Y
assert X.name == X.symbol.name
X = Normal('lambda', 0, 1) # make sure we can use protected terms
X = Normal('Lambda', 0, 1) # make sure we can use SymPy terms
def test_RandomSymbol_diff():
X = Normal('x', 0, 1)
assert (2*X).diff(X)
def test_random_symbol_no_pspace():
x = RandomSymbol(Symbol('x'))
assert x.pspace == PSpace()
def test_overlap():
X = Normal('x', 0, 1)
Y = Normal('x', 0, 2)
raises(ValueError, lambda: P(X > Y))
def test_IndependentProductPSpace():
X = Normal('X', 0, 1)
Y = Normal('Y', 0, 1)
px = X.pspace
py = Y.pspace
assert pspace(X + Y) == IndependentProductPSpace(px, py)
assert pspace(X + Y) == IndependentProductPSpace(py, px)
def test_E():
assert E(5) == 5
def test_H():
X = Normal('X', 0, 1)
D = Die('D', sides = 4)
G = Geometric('G', 0.5)
assert H(X, X > 0) == -log(2)/2 + S.Half + log(pi)/2
assert H(D, D > 2) == log(2)
assert comp(H(G).evalf().round(2), 1.39)
def test_Sample():
X = Die('X', 6)
Y = Normal('Y', 0, 1)
z = Symbol('z', integer=True)
scipy = import_module('scipy')
if not scipy:
skip('Scipy is not installed. Abort tests')
assert sample(X) in [1, 2, 3, 4, 5, 6]
assert isinstance(sample(X + Y), float)
assert P(X + Y > 0, Y < 0, numsamples=10).is_number
assert E(X + Y, numsamples=10).is_number
assert E(X**2 + Y, numsamples=10).is_number
assert E((X + Y)**2, numsamples=10).is_number
assert variance(X + Y, numsamples=10).is_number
raises(TypeError, lambda: P(Y > z, numsamples=5))
assert P(sin(Y) <= 1, numsamples=10) == 1
assert P(sin(Y) <= 1, cos(Y) < 1, numsamples=10) == 1
assert all(i in range(1, 7) for i in density(X, numsamples=10))
assert all(i in range(4, 7) for i in density(X, X>3, numsamples=10))
numpy = import_module('numpy')
if not numpy:
skip('Numpy is not installed. Abort tests')
#Test Issue #21563: Output of sample must be a float or array
assert isinstance(sample(X), (numpy.int32, numpy.int64))
assert isinstance(sample(Y), numpy.float64)
assert isinstance(sample(X, size=2), numpy.ndarray)
@XFAIL
def test_samplingE():
scipy = import_module('scipy')
if not scipy:
skip('Scipy is not installed. Abort tests')
Y = Normal('Y', 0, 1)
z = Symbol('z', integer=True)
assert E(Sum(1/z**Y, (z, 1, oo)), Y > 2, numsamples=3).is_number
def test_given():
X = Normal('X', 0, 1)
Y = Normal('Y', 0, 1)
A = given(X, True)
B = given(X, Y > 2)
assert X == A == B
def test_factorial_moment():
X = Poisson('X', 2)
Y = Binomial('Y', 2, S.Half)
Z = Hypergeometric('Z', 4, 2, 2)
assert factorial_moment(X, 2) == 4
assert factorial_moment(Y, 2) == S.Half
assert factorial_moment(Z, 2) == Rational(1, 3)
x, y, z, l = symbols('x y z l')
Y = Binomial('Y', 2, y)
Z = Hypergeometric('Z', 10, 2, 3)
assert factorial_moment(Y, l) == y**2*FallingFactorial(
2, l) + 2*y*(1 - y)*FallingFactorial(1, l) + (1 - y)**2*\
FallingFactorial(0, l)
assert factorial_moment(Z, l) == 7*FallingFactorial(0, l)/\
15 + 7*FallingFactorial(1, l)/15 + FallingFactorial(2, l)/15
def test_dependence():
X, Y = Die('X'), Die('Y')
assert independent(X, 2*Y)
assert not dependent(X, 2*Y)
X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
assert independent(X, Y)
assert dependent(X, 2*X)
# Create a dependency
XX, YY = given(Tuple(X, Y), Eq(X + Y, 3))
assert dependent(XX, YY)
def test_dependent_finite():
X, Y = Die('X'), Die('Y')
# Dependence testing requires symbolic conditions which currently break
# finite random variables
assert dependent(X, Y + X)
XX, YY = given(Tuple(X, Y), X + Y > 5) # Create a dependency
assert dependent(XX, YY)
def test_normality():
X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
x = Symbol('x', real=True, finite=True)
z = Symbol('z', real=True, finite=True)
dens = density(X - Y, Eq(X + Y, z))
assert integrate(dens(x), (x, -oo, oo)) == 1
def test_Density():
X = Die('X', 6)
d = Density(X)
assert d.doit() == density(X)
def test_NamedArgsMixin():
class Foo(Basic, NamedArgsMixin):
_argnames = 'foo', 'bar'
a = Foo(1, 2)
assert a.foo == 1
assert a.bar == 2
raises(AttributeError, lambda: a.baz)
class Bar(Basic, NamedArgsMixin):
pass
raises(AttributeError, lambda: Bar(1, 2).foo)
def test_density_constant():
assert density(3)(2) == 0
assert density(3)(3) == DiracDelta(0)
def test_cmoment_constant():
assert variance(3) == 0
assert cmoment(3, 3) == 0
assert cmoment(3, 4) == 0
x = Symbol('x')
assert variance(x) == 0
assert cmoment(x, 15) == 0
assert cmoment(x, 0) == 1
def test_moment_constant():
assert moment(3, 0) == 1
assert moment(3, 1) == 3
assert moment(3, 2) == 9
x = Symbol('x')
assert moment(x, 2) == x**2
def test_median_constant():
assert median(3) == 3
x = Symbol('x')
assert median(x) == x
def test_real():
x = Normal('x', 0, 1)
assert x.is_real
def test_issue_10052():
X = Exponential('X', 3)
assert P(X < oo) == 1
assert P(X > oo) == 0
assert P(X < 2, X > oo) == 0
assert P(X < oo, X > oo) == 0
assert P(X < oo, X > 2) == 1
assert P(X < 3, X == 2) == 0
raises(ValueError, lambda: P(1))
raises(ValueError, lambda: P(X < 1, 2))
def test_issue_11934():
density = {0: .5, 1: .5}
X = FiniteRV('X', density)
assert E(X) == 0.5
assert P(X >= 2) == 0
def test_issue_8129():
X = Exponential('X', 4)
assert P(X >= X) == 1
assert P(X > X) == 0
assert P(X > X+1) == 0
def test_issue_12237():
X = Normal('X', 0, 1)
Y = Normal('Y', 0, 1)
U = P(X > 0, X)
V = P(Y < 0, X)
W = P(X + Y > 0, X)
assert W == P(X + Y > 0, X)
assert U == BernoulliDistribution(S.Half, S.Zero, S.One)
assert V == S.Half
def test_is_random():
X = Normal('X', 0, 1)
Y = Normal('Y', 0, 1)
a, b = symbols('a, b')
G = GaussianUnitaryEnsemble('U', 2)
B = BernoulliProcess('B', 0.9)
assert not is_random(a)
assert not is_random(a + b)
assert not is_random(a * b)
assert not is_random(Matrix([a**2, b**2]))
assert is_random(X)
assert is_random(X**2 + Y)
assert is_random(Y + b**2)
assert is_random(Y > 5)
assert is_random(B[3] < 1)
assert is_random(G)
assert is_random(X * Y * B[1])
assert is_random(Matrix([[X, B[2]], [G, Y]]))
assert is_random(Eq(X, 4))
def test_issue_12283():
x = symbols('x')
X = RandomSymbol(x)
Y = RandomSymbol('Y')
Z = RandomMatrixSymbol('Z', 2, 1)
W = RandomMatrixSymbol('W', 2, 1)
RI = RandomIndexedSymbol(Indexed('RI', 3))
assert pspace(Z) == PSpace()
assert pspace(RI) == PSpace()
assert pspace(X) == PSpace()
assert E(X) == Expectation(X)
assert P(Y > 3) == Probability(Y > 3)
assert variance(X) == Variance(X)
assert variance(RI) == Variance(RI)
assert covariance(X, Y) == Covariance(X, Y)
assert covariance(W, Z) == Covariance(W, Z)
def test_issue_6810():
X = Die('X', 6)
Y = Normal('Y', 0, 1)
assert P(Eq(X, 2)) == S(1)/6
assert P(Eq(Y, 0)) == 0
assert P(Or(X > 2, X < 3)) == 1
assert P(And(X > 3, X > 2)) == S(1)/2
def test_issue_20286():
n, p = symbols('n p')
B = Binomial('B', n, p)
k = Dummy('k', integer = True)
eq = Sum(Piecewise((-p**k*(1 - p)**(-k + n)*log(p**k*(1 - p)**(-k + n)*binomial(n, k))*binomial(n, k), (k >= 0) & (k <= n)), (nan, True)), (k, 0, n))
assert eq.dummy_eq(H(B))
|
247620638b27145e6df6a72741f29a379cda1ac3bbfd2b8d47b4eadb7e16e0da | from sympy.concrete.summations import Sum
from sympy.core.containers import (Dict, Tuple)
from sympy.core.function import Function
from sympy.core.numbers import (I, Rational, nan)
from sympy.core.relational import Eq
from sympy.core.singleton import S
from sympy.core.symbol import (Dummy, Symbol, symbols)
from sympy.core.sympify import sympify
from sympy.functions.combinatorial.factorials import binomial
from sympy.functions.combinatorial.numbers import harmonic
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.elementary.trigonometric import cos
from sympy.functions.special.beta_functions import beta
from sympy.logic.boolalg import (And, Or)
from sympy.polys.polytools import cancel
from sympy.sets.sets import FiniteSet
from sympy.simplify.simplify import simplify
from sympy.matrices import Matrix
from sympy.stats import (DiscreteUniform, Die, Bernoulli, Coin, Binomial, BetaBinomial,
Hypergeometric, Rademacher, IdealSoliton, RobustSoliton, P, E, variance,
covariance, skewness, density, where, FiniteRV, pspace, cdf,
correlation, moment, cmoment, smoment, characteristic_function,
moment_generating_function, quantile, kurtosis, median, coskewness)
from sympy.stats.frv_types import DieDistribution, BinomialDistribution, \
HypergeometricDistribution
from sympy.stats.rv import Density
from sympy.testing.pytest import raises
def BayesTest(A, B):
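    # Sanity checks: the definition of conditional probability and Bayes' rule.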
assert P(A, B) == P(And(A, B)) / P(B)
assert P(A, B) == P(B, A) * P(A) / P(B)
def test_discreteuniform():
# Symbolic
a, b, c, t = symbols('a b c t')
X = DiscreteUniform('X', [a, b, c])
assert E(X) == (a + b + c)/3
assert simplify(variance(X)
- ((a**2 + b**2 + c**2)/3 - (a/3 + b/3 + c/3)**2)) == 0
assert P(Eq(X, a)) == P(Eq(X, b)) == P(Eq(X, c)) == S('1/3')
Y = DiscreteUniform('Y', range(-5, 5))
# Numeric
assert E(Y) == S('-1/2')
assert variance(Y) == S('33/4')
assert median(Y) == FiniteSet(-1, 0)
for x in range(-5, 5):
assert P(Eq(Y, x)) == S('1/10')
assert P(Y <= x) == S(x + 6)/10
assert P(Y >= x) == S(5 - x)/10
assert dict(density(Die('D', 6)).items()) == \
dict(density(DiscreteUniform('U', range(1, 7))).items())
assert characteristic_function(X)(t) == exp(I*a*t)/3 + exp(I*b*t)/3 + exp(I*c*t)/3
assert moment_generating_function(X)(t) == exp(a*t)/3 + exp(b*t)/3 + exp(c*t)/3
# issue 18611
raises(ValueError, lambda: DiscreteUniform('Z', [a, a, a, b, b, c]))
def test_dice():
# TODO: Make iid method!
X, Y, Z = Die('X', 6), Die('Y', 6), Die('Z', 6)
a, b, t, p = symbols('a b t p')
assert E(X) == 3 + S.Half
assert variance(X) == Rational(35, 12)
assert E(X + Y) == 7
assert E(X + X) == 7
assert E(a*X + b) == a*E(X) + b
assert variance(X + Y) == variance(X) + variance(Y) == cmoment(X + Y, 2)
assert variance(X + X) == 4 * variance(X) == cmoment(X + X, 2)
assert cmoment(X, 0) == 1
assert cmoment(4*X, 3) == 64*cmoment(X, 3)
assert covariance(X, Y) is S.Zero
assert covariance(X, X + Y) == variance(X)
assert density(Eq(cos(X*S.Pi), 1))[True] == S.Half
assert correlation(X, Y) == 0
assert correlation(X, Y) == correlation(Y, X)
assert smoment(X + Y, 3) == skewness(X + Y)
assert smoment(X + Y, 4) == kurtosis(X + Y)
assert smoment(X, 0) == 1
assert P(X > 3) == S.Half
assert P(2*X > 6) == S.Half
assert P(X > Y) == Rational(5, 12)
assert P(Eq(X, Y)) == P(Eq(X, 1))
assert E(X, X > 3) == 5 == moment(X, 1, 0, X > 3)
assert E(X, Y > 3) == E(X) == moment(X, 1, 0, Y > 3)
assert E(X + Y, Eq(X, Y)) == E(2*X)
assert moment(X, 0) == 1
assert moment(5*X, 2) == 25*moment(X, 2)
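    # quantile(X) is the inverse CDF, given as a Piecewise in the probability p.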
assert quantile(X)(p) == Piecewise((nan, (p > 1) | (p < 0)),\
(S.One, p <= Rational(1, 6)), (S(2), p <= Rational(1, 3)), (S(3), p <= S.Half),\
(S(4), p <= Rational(2, 3)), (S(5), p <= Rational(5, 6)), (S(6), p <= 1))
assert P(X > 3, X > 3) is S.One
assert P(X > Y, Eq(Y, 6)) is S.Zero
assert P(Eq(X + Y, 12)) == Rational(1, 36)
assert P(Eq(X + Y, 12), Eq(X, 6)) == Rational(1, 6)
assert density(X + Y) == density(Y + Z) != density(X + X)
d = density(2*X + Y**Z)
assert d[S(22)] == Rational(1, 108) and d[S(4100)] == Rational(1, 216) and S(3130) not in d
assert pspace(X).domain.as_boolean() == Or(
*[Eq(X.symbol, i) for i in [1, 2, 3, 4, 5, 6]])
assert where(X > 3).set == FiniteSet(4, 5, 6)
assert characteristic_function(X)(t) == exp(6*I*t)/6 + exp(5*I*t)/6 + exp(4*I*t)/6 + exp(3*I*t)/6 + exp(2*I*t)/6 + exp(I*t)/6
assert moment_generating_function(X)(t) == exp(6*t)/6 + exp(5*t)/6 + exp(4*t)/6 + exp(3*t)/6 + exp(2*t)/6 + exp(t)/6
assert median(X) == FiniteSet(3, 4)
D = Die('D', 7)
assert median(D) == FiniteSet(4)
# Bayes test for die
BayesTest(X > 3, X + Y < 5)
BayesTest(Eq(X - Y, Z), Z > Y)
BayesTest(X > 3, X > 2)
# arg test for die
raises(ValueError, lambda: Die('X', -1)) # issue 8105: negative sides.
raises(ValueError, lambda: Die('X', 0))
raises(ValueError, lambda: Die('X', 1.5)) # issue 8103: non integer sides.
# symbolic test for die
n, k = symbols('n, k', positive=True)
D = Die('D', n)
dens = density(D).dict
assert dens == Density(DieDistribution(n))
assert set(dens.subs(n, 4).doit().keys()) == {1, 2, 3, 4}
assert set(dens.subs(n, 4).doit().values()) == {Rational(1, 4)}
k = Dummy('k', integer=True)
assert E(D).dummy_eq(
Sum(Piecewise((k/n, k <= n), (0, True)), (k, 1, n)))
assert variance(D).subs(n, 6).doit() == Rational(35, 12)
ki = Dummy('ki')
cumuf = cdf(D)(k)
assert cumuf.dummy_eq(
Sum(Piecewise((1/n, (ki >= 1) & (ki <= n)), (0, True)), (ki, 1, k)))
assert cumuf.subs({n: 6, k: 2}).doit() == Rational(1, 3)
t = Dummy('t')
cf = characteristic_function(D)(t)
assert cf.dummy_eq(
Sum(Piecewise((exp(ki*I*t)/n, (ki >= 1) & (ki <= n)), (0, True)), (ki, 1, n)))
assert cf.subs(n, 3).doit() == exp(3*I*t)/3 + exp(2*I*t)/3 + exp(I*t)/3
mgf = moment_generating_function(D)(t)
assert mgf.dummy_eq(
Sum(Piecewise((exp(ki*t)/n, (ki >= 1) & (ki <= n)), (0, True)), (ki, 1, n)))
assert mgf.subs(n, 3).doit() == exp(3*t)/3 + exp(2*t)/3 + exp(t)/3
def test_given():
X = Die('X', 6)
assert density(X, X > 5) == {S(6): S.One}
assert where(X > 2, X > 5).as_boolean() == Eq(X.symbol, 6)
def test_domains():
X, Y = Die('x', 6), Die('y', 6)
x, y = X.symbol, Y.symbol
# Domains
d = where(X > Y)
assert d.condition == (x > y)
d = where(And(X > Y, Y > 3))
assert d.as_boolean() == Or(And(Eq(x, 5), Eq(y, 4)), And(Eq(x, 6),
Eq(y, 5)), And(Eq(x, 6), Eq(y, 4)))
assert len(d.elements) == 3
assert len(pspace(X + Y).domain.elements) == 36
Z = Die('x', 4)
raises(ValueError, lambda: P(X > Z)) # Two domains with same internal symbol
assert pspace(X + Y).domain.set == FiniteSet(1, 2, 3, 4, 5, 6)**2
assert where(X > 3).set == FiniteSet(4, 5, 6)
assert X.pspace.domain.dict == FiniteSet(
*[Dict({X.symbol: i}) for i in range(1, 7)])
assert where(X > Y).dict == FiniteSet(*[Dict({X.symbol: i, Y.symbol: j})
for i in range(1, 7) for j in range(1, 7) if i > j])
def test_bernoulli():
p, a, b, t = symbols('p a b t')
X = Bernoulli('B', p, a, b)
assert E(X) == a*p + b*(-p + 1)
assert density(X)[a] == p
assert density(X)[b] == 1 - p
assert characteristic_function(X)(t) == p * exp(I * a * t) + (-p + 1) * exp(I * b * t)
assert moment_generating_function(X)(t) == p * exp(a * t) + (-p + 1) * exp(b * t)
X = Bernoulli('B', p, 1, 0)
z = Symbol("z")
assert E(X) == p
assert simplify(variance(X)) == p*(1 - p)
assert E(a*X + b) == a*E(X) + b
assert simplify(variance(a*X + b)) == simplify(a**2 * variance(X))
assert quantile(X)(z) == Piecewise((nan, (z > 1) | (z < 0)), (0, z <= 1 - p), (1, z <= 1))
Y = Bernoulli('Y', Rational(1, 2))
assert median(Y) == FiniteSet(0, 1)
Z = Bernoulli('Z', Rational(2, 3))
assert median(Z) == FiniteSet(1)
raises(ValueError, lambda: Bernoulli('B', 1.5))
raises(ValueError, lambda: Bernoulli('B', -0.5))
#issue 8248
assert X.pspace.compute_expectation(1) == 1
p = Rational(1, 5)
X = Binomial('X', 5, p)
Y = Binomial('Y', 7, 2*p)
Z = Binomial('Z', 9, 3*p)
assert coskewness(Y + Z, X + Y, X + Z).simplify() == 0
assert coskewness(Y + 2*X + Z, X + 2*Y + Z, X + 2*Z + Y).simplify() == \
sqrt(1529)*Rational(12, 16819)
assert coskewness(Y + 2*X + Z, X + 2*Y + Z, X + 2*Z + Y, X < 2).simplify() \
== -sqrt(357451121)*Rational(2812, 4646864573)
def test_cdf():
D = Die('D', 6)
o = S.One
assert cdf(
D) == sympify({1: o/6, 2: o/3, 3: o/2, 4: 2*o/3, 5: 5*o/6, 6: o})
def test_coins():
C, D = Coin('C'), Coin('D')
H, T = symbols('H, T')
assert P(Eq(C, D)) == S.Half
assert density(Tuple(C, D)) == {(H, H): Rational(1, 4), (H, T): Rational(1, 4),
(T, H): Rational(1, 4), (T, T): Rational(1, 4)}
assert dict(density(C).items()) == {H: S.Half, T: S.Half}
F = Coin('F', Rational(1, 10))
assert P(Eq(F, H)) == Rational(1, 10)
d = pspace(C).domain
assert d.as_boolean() == Or(Eq(C.symbol, H), Eq(C.symbol, T))
raises(ValueError, lambda: P(C > D)) # Can't intelligently compare H to T
def test_binomial_verify_parameters():
raises(ValueError, lambda: Binomial('b', .2, .5))
raises(ValueError, lambda: Binomial('b', 3, 1.5))
def test_binomial_numeric():
nvals = range(5)
pvals = [0, Rational(1, 4), S.Half, Rational(3, 4), 1]
for n in nvals:
for p in pvals:
X = Binomial('X', n, p)
assert E(X) == n*p
assert variance(X) == n*p*(1 - p)
if n > 0 and 0 < p < 1:
assert skewness(X) == (1 - 2*p)/sqrt(n*p*(1 - p))
assert kurtosis(X) == 3 + (1 - 6*p*(1 - p))/(n*p*(1 - p))
for k in range(n + 1):
assert P(Eq(X, k)) == binomial(n, k)*p**k*(1 - p)**(n - k)
def test_binomial_quantile():
X = Binomial('X', 50, S.Half)
assert quantile(X)(0.95) == S(31)
assert median(X) == FiniteSet(25)
X = Binomial('X', 5, S.Half)
p = Symbol("p", positive=True)
assert quantile(X)(p) == Piecewise((nan, p > S.One), (S.Zero, p <= Rational(1, 32)),\
(S.One, p <= Rational(3, 16)), (S(2), p <= S.Half), (S(3), p <= Rational(13, 16)),\
(S(4), p <= Rational(31, 32)), (S(5), p <= S.One))
assert median(X) == FiniteSet(2, 3)
def test_binomial_symbolic():
n = 2
p = symbols('p', positive=True)
X = Binomial('X', n, p)
t = Symbol('t')
assert simplify(E(X)) == n*p == simplify(moment(X, 1))
assert simplify(variance(X)) == n*p*(1 - p) == simplify(cmoment(X, 2))
assert cancel(skewness(X) - (1 - 2*p)/sqrt(n*p*(1 - p))) == 0
assert cancel((kurtosis(X)) - (3 + (1 - 6*p*(1 - p))/(n*p*(1 - p)))) == 0
assert characteristic_function(X)(t) == p ** 2 * exp(2 * I * t) + 2 * p * (-p + 1) * exp(I * t) + (-p + 1) ** 2
assert moment_generating_function(X)(t) == p ** 2 * exp(2 * t) + 2 * p * (-p + 1) * exp(t) + (-p + 1) ** 2
# Test ability to change success/failure winnings
H, T = symbols('H T')
Y = Binomial('Y', n, p, succ=H, fail=T)
assert simplify(E(Y) - (n*(H*p + T*(1 - p)))) == 0
# test symbolic dimensions
n = symbols('n')
B = Binomial('B', n, p)
raises(NotImplementedError, lambda: P(B > 2))
assert density(B).dict == Density(BinomialDistribution(n, p, 1, 0))
assert set(density(B).dict.subs(n, 4).doit().keys()) == \
{S.Zero, S.One, S(2), S(3), S(4)}
assert set(density(B).dict.subs(n, 4).doit().values()) == \
{(1 - p)**4, 4*p*(1 - p)**3, 6*p**2*(1 - p)**2, 4*p**3*(1 - p), p**4}
k = Dummy('k', integer=True)
assert E(B > 2).dummy_eq(
Sum(Piecewise((k*p**k*(1 - p)**(-k + n)*binomial(n, k), (k >= 0)
& (k <= n) & (k > 2)), (0, True)), (k, 0, n)))
def test_beta_binomial():
# verify parameters
raises(ValueError, lambda: BetaBinomial('b', .2, 1, 2))
raises(ValueError, lambda: BetaBinomial('b', 2, -1, 2))
raises(ValueError, lambda: BetaBinomial('b', 2, 1, -2))
assert BetaBinomial('b', 2, 1, 1)
# test numeric values
    nvals = range(1, 5)
alphavals = [Rational(1, 4), S.Half, Rational(3, 4), 1, 10]
betavals = [Rational(1, 4), S.Half, Rational(3, 4), 1, 10]
for n in nvals:
for a in alphavals:
for b in betavals:
X = BetaBinomial('X', n, a, b)
assert E(X) == moment(X, 1)
assert variance(X) == cmoment(X, 2)
# test symbolic
    n, a, b = symbols('n a b')
assert BetaBinomial('x', n, a, b)
n = 2 # Because we're using for loops, can't do symbolic n
a, b = symbols('a b', positive=True)
X = BetaBinomial('X', n, a, b)
t = Symbol('t')
assert E(X).expand() == moment(X, 1).expand()
assert variance(X).expand() == cmoment(X, 2).expand()
assert skewness(X) == smoment(X, 3)
assert characteristic_function(X)(t) == exp(2*I*t)*beta(a + 2, b)/beta(a, b) +\
2*exp(I*t)*beta(a + 1, b + 1)/beta(a, b) + beta(a, b + 2)/beta(a, b)
assert moment_generating_function(X)(t) == exp(2*t)*beta(a + 2, b)/beta(a, b) +\
2*exp(t)*beta(a + 1, b + 1)/beta(a, b) + beta(a, b + 2)/beta(a, b)
def test_hypergeometric_numeric():
for N in range(1, 5):
for m in range(0, N + 1):
for n in range(1, N + 1):
X = Hypergeometric('X', N, m, n)
N, m, n = map(sympify, (N, m, n))
assert sum(density(X).values()) == 1
assert E(X) == n * m / N
if N > 1:
assert variance(X) == n*(m/N)*(N - m)/N*(N - n)/(N - 1)
# Only test for skewness when defined
if N > 2 and 0 < m < N and n < N:
assert skewness(X) == simplify((N - 2*m)*sqrt(N - 1)*(N - 2*n)
/ (sqrt(n*m*(N - m)*(N - n))*(N - 2)))
def test_hypergeometric_symbolic():
N, m, n = symbols('N, m, n')
H = Hypergeometric('H', N, m, n)
dens = density(H).dict
expec = E(H > 2)
assert dens == Density(HypergeometricDistribution(N, m, n))
assert dens.subs(N, 5).doit() == Density(HypergeometricDistribution(5, m, n))
assert set(dens.subs({N: 3, m: 2, n: 1}).doit().keys()) == {S.Zero, S.One}
assert set(dens.subs({N: 3, m: 2, n: 1}).doit().values()) == {Rational(1, 3), Rational(2, 3)}
k = Dummy('k', integer=True)
assert expec.dummy_eq(
Sum(Piecewise((k*binomial(m, k)*binomial(N - m, -k + n)
/binomial(N, n), k > 2), (0, True)), (k, 0, n)))
def test_rademacher():
X = Rademacher('X')
t = Symbol('t')
assert E(X) == 0
assert variance(X) == 1
assert density(X)[-1] == S.Half
assert density(X)[1] == S.Half
assert characteristic_function(X)(t) == exp(I*t)/2 + exp(-I*t)/2
assert moment_generating_function(X)(t) == exp(t) / 2 + exp(-t) / 2
def test_ideal_soliton():
    raises(ValueError, lambda: IdealSoliton('sol', -12))
    raises(ValueError, lambda: IdealSoliton('sol', 13.2))
    raises(ValueError, lambda: IdealSoliton('sol', 0))
    f = Function('f')
    raises(ValueError, lambda: density(IdealSoliton('sol', 10)).pmf(f))
k = Symbol('k', integer=True, positive=True)
x = Symbol('x', integer=True, positive=True)
t = Symbol('t')
sol = IdealSoliton('sol', k)
assert density(sol).low == S.One
assert density(sol).high == k
assert density(sol).dict == Density(density(sol))
assert density(sol).pmf(x) == Piecewise((1/k, Eq(x, 1)), (1/(x*(x - 1)), k >= x), (0, True))
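    # The ideal soliton distribution has mean equal to the k-th harmonic number.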
k_vals = [5, 20, 50, 100, 1000]
for i in k_vals:
assert E(sol.subs(k, i)) == harmonic(i) == moment(sol.subs(k, i), 1)
assert variance(sol.subs(k, i)) == (i - 1) + harmonic(i) - harmonic(i)**2 == cmoment(sol.subs(k, i),2)
assert skewness(sol.subs(k, i)) == smoment(sol.subs(k, i), 3)
assert kurtosis(sol.subs(k, i)) == smoment(sol.subs(k, i), 4)
assert exp(I*t)/10 + Sum(exp(I*t*x)/(x*x - x), (x, 2, k)).subs(k, 10).doit() == characteristic_function(sol.subs(k, 10))(t)
assert exp(t)/10 + Sum(exp(t*x)/(x*x - x), (x, 2, k)).subs(k, 10).doit() == moment_generating_function(sol.subs(k, 10))(t)
def test_robust_soliton():
    raises(ValueError, lambda: RobustSoliton('robSol', -12, 0.1, 0.02))
    raises(ValueError, lambda: RobustSoliton('robSol', 13, 1.89, 0.1))
    raises(ValueError, lambda: RobustSoliton('robSol', 15, 0.6, -2.31))
    f = Function('f')
    raises(ValueError, lambda: density(RobustSoliton('robSol', 15, 0.6, 0.1)).pmf(f))
k = Symbol('k', integer=True, positive=True)
delta = Symbol('delta', positive=True)
c = Symbol('c', positive=True)
robSol = RobustSoliton('robSol', k, delta, c)
assert density(robSol).low == 1
assert density(robSol).high == k
k_vals = [10, 20, 50]
delta_vals = [0.2, 0.4, 0.6]
c_vals = [0.01, 0.03, 0.05]
for x in k_vals:
for y in delta_vals:
for z in c_vals:
assert E(robSol.subs({k: x, delta: y, c: z})) == moment(robSol.subs({k: x, delta: y, c: z}), 1)
assert variance(robSol.subs({k: x, delta: y, c: z})) == cmoment(robSol.subs({k: x, delta: y, c: z}), 2)
assert skewness(robSol.subs({k: x, delta: y, c: z})) == smoment(robSol.subs({k: x, delta: y, c: z}), 3)
assert kurtosis(robSol.subs({k: x, delta: y, c: z})) == smoment(robSol.subs({k: x, delta: y, c: z}), 4)
def test_FiniteRV():
F = FiniteRV('F', {1: S.Half, 2: Rational(1, 4), 3: Rational(1, 4)}, check=True)
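    # check=True validates the given pmf: values must be non-negative and sum to 1.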
p = Symbol("p", positive=True)
assert dict(density(F).items()) == {S.One: S.Half, S(2): Rational(1, 4), S(3): Rational(1, 4)}
assert P(F >= 2) == S.Half
assert quantile(F)(p) == Piecewise((nan, p > S.One), (S.One, p <= S.Half),\
(S(2), p <= Rational(3, 4)),(S(3), True))
assert pspace(F).domain.as_boolean() == Or(
*[Eq(F.symbol, i) for i in [1, 2, 3]])
assert F.pspace.domain.set == FiniteSet(1, 2, 3)
raises(ValueError, lambda: FiniteRV('F', {1: S.Half, 2: S.Half, 3: S.Half}, check=True))
raises(ValueError, lambda: FiniteRV('F', {1: S.Half, 2: Rational(-1, 2), 3: S.One}, check=True))
raises(ValueError, lambda: FiniteRV('F', {1: S.One, 2: Rational(3, 2), 3: S.Zero,\
4: Rational(-1, 2), 5: Rational(-3, 4), 6: Rational(-1, 4)}, check=True))
# purposeful invalid pmf but it should not raise since check=False
# see test_drv_types.test_ContinuousRV for explanation
X = FiniteRV('X', {1: 1, 2: 2})
assert E(X) == 5
assert P(X <= 2) + P(X > 2) != 1
def test_density_call():
from sympy.abc import p
x = Bernoulli('x', p)
d = density(x)
assert d(0) == 1 - p
assert d(S.Zero) == 1 - p
assert d(5) == 0
assert 0 in d
assert 5 not in d
assert d(S.Zero) == d[S.Zero]
def test_DieDistribution():
from sympy.abc import x
X = DieDistribution(6)
assert X.pmf(S.Half) is S.Zero
assert X.pmf(x).subs({x: 1}).doit() == Rational(1, 6)
assert X.pmf(x).subs({x: 7}).doit() == 0
assert X.pmf(x).subs({x: -1}).doit() == 0
assert X.pmf(x).subs({x: Rational(1, 3)}).doit() == 0
raises(ValueError, lambda: X.pmf(Matrix([0, 0])))
raises(ValueError, lambda: X.pmf(x**2 - 1))
def test_FinitePSpace():
X = Die('X', 6)
space = pspace(X)
assert space.density == DieDistribution(6)
def test_symbolic_conditions():
B = Bernoulli('B', Rational(1, 4))
D = Die('D', 4)
b, n = symbols('b, n')
Y = P(Eq(B, b))
Z = E(D > n)
assert Y == \
Piecewise((Rational(1, 4), Eq(b, 1)), (0, True)) + \
Piecewise((Rational(3, 4), Eq(b, 0)), (0, True))
assert Z == \
Piecewise((Rational(1, 4), n < 1), (0, True)) + Piecewise((S.Half, n < 2), (0, True)) + \
Piecewise((Rational(3, 4), n < 3), (0, True)) + Piecewise((S.One, n < 4), (0, True))
|
7991775b4e3d7ad9e3794931c7295738d7ab5136636f00a2e6855e48fd7c96f5 | from sympy.stats import Expectation, Normal, Variance, Covariance
from sympy.testing.pytest import raises
from sympy.core.symbol import symbols
from sympy.matrices.common import ShapeError
from sympy.matrices.dense import Matrix
from sympy.matrices.expressions.matexpr import MatrixSymbol
from sympy.matrices.expressions.special import ZeroMatrix
from sympy.stats.rv import RandomMatrixSymbol
from sympy.stats.symbolic_multivariate_probability import (ExpectationMatrix,
VarianceMatrix, CrossCovarianceMatrix)
j, k = symbols("j,k")
A = MatrixSymbol("A", k, k)
B = MatrixSymbol("B", k, k)
C = MatrixSymbol("C", k, k)
D = MatrixSymbol("D", k, k)
a = MatrixSymbol("a", k, 1)
b = MatrixSymbol("b", k, 1)
A2 = MatrixSymbol("A2", 2, 2)
B2 = MatrixSymbol("B2", 2, 2)
X = RandomMatrixSymbol("X", k, 1)
Y = RandomMatrixSymbol("Y", k, 1)
Z = RandomMatrixSymbol("Z", k, 1)
W = RandomMatrixSymbol("W", k, 1)
R = RandomMatrixSymbol("R", k, k)
X2 = RandomMatrixSymbol("X2", 2, 1)
normal = Normal("normal", 0, 1)
m1 = Matrix([
[1, j*Normal("normal2", 2, 1)],
[normal, 0]
])
def test_multivariate_expectation():
expr = Expectation(a)
assert expr == Expectation(a) == ExpectationMatrix(a)
assert expr.expand() == a
expr = Expectation(X)
assert expr == Expectation(X) == ExpectationMatrix(X)
assert expr.shape == (k, 1)
assert expr.rows == k
assert expr.cols == 1
assert isinstance(expr, ExpectationMatrix)
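    # expand() applies linearity: constant matrices and vectors factor out of the expectation.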
expr = Expectation(A*X + b)
assert expr == ExpectationMatrix(A*X + b)
assert expr.expand() == A*ExpectationMatrix(X) + b
assert isinstance(expr, ExpectationMatrix)
assert expr.shape == (k, 1)
expr = Expectation(m1*X2)
assert expr.expand() == expr
expr = Expectation(A2*m1*B2*X2)
assert expr.args[0].args == (A2, m1, B2, X2)
assert expr.expand() == A2*ExpectationMatrix(m1*B2*X2)
expr = Expectation((X + Y)*(X - Y).T)
assert expr.expand() == ExpectationMatrix(X*X.T) - ExpectationMatrix(X*Y.T) +\
ExpectationMatrix(Y*X.T) - ExpectationMatrix(Y*Y.T)
expr = Expectation(A*X + B*Y)
assert expr.expand() == A*ExpectationMatrix(X) + B*ExpectationMatrix(Y)
assert Expectation(m1).doit() == Matrix([[1, 2*j], [0, 0]])
x1 = Matrix([
[Normal('N11', 11, 1), Normal('N12', 12, 1)],
[Normal('N21', 21, 1), Normal('N22', 22, 1)]
])
x2 = Matrix([
[Normal('M11', 1, 1), Normal('M12', 2, 1)],
[Normal('M21', 3, 1), Normal('M22', 4, 1)]
])
assert Expectation(Expectation(x1 + x2)).doit(deep=False) == ExpectationMatrix(x1 + x2)
assert Expectation(Expectation(x1 + x2)).doit() == Matrix([[12, 14], [24, 26]])
def test_multivariate_variance():
raises(ShapeError, lambda: Variance(A))
expr = Variance(a) # type: VarianceMatrix
assert expr == Variance(a) == VarianceMatrix(a)
assert expr.expand() == ZeroMatrix(k, k)
expr = Variance(a.T)
assert expr == Variance(a.T) == VarianceMatrix(a.T)
assert expr.expand() == ZeroMatrix(k, k)
expr = Variance(X)
assert expr == Variance(X) == VarianceMatrix(X)
assert expr.shape == (k, k)
assert expr.rows == k
assert expr.cols == k
assert isinstance(expr, VarianceMatrix)
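    # For a constant matrix A, variance transforms as Var(A*X) = A*Var(X)*A.T.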
expr = Variance(A*X)
assert expr == VarianceMatrix(A*X)
assert expr.expand() == A*VarianceMatrix(X)*A.T
assert isinstance(expr, VarianceMatrix)
assert expr.shape == (k, k)
expr = Variance(A*B*X)
assert expr.expand() == A*B*VarianceMatrix(X)*B.T*A.T
expr = Variance(m1*X2)
assert expr.expand() == expr
expr = Variance(A2*m1*B2*X2)
assert expr.args[0].args == (A2, m1, B2, X2)
assert expr.expand() == expr
expr = Variance(A*X + B*Y)
assert expr.expand() == 2*A*CrossCovarianceMatrix(X, Y)*B.T +\
A*VarianceMatrix(X)*A.T + B*VarianceMatrix(Y)*B.T
def test_multivariate_crosscovariance():
raises(ShapeError, lambda: Covariance(X, Y.T))
raises(ShapeError, lambda: Covariance(X, A))
expr = Covariance(a.T, b.T)
assert expr.shape == (1, 1)
assert expr.expand() == ZeroMatrix(1, 1)
expr = Covariance(a, b)
assert expr == Covariance(a, b) == CrossCovarianceMatrix(a, b)
assert expr.expand() == ZeroMatrix(k, k)
assert expr.shape == (k, k)
assert expr.rows == k
assert expr.cols == k
assert isinstance(expr, CrossCovarianceMatrix)
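    # Cross-covariance is bilinear: Cov(A*X + a, B*Y + b) = A*Cov(X, Y)*B.T,
    # and constant shifts drop out.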
expr = Covariance(A*X + a, b)
assert expr.expand() == ZeroMatrix(k, k)
expr = Covariance(X, Y)
assert isinstance(expr, CrossCovarianceMatrix)
assert expr.expand() == expr
expr = Covariance(X, X)
assert isinstance(expr, CrossCovarianceMatrix)
assert expr.expand() == VarianceMatrix(X)
expr = Covariance(X + Y, Z)
assert isinstance(expr, CrossCovarianceMatrix)
assert expr.expand() == CrossCovarianceMatrix(X, Z) + CrossCovarianceMatrix(Y, Z)
expr = Covariance(A*X, Y)
assert isinstance(expr, CrossCovarianceMatrix)
assert expr.expand() == A*CrossCovarianceMatrix(X, Y)
expr = Covariance(X, B*Y)
assert isinstance(expr, CrossCovarianceMatrix)
assert expr.expand() == CrossCovarianceMatrix(X, Y)*B.T
expr = Covariance(A*X + a, B.T*Y + b)
assert isinstance(expr, CrossCovarianceMatrix)
assert expr.expand() == A*CrossCovarianceMatrix(X, Y)*B
expr = Covariance(A*X + B*Y + a, C.T*Z + D.T*W + b)
assert isinstance(expr, CrossCovarianceMatrix)
assert expr.expand() == A*CrossCovarianceMatrix(X, W)*D + A*CrossCovarianceMatrix(X, Z)*C \
+ B*CrossCovarianceMatrix(Y, W)*D + B*CrossCovarianceMatrix(Y, Z)*C
|
c4a024c371795c9ef6c620d1108ecf93245e58d556ffcf420ff9a3398fdd8836 | from sympy.core.function import Function
from sympy.core.symbol import symbols
from sympy.functions.elementary.exponential import exp
from sympy.stats.error_prop import variance_prop
from sympy.stats.symbolic_probability import (RandomSymbol, Variance,
Covariance)
def test_variance_prop():
x, y, z = symbols('x y z')
phi, t = consts = symbols('phi t')
a = RandomSymbol(x)
var_x = Variance(a)
var_y = Variance(RandomSymbol(y))
var_z = Variance(RandomSymbol(z))
f = Function('f')(x)
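    # variance_prop uses first-order (delta-method) propagation: each input x
    # contributes (d expr/dx)**2 * Var(x); unknown functions stay as Variance(f).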
cases = {
x + y: var_x + var_y,
a + y: var_x + var_y,
x + y + z: var_x + var_y + var_z,
2*x: 4*var_x,
x*y: var_x*y**2 + var_y*x**2,
1/x: var_x/x**4,
x/y: (var_x*y**2 + var_y*x**2)/y**4,
exp(x): var_x*exp(2*x),
exp(2*x): 4*var_x*exp(4*x),
exp(-x*t): t**2*var_x*exp(-2*t*x),
f: Variance(f),
}
for inp, out in cases.items():
obs = variance_prop(inp, consts=consts)
assert out == obs
def test_variance_prop_with_covar():
x, y, z = symbols('x y z')
phi, t = consts = symbols('phi t')
a = RandomSymbol(x)
var_x = Variance(a)
b = RandomSymbol(y)
var_y = Variance(b)
c = RandomSymbol(z)
var_z = Variance(c)
covar_x_y = Covariance(a, b)
covar_x_z = Covariance(a, c)
covar_y_z = Covariance(b, c)
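    # include_covar=True also keeps the cross-covariance terms between the inputs.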
cases = {
x + y: var_x + var_y + 2*covar_x_y,
a + y: var_x + var_y + 2*covar_x_y,
x + y + z: var_x + var_y + var_z + \
2*covar_x_y + 2*covar_x_z + 2*covar_y_z,
2*x: 4*var_x,
x*y: var_x*y**2 + var_y*x**2 + 2*covar_x_y/(x*y),
1/x: var_x/x**4,
exp(x): var_x*exp(2*x),
exp(2*x): 4*var_x*exp(4*x),
exp(-x*t): t**2*var_x*exp(-2*t*x),
}
for inp, out in cases.items():
obs = variance_prop(inp, consts=consts, include_covar=True)
assert out == obs
|
e082f014c47420431c033bf96930e72c443180f96a55c2e3ccb52451ef096952 | from sympy.concrete.products import Product
from sympy.concrete.summations import Sum
from sympy.core.numbers import (Rational, oo, pi)
from sympy.core.relational import Eq
from sympy.core.singleton import S
from sympy.core.symbol import symbols
from sympy.functions.combinatorial.factorials import (RisingFactorial, factorial)
from sympy.functions.elementary.complexes import polar_lift
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.special.bessel import besselk
from sympy.functions.special.gamma_functions import gamma
from sympy.matrices.dense import eye
from sympy.matrices.expressions.determinant import Determinant
from sympy.sets.fancysets import Range
from sympy.sets.sets import (Interval, ProductSet)
from sympy.simplify.simplify import simplify
from sympy.tensor.indexed import (Indexed, IndexedBase)
from sympy.core.numbers import comp
from sympy.integrals.integrals import integrate
from sympy.matrices import Matrix, MatrixSymbol
from sympy.matrices.expressions.matexpr import MatrixElement
from sympy.stats import density, median, marginal_distribution, Normal, Laplace, E, sample
from sympy.stats.joint_rv_types import (JointRV, MultivariateNormalDistribution,
JointDistributionHandmade, MultivariateT, NormalGamma,
GeneralizedMultivariateLogGammaOmega as GMVLGO, MultivariateBeta,
GeneralizedMultivariateLogGamma as GMVLG, MultivariateEwens,
Multinomial, NegativeMultinomial, MultivariateNormal,
MultivariateLaplace)
from sympy.testing.pytest import raises, XFAIL, skip
from sympy.external import import_module
from sympy.abc import x, y
def test_Normal():
m = Normal('A', [1, 2], [[1, 0], [0, 1]])
A = MultivariateNormal('A', [1, 2], [[1, 0], [0, 1]])
assert m == A
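    # At its mean, a bivariate normal density equals 1/(2*pi*sqrt(det(Sigma))); here Sigma = I.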
assert density(m)(1, 2) == 1/(2*pi)
assert m.pspace.distribution.set == ProductSet(S.Reals, S.Reals)
    raises(ValueError, lambda: m[2])
n = Normal('B', [1, 2, 3], [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
p = Normal('C', Matrix([1, 2]), Matrix([[1, 0], [0, 1]]))
assert density(m)(x, y) == density(p)(x, y)
assert marginal_distribution(n, 0, 1)(1, 2) == 1/(2*pi)
raises(ValueError, lambda: marginal_distribution(m))
assert integrate(density(m)(x, y), (x, -oo, oo), (y, -oo, oo)).evalf() == 1
N = Normal('N', [1, 2], [[x, 0], [0, y]])
assert density(N)(0, 0) == exp(-((4*x + y)/(2*x*y)))/(2*pi*sqrt(x*y))
    raises(ValueError, lambda: Normal('M', [1, 2], [[1, 1], [1, -1]]))
# symbolic
n = symbols('n', integer=True, positive=True)
mu = MatrixSymbol('mu', n, 1)
sigma = MatrixSymbol('sigma', n, n)
X = Normal('X', mu, sigma)
assert density(X) == MultivariateNormalDistribution(mu, sigma)
    raises(NotImplementedError, lambda: median(m))
# Below tests should work after issue #17267 is resolved
# assert E(X) == mu
# assert variance(X) == sigma
# test symbolic multivariate normal densities
n = 3
Sg = MatrixSymbol('Sg', n, n)
mu = MatrixSymbol('mu', n, 1)
obs = MatrixSymbol('obs', n, 1)
X = MultivariateNormal('X', mu, Sg)
density_X = density(X)
eval_a = density_X(obs).subs({Sg: eye(3),
mu: Matrix([0, 0, 0]), obs: Matrix([0, 0, 0])}).doit()
eval_b = density_X(0, 0, 0).subs({Sg: eye(3), mu: Matrix([0, 0, 0])}).doit()
    assert eval_a == sqrt(2)/(4*pi**Rational(3, 2))
    assert eval_b == sqrt(2)/(4*pi**Rational(3, 2))
n = symbols('n', integer=True, positive=True)
Sg = MatrixSymbol('Sg', n, n)
mu = MatrixSymbol('mu', n, 1)
obs = MatrixSymbol('obs', n, 1)
X = MultivariateNormal('X', mu, Sg)
density_X_at_obs = density(X)(obs)
expected_density = MatrixElement(
exp((S(1)/2) * (mu.T - obs.T) * Sg**(-1) * (-mu + obs)) / \
sqrt((2*pi)**n * Determinant(Sg)), 0, 0)
assert density_X_at_obs == expected_density
def test_MultivariateTDist():
t1 = MultivariateT('T', [0, 0], [[1, 0], [0, 1]], 2)
    assert density(t1)(1, 1) == 1/(8*pi)
assert t1.pspace.distribution.set == ProductSet(S.Reals, S.Reals)
assert integrate(density(t1)(x, y), (x, -oo, oo), \
(y, -oo, oo)).evalf() == 1
raises(ValueError, lambda: MultivariateT('T', [1, 2], [[1, 1], [1, -1]], 1))
t2 = MultivariateT('t2', [1, 2], [[x, 0], [0, y]], 1)
assert density(t2)(1, 2) == 1/(2*pi*sqrt(x*y))
def test_multivariate_laplace():
raises(ValueError, lambda: Laplace('T', [1, 2], [[1, 2], [2, 1]]))
L = Laplace('L', [1, 0], [[1, 0], [0, 1]])
L2 = MultivariateLaplace('L2', [1, 0], [[1, 0], [0, 1]])
assert density(L)(2, 3) == exp(2)*besselk(0, sqrt(39))/pi
L1 = Laplace('L1', [1, 2], [[x, 0], [0, y]])
assert density(L1)(0, 1) == \
exp(2/y)*besselk(0, sqrt((2 + 4/y + 1/x)/y))/(pi*sqrt(x*y))
assert L.pspace.distribution.set == ProductSet(S.Reals, S.Reals)
assert L.pspace.distribution == L2.pspace.distribution
def test_NormalGamma():
ng = NormalGamma('G', 1, 2, 3, 4)
assert density(ng)(1, 1) == 32*exp(-4)/sqrt(pi)
assert ng.pspace.distribution.set == ProductSet(S.Reals, Interval(0, oo))
    raises(ValueError, lambda: NormalGamma('G', 1, 2, 3, -1))
assert marginal_distribution(ng, 0)(1) == \
3*sqrt(10)*gamma(Rational(7, 4))/(10*sqrt(pi)*gamma(Rational(5, 4)))
assert marginal_distribution(ng, y)(1) == exp(Rational(-1, 4))/128
    assert marginal_distribution(ng, [0, 1])(x) == x**2*exp(-x/4)/128
def test_GeneralizedMultivariateLogGammaDistribution():
h = S.Half
omega = Matrix([[1, h, h, h],
[h, 1, h, h],
[h, h, 1, h],
[h, h, h, 1]])
v, l, mu = (4, [1, 2, 3, 4], [1, 2, 3, 4])
y_1, y_2, y_3, y_4 = symbols('y_1:5', real=True)
delta = symbols('d', positive=True)
G = GMVLGO('G', omega, v, l, mu)
Gd = GMVLG('Gd', delta, v, l, mu)
dend = ("d**4*Sum(4*24**(-n - 4)*(1 - d)**n*exp((n + 4)*(y_1 + 2*y_2 + 3*y_3 "
"+ 4*y_4) - exp(y_1) - exp(2*y_2)/2 - exp(3*y_3)/3 - exp(4*y_4)/4)/"
"(gamma(n + 1)*gamma(n + 4)**3), (n, 0, oo))")
assert str(density(Gd)(y_1, y_2, y_3, y_4)) == dend
den = ("5*2**(2/3)*5**(1/3)*Sum(4*24**(-n - 4)*(-2**(2/3)*5**(1/3)/4 + 1)**n*"
"exp((n + 4)*(y_1 + 2*y_2 + 3*y_3 + 4*y_4) - exp(y_1) - exp(2*y_2)/2 - "
"exp(3*y_3)/3 - exp(4*y_4)/4)/(gamma(n + 1)*gamma(n + 4)**3), (n, 0, oo))/64")
assert str(density(G)(y_1, y_2, y_3, y_4)) == den
marg = ("5*2**(2/3)*5**(1/3)*exp(4*y_1)*exp(-exp(y_1))*Integral(exp(-exp(4*G[3])"
"/4)*exp(16*G[3])*Integral(exp(-exp(3*G[2])/3)*exp(12*G[2])*Integral(exp("
"-exp(2*G[1])/2)*exp(8*G[1])*Sum((-1/4)**n*(-4 + 2**(2/3)*5**(1/3"
"))**n*exp(n*y_1)*exp(2*n*G[1])*exp(3*n*G[2])*exp(4*n*G[3])/(24**n*gamma(n + 1)"
"*gamma(n + 4)**3), (n, 0, oo)), (G[1], -oo, oo)), (G[2], -oo, oo)), (G[3]"
", -oo, oo))/5308416")
assert str(marginal_distribution(G, G[0])(y_1)) == marg
omega_f1 = Matrix([[1, h, h]])
omega_f2 = Matrix([[1, h, h, h],
[h, 1, 2, h],
[h, h, 1, h],
[h, h, h, 1]])
omega_f3 = Matrix([[6, h, h, h],
[h, 1, 2, h],
[h, h, 1, h],
[h, h, h, 1]])
v_f = symbols("v_f", positive=False, real=True)
l_f = [1, 2, v_f, 4]
m_f = [v_f, 2, 3, 4]
omega_f4 = Matrix([[1, h, h, h, h],
[h, 1, h, h, h],
[h, h, 1, h, h],
[h, h, h, 1, h],
[h, h, h, h, 1]])
l_f1 = [1, 2, 3, 4, 5]
omega_f5 = Matrix([[1]])
mu_f5 = l_f5 = [1]
raises(ValueError, lambda: GMVLGO('G', omega_f1, v, l, mu))
raises(ValueError, lambda: GMVLGO('G', omega_f2, v, l, mu))
raises(ValueError, lambda: GMVLGO('G', omega_f3, v, l, mu))
raises(ValueError, lambda: GMVLGO('G', omega, v_f, l, mu))
raises(ValueError, lambda: GMVLGO('G', omega, v, l_f, mu))
raises(ValueError, lambda: GMVLGO('G', omega, v, l, m_f))
raises(ValueError, lambda: GMVLGO('G', omega_f4, v, l, mu))
raises(ValueError, lambda: GMVLGO('G', omega, v, l_f1, mu))
raises(ValueError, lambda: GMVLGO('G', omega_f5, v, l_f5, mu_f5))
raises(ValueError, lambda: GMVLG('G', Rational(3, 2), v, l, mu))
def test_MultivariateBeta():
a1, a2 = symbols('a1, a2', positive=True)
a1_f, a2_f = symbols('a1, a2', positive=False, real=True)
mb = MultivariateBeta('B', [a1, a2])
mb_c = MultivariateBeta('C', a1, a2)
assert density(mb)(1, 2) == S(2)**(a2 - 1)*gamma(a1 + a2)/\
(gamma(a1)*gamma(a2))
assert marginal_distribution(mb_c, 0)(3) == S(3)**(a1 - 1)*gamma(a1 + a2)/\
(a2*gamma(a1)*gamma(a2))
raises(ValueError, lambda: MultivariateBeta('b1', [a1_f, a2]))
raises(ValueError, lambda: MultivariateBeta('b2', [a1, a2_f]))
raises(ValueError, lambda: MultivariateBeta('b3', [0, 0]))
raises(ValueError, lambda: MultivariateBeta('b4', [a1_f, a2_f]))
assert mb.pspace.distribution.set == ProductSet(Interval(0, 1), Interval(0, 1))
def test_MultivariateEwens():
n, theta, i = symbols('n theta i', positive=True)
# tests for integer dimensions
theta_f = symbols('t_f', negative=True)
    a = symbols('a_1:4', positive=True, integer=True)
ed = MultivariateEwens('E', 3, theta)
assert density(ed)(a[0], a[1], a[2]) == Piecewise((6*2**(-a[1])*3**(-a[2])*
theta**a[0]*theta**a[1]*theta**a[2]/
(theta*(theta + 1)*(theta + 2)*
factorial(a[0])*factorial(a[1])*
factorial(a[2])), Eq(a[0] + 2*a[1] +
3*a[2], 3)), (0, True))
assert marginal_distribution(ed, ed[1])(a[1]) == Piecewise((6*2**(-a[1])*
theta**a[1]/((theta + 1)*
(theta + 2)*factorial(a[1])),
Eq(2*a[1] + 1, 3)), (0, True))
raises(ValueError, lambda: MultivariateEwens('e1', 5, theta_f))
assert ed.pspace.distribution.set == ProductSet(Range(0, 4, 1),
Range(0, 2, 1), Range(0, 2, 1))
# tests for symbolic dimensions
eds = MultivariateEwens('E', n, theta)
a = IndexedBase('a')
j, k = symbols('j, k')
den = Piecewise((factorial(n)*Product(theta**a[j]*(j + 1)**(-a[j])/
factorial(a[j]), (j, 0, n - 1))/RisingFactorial(theta, n),
Eq(n, Sum((k + 1)*a[k], (k, 0, n - 1)))), (0, True))
assert density(eds)(a).dummy_eq(den)
def test_Multinomial():
n, x1, x2, x3, x4 = symbols('n, x1, x2, x3, x4', nonnegative=True, integer=True)
p1, p2, p3, p4 = symbols('p1, p2, p3, p4', positive=True)
p1_f, n_f = symbols('p1_f, n_f', negative=True)
M = Multinomial('M', n, [p1, p2, p3, p4])
C = Multinomial('C', 3, p1, p2, p3)
f = factorial
assert density(M)(x1, x2, x3, x4) == Piecewise((p1**x1*p2**x2*p3**x3*p4**x4*
f(n)/(f(x1)*f(x2)*f(x3)*f(x4)),
Eq(n, x1 + x2 + x3 + x4)), (0, True))
assert marginal_distribution(C, C[0])(x1).subs(x1, 1) ==\
3*p1*p2**2 +\
6*p1*p2*p3 +\
3*p1*p3**2
raises(ValueError, lambda: Multinomial('b1', 5, [p1, p2, p3, p1_f]))
raises(ValueError, lambda: Multinomial('b2', n_f, [p1, p2, p3, p4]))
raises(ValueError, lambda: Multinomial('b3', n, 0.5, 0.4, 0.3, 0.1))
def test_NegativeMultinomial():
k0, x1, x2, x3, x4 = symbols('k0, x1, x2, x3, x4', nonnegative=True, integer=True)
p1, p2, p3, p4 = symbols('p1, p2, p3, p4', positive=True)
p1_f = symbols('p1_f', negative=True)
N = NegativeMultinomial('N', 4, [p1, p2, p3, p4])
C = NegativeMultinomial('C', 4, 0.1, 0.2, 0.3)
g = gamma
f = factorial
assert simplify(density(N)(x1, x2, x3, x4) -
p1**x1*p2**x2*p3**x3*p4**x4*(-p1 - p2 - p3 - p4 + 1)**4*g(x1 + x2 +
x3 + x4 + 4)/(6*f(x1)*f(x2)*f(x3)*f(x4))) is S.Zero
assert comp(marginal_distribution(C, C[0])(1).evalf(), 0.33, .01)
raises(ValueError, lambda: NegativeMultinomial('b1', 5, [p1, p2, p3, p1_f]))
raises(ValueError, lambda: NegativeMultinomial('b2', k0, 0.5, 0.4, 0.3, 0.4))
assert N.pspace.distribution.set == ProductSet(Range(0, oo, 1),
Range(0, oo, 1), Range(0, oo, 1), Range(0, oo, 1))
def test_JointPSpace_marginal_distribution():
T = MultivariateT('T', [0, 0], [[1, 0], [0, 1]], 2)
assert marginal_distribution(T, T[1])(x) == sqrt(2)*(x**2 + 2)/(
8*polar_lift(x**2/2 + 1)**Rational(5, 2))
assert integrate(marginal_distribution(T, 1)(x), (x, -oo, oo)) == 1
t = MultivariateT('T', [0, 0, 0], [[1, 0, 0], [0, 1, 0], [0, 0, 1]], 3)
assert comp(marginal_distribution(t, 0)(1).evalf(), 0.2, .01)
def test_JointRV():
x1, x2 = (Indexed('x', i) for i in (1, 2))
pdf = exp(-x1**2/2 + x1 - x2**2/2 - S.Half)/(2*pi)
X = JointRV('x', pdf)
assert density(X)(1, 2) == exp(-2)/(2*pi)
assert isinstance(X.pspace.distribution, JointDistributionHandmade)
assert marginal_distribution(X, 0)(2) == sqrt(2)*exp(Rational(-1, 2))/(2*sqrt(pi))
def test_expectation():
m = Normal('A', [x, y], [[1, 0], [0, 1]])
assert simplify(E(m[1])) == y
@XFAIL
def test_joint_vector_expectation():
m = Normal('A', [x, y], [[1, 0], [0, 1]])
assert E(m) == (x, y)
def test_sample_numpy():
distribs_numpy = [
MultivariateNormal("M", [3, 4], [[2, 1], [1, 2]]),
MultivariateBeta("B", [0.4, 5, 15, 50, 203]),
Multinomial("N", 50, [0.3, 0.2, 0.1, 0.25, 0.15])
]
size = 3
numpy = import_module('numpy')
if not numpy:
skip('Numpy is not installed. Abort tests for _sample_numpy.')
else:
for X in distribs_numpy:
samps = sample(X, size=size, library='numpy')
for sam in samps:
assert tuple(sam) in X.pspace.distribution.set
N_c = NegativeMultinomial('N', 3, 0.1, 0.1, 0.1)
raises(NotImplementedError, lambda: sample(N_c, library='numpy'))
def test_sample_scipy():
distribs_scipy = [
MultivariateNormal("M", [0, 0], [[0.1, 0.025], [0.025, 0.1]]),
MultivariateBeta("B", [0.4, 5, 15]),
Multinomial("N", 8, [0.3, 0.2, 0.1, 0.4])
]
size = 3
scipy = import_module('scipy')
if not scipy:
skip('Scipy not installed. Abort tests for _sample_scipy.')
else:
for X in distribs_scipy:
samps = sample(X, size=size)
samps2 = sample(X, size=(2, 2))
for sam in samps:
assert tuple(sam) in X.pspace.distribution.set
for i in range(2):
for j in range(2):
assert tuple(samps2[i][j]) in X.pspace.distribution.set
N_c = NegativeMultinomial('N', 3, 0.1, 0.1, 0.1)
raises(NotImplementedError, lambda: sample(N_c))
def test_sample_pymc3():
distribs_pymc3 = [
MultivariateNormal("M", [5, 2], [[1, 0], [0, 1]]),
MultivariateBeta("B", [0.4, 5, 15]),
Multinomial("N", 4, [0.3, 0.2, 0.1, 0.4])
]
size = 3
pymc3 = import_module('pymc3')
if not pymc3:
skip('PyMC3 is not installed. Abort tests for _sample_pymc3.')
else:
for X in distribs_pymc3:
samps = sample(X, size=size, library='pymc3')
for sam in samps:
assert tuple(sam.flatten()) in X.pspace.distribution.set
N_c = NegativeMultinomial('N', 3, 0.1, 0.1, 0.1)
raises(NotImplementedError, lambda: sample(N_c, library='pymc3'))
def test_sample_seed():
x1, x2 = (Indexed('x', i) for i in (1, 2))
pdf = exp(-x1**2/2 + x1 - x2**2/2 - S.Half)/(2*pi)
X = JointRV('x', pdf)
libraries = ['scipy', 'numpy', 'pymc3']
for lib in libraries:
try:
imported_lib = import_module(lib)
if imported_lib:
s0, s1, s2 = [], [], []
s0 = sample(X, size=10, library=lib, seed=0)
s1 = sample(X, size=10, library=lib, seed=0)
s2 = sample(X, size=10, library=lib, seed=1)
assert all(s0 == s1)
assert all(s1 != s2)
except NotImplementedError:
continue
def test_issue_21057():
m = Normal("x", [0, 0], [[0, 0], [0, 0]])
n = MultivariateNormal("x", [0, 0], [[0, 0], [0, 0]])
p = Normal("x", [0, 0], [[0, 0], [0, 1]])
assert m == n
libraries = ['scipy', 'numpy', 'pymc3']
for library in libraries:
try:
imported_lib = import_module(library)
if imported_lib:
s1 = sample(m, size=8)
s2 = sample(n, size=8)
s3 = sample(p, size=8)
assert tuple(s1.flatten()) == tuple(s2.flatten())
for s in s3:
assert tuple(s.flatten()) in p.pspace.distribution.set
except NotImplementedError:
continue
|
0a9967c34e8a8bde98ff87f1e81bbb9545e3c6821f5fbb4d25ab26f523b3e704 | from sympy.concrete.summations import Sum
from sympy.core.add import Add
from sympy.core.mul import Mul
from sympy.core.numbers import (Integer, oo, pi)
from sympy.core.power import Pow
from sympy.core.relational import (Eq, Ne)
from sympy.core.symbol import (Dummy, Symbol, symbols)
from sympy.functions.combinatorial.factorials import factorial
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.special.delta_functions import DiracDelta
from sympy.functions.special.gamma_functions import gamma
from sympy.integrals.integrals import Integral
from sympy.simplify.simplify import simplify
from sympy.tensor.indexed import (Indexed, IndexedBase)
from sympy.functions.elementary.piecewise import ExprCondPair
from sympy.stats import (Poisson, Beta, Exponential, P,
Multinomial, MultivariateBeta)
from sympy.stats.crv_types import Normal
from sympy.stats.drv_types import PoissonDistribution
from sympy.stats.compound_rv import CompoundPSpace, CompoundDistribution
from sympy.stats.joint_rv import MarginalDistribution
from sympy.stats.rv import pspace, density
from sympy.testing.pytest import ignore_warnings
def test_density():
x = Symbol('x')
l = Symbol('l', positive=True)
rate = Beta(l, 2, 3)
X = Poisson(x, rate)
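    # A Poisson variable whose rate is itself random gives a compound probability space.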
assert isinstance(pspace(X), CompoundPSpace)
assert density(X, Eq(rate, rate.symbol)) == PoissonDistribution(l)
N1 = Normal('N1', 0, 1)
N2 = Normal('N2', N1, 2)
assert density(N2)(0).doit() == sqrt(10)/(10*sqrt(pi))
assert simplify(density(N2, Eq(N1, 1))(x)) == \
sqrt(2)*exp(-(x - 1)**2/8)/(4*sqrt(pi))
assert simplify(density(N2)(x)) == sqrt(10)*exp(-x**2/10)/(10*sqrt(pi))
def test_MarginalDistribution():
a1, p1, p2 = symbols('a1 p1 p2', positive=True)
C = Multinomial('C', 2, p1, p2)
B = MultivariateBeta('B', a1, C[0])
MGR = MarginalDistribution(B, (C[0],))
mgrc = Mul(Symbol('B'), Piecewise(ExprCondPair(Mul(Integer(2),
Pow(Symbol('p1', positive=True), Indexed(IndexedBase(Symbol('C')),
Integer(0))), Pow(Symbol('p2', positive=True),
Indexed(IndexedBase(Symbol('C')), Integer(1))),
Pow(factorial(Indexed(IndexedBase(Symbol('C')), Integer(0))), Integer(-1)),
Pow(factorial(Indexed(IndexedBase(Symbol('C')), Integer(1))), Integer(-1))),
Eq(Add(Indexed(IndexedBase(Symbol('C')), Integer(0)),
Indexed(IndexedBase(Symbol('C')), Integer(1))), Integer(2))),
ExprCondPair(Integer(0), True)), Pow(gamma(Symbol('a1', positive=True)),
Integer(-1)), gamma(Add(Symbol('a1', positive=True),
Indexed(IndexedBase(Symbol('C')), Integer(0)))),
Pow(gamma(Indexed(IndexedBase(Symbol('C')), Integer(0))), Integer(-1)),
Pow(Indexed(IndexedBase(Symbol('B')), Integer(0)),
Add(Symbol('a1', positive=True), Integer(-1))),
Pow(Indexed(IndexedBase(Symbol('B')), Integer(1)),
Add(Indexed(IndexedBase(Symbol('C')), Integer(0)), Integer(-1))))
assert MGR(C) == mgrc
def test_compound_distribution():
Y = Poisson('Y', 1)
Z = Poisson('Z', Y)
assert isinstance(pspace(Z), CompoundPSpace)
assert isinstance(pspace(Z).distribution, CompoundDistribution)
assert Z.pspace.distribution.pdf(1).doit() == exp(-2)*exp(exp(-1))
def test_mix_expression():
Y, E = Poisson('Y', 1), Exponential('E', 1)
k = Dummy('k')
expr1 = Integral(Sum(exp(-1)*Integral(exp(-k)*DiracDelta(k - 2), (k, 0, oo)
)/factorial(k), (k, 0, oo)), (k, -oo, 0))
expr2 = Integral(Sum(exp(-1)*Integral(exp(-k)*DiracDelta(k - 2), (k, 0, oo)
)/factorial(k), (k, 0, oo)), (k, 0, oo))
assert P(Eq(Y + E, 1)) == 0
assert P(Ne(Y + E, 2)) == 1
with ignore_warnings(UserWarning): ### TODO: Restore tests once warnings are removed
assert P(E + Y < 2, evaluate=False).rewrite(Integral).dummy_eq(expr1)
assert P(E + Y > 2, evaluate=False).rewrite(Integral).dummy_eq(expr2)
|
8022093547c991fc8e0be5c4486e9ffc435a3300d1b0f886f7ad9097f1ca0ec6 | from sympy.concrete.summations import Sum
from sympy.core.mul import Mul
from sympy.core.numbers import (oo, pi)
from sympy.core.relational import Eq
from sympy.core.symbol import (Dummy, symbols)
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import sin
from sympy.integrals.integrals import Integral
from sympy.core.expr import unchanged
from sympy.stats import (Normal, Poisson, variance, Covariance, Variance,
Probability, Expectation, Moment, CentralMoment)
from sympy.stats.rv import probability, expectation
def test_literal_probability():
X = Normal('X', 2, 3)
Y = Normal('Y', 3, 4)
Z = Poisson('Z', 4)
W = Poisson('W', 3)
x = symbols('x', real=True)
y, w, z = symbols('y, w, z')
assert Probability(X > 0).evaluate_integral() == probability(X > 0)
assert Probability(X > x).evaluate_integral() == probability(X > x)
assert Probability(X > 0).rewrite(Integral).doit() == probability(X > 0)
assert Probability(X > x).rewrite(Integral).doit() == probability(X > x)
assert Expectation(X).evaluate_integral() == expectation(X)
assert Expectation(X).rewrite(Integral).doit() == expectation(X)
assert Expectation(X**2).evaluate_integral() == expectation(X**2)
assert Expectation(x*X).args == (x*X,)
assert Expectation(x*X).expand() == x*Expectation(X)
assert Expectation(2*X + 3*Y + z*X*Y).expand() == 2*Expectation(X) + 3*Expectation(Y) + z*Expectation(X*Y)
assert Expectation(2*X + 3*Y + z*X*Y).args == (2*X + 3*Y + z*X*Y,)
assert Expectation(sin(X)) == Expectation(sin(X)).expand()
assert Expectation(2*x*sin(X)*Y + y*X**2 + z*X*Y).expand() == 2*x*Expectation(sin(X)*Y) \
+ y*Expectation(X**2) + z*Expectation(X*Y)
assert Expectation(X + Y).expand() == Expectation(X) + Expectation(Y)
assert Expectation((X + Y)*(X - Y)).expand() == Expectation(X**2) - Expectation(Y**2)
assert Expectation((X + Y)*(X - Y)).expand().doit() == -12
assert Expectation(X + Y, evaluate=True).doit() == 5
assert Expectation(X + Expectation(Y)).doit() == 5
assert Expectation(X + Expectation(Y)).doit(deep=False) == 2 + Expectation(Expectation(Y))
assert Expectation(X + Expectation(Y + Expectation(2*X))).doit(deep=False) == 2 \
+ Expectation(Expectation(Y + Expectation(2*X)))
assert Expectation(X + Expectation(Y + Expectation(2*X))).doit() == 9
assert Expectation(Expectation(2*X)).doit() == 4
assert Expectation(Expectation(2*X)).doit(deep=False) == Expectation(2*X)
assert Expectation(4*Expectation(2*X)).doit(deep=False) == 4*Expectation(2*X)
assert Expectation((X + Y)**3).expand() == 3*Expectation(X*Y**2) +\
3*Expectation(X**2*Y) + Expectation(X**3) + Expectation(Y**3)
assert Expectation((X - Y)**3).expand() == 3*Expectation(X*Y**2) -\
3*Expectation(X**2*Y) + Expectation(X**3) - Expectation(Y**3)
assert Expectation((X - Y)**2).expand() == -2*Expectation(X*Y) +\
Expectation(X**2) + Expectation(Y**2)
assert Variance(w).args == (w,)
assert Variance(w).expand() == 0
assert Variance(X).evaluate_integral() == Variance(X).rewrite(Integral).doit() == variance(X)
assert Variance(X + z).args == (X + z,)
assert Variance(X + z).expand() == Variance(X)
assert Variance(X*Y).args == (Mul(X, Y),)
assert type(Variance(X*Y)) == Variance
assert Variance(z*X).expand() == z**2*Variance(X)
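    # Variance is not additive for dependent variables: Var(X + Y) = Var(X) + Var(Y) + 2*Cov(X, Y).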
assert Variance(X + Y).expand() == Variance(X) + Variance(Y) + 2*Covariance(X, Y)
assert Variance(X + Y + Z + W).expand() == (Variance(X) + Variance(Y) + Variance(Z) + Variance(W) +
2 * Covariance(X, Y) + 2 * Covariance(X, Z) + 2 * Covariance(X, W) +
2 * Covariance(Y, Z) + 2 * Covariance(Y, W) + 2 * Covariance(W, Z))
assert Variance(X**2).evaluate_integral() == variance(X**2)
assert unchanged(Variance, X**2)
assert Variance(x*X**2).expand() == x**2*Variance(X**2)
assert Variance(sin(X)).args == (sin(X),)
assert Variance(sin(X)).expand() == Variance(sin(X))
assert Variance(x*sin(X)).expand() == x**2*Variance(sin(X))
assert Covariance(w, z).args == (w, z)
assert Covariance(w, z).expand() == 0
assert Covariance(X, w).expand() == 0
assert Covariance(w, X).expand() == 0
assert Covariance(X, Y).args == (X, Y)
assert type(Covariance(X, Y)) == Covariance
assert Covariance(z*X + 3, Y).expand() == z*Covariance(X, Y)
assert Covariance(X, X).args == (X, X)
assert Covariance(X, X).expand() == Variance(X)
assert Covariance(z*X + 3, w*Y + 4).expand() == w*z*Covariance(X,Y)
assert Covariance(X, Y) == Covariance(Y, X)
assert Covariance(X + Y, Z + W).expand() == Covariance(W, X) + Covariance(W, Y) + Covariance(X, Z) + Covariance(Y, Z)
assert Covariance(x*X + y*Y, z*Z + w*W).expand() == (x*w*Covariance(W, X) + w*y*Covariance(W, Y) +
x*z*Covariance(X, Z) + y*z*Covariance(Y, Z))
assert Covariance(x*X**2 + y*sin(Y), z*Y*Z**2 + w*W).expand() == (w*x*Covariance(W, X**2) + w*y*Covariance(sin(Y), W) +
x*z*Covariance(Y*Z**2, X**2) + y*z*Covariance(Y*Z**2, sin(Y)))
assert Covariance(X, X**2).expand() == Covariance(X, X**2)
assert Covariance(X, sin(X)).expand() == Covariance(sin(X), X)
assert Covariance(X**2, sin(X)*Y).expand() == Covariance(sin(X)*Y, X**2)
assert Covariance(w, X).evaluate_integral() == 0
def test_probability_rewrite():
X = Normal('X', 2, 3)
Y = Normal('Y', 3, 4)
Z = Poisson('Z', 4)
W = Poisson('W', 3)
x, y, w, z = symbols('x, y, w, z')
assert Variance(w).rewrite(Expectation) == 0
assert Variance(X).rewrite(Expectation) == Expectation(X ** 2) - Expectation(X) ** 2
assert Variance(X, condition=Y).rewrite(Expectation) == Expectation(X ** 2, Y) - Expectation(X, Y) ** 2
assert Variance(X, Y) != Expectation(X**2) - Expectation(X)**2
assert Variance(X + z).rewrite(Expectation) == Expectation((X + z) ** 2) - Expectation(X + z) ** 2
assert Variance(X * Y).rewrite(Expectation) == Expectation(X ** 2 * Y ** 2) - Expectation(X * Y) ** 2
assert Covariance(w, X).rewrite(Expectation) == -w*Expectation(X) + Expectation(w*X)
assert Covariance(X, Y).rewrite(Expectation) == Expectation(X*Y) - Expectation(X)*Expectation(Y)
assert Covariance(X, Y, condition=W).rewrite(Expectation) == Expectation(X * Y, W) - Expectation(X, W) * Expectation(Y, W)
w, x, z = symbols("W, x, z")
px = Probability(Eq(X, x))
pz = Probability(Eq(Z, z))
assert Expectation(X).rewrite(Probability) == Integral(x*px, (x, -oo, oo))
assert Expectation(Z).rewrite(Probability) == Sum(z*pz, (z, 0, oo))
assert Variance(X).rewrite(Probability) == Integral(x**2*px, (x, -oo, oo)) - Integral(x*px, (x, -oo, oo))**2
assert Variance(Z).rewrite(Probability) == Sum(z**2*pz, (z, 0, oo)) - Sum(z*pz, (z, 0, oo))**2
assert Covariance(w, X).rewrite(Probability) == \
-w*Integral(x*Probability(Eq(X, x)), (x, -oo, oo)) + Integral(w*x*Probability(Eq(X, x)), (x, -oo, oo))
# To test rewrite as sum function
assert Variance(X).rewrite(Sum) == Variance(X).rewrite(Integral)
assert Expectation(X).rewrite(Sum) == Expectation(X).rewrite(Integral)
assert Covariance(w, X).rewrite(Sum) == 0
assert Covariance(w, X).rewrite(Integral) == 0
assert Variance(X, condition=Y).rewrite(Probability) == Integral(x**2*Probability(Eq(X, x), Y), (x, -oo, oo)) - \
Integral(x*Probability(Eq(X, x), Y), (x, -oo, oo))**2
def test_symbolic_Moment():
mu = symbols('mu', real=True)
sigma = symbols('sigma', real=True, positive=True)
x = symbols('x')
X = Normal('X', mu, sigma)
M = Moment(X, 4, 2)
assert M.rewrite(Expectation) == Expectation((X - 2)**4)
assert M.rewrite(Probability) == Integral((x - 2)**4*Probability(Eq(X, x)),
(x, -oo, oo))
k = Dummy('k')
expri = Integral(sqrt(2)*(k - 2)**4*exp(-(k - \
mu)**2/(2*sigma**2))/(2*sqrt(pi)*sigma), (k, -oo, oo))
assert M.rewrite(Integral).dummy_eq(expri)
assert M.doit() == (mu**4 - 8*mu**3 + 6*mu**2*sigma**2 + \
24*mu**2 - 24*mu*sigma**2 - 32*mu + 3*sigma**4 + 24*sigma**2 + 16)
M = Moment(2, 5)
assert M.doit() == 2**5
def test_symbolic_CentralMoment():
mu = symbols('mu', real=True)
sigma = symbols('sigma', real=True, positive=True)
x = symbols('x')
X = Normal('X', mu, sigma)
CM = CentralMoment(X, 6)
assert CM.rewrite(Expectation) == Expectation((X - Expectation(X))**6)
assert CM.rewrite(Probability) == Integral((x - Integral(x*Probability(True),
(x, -oo, oo)))**6*Probability(Eq(X, x)), (x, -oo, oo))
k = Dummy('k')
expri = Integral(sqrt(2)*(k - Integral(sqrt(2)*k*exp(-(k - \
mu)**2/(2*sigma**2))/(2*sqrt(pi)*sigma), (k, -oo, oo)))**6*exp(-(k - \
mu)**2/(2*sigma**2))/(2*sqrt(pi)*sigma), (k, -oo, oo))
assert CM.rewrite(Integral).dummy_eq(expri)
assert CM.doit().simplify() == 15*sigma**6
CM = Moment(5, 5)
assert CM.doit() == 5**5
|
bc98394979ee0724f031a9a17ebfede7394f76ef875e342d2e8f767e33040527 | from sympy.concrete.summations import Sum
from sympy.core.containers import Tuple
from sympy.core.function import Lambda
from sympy.core.numbers import (Float, Rational, oo, pi)
from sympy.core.relational import (Eq, Ge, Gt, Le, Lt, Ne)
from sympy.core.singleton import S
from sympy.core.symbol import (Symbol, symbols)
from sympy.functions.combinatorial.factorials import factorial
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.integers import ceiling
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.special.error_functions import erf
from sympy.functions.special.gamma_functions import (gamma, lowergamma)
from sympy.logic.boolalg import (And, Not)
from sympy.matrices.dense import Matrix
from sympy.matrices.expressions.matexpr import MatrixSymbol
from sympy.matrices.immutable import ImmutableMatrix
from sympy.sets.contains import Contains
from sympy.sets.fancysets import Range
from sympy.sets.sets import (FiniteSet, Interval)
from sympy.stats import (DiscreteMarkovChain, P, TransitionMatrixOf, E,
StochasticStateSpaceOf, variance, ContinuousMarkovChain,
BernoulliProcess, PoissonProcess, WienerProcess,
GammaProcess, sample_stochastic_process)
from sympy.stats.joint_rv import JointDistribution
from sympy.stats.joint_rv_types import JointDistributionHandmade
from sympy.stats.rv import RandomIndexedSymbol
from sympy.stats.symbolic_probability import Probability, Expectation
from sympy.testing.pytest import raises, skip, ignore_warnings
from sympy.external import import_module
from sympy.stats.frv_types import BernoulliDistribution
from sympy.stats.drv_types import PoissonDistribution
from sympy.stats.crv_types import NormalDistribution, GammaDistribution
from sympy.core.symbol import Str
def test_DiscreteMarkovChain():
# pass only the name
X = DiscreteMarkovChain("X")
assert isinstance(X.state_space, Range)
assert X.index_set == S.Naturals0
assert isinstance(X.transition_probabilities, MatrixSymbol)
t = symbols('t', positive=True, integer=True)
assert isinstance(X[t], RandomIndexedSymbol)
assert E(X[0]) == Expectation(X[0])
raises(TypeError, lambda: DiscreteMarkovChain(1))
raises(NotImplementedError, lambda: X(t))
raises(NotImplementedError, lambda: X.communication_classes())
raises(NotImplementedError, lambda: X.canonical_form())
raises(NotImplementedError, lambda: X.decompose())
nz = Symbol('n', integer=True)
TZ = MatrixSymbol('M', nz, nz)
SZ = Range(nz)
YZ = DiscreteMarkovChain('Y', SZ, TZ)
assert P(Eq(YZ[2], 1), Eq(YZ[1], 0)) == TZ[0, 1]
raises(ValueError, lambda: sample_stochastic_process(t))
raises(ValueError, lambda: next(sample_stochastic_process(X)))
# pass name and state_space
# any hashable object should be a valid state
# states should be valid as a tuple/set/list/Tuple/Range
sym, rainy, cloudy, sunny = symbols('a Rainy Cloudy Sunny', real=True)
state_spaces = [(1, 2, 3), [Str('Hello'), sym, DiscreteMarkovChain],
Tuple(1, exp(sym), Str('World'), sympify=False), Range(-1, 5, 2),
[rainy, cloudy, sunny]]
chains = [DiscreteMarkovChain("Y", state_space) for state_space in state_spaces]
for i, Y in enumerate(chains):
assert isinstance(Y.transition_probabilities, MatrixSymbol)
assert Y.state_space == state_spaces[i] or Y.state_space == FiniteSet(*state_spaces[i])
assert Y.number_of_states == 3
with ignore_warnings(UserWarning): # TODO: Restore tests once warnings are removed
assert P(Eq(Y[2], 1), Eq(Y[0], 2), evaluate=False) == Probability(Eq(Y[2], 1), Eq(Y[0], 2))
assert E(Y[0]) == Expectation(Y[0])
raises(ValueError, lambda: next(sample_stochastic_process(Y)))
raises(TypeError, lambda: DiscreteMarkovChain("Y", dict((1, 1))))
Y = DiscreteMarkovChain("Y", Range(1, t, 2))
assert Y.number_of_states == ceiling((t-1)/2)
# pass name and transition_probabilities
chains = [DiscreteMarkovChain("Y", trans_probs=Matrix([[]])),
DiscreteMarkovChain("Y", trans_probs=Matrix([[0, 1], [1, 0]])),
DiscreteMarkovChain("Y", trans_probs=Matrix([[pi, 1-pi], [sym, 1-sym]]))]
for Z in chains:
assert Z.number_of_states == Z.transition_probabilities.shape[0]
assert isinstance(Z.transition_probabilities, ImmutableMatrix)
# pass name, state_space and transition_probabilities
T = Matrix([[0.5, 0.2, 0.3],[0.2, 0.5, 0.3],[0.2, 0.3, 0.5]])
TS = MatrixSymbol('T', 3, 3)
Y = DiscreteMarkovChain("Y", [0, 1, 2], T)
YS = DiscreteMarkovChain("Y", ['One', 'Two', 3], TS)
assert Y.joint_distribution(1, Y[2], 3) == JointDistribution(Y[1], Y[2], Y[3])
raises(ValueError, lambda: Y.joint_distribution(Y[1].symbol, Y[2].symbol))
assert P(Eq(Y[3], 2), Eq(Y[1], 1)).round(2) == Float(0.36, 2)
assert (P(Eq(YS[3], 2), Eq(YS[1], 1)) -
(TS[0, 2]*TS[1, 0] + TS[1, 1]*TS[1, 2] + TS[1, 2]*TS[2, 2])).simplify() == 0
assert P(Eq(YS[1], 1), Eq(YS[2], 2)) == Probability(Eq(YS[1], 1))
assert P(Eq(YS[3], 3), Eq(YS[1], 1)) == TS[0, 2]*TS[1, 0] + TS[1, 1]*TS[1, 2] + TS[1, 2]*TS[2, 2]
TO = Matrix([[0.25, 0.75, 0],[0, 0.25, 0.75],[0.75, 0, 0.25]])
assert P(Eq(Y[3], 2), Eq(Y[1], 1) & TransitionMatrixOf(Y, TO)).round(3) == Float(0.375, 3)
with ignore_warnings(UserWarning): ### TODO: Restore tests once warnings are removed
assert E(Y[3], evaluate=False) == Expectation(Y[3])
assert E(Y[3], Eq(Y[2], 1)).round(2) == Float(1.1, 3)
TSO = MatrixSymbol('T', 4, 4)
raises(ValueError, lambda: str(P(Eq(YS[3], 2), Eq(YS[1], 1) & TransitionMatrixOf(YS, TSO))))
raises(TypeError, lambda: DiscreteMarkovChain("Z", [0, 1, 2], symbols('M')))
raises(ValueError, lambda: DiscreteMarkovChain("Z", [0, 1, 2], MatrixSymbol('T', 3, 4)))
raises(ValueError, lambda: E(Y[3], Eq(Y[2], 6)))
raises(ValueError, lambda: E(Y[2], Eq(Y[3], 1)))
# extended tests for probability queries
TO1 = Matrix([[Rational(1, 4), Rational(3, 4), 0],[Rational(1, 3), Rational(1, 3), Rational(1, 3)],[0, Rational(1, 4), Rational(3, 4)]])
assert P(And(Eq(Y[2], 1), Eq(Y[1], 1), Eq(Y[0], 0)),
Eq(Probability(Eq(Y[0], 0)), Rational(1, 4)) & TransitionMatrixOf(Y, TO1)) == Rational(1, 16)
assert P(And(Eq(Y[2], 1), Eq(Y[1], 1), Eq(Y[0], 0)), TransitionMatrixOf(Y, TO1)) == \
Probability(Eq(Y[0], 0))/4
assert P(Lt(X[1], 2) & Gt(X[1], 0), Eq(X[0], 2) &
StochasticStateSpaceOf(X, [0, 1, 2]) & TransitionMatrixOf(X, TO1)) == Rational(1, 4)
assert P(Lt(X[1], 2) & Gt(X[1], 0), Eq(X[0], 2) &
StochasticStateSpaceOf(X, [None, 'None', 1]) & TransitionMatrixOf(X, TO1)) == Rational(1, 4)
assert P(Ne(X[1], 2) & Ne(X[1], 1), Eq(X[0], 2) &
StochasticStateSpaceOf(X, [0, 1, 2]) & TransitionMatrixOf(X, TO1)) is S.Zero
assert P(Ne(X[1], 2) & Ne(X[1], 1), Eq(X[0], 2) &
StochasticStateSpaceOf(X, [None, 'None', 1]) & TransitionMatrixOf(X, TO1)) is S.Zero
assert P(And(Eq(Y[2], 1), Eq(Y[1], 1), Eq(Y[0], 0)), Eq(Y[1], 1)) == 0.1*Probability(Eq(Y[0], 0))
# testing properties of Markov chain
TO2 = Matrix([[S.One, 0, 0],[Rational(1, 3), Rational(1, 3), Rational(1, 3)],[0, Rational(1, 4), Rational(3, 4)]])
TO3 = Matrix([[Rational(1, 4), Rational(3, 4), 0],[Rational(1, 3), Rational(1, 3), Rational(1, 3)], [0, Rational(1, 4), Rational(3, 4)]])
Y2 = DiscreteMarkovChain('Y', trans_probs=TO2)
Y3 = DiscreteMarkovChain('Y', trans_probs=TO3)
assert Y3.fundamental_matrix() == ImmutableMatrix([[176, 81, -132], [36, 141, -52], [-44, -39, 208]])/125
assert Y2.is_absorbing_chain() == True
assert Y3.is_absorbing_chain() == False
assert Y2.canonical_form() == ([0, 1, 2], TO2)
assert Y3.canonical_form() == ([0, 1, 2], TO3)
assert Y2.decompose() == ([0, 1, 2], TO2[0:1, 0:1], TO2[1:3, 0:1], TO2[1:3, 1:3])
assert Y3.decompose() == ([0, 1, 2], TO3, Matrix(0, 3, []), Matrix(0, 0, []))
TO4 = Matrix([[Rational(1, 5), Rational(2, 5), Rational(2, 5)], [Rational(1, 10), S.Half, Rational(2, 5)], [Rational(3, 5), Rational(3, 10), Rational(1, 10)]])
Y4 = DiscreteMarkovChain('Y', trans_probs=TO4)
w = ImmutableMatrix([[Rational(11, 39), Rational(16, 39), Rational(4, 13)]])
assert Y4.limiting_distribution == w
assert Y4.is_regular() == True
assert Y4.is_ergodic() == True
TS1 = MatrixSymbol('T', 3, 3)
Y5 = DiscreteMarkovChain('Y', trans_probs=TS1)
assert Y5.limiting_distribution(w, TO4).doit() == True
assert Y5.stationary_distribution(condition_set=True).subs(TS1, TO4).contains(w).doit() == S.true
TO6 = Matrix([[S.One, 0, 0, 0, 0],[S.Half, 0, S.Half, 0, 0],[0, S.Half, 0, S.Half, 0], [0, 0, S.Half, 0, S.Half], [0, 0, 0, 0, 1]])
Y6 = DiscreteMarkovChain('Y', trans_probs=TO6)
assert Y6.fundamental_matrix() == ImmutableMatrix([[Rational(3, 2), S.One, S.Half], [S.One, S(2), S.One], [S.Half, S.One, Rational(3, 2)]])
assert Y6.absorbing_probabilities() == ImmutableMatrix([[Rational(3, 4), Rational(1, 4)], [S.Half, S.Half], [Rational(1, 4), Rational(3, 4)]])
TO7 = Matrix([[Rational(1, 2), Rational(1, 4), Rational(1, 4)], [Rational(1, 2), 0, Rational(1, 2)], [Rational(1, 4), Rational(1, 4), Rational(1, 2)]])
Y7 = DiscreteMarkovChain('Y', trans_probs=TO7)
assert Y7.is_absorbing_chain() == False
assert Y7.fundamental_matrix() == ImmutableMatrix([[Rational(86, 75), Rational(1, 25), Rational(-14, 75)],
[Rational(2, 25), Rational(21, 25), Rational(2, 25)],
[Rational(-14, 75), Rational(1, 25), Rational(86, 75)]])
# test for zero-sized matrix functionality
X = DiscreteMarkovChain('X', trans_probs=Matrix([[]]))
assert X.number_of_states == 0
assert X.stationary_distribution() == Matrix([[]])
assert X.communication_classes() == []
assert X.canonical_form() == ([], Matrix([[]]))
assert X.decompose() == ([], Matrix([[]]), Matrix([[]]), Matrix([[]]))
assert X.is_regular() == False
assert X.is_ergodic() == False
    # test communication_classes
# see https://drive.google.com/drive/folders/1HbxLlwwn2b3U8Lj7eb_ASIUb5vYaNIjg?usp=sharing
# tutorial 2.pdf
TO7 = Matrix([[0, 5, 5, 0, 0],
[0, 0, 0, 10, 0],
[5, 0, 5, 0, 0],
[0, 10, 0, 0, 0],
[0, 3, 0, 3, 4]])/10
Y7 = DiscreteMarkovChain('Y', trans_probs=TO7)
tuples = Y7.communication_classes()
classes, recurrence, periods = list(zip(*tuples))
assert classes == ([1, 3], [0, 2], [4])
assert recurrence == (True, False, False)
assert periods == (2, 1, 1)
TO8 = Matrix([[0, 0, 0, 10, 0, 0],
[5, 0, 5, 0, 0, 0],
[0, 4, 0, 0, 0, 6],
[10, 0, 0, 0, 0, 0],
[0, 10, 0, 0, 0, 0],
[0, 0, 0, 5, 5, 0]])/10
Y8 = DiscreteMarkovChain('Y', trans_probs=TO8)
tuples = Y8.communication_classes()
classes, recurrence, periods = list(zip(*tuples))
assert classes == ([0, 3], [1, 2, 5, 4])
assert recurrence == (True, False)
assert periods == (2, 2)
TO9 = Matrix([[2, 0, 0, 3, 0, 0, 3, 2, 0, 0],
[0, 10, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 2, 2, 0, 0, 0, 0, 0, 3, 3],
[0, 0, 0, 3, 0, 0, 6, 1, 0, 0],
[0, 0, 0, 0, 5, 5, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 10, 0, 0, 0, 0],
[4, 0, 0, 5, 0, 0, 1, 0, 0, 0],
[2, 0, 0, 4, 0, 0, 2, 2, 0, 0],
[3, 0, 1, 0, 0, 0, 0, 0, 4, 2],
[0, 0, 4, 0, 0, 0, 0, 0, 3, 3]])/10
Y9 = DiscreteMarkovChain('Y', trans_probs=TO9)
tuples = Y9.communication_classes()
classes, recurrence, periods = list(zip(*tuples))
assert classes == ([0, 3, 6, 7], [1], [2, 8, 9], [5], [4])
assert recurrence == (True, True, False, True, False)
assert periods == (1, 1, 1, 1, 1)
# test canonical form
# see https://www.dartmouth.edu/~chance/teaching_aids/books_articles/probability_book/Chapter11.pdf
# example 11.13
T = Matrix([[1, 0, 0, 0, 0],
[S(1) / 2, 0, S(1) / 2, 0, 0],
[0, S(1) / 2, 0, S(1) / 2, 0],
[0, 0, S(1) / 2, 0, S(1) / 2],
[0, 0, 0, 0, S(1)]])
DW = DiscreteMarkovChain('DW', [0, 1, 2, 3, 4], T)
states, A, B, C = DW.decompose()
assert states == [0, 4, 1, 2, 3]
assert A == Matrix([[1, 0], [0, 1]])
assert B == Matrix([[S(1)/2, 0], [0, 0], [0, S(1)/2]])
assert C == Matrix([[0, S(1)/2, 0], [S(1)/2, 0, S(1)/2], [0, S(1)/2, 0]])
states, new_matrix = DW.canonical_form()
assert states == [0, 4, 1, 2, 3]
assert new_matrix == Matrix([[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[S(1)/2, 0, 0, S(1)/2, 0],
[0, 0, S(1)/2, 0, S(1)/2],
[0, S(1)/2, 0, S(1)/2, 0]])
# test regular and ergodic
# https://www.dartmouth.edu/~chance/teaching_aids/books_articles/probability_book/Chapter11.pdf
T = Matrix([[0, 4, 0, 0, 0],
[1, 0, 3, 0, 0],
[0, 2, 0, 2, 0],
[0, 0, 3, 0, 1],
[0, 0, 0, 4, 0]])/4
X = DiscreteMarkovChain('X', trans_probs=T)
assert not X.is_regular()
assert X.is_ergodic()
T = Matrix([[0, 1], [1, 0]])
X = DiscreteMarkovChain('X', trans_probs=T)
assert not X.is_regular()
assert X.is_ergodic()
# http://www.math.wisc.edu/~valko/courses/331/MC2.pdf
T = Matrix([[2, 1, 1],
[2, 0, 2],
[1, 1, 2]])/4
X = DiscreteMarkovChain('X', trans_probs=T)
assert X.is_regular()
assert X.is_ergodic()
# https://docs.ufpr.br/~lucambio/CE222/1S2014/Kemeny-Snell1976.pdf
T = Matrix([[1, 1], [1, 1]])/2
X = DiscreteMarkovChain('X', trans_probs=T)
assert X.is_regular()
assert X.is_ergodic()
# test is_absorbing_chain
T = Matrix([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
X = DiscreteMarkovChain('X', trans_probs=T)
assert not X.is_absorbing_chain()
# https://en.wikipedia.org/wiki/Absorbing_Markov_chain
T = Matrix([[1, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 0, 1],
[0, 0, 0, 2]])/2
X = DiscreteMarkovChain('X', trans_probs=T)
assert X.is_absorbing_chain()
T = Matrix([[2, 0, 0, 0, 0],
[1, 0, 1, 0, 0],
[0, 1, 0, 1, 0],
[0, 0, 1, 0, 1],
[0, 0, 0, 0, 2]])/2
X = DiscreteMarkovChain('X', trans_probs=T)
assert X.is_absorbing_chain()
# test custom state space
Y10 = DiscreteMarkovChain('Y', [1, 2, 3], TO2)
tuples = Y10.communication_classes()
classes, recurrence, periods = list(zip(*tuples))
assert classes == ([1], [2, 3])
assert recurrence == (True, False)
assert periods == (1, 1)
assert Y10.canonical_form() == ([1, 2, 3], TO2)
assert Y10.decompose() == ([1, 2, 3], TO2[0:1, 0:1], TO2[1:3, 0:1], TO2[1:3, 1:3])
# testing miscellaneous queries
T = Matrix([[S.Half, Rational(1, 4), Rational(1, 4)],
[Rational(1, 3), 0, Rational(2, 3)],
[S.Half, S.Half, 0]])
X = DiscreteMarkovChain('X', [0, 1, 2], T)
assert P(Eq(X[1], 2) & Eq(X[2], 1) & Eq(X[3], 0),
Eq(P(Eq(X[1], 0)), Rational(1, 4)) & Eq(P(Eq(X[1], 1)), Rational(1, 4))) == Rational(1, 12)
assert P(Eq(X[2], 1) | Eq(X[2], 2), Eq(X[1], 1)) == Rational(2, 3)
assert P(Eq(X[2], 1) & Eq(X[2], 2), Eq(X[1], 1)) is S.Zero
assert P(Ne(X[2], 2), Eq(X[1], 1)) == Rational(1, 3)
assert E(X[1]**2, Eq(X[0], 1)) == Rational(8, 3)
assert variance(X[1], Eq(X[0], 1)) == Rational(8, 9)
raises(ValueError, lambda: E(X[1], Eq(X[2], 1)))
raises(ValueError, lambda: DiscreteMarkovChain('X', [0, 1], T))
# testing miscellaneous queries with different state space
X = DiscreteMarkovChain('X', ['A', 'B', 'C'], T)
assert P(Eq(X[1], 2) & Eq(X[2], 1) & Eq(X[3], 0),
Eq(P(Eq(X[1], 0)), Rational(1, 4)) & Eq(P(Eq(X[1], 1)), Rational(1, 4))) == Rational(1, 12)
assert P(Eq(X[2], 1) | Eq(X[2], 2), Eq(X[1], 1)) == Rational(2, 3)
assert P(Eq(X[2], 1) & Eq(X[2], 2), Eq(X[1], 1)) is S.Zero
assert P(Ne(X[2], 2), Eq(X[1], 1)) == Rational(1, 3)
a = X.state_space.args[0]
c = X.state_space.args[2]
assert (E(X[1] ** 2, Eq(X[0], 1)) - (a**2/3 + 2*c**2/3)).simplify() == 0
assert (variance(X[1], Eq(X[0], 1)) - (2*(-a/3 + c/3)**2/3 + (2*a/3 - 2*c/3)**2/3)).simplify() == 0
raises(ValueError, lambda: E(X[1], Eq(X[2], 1)))
#testing queries with multiple RandomIndexedSymbols
T = Matrix([[Rational(5, 10), Rational(3, 10), Rational(2, 10)], [Rational(2, 10), Rational(7, 10), Rational(1, 10)], [Rational(3, 10), Rational(3, 10), Rational(4, 10)]])
Y = DiscreteMarkovChain("Y", [0, 1, 2], T)
assert P(Eq(Y[7], Y[5]), Eq(Y[2], 0)).round(5) == Float(0.44428, 5)
assert P(Gt(Y[3], Y[1]), Eq(Y[0], 0)).round(2) == Float(0.36, 2)
assert P(Le(Y[5], Y[10]), Eq(Y[4], 2)).round(6) == Float(0.583120, 6)
assert Float(P(Eq(Y[10], Y[5]), Eq(Y[4], 1)), 14) == Float(1 - P(Ne(Y[10], Y[5]), Eq(Y[4], 1)), 14)
assert Float(P(Gt(Y[8], Y[9]), Eq(Y[3], 2)), 14) == Float(1 - P(Le(Y[8], Y[9]), Eq(Y[3], 2)), 14)
assert Float(P(Lt(Y[1], Y[4]), Eq(Y[0], 0)), 14) == Float(1 - P(Ge(Y[1], Y[4]), Eq(Y[0], 0)), 14)
assert P(Eq(Y[5], Y[10]), Eq(Y[2], 1)) == P(Eq(Y[10], Y[5]), Eq(Y[2], 1))
assert P(Gt(Y[1], Y[2]), Eq(Y[0], 1)) == P(Lt(Y[2], Y[1]), Eq(Y[0], 1))
assert P(Ge(Y[7], Y[6]), Eq(Y[4], 1)) == P(Le(Y[6], Y[7]), Eq(Y[4], 1))
#test symbolic queries
a, b, c, d = symbols('a b c d')
T = Matrix([[Rational(1, 10), Rational(4, 10), Rational(5, 10)], [Rational(3, 10), Rational(4, 10), Rational(3, 10)], [Rational(7, 10), Rational(2, 10), Rational(1, 10)]])
Y = DiscreteMarkovChain("Y", [0, 1, 2], T)
query = P(Eq(Y[a], b), Eq(Y[c], d))
assert query.subs({a:10, b:2, c:5, d:1}).evalf().round(4) == P(Eq(Y[10], 2), Eq(Y[5], 1)).round(4)
assert query.subs({a:15, b:0, c:10, d:1}).evalf().round(4) == P(Eq(Y[15], 0), Eq(Y[10], 1)).round(4)
query_gt = P(Gt(Y[a], b), Eq(Y[c], d))
query_le = P(Le(Y[a], b), Eq(Y[c], d))
assert query_gt.subs({a:5, b:2, c:1, d:0}).evalf() + query_le.subs({a:5, b:2, c:1, d:0}).evalf() == 1
query_ge = P(Ge(Y[a], b), Eq(Y[c], d))
query_lt = P(Lt(Y[a], b), Eq(Y[c], d))
assert query_ge.subs({a:4, b:1, c:0, d:2}).evalf() + query_lt.subs({a:4, b:1, c:0, d:2}).evalf() == 1
#test issue 20078
assert (2*Y[1] + 3*Y[1]).simplify() == 5*Y[1]
assert (2*Y[1] - 3*Y[1]).simplify() == -Y[1]
assert (2*(0.25*Y[1])).simplify() == 0.5*Y[1]
assert ((2*Y[1]) * (0.25*Y[1])).simplify() == 0.5*Y[1]**2
assert (Y[1]**2 + Y[1]**3).simplify() == (Y[1] + 1)*Y[1]**2
def test_sample_stochastic_process():
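    # sampling requires SciPy; every sample drawn should lie in the chain's state space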
if not import_module('scipy'):
        skip('SciPy not installed. Skipping sampling tests')
import random
random.seed(0)
numpy = import_module('numpy')
if numpy:
        numpy.random.seed(0) # scipy samples via numpy, so its seed must be set as well
T = Matrix([[0.5, 0.2, 0.3],[0.2, 0.5, 0.3],[0.2, 0.3, 0.5]])
Y = DiscreteMarkovChain("Y", [0, 1, 2], T)
for samps in range(10):
assert next(sample_stochastic_process(Y)) in Y.state_space
Z = DiscreteMarkovChain("Z", ['1', 1, 0], T)
for samps in range(10):
assert next(sample_stochastic_process(Z)) in Z.state_space
T = Matrix([[S.Half, Rational(1, 4), Rational(1, 4)],
[Rational(1, 3), 0, Rational(2, 3)],
[S.Half, S.Half, 0]])
X = DiscreteMarkovChain('X', [0, 1, 2], T)
for samps in range(10):
assert next(sample_stochastic_process(X)) in X.state_space
W = DiscreteMarkovChain('W', [1, pi, oo], T)
for samps in range(10):
assert next(sample_stochastic_process(W)) in W.state_space
def test_ContinuousMarkovChain():
T1 = Matrix([[S(-2), S(2), S.Zero],
[S.Zero, S.NegativeOne, S.One],
[Rational(3, 2), Rational(3, 2), S(-3)]])
C1 = ContinuousMarkovChain('C', [0, 1, 2], T1)
assert C1.limiting_distribution() == ImmutableMatrix([[Rational(3, 19), Rational(12, 19), Rational(4, 19)]])
T2 = Matrix([[-S.One, S.One, S.Zero], [S.One, -S.One, S.Zero], [S.Zero, S.One, -S.One]])
C2 = ContinuousMarkovChain('C', [0, 1, 2], T2)
A, t = C2.generator_matrix, symbols('t', positive=True)
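    # the transition-probability matrix at time t is the matrix exponential exp(t*A) of the generator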
assert C2.transition_probabilities(A)(t) == Matrix([[S.Half + exp(-2*t)/2, S.Half - exp(-2*t)/2, 0],
[S.Half - exp(-2*t)/2, S.Half + exp(-2*t)/2, 0],
[S.Half - exp(-t) + exp(-2*t)/2, S.Half - exp(-2*t)/2, exp(-t)]])
with ignore_warnings(UserWarning): ### TODO: Restore tests once warnings are removed
assert P(Eq(C2(1), 1), Eq(C2(0), 1), evaluate=False) == Probability(Eq(C2(1), 1), Eq(C2(0), 1))
assert P(Eq(C2(1), 1), Eq(C2(0), 1)) == exp(-2)/2 + S.Half
assert P(Eq(C2(1), 0) & Eq(C2(2), 1) & Eq(C2(3), 1),
Eq(P(Eq(C2(1), 0)), S.Half)) == (Rational(1, 4) - exp(-2)/4)*(exp(-2)/2 + S.Half)
assert P(Not(Eq(C2(1), 0) & Eq(C2(2), 1) & Eq(C2(3), 2)) |
(Eq(C2(1), 0) & Eq(C2(2), 1) & Eq(C2(3), 2)),
Eq(P(Eq(C2(1), 0)), Rational(1, 4)) & Eq(P(Eq(C2(1), 1)), Rational(1, 4))) is S.One
assert E(C2(Rational(3, 2)), Eq(C2(0), 2)) == -exp(-3)/2 + 2*exp(Rational(-3, 2)) + S.Half
assert variance(C2(Rational(3, 2)), Eq(C2(0), 1)) == ((S.Half - exp(-3)/2)**2*(exp(-3)/2 + S.Half)
+ (Rational(-1, 2) - exp(-3)/2)**2*(S.Half - exp(-3)/2))
raises(KeyError, lambda: P(Eq(C2(1), 0), Eq(P(Eq(C2(1), 1)), S.Half)))
assert P(Eq(C2(1), 0), Eq(P(Eq(C2(5), 1)), S.Half)) == Probability(Eq(C2(1), 0))
TS1 = MatrixSymbol('G', 3, 3)
CS1 = ContinuousMarkovChain('C', [0, 1, 2], TS1)
A = CS1.generator_matrix
assert CS1.transition_probabilities(A)(t) == exp(t*A)
C3 = ContinuousMarkovChain('C', [Symbol('0'), Symbol('1'), Symbol('2')], T2)
assert P(Eq(C3(1), 1), Eq(C3(0), 1)) == exp(-2)/2 + S.Half
assert P(Eq(C3(1), Symbol('1')), Eq(C3(0), Symbol('1'))) == exp(-2)/2 + S.Half
#test probability queries
G = Matrix([[-S(1), Rational(1, 10), Rational(9, 10)], [Rational(2, 5), -S(1), Rational(3, 5)], [Rational(1, 2), Rational(1, 2), -S(1)]])
C = ContinuousMarkovChain('C', state_space=[0, 1, 2], gen_mat=G)
assert P(Eq(C(7.385), C(3.19)), Eq(C(0.862), 0)).round(5) == Float(0.35469, 5)
assert P(Gt(C(98.715), C(19.807)), Eq(C(11.314), 2)).round(5) == Float(0.32452, 5)
assert P(Le(C(5.9), C(10.112)), Eq(C(4), 1)).round(6) == Float(0.675214, 6)
assert Float(P(Eq(C(7.32), C(2.91)), Eq(C(2.63), 1)), 14) == Float(1 - P(Ne(C(7.32), C(2.91)), Eq(C(2.63), 1)), 14)
assert Float(P(Gt(C(3.36), C(1.101)), Eq(C(0.8), 2)), 14) == Float(1 - P(Le(C(3.36), C(1.101)), Eq(C(0.8), 2)), 14)
assert Float(P(Lt(C(4.9), C(2.79)), Eq(C(1.61), 0)), 14) == Float(1 - P(Ge(C(4.9), C(2.79)), Eq(C(1.61), 0)), 14)
assert P(Eq(C(5.243), C(10.912)), Eq(C(2.174), 1)) == P(Eq(C(10.912), C(5.243)), Eq(C(2.174), 1))
assert P(Gt(C(2.344), C(9.9)), Eq(C(1.102), 1)) == P(Lt(C(9.9), C(2.344)), Eq(C(1.102), 1))
assert P(Ge(C(7.87), C(1.008)), Eq(C(0.153), 1)) == P(Le(C(1.008), C(7.87)), Eq(C(0.153), 1))
#test symbolic queries
a, b, c, d = symbols('a b c d')
query = P(Eq(C(a), b), Eq(C(c), d))
assert query.subs({a:3.65, b:2, c:1.78, d:1}).evalf().round(10) == P(Eq(C(3.65), 2), Eq(C(1.78), 1)).round(10)
query_gt = P(Gt(C(a), b), Eq(C(c), d))
query_le = P(Le(C(a), b), Eq(C(c), d))
assert query_gt.subs({a:13.2, b:0, c:3.29, d:2}).evalf() + query_le.subs({a:13.2, b:0, c:3.29, d:2}).evalf() == 1
query_ge = P(Ge(C(a), b), Eq(C(c), d))
query_lt = P(Lt(C(a), b), Eq(C(c), d))
assert query_ge.subs({a:7.43, b:1, c:1.45, d:0}).evalf() + query_lt.subs({a:7.43, b:1, c:1.45, d:0}).evalf() == 1
#test issue 20078
assert (2*C(1) + 3*C(1)).simplify() == 5*C(1)
assert (2*C(1) - 3*C(1)).simplify() == -C(1)
assert (2*(0.25*C(1))).simplify() == 0.5*C(1)
assert (2*C(1) * 0.25*C(1)).simplify() == 0.5*C(1)**2
assert (C(1)**2 + C(1)**3).simplify() == (C(1) + 1)*C(1)**2
def test_BernoulliProcess():
B = BernoulliProcess("B", p=0.6, success=1, failure=0)
assert B.state_space == FiniteSet(0, 1)
assert B.index_set == S.Naturals0
assert B.success == 1
assert B.failure == 0
X = BernoulliProcess("X", p=Rational(1,3), success='H', failure='T')
assert X.state_space == FiniteSet('H', 'T')
H, T = symbols("H,T")
assert E(X[1]+X[2]*X[3]) == H**2/9 + 4*H*T/9 + H/3 + 4*T**2/9 + 2*T/3
t, x = symbols('t, x', positive=True, integer=True)
assert isinstance(B[t], RandomIndexedSymbol)
raises(ValueError, lambda: BernoulliProcess("X", p=1.1, success=1, failure=0))
raises(NotImplementedError, lambda: B(t))
raises(IndexError, lambda: B[-3])
assert B.joint_distribution(B[3], B[9]) == JointDistributionHandmade(Lambda((B[3], B[9]),
Piecewise((0.6, Eq(B[3], 1)), (0.4, Eq(B[3], 0)), (0, True))
*Piecewise((0.6, Eq(B[9], 1)), (0.4, Eq(B[9], 0)), (0, True))))
assert B.joint_distribution(2, B[4]) == JointDistributionHandmade(Lambda((B[2], B[4]),
Piecewise((0.6, Eq(B[2], 1)), (0.4, Eq(B[2], 0)), (0, True))
*Piecewise((0.6, Eq(B[4], 1)), (0.4, Eq(B[4], 0)), (0, True))))
# Test for the sum distribution of Bernoulli Process RVs
Y = B[1] + B[2] + B[3]
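    # Y is a sum of three independent Bernoulli(0.6) variables, i.e. Binomial(3, 0.6)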
assert P(Eq(Y, 0)).round(2) == Float(0.06, 1)
assert P(Eq(Y, 2)).round(2) == Float(0.43, 2)
assert P(Eq(Y, 4)).round(2) == 0
assert P(Gt(Y, 1)).round(2) == Float(0.65, 2)
    # Test independence of the individual random indexed variables
assert P(Eq(B[1], 0) & Eq(B[2], 1) & Eq(B[3], 0) & Eq(B[4], 1)).round(2) == Float(0.06, 1)
assert E(2 * B[1] + B[2]).round(2) == Float(1.80, 3)
assert E(2 * B[1] + B[2] + 5).round(2) == Float(6.80, 3)
assert E(B[2] * B[4] + B[10]).round(2) == Float(0.96, 2)
assert E(B[2] > 0, Eq(B[1],1) & Eq(B[2],1)).round(2) == Float(0.60,2)
assert E(B[1]) == 0.6
assert P(B[1] > 0).round(2) == Float(0.60, 2)
assert P(B[1] < 1).round(2) == Float(0.40, 2)
assert P(B[1] > 0, B[2] <= 1).round(2) == Float(0.60, 2)
assert P(B[12] * B[5] > 0).round(2) == Float(0.36, 2)
assert P(B[12] * B[5] > 0, B[4] < 1).round(2) == Float(0.36, 2)
assert P(Eq(B[2], 1), B[2] > 0) == 1
assert P(Eq(B[5], 3)) == 0
assert P(Eq(B[1], 1), B[1] < 0) == 0
assert P(B[2] > 0, Eq(B[2], 1)) == 1
assert P(B[2] < 0, Eq(B[2], 1)) == 0
    assert P(B[2] > 0, B[2] == 7) == 0
assert P(B[5] > 0, B[5]) == BernoulliDistribution(0.6, 0, 1)
raises(ValueError, lambda: P(3))
raises(ValueError, lambda: P(B[3] > 0, 3))
# test issue 19456
expr = Sum(B[t], (t, 0, 4))
expr2 = Sum(B[t], (t, 1, 3))
expr3 = Sum(B[t]**2, (t, 1, 3))
assert expr.doit() == B[0] + B[1] + B[2] + B[3] + B[4]
assert expr2.doit() == Y
assert expr3.doit() == B[1]**2 + B[2]**2 + B[3]**2
assert B[2*t].free_symbols == {B[2*t], t}
assert B[4].free_symbols == {B[4]}
assert B[x*t].free_symbols == {B[x*t], x, t}
#test issue 20078
assert (2*B[t] + 3*B[t]).simplify() == 5*B[t]
assert (2*B[t] - 3*B[t]).simplify() == -B[t]
assert (2*(0.25*B[t])).simplify() == 0.5*B[t]
assert (2*B[t] * 0.25*B[t]).simplify() == 0.5*B[t]**2
assert (B[t]**2 + B[t]**3).simplify() == (B[t] + 1)*B[t]**2
def test_PoissonProcess():
X = PoissonProcess("X", 3)
assert X.state_space == S.Naturals0
assert X.index_set == Interval(0, oo)
assert X.lamda == 3
t, d, x, y = symbols('t d x y', positive=True)
assert isinstance(X(t), RandomIndexedSymbol)
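    # the number of arrivals up to time t follows Poisson(lamda*t)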
assert X.distribution(t) == PoissonDistribution(3*t)
raises(ValueError, lambda: PoissonProcess("X", -1))
raises(NotImplementedError, lambda: X[t])
raises(IndexError, lambda: X(-5))
assert X.joint_distribution(X(2), X(3)) == JointDistributionHandmade(Lambda((X(2), X(3)),
6**X(2)*9**X(3)*exp(-15)/(factorial(X(2))*factorial(X(3)))))
assert X.joint_distribution(4, 6) == JointDistributionHandmade(Lambda((X(4), X(6)),
12**X(4)*18**X(6)*exp(-30)/(factorial(X(4))*factorial(X(6)))))
assert P(X(t) < 1) == exp(-3*t)
assert P(Eq(X(t), 0), Contains(t, Interval.Lopen(3, 5))) == exp(-6) # exp(-2*lamda)
res = P(Eq(X(t), 1), Contains(t, Interval.Lopen(3, 4)))
assert res == 3*exp(-3)
# Equivalent to P(Eq(X(t), 1))**4 because of non-overlapping intervals
assert P(Eq(X(t), 1) & Eq(X(d), 1) & Eq(X(x), 1) & Eq(X(y), 1), Contains(t, Interval.Lopen(0, 1))
& Contains(d, Interval.Lopen(1, 2)) & Contains(x, Interval.Lopen(2, 3))
& Contains(y, Interval.Lopen(3, 4))) == res**4
# Return Probability because of overlapping intervals
assert P(Eq(X(t), 2) & Eq(X(d), 3), Contains(t, Interval.Lopen(0, 2))
& Contains(d, Interval.Ropen(2, 4))) == \
Probability(Eq(X(d), 3) & Eq(X(t), 2), Contains(t, Interval.Lopen(0, 2))
& Contains(d, Interval.Ropen(2, 4)))
raises(ValueError, lambda: P(Eq(X(t), 2) & Eq(X(d), 3),
Contains(t, Interval.Lopen(0, 4)) & Contains(d, Interval.Lopen(3, oo)))) # no bound on d
assert P(Eq(X(3), 2)) == 81*exp(-9)/2
assert P(Eq(X(t), 2), Contains(t, Interval.Lopen(0, 5))) == 225*exp(-15)/2
    # Check that the probabilities are consistent: complementary events sum to 1
res1 = P(X(t) <= 3, Contains(t, Interval.Lopen(0, 5)))
res2 = P(X(t) > 3, Contains(t, Interval.Lopen(0, 5)))
assert res1 == 691*exp(-15)
assert (res1 + res2).simplify() == 1
# Check Not and Or
assert P(Not(Eq(X(t), 2) & (X(d) > 3)), Contains(t, Interval.Ropen(2, 4)) & \
Contains(d, Interval.Lopen(7, 8))).simplify() == -18*exp(-6) + 234*exp(-9) + 1
assert P(Eq(X(t), 2) | Ne(X(t), 4), Contains(t, Interval.Ropen(2, 4))) == 1 - 36*exp(-6)
raises(ValueError, lambda: P(X(t) > 2, X(t) + X(d)))
assert E(X(t)) == 3*t # property of the distribution at a given timestamp
assert E(X(t)**2 + X(d)*2 + X(y)**3, Contains(t, Interval.Lopen(0, 1))
& Contains(d, Interval.Lopen(1, 2)) & Contains(y, Interval.Ropen(3, 4))) == 75
assert E(X(t)**2, Contains(t, Interval.Lopen(0, 1))) == 12
assert E(x*(X(t) + X(d))*(X(t)**2+X(d)**2), Contains(t, Interval.Lopen(0, 1))
& Contains(d, Interval.Ropen(1, 2))) == \
Expectation(x*(X(d) + X(t))*(X(d)**2 + X(t)**2), Contains(t, Interval.Lopen(0, 1))
& Contains(d, Interval.Ropen(1, 2)))
    # ValueError because of the infinite time bound
raises(ValueError, lambda: E(X(t)**3, Contains(t, Interval.Lopen(1, oo))))
# Equivalent to E(X(t)**2) - E(X(d)**2) == E(X(1)**2) - E(X(1)**2) == 0
assert E((X(t) + X(d))*(X(t) - X(d)), Contains(t, Interval.Lopen(0, 1))
& Contains(d, Interval.Lopen(1, 2))) == 0
assert E(X(2) + x*E(X(5))) == 15*x + 6
assert E(x*X(1) + y) == 3*x + y
assert P(Eq(X(1), 2) & Eq(X(t), 3), Contains(t, Interval.Lopen(1, 2))) == 81*exp(-6)/4
Y = PoissonProcess("Y", 6)
Z = X + Y
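    # superposition: the sum of independent Poisson processes is Poisson with the sum of the rates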
assert Z.lamda == X.lamda + Y.lamda == 9
    raises(ValueError, lambda: X + 5) # only a PoissonProcess instance can be added
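    # splitting (thinning) a Poisson process yields processes whose rates sum to the original rate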
N, M = Z.split(4, 5)
assert N.lamda == 4
assert M.lamda == 5
raises(ValueError, lambda: Z.split(3, 2)) # 2+3 != 9
    raises(ValueError, lambda: P(Eq(X(t), 0), Contains(t, Interval.Lopen(1, 3)) & Eq(X(1), 0)))
    # check that queries with two random variables in one argument are handled
res1 = P(Eq(N(3), N(5)))
assert res1 == P(Eq(N(t), 0), Contains(t, Interval(3, 5)))
res2 = P(N(3) > N(1))
assert res2 == P((N(t) > 0), Contains(t, Interval(1, 3)))
assert P(N(3) < N(1)) == 0 # condition is not possible
res3 = P(N(3) <= N(1)) # holds only for Eq(N(3), N(1))
assert res3 == P(Eq(N(t), 0), Contains(t, Interval(1, 3)))
# tests from https://www.probabilitycourse.com/chapter11/11_1_2_basic_concepts_of_the_poisson_process.php
X = PoissonProcess('X', 10) # 11.1
assert P(Eq(X(S(1)/3), 3) & Eq(X(1), 10)) == exp(-10)*Rational(8000000000, 11160261)
assert P(Eq(X(1), 1), Eq(X(S(1)/3), 3)) == 0
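    # stationary, independent increments: P(X(1) = 10 | X(1/3) = 3) equals P(X(2/3) = 7)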
assert P(Eq(X(1), 10), Eq(X(S(1)/3), 3)) == P(Eq(X(S(2)/3), 7))
X = PoissonProcess('X', 2) # 11.2
assert P(X(S(1)/2) < 1) == exp(-1)
assert P(X(3) < 1, Eq(X(1), 0)) == exp(-4)
assert P(Eq(X(4), 3), Eq(X(2), 3)) == exp(-4)
X = PoissonProcess('X', 3)
assert P(Eq(X(2), 5) & Eq(X(1), 2)) == Rational(81, 4)*exp(-6)
    # check a few properties
assert P(X(2) <= 3, X(1)>=1) == 3*P(Eq(X(1), 0)) + 2*P(Eq(X(1), 1)) + P(Eq(X(1), 2))
assert P(X(2) <= 3, X(1) > 1) == 2*P(Eq(X(1), 0)) + 1*P(Eq(X(1), 1))
assert P(Eq(X(2), 5) & Eq(X(1), 2)) == P(Eq(X(1), 3))*P(Eq(X(1), 2))
assert P(Eq(X(3), 4), Eq(X(1), 3)) == P(Eq(X(2), 1))
#test issue 20078
assert (2*X(t) + 3*X(t)).simplify() == 5*X(t)
assert (2*X(t) - 3*X(t)).simplify() == -X(t)
assert (2*(0.25*X(t))).simplify() == 0.5*X(t)
assert (2*X(t) * 0.25*X(t)).simplify() == 0.5*X(t)**2
assert (X(t)**2 + X(t)**3).simplify() == (X(t) + 1)*X(t)**2
def test_WienerProcess():
X = WienerProcess("X")
assert X.state_space == S.Reals
assert X.index_set == Interval(0, oo)
t, d, x, y = symbols('t d x y', positive=True)
assert isinstance(X(t), RandomIndexedSymbol)
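    # X(t) is normally distributed with mean 0 and standard deviation sqrt(t)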
assert X.distribution(t) == NormalDistribution(0, sqrt(t))
raises(ValueError, lambda: PoissonProcess("X", -1))
raises(NotImplementedError, lambda: X[t])
raises(IndexError, lambda: X(-2))
assert X.joint_distribution(X(2), X(3)) == JointDistributionHandmade(
Lambda((X(2), X(3)), sqrt(6)*exp(-X(2)**2/4)*exp(-X(3)**2/6)/(12*pi)))
assert X.joint_distribution(4, 6) == JointDistributionHandmade(
Lambda((X(4), X(6)), sqrt(6)*exp(-X(4)**2/8)*exp(-X(6)**2/12)/(24*pi)))
assert P(X(t) < 3).simplify() == erf(3*sqrt(2)/(2*sqrt(t)))/2 + S(1)/2
assert P(X(t) > 2, Contains(t, Interval.Lopen(3, 7))).simplify() == S(1)/2 -\
erf(sqrt(2)/2)/2
# Equivalent to P(X(1)>1)**4
assert P((X(t) > 4) & (X(d) > 3) & (X(x) > 2) & (X(y) > 1),
Contains(t, Interval.Lopen(0, 1)) & Contains(d, Interval.Lopen(1, 2))
& Contains(x, Interval.Lopen(2, 3)) & Contains(y, Interval.Lopen(3, 4))).simplify() ==\
(1 - erf(sqrt(2)/2))*(1 - erf(sqrt(2)))*(1 - erf(3*sqrt(2)/2))*(1 - erf(2*sqrt(2)))/16
    # Contains an overlapping interval, so return Probability
assert P((X(t)< 2) & (X(d)> 3), Contains(t, Interval.Lopen(0, 2))
& Contains(d, Interval.Ropen(2, 4))) == Probability((X(d) > 3) & (X(t) < 2),
Contains(d, Interval.Ropen(2, 4)) & Contains(t, Interval.Lopen(0, 2)))
assert str(P(Not((X(t) < 5) & (X(d) > 3)), Contains(t, Interval.Ropen(2, 4)) &
Contains(d, Interval.Lopen(7, 8))).simplify()) == \
'-(1 - erf(3*sqrt(2)/2))*(2 - erfc(5/2))/4 + 1'
# Distribution has mean 0 at each timestamp
assert E(X(t)) == 0
assert E(x*(X(t) + X(d))*(X(t)**2+X(d)**2), Contains(t, Interval.Lopen(0, 1))
& Contains(d, Interval.Ropen(1, 2))) == Expectation(x*(X(d) + X(t))*(X(d)**2 + X(t)**2),
Contains(d, Interval.Ropen(1, 2)) & Contains(t, Interval.Lopen(0, 1)))
assert E(X(t) + x*E(X(3))) == 0
#test issue 20078
assert (2*X(t) + 3*X(t)).simplify() == 5*X(t)
assert (2*X(t) - 3*X(t)).simplify() == -X(t)
assert (2*(0.25*X(t))).simplify() == 0.5*X(t)
assert (2*X(t) * 0.25*X(t)).simplify() == 0.5*X(t)**2
assert (X(t)**2 + X(t)**3).simplify() == (X(t) + 1)*X(t)**2
def test_GammaProcess_symbolic():
t, d, x, y, g, l = symbols('t d x y g l', positive=True)
X = GammaProcess("X", l, g)
raises(NotImplementedError, lambda: X[t])
raises(IndexError, lambda: X(-1))
assert isinstance(X(t), RandomIndexedSymbol)
assert X.state_space == Interval(0, oo)
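    # the marginal X(t) follows a Gamma distribution with shape g*t and scale 1/l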
assert X.distribution(t) == GammaDistribution(g*t, 1/l)
assert X.joint_distribution(5, X(3)) == JointDistributionHandmade(Lambda(
(X(5), X(3)), l**(8*g)*exp(-l*X(3))*exp(-l*X(5))*X(3)**(3*g - 1)*X(5)**(5*g
- 1)/(gamma(3*g)*gamma(5*g))))
# property of the gamma process at any given timestamp
assert E(X(t)) == g*t/l
assert variance(X(t)).simplify() == g*t/l**2
# Equivalent to E(2*X(1)) + E(X(1)**2) + E(X(1)**3), where E(X(1)) == g/l
assert E(X(t)**2 + X(d)*2 + X(y)**3, Contains(t, Interval.Lopen(0, 1))
& Contains(d, Interval.Lopen(1, 2)) & Contains(y, Interval.Ropen(3, 4))) == \
2*g/l + (g**2 + g)/l**2 + (g**3 + 3*g**2 + 2*g)/l**3
assert P(X(t) > 3, Contains(t, Interval.Lopen(3, 4))).simplify() == \
1 - lowergamma(g, 3*l)/gamma(g) # equivalent to P(X(1)>3)
#test issue 20078
assert (2*X(t) + 3*X(t)).simplify() == 5*X(t)
assert (2*X(t) - 3*X(t)).simplify() == -X(t)
assert (2*(0.25*X(t))).simplify() == 0.5*X(t)
assert (2*X(t) * 0.25*X(t)).simplify() == 0.5*X(t)**2
assert (X(t)**2 + X(t)**3).simplify() == (X(t) + 1)*X(t)**2
def test_GammaProcess_numeric():
t, d, x, y = symbols('t d x y', positive=True)
X = GammaProcess("X", 1, 2)
assert X.state_space == Interval(0, oo)
assert X.index_set == Interval(0, oo)
assert X.lamda == 1
assert X.gamma == 2
raises(ValueError, lambda: GammaProcess("X", -1, 2))
raises(ValueError, lambda: GammaProcess("X", 0, -2))
raises(ValueError, lambda: GammaProcess("X", -1, -2))
# all are independent because of non-overlapping intervals
assert P((X(t) > 4) & (X(d) > 3) & (X(x) > 2) & (X(y) > 1), Contains(t,
Interval.Lopen(0, 1)) & Contains(d, Interval.Lopen(1, 2)) & Contains(x,
Interval.Lopen(2, 3)) & Contains(y, Interval.Lopen(3, 4))).simplify() == \
120*exp(-10)
# Check working with Not and Or
assert P(Not((X(t) < 5) & (X(d) > 3)), Contains(t, Interval.Ropen(2, 4)) &
Contains(d, Interval.Lopen(7, 8))).simplify() == -4*exp(-3) + 472*exp(-8)/3 + 1
assert P((X(t) > 2) | (X(t) < 4), Contains(t, Interval.Ropen(1, 4))).simplify() == \
-643*exp(-4)/15 + 109*exp(-2)/15 + 1
assert E(X(t)) == 2*t # E(X(t)) == gamma*t/l
assert E(X(2) + x*E(X(5))) == 10*x + 4
|
186dfdf00112312a5c79a4d3d812285c1ce40406a502229d0f01a177ca26a399 | from sympy.concrete.products import Product
from sympy.core.numbers import pi
from sympy.core.singleton import S
from sympy.core.symbol import (Dummy, symbols)
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.special.gamma_functions import gamma
from sympy.matrices import Determinant, Matrix, Trace, MatrixSymbol, MatrixSet
from sympy.stats import density, sample
from sympy.stats.matrix_distributions import (MatrixGammaDistribution,
MatrixGamma, MatrixPSpace, Wishart, MatrixNormal, MatrixStudentT)
from sympy.testing.pytest import raises, skip
from sympy.external import import_module
def test_MatrixPSpace():
M = MatrixGammaDistribution(1, 2, [[2, 1], [1, 2]])
MP = MatrixPSpace('M', M, 2, 2)
assert MP.distribution == M
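    # matrix dimensions passed to MatrixPSpace must be integers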
raises(ValueError, lambda: MatrixPSpace('M', M, 1.2, 2))
def test_MatrixGamma():
M = MatrixGamma('M', 1, 2, [[1, 0], [0, 1]])
assert M.pspace.distribution.set == MatrixSet(2, 2, S.Reals)
assert isinstance(density(M), MatrixGammaDistribution)
X = MatrixSymbol('X', 2, 2)
num = exp(Trace(Matrix([[-S(1)/2, 0], [0, -S(1)/2]])*X))
assert density(M)(X).doit() == num/(4*pi*sqrt(Determinant(X)))
assert density(M)([[2, 1], [1, 2]]).doit() == sqrt(3)*exp(-2)/(12*pi)
X = MatrixSymbol('X', 1, 2)
Y = MatrixSymbol('Y', 1, 2)
assert density(M)([X, Y]).doit() == exp(-X[0, 0]/2 - Y[0, 1]/2)/(4*pi*sqrt(
X[0, 0]*Y[0, 1] - X[0, 1]*Y[0, 0]))
# symbolic
a, b = symbols('a b', positive=True)
d = symbols('d', positive=True, integer=True)
Y = MatrixSymbol('Y', d, d)
Z = MatrixSymbol('Z', 2, 2)
SM = MatrixSymbol('SM', d, d)
M2 = MatrixGamma('M2', a, b, SM)
M3 = MatrixGamma('M3', 2, 3, [[2, 1], [1, 2]])
k = Dummy('k')
exprd = pi**(-d*(d - 1)/4)*b**(-a*d)*exp(Trace((-1/b)*SM**(-1)*Y)
)*Determinant(SM)**(-a)*Determinant(Y)**(a - d/2 - S(1)/2)/Product(
gamma(-k/2 + a + S(1)/2), (k, 1, d))
assert density(M2)(Y).dummy_eq(exprd)
raises(NotImplementedError, lambda: density(M3 + M)(Z))
raises(ValueError, lambda: density(M)(1))
raises(ValueError, lambda: MatrixGamma('M', -1, 2, [[1, 0], [0, 1]]))
raises(ValueError, lambda: MatrixGamma('M', -1, -2, [[1, 0], [0, 1]]))
raises(ValueError, lambda: MatrixGamma('M', -1, 2, [[1, 0], [2, 1]]))
raises(ValueError, lambda: MatrixGamma('M', -1, 2, [[1, 0], [0]]))
def test_Wishart():
W = Wishart('W', 5, [[1, 0], [0, 1]])
assert W.pspace.distribution.set == MatrixSet(2, 2, S.Reals)
X = MatrixSymbol('X', 2, 2)
term1 = exp(Trace(Matrix([[-S(1)/2, 0], [0, -S(1)/2]])*X))
assert density(W)(X).doit() == term1 * Determinant(X)/(24*pi)
assert density(W)([[2, 1], [1, 2]]).doit() == exp(-2)/(8*pi)
n = symbols('n', positive=True)
d = symbols('d', positive=True, integer=True)
Y = MatrixSymbol('Y', d, d)
SM = MatrixSymbol('SM', d, d)
W = Wishart('W', n, SM)
k = Dummy('k')
exprd = 2**(-d*n/2)*pi**(-d*(d - 1)/4)*exp(Trace(-(S(1)/2)*SM**(-1)*Y)
)*Determinant(SM)**(-n/2)*Determinant(Y)**(
-d/2 + n/2 - S(1)/2)/Product(gamma(-k/2 + n/2 + S(1)/2), (k, 1, d))
assert density(W)(Y).dummy_eq(exprd)
raises(ValueError, lambda: density(W)(1))
raises(ValueError, lambda: Wishart('W', -1, [[1, 0], [0, 1]]))
raises(ValueError, lambda: Wishart('W', -1, [[1, 0], [2, 1]]))
raises(ValueError, lambda: Wishart('W', 2, [[1, 0], [0]]))
def test_MatrixNormal():
M = MatrixNormal('M', [[5, 6]], [4], [[2, 1], [1, 2]])
assert M.pspace.distribution.set == MatrixSet(1, 2, S.Reals)
X = MatrixSymbol('X', 1, 2)
term1 = exp(-Trace(Matrix([[ S(2)/3, -S(1)/3], [-S(1)/3, S(2)/3]])*(
Matrix([[-5], [-6]]) + X.T)*Matrix([[1/4]])*(Matrix([[-5, -6]]) + X))/2)
assert density(M)(X).doit() == term1/(24*pi)
assert density(M)([[7, 8]]).doit() == exp(-S(1)/3)/(24*pi)
d, n = symbols('d n', positive=True, integer=True)
SM2 = MatrixSymbol('SM2', d, d)
SM1 = MatrixSymbol('SM1', n, n)
LM = MatrixSymbol('LM', n, d)
Y = MatrixSymbol('Y', n, d)
M = MatrixNormal('M', LM, SM1, SM2)
exprd = 4*(2*pi)**(-d*n/2)*exp(-Trace(SM2**(-1)*(-LM.T + Y.T)*SM1**(-1)*(-LM + Y)
)/2)*Determinant(SM1)**(-d)*Determinant(SM2)**(-n)
assert density(M)(Y).doit() == exprd
raises(ValueError, lambda: density(M)(1))
raises(ValueError, lambda: MatrixNormal('M', [1, 2], [[1, 0], [0, 1]], [[1, 0], [2, 1]]))
raises(ValueError, lambda: MatrixNormal('M', [1, 2], [[1, 0], [2, 1]], [[1, 0], [0, 1]]))
raises(ValueError, lambda: MatrixNormal('M', [1, 2], [[1, 0], [0, 1]], [[1, 0], [0, 1]]))
raises(ValueError, lambda: MatrixNormal('M', [1, 2], [[1, 0], [2]], [[1, 0], [0, 1]]))
raises(ValueError, lambda: MatrixNormal('M', [1, 2], [[1, 0], [2, 1]], [[1, 0], [0]]))
raises(ValueError, lambda: MatrixNormal('M', [[1, 2]], [[1, 0], [0, 1]], [[1, 0]]))
raises(ValueError, lambda: MatrixNormal('M', [[1, 2]], [1], [[1, 0]]))
def test_MatrixStudentT():
M = MatrixStudentT('M', 2, [[5, 6]], [[2, 1], [1, 2]], [4])
assert M.pspace.distribution.set == MatrixSet(1, 2, S.Reals)
X = MatrixSymbol('X', 1, 2)
D = pi ** (-1.0) * Determinant(Matrix([[4]])) ** (-1.0) * Determinant(Matrix([[2, 1], [1, 2]])) \
** (-0.5) / Determinant(Matrix([[S(1) / 4]]) * (Matrix([[-5, -6]]) + X)
* Matrix([[S(2) / 3, -S(1) / 3], [-S(1) / 3, S(2) / 3]]) * (
Matrix([[-5], [-6]]) + X.T) + Matrix([[1]])) ** 2
assert density(M)(X) == D
v = symbols('v', positive=True)
n, p = 1, 2
Omega = MatrixSymbol('Omega', p, p)
Sigma = MatrixSymbol('Sigma', n, n)
Location = MatrixSymbol('Location', n, p)
Y = MatrixSymbol('Y', n, p)
M = MatrixStudentT('M', v, Location, Omega, Sigma)
exprd = gamma(v/2 + 1)*Determinant(Matrix([[1]]) + Sigma**(-1)*(-Location + Y)*Omega**(-1)*(-Location.T + Y.T))**(-v/2 - 1) / \
(pi*gamma(v/2)*sqrt(Determinant(Omega))*Determinant(Sigma))
assert density(M)(Y) == exprd
raises(ValueError, lambda: density(M)(1))
raises(ValueError, lambda: MatrixStudentT('M', 1, [1, 2], [[1, 0], [0, 1]], [[1, 0], [2, 1]]))
raises(ValueError, lambda: MatrixStudentT('M', 1, [1, 2], [[1, 0], [2, 1]], [[1, 0], [0, 1]]))
raises(ValueError, lambda: MatrixStudentT('M', 1, [1, 2], [[1, 0], [0, 1]], [[1, 0], [0, 1]]))
raises(ValueError, lambda: MatrixStudentT('M', 1, [1, 2], [[1, 0], [2]], [[1, 0], [0, 1]]))
raises(ValueError, lambda: MatrixStudentT('M', 1, [1, 2], [[1, 0], [2, 1]], [[1], [2]]))
raises(ValueError, lambda: MatrixStudentT('M', 1, [[1, 2]], [[1, 0], [0, 1]], [[1, 0]]))
raises(ValueError, lambda: MatrixStudentT('M', 1, [[1, 2]], [1], [[1, 0]]))
raises(ValueError, lambda: MatrixStudentT('M', -1, [1, 2], [[1, 0], [0, 1]], [4]))
def test_sample_scipy():
distribs_scipy = [
MatrixNormal('M', [[5, 6]], [4], [[2, 1], [1, 2]]),
Wishart('W', 5, [[1, 0], [0, 1]])
]
size = 5
scipy = import_module('scipy')
if not scipy:
skip('Scipy not installed. Abort tests for _sample_scipy.')
else:
for X in distribs_scipy:
samps = sample(X, size=size)
for sam in samps:
assert Matrix(sam) in X.pspace.distribution.set
M = MatrixGamma('M', 1, 2, [[1, 0], [0, 1]])
raises(NotImplementedError, lambda: sample(M, size=3))
def test_sample_pymc3():
distribs_pymc3 = [
MatrixNormal('M', [[5, 6], [3, 4]], [[1, 0], [0, 1]], [[2, 1], [1, 2]]),
Wishart('W', 7, [[2, 1], [1, 2]])
]
size = 3
pymc3 = import_module('pymc3')
if not pymc3:
skip('PyMC3 is not installed. Abort tests for _sample_pymc3.')
else:
for X in distribs_pymc3:
samps = sample(X, size=size, library='pymc3')
for sam in samps:
assert Matrix(sam) in X.pspace.distribution.set
M = MatrixGamma('M', 1, 2, [[1, 0], [0, 1]])
raises(NotImplementedError, lambda: sample(M, size=3))
def test_sample_seed():
X = MatrixNormal('M', [[5, 6], [3, 4]], [[1, 0], [0, 1]], [[2, 1], [1, 2]])
libraries = ['scipy', 'numpy', 'pymc3']
for lib in libraries:
try:
imported_lib = import_module(lib)
if imported_lib:
s0, s1, s2 = [], [], []
s0 = sample(X, size=10, library=lib, seed=0)
s1 = sample(X, size=10, library=lib, seed=0)
s2 = sample(X, size=10, library=lib, seed=1)
for i in range(10):
assert (s0[i] == s1[i]).all()
assert (s1[i] != s2[i]).all()
except NotImplementedError:
continue
|
a181e46fad72d9ab4984651698b3e93eb3e7525dd15d5be7537038fa7aafa6d6 | from sympy.concrete.summations import Sum
from sympy.core.numbers import (I, Rational, oo, pi)
from sympy.core.singleton import S
from sympy.core.symbol import (Dummy, Symbol)
from sympy.functions.elementary.complexes import (im, re)
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.integers import floor
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.special.bessel import besseli
from sympy.functions.special.beta_functions import beta
from sympy.functions.special.zeta_functions import zeta
from sympy.sets.sets import FiniteSet
from sympy.simplify.simplify import simplify
from sympy.utilities.lambdify import lambdify
from sympy.core.relational import Eq, Ne
from sympy.functions.elementary.exponential import exp
from sympy.logic.boolalg import Or
from sympy.sets.fancysets import Range
from sympy.stats import (P, E, variance, density, characteristic_function,
where, moment_generating_function, skewness, cdf,
kurtosis, coskewness)
from sympy.stats.drv_types import (PoissonDistribution, GeometricDistribution,
FlorySchulz, Poisson, Geometric, Hermite, Logarithmic,
NegativeBinomial, Skellam, YuleSimon, Zeta,
DiscreteRV)
from sympy.testing.pytest import slow, nocache_fail, raises, ignore_warnings
from sympy.stats.symbolic_probability import Expectation
x = Symbol('x')
def test_PoissonDistribution():
l = 3
p = PoissonDistribution(l)
assert abs(p.cdf(10).evalf() - 1) < .001
assert abs(p.cdf(10.4).evalf() - 1) < .001
assert p.expectation(x, x) == l
assert p.expectation(x**2, x) - p.expectation(x, x)**2 == l
def test_Poisson():
l = 3
x = Poisson('x', l)
assert E(x) == l
assert variance(x) == l
assert density(x) == PoissonDistribution(l)
with ignore_warnings(UserWarning): ### TODO: Restore tests once warnings are removed
assert isinstance(E(x, evaluate=False), Expectation)
assert isinstance(E(2*x, evaluate=False), Expectation)
# issue 8248
assert x.pspace.compute_expectation(1) == 1
def test_FlorySchulz():
a = Symbol("a")
z = Symbol("z")
x = FlorySchulz('x', a)
assert E(x) == (2 - a)/a
assert (variance(x) - 2*(1 - a)/a**2).simplify() == S(0)
assert density(x)(z) == a**2*z*(1 - a)**(z - 1)
@slow
def test_GeometricDistribution():
p = S.One / 5
d = GeometricDistribution(p)
assert d.expectation(x, x) == 1/p
assert d.expectation(x**2, x) - d.expectation(x, x)**2 == (1-p)/p**2
assert abs(d.cdf(20000).evalf() - 1) < .001
assert abs(d.cdf(20000.8).evalf() - 1) < .001
G = Geometric('G', p=S(1)/4)
assert cdf(G)(S(7)/2) == P(G <= S(7)/2)
X = Geometric('X', Rational(1, 5))
Y = Geometric('Y', Rational(3, 10))
assert coskewness(X, X + Y, X + 2*Y).simplify() == sqrt(230)*Rational(81, 1150)
def test_Hermite():
a1 = Symbol("a1", positive=True)
a2 = Symbol("a2", negative=True)
raises(ValueError, lambda: Hermite("H", a1, a2))
a1 = Symbol("a1", negative=True)
a2 = Symbol("a2", positive=True)
raises(ValueError, lambda: Hermite("H", a1, a2))
a1 = Symbol("a1", positive=True)
x = Symbol("x")
H = Hermite("H", a1, a2)
assert moment_generating_function(H)(x) == exp(a1*(exp(x) - 1)
+ a2*(exp(2*x) - 1))
assert characteristic_function(H)(x) == exp(a1*(exp(I*x) - 1)
+ a2*(exp(2*I*x) - 1))
assert E(H) == a1 + 2*a2
H = Hermite("H", a1=5, a2=4)
assert density(H)(2) == 33*exp(-9)/2
assert E(H) == 13
assert variance(H) == 21
assert kurtosis(H) == Rational(464,147)
assert skewness(H) == 37*sqrt(21)/441
def test_Logarithmic():
p = S.Half
x = Logarithmic('x', p)
assert E(x) == -p / ((1 - p) * log(1 - p))
assert variance(x) == -1/log(2)**2 + 2/log(2)
assert E(2*x**2 + 3*x + 4) == 4 + 7 / log(2)
with ignore_warnings(UserWarning): ### TODO: Restore tests once warnings are removed
assert isinstance(E(x, evaluate=False), Expectation)
@nocache_fail
def test_negative_binomial():
r = 5
p = S.One / 3
x = NegativeBinomial('x', r, p)
assert E(x) == p*r / (1-p)
# This hangs when run with the cache disabled:
assert variance(x) == p*r / (1-p)**2
assert E(x**5 + 2*x + 3) == Rational(9207, 4)
with ignore_warnings(UserWarning): ### TODO: Restore tests once warnings are removed
assert isinstance(E(x, evaluate=False), Expectation)
def test_skellam():
mu1 = Symbol('mu1')
mu2 = Symbol('mu2')
z = Symbol('z')
X = Skellam('x', mu1, mu2)
assert density(X)(z) == (mu1/mu2)**(z/2) * \
exp(-mu1 - mu2)*besseli(z, 2*sqrt(mu1*mu2))
assert skewness(X).expand() == mu1/(mu1*sqrt(mu1 + mu2) + mu2 *
sqrt(mu1 + mu2)) - mu2/(mu1*sqrt(mu1 + mu2) + mu2*sqrt(mu1 + mu2))
assert variance(X).expand() == mu1 + mu2
assert E(X) == mu1 - mu2
assert characteristic_function(X)(z) == exp(
mu1*exp(I*z) - mu1 - mu2 + mu2*exp(-I*z))
assert moment_generating_function(X)(z) == exp(
mu1*exp(z) - mu1 - mu2 + mu2*exp(-z))
def test_yule_simon():
from sympy.core.singleton import S
rho = S(3)
x = YuleSimon('x', rho)
assert simplify(E(x)) == rho / (rho - 1)
assert simplify(variance(x)) == rho**2 / ((rho - 1)**2 * (rho - 2))
with ignore_warnings(UserWarning): ### TODO: Restore tests once warnings are removed
assert isinstance(E(x, evaluate=False), Expectation)
# To test the cdf function
assert cdf(x)(x) == Piecewise((-beta(floor(x), 4)*floor(x) + 1, x >= 1), (0, True))
def test_zeta():
s = S(5)
x = Zeta('x', s)
assert E(x) == zeta(s-1) / zeta(s)
assert simplify(variance(x)) == (
zeta(s) * zeta(s-2) - zeta(s-1)**2) / zeta(s)**2
def test_discrete_probability():
X = Geometric('X', Rational(1, 5))
Y = Poisson('Y', 4)
G = Geometric('e', x)
assert P(Eq(X, 3)) == Rational(16, 125)
assert P(X < 3) == Rational(9, 25)
assert P(X > 3) == Rational(64, 125)
assert P(X >= 3) == Rational(16, 25)
assert P(X <= 3) == Rational(61, 125)
assert P(Ne(X, 3)) == Rational(109, 125)
assert P(Eq(Y, 3)) == 32*exp(-4)/3
assert P(Y < 3) == 13*exp(-4)
assert P(Y > 3).equals(32*(Rational(-71, 32) + 3*exp(4)/32)*exp(-4)/3)
assert P(Y >= 3).equals(32*(Rational(-39, 32) + 3*exp(4)/32)*exp(-4)/3)
assert P(Y <= 3) == 71*exp(-4)/3
assert P(Ne(Y, 3)).equals(
13*exp(-4) + 32*(Rational(-71, 32) + 3*exp(4)/32)*exp(-4)/3)
assert P(X < S.Infinity) is S.One
assert P(X > S.Infinity) is S.Zero
assert P(G < 3) == x*(2-x)
assert P(Eq(G, 3)) == x*(-x + 1)**2
def test_DiscreteRV():
p = S(1)/2
x = Symbol('x', integer=True, positive=True)
pdf = p*(1 - p)**(x - 1) # pdf of Geometric Distribution
D = DiscreteRV(x, pdf, set=S.Naturals, check=True)
assert E(D) == E(Geometric('G', S(1)/2)) == 2
assert P(D > 3) == S(1)/8
assert D.pspace.domain.set == S.Naturals
raises(ValueError, lambda: DiscreteRV(x, x, FiniteSet(*range(4)), check=True))
    # purposely invalid pmf, but it should not raise since check=False
# see test_drv_types.test_ContinuousRV for explanation
X = DiscreteRV(x, 1/x, S.Naturals)
assert P(X < 2) == 1
assert E(X) == oo
def test_precomputed_characteristic_functions():
import mpmath
def test_cf(dist, support_lower_limit, support_upper_limit):
pdf = density(dist)
t = S('t')
x = S('x')
# first function is the hardcoded CF of the distribution
cf1 = lambdify([t], characteristic_function(dist)(t), 'mpmath')
# second function is the Fourier transform of the density function
f = lambdify([x, t], pdf(x)*exp(I*x*t), 'mpmath')
cf2 = lambda t: mpmath.nsum(lambda x: f(x, t), [
support_lower_limit, support_upper_limit], maxdegree=10)
# compare the two functions at various points
for test_point in [2, 5, 8, 11]:
n1 = cf1(test_point)
n2 = cf2(test_point)
assert abs(re(n1) - re(n2)) < 1e-12
assert abs(im(n1) - im(n2)) < 1e-12
test_cf(Geometric('g', Rational(1, 3)), 1, mpmath.inf)
test_cf(Logarithmic('l', Rational(1, 5)), 1, mpmath.inf)
test_cf(NegativeBinomial('n', 5, Rational(1, 7)), 0, mpmath.inf)
test_cf(Poisson('p', 5), 0, mpmath.inf)
test_cf(YuleSimon('y', 5), 1, mpmath.inf)
test_cf(Zeta('z', 5), 1, mpmath.inf)
def test_moment_generating_functions():
t = S('t')
geometric_mgf = moment_generating_function(Geometric('g', S.Half))(t)
assert geometric_mgf.diff(t).subs(t, 0) == 2
logarithmic_mgf = moment_generating_function(Logarithmic('l', S.Half))(t)
assert logarithmic_mgf.diff(t).subs(t, 0) == 1/log(2)
negative_binomial_mgf = moment_generating_function(
NegativeBinomial('n', 5, Rational(1, 3)))(t)
assert negative_binomial_mgf.diff(t).subs(t, 0) == Rational(5, 2)
poisson_mgf = moment_generating_function(Poisson('p', 5))(t)
assert poisson_mgf.diff(t).subs(t, 0) == 5
skellam_mgf = moment_generating_function(Skellam('s', 1, 1))(t)
assert skellam_mgf.diff(t).subs(
t, 2) == (-exp(-2) + exp(2))*exp(-2 + exp(-2) + exp(2))
yule_simon_mgf = moment_generating_function(YuleSimon('y', 3))(t)
assert simplify(yule_simon_mgf.diff(t).subs(t, 0)) == Rational(3, 2)
zeta_mgf = moment_generating_function(Zeta('z', 5))(t)
assert zeta_mgf.diff(t).subs(t, 0) == pi**4/(90*zeta(5))
def test_Or():
X = Geometric('X', S.Half)
    assert P(Or(X < 3, X > 4)) == Rational(13, 16)
    assert P(Or(X > 2, X > 1)) == P(X > 1)
    assert P(Or(X >= 3, X < 3)) == 1
def test_where():
X = Geometric('X', Rational(1, 5))
Y = Poisson('Y', 4)
assert where(X**2 > 4).set == Range(3, S.Infinity, 1)
assert where(X**2 >= 4).set == Range(2, S.Infinity, 1)
assert where(Y**2 < 9).set == Range(0, 3, 1)
assert where(Y**2 <= 9).set == Range(0, 4, 1)
def test_conditional():
X = Geometric('X', Rational(2, 3))
Y = Poisson('Y', 3)
assert P(X > 2, X > 3) == 1
assert P(X > 3, X > 2) == Rational(1, 3)
assert P(Y > 2, Y < 2) == 0
assert P(Eq(Y, 3), Y >= 0) == 9*exp(-3)/2
assert P(Eq(Y, 3), Eq(Y, 2)) == 0
assert P(X < 2, Eq(X, 2)) == 0
assert P(X > 2, Eq(X, 3)) == 1
def test_product_spaces():
X1 = Geometric('X1', S.Half)
X2 = Geometric('X2', Rational(1, 3))
#assert str(P(X1 + X2 < 3, evaluate=False)) == """Sum(Piecewise((2**(X2 - n - 2)*(2/3)**(X2 - 1)/6, """\
# + """(-X2 + n + 3 >= 1) & (-X2 + n + 3 < oo)), (0, True)), (X2, 1, oo), (n, -oo, -1))"""
n = Dummy('n')
with ignore_warnings(UserWarning): ### TODO: Restore tests once warnings are removed
assert P(X1 + X2 < 3, evaluate=False).rewrite(Sum).dummy_eq(Sum(Piecewise((2**(-n)/4,
n + 2 >= 1), (0, True)), (n, -oo, -1))/3)
#assert str(P(X1 + X2 > 3)) == """Sum(Piecewise((2**(X2 - n - 2)*(2/3)**(X2 - 1)/6, """ +\
# """(-X2 + n + 3 >= 1) & (-X2 + n + 3 < oo)), (0, True)), (X2, 1, oo), (n, 1, oo))"""
assert P(X1 + X2 > 3).dummy_eq(Sum(Piecewise((2**(X2 - n - 2)*(Rational(2, 3))**(X2 - 1)/6,
-X2 + n + 3 >= 1), (0, True)),
(X2, 1, oo), (n, 1, oo)))
# assert str(P(Eq(X1 + X2, 3))) == """Sum(Piecewise((2**(X2 - 2)*(2/3)**(X2 - 1)/6, """ +\
# """X2 <= 2), (0, True)), (X2, 1, oo))"""
assert P(Eq(X1 + X2, 3)) == Rational(1, 12)
|
fa96e716bde186f4c3059cef3fa25d193d5d926c129847bba0416e6e289ac34c | from sympy.concrete.products import Product
from sympy.core.function import Lambda
from sympy.core.numbers import (I, Rational, pi)
from sympy.core.singleton import S
from sympy.core.symbol import Dummy
from sympy.functions.elementary.complexes import Abs
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.integrals.integrals import Integral
from sympy.matrices.dense import Matrix
from sympy.matrices.expressions.matexpr import MatrixSymbol
from sympy.matrices.expressions.trace import Trace
from sympy.tensor.indexed import IndexedBase
from sympy.stats import (GaussianUnitaryEnsemble as GUE, density,
GaussianOrthogonalEnsemble as GOE,
GaussianSymplecticEnsemble as GSE,
joint_eigen_distribution,
CircularUnitaryEnsemble as CUE,
CircularOrthogonalEnsemble as COE,
CircularSymplecticEnsemble as CSE,
JointEigenDistribution,
level_spacing_distribution,
Normal, Beta)
from sympy.stats.joint_rv_types import JointDistributionHandmade
from sympy.stats.rv import RandomMatrixSymbol
from sympy.stats.random_matrix_models import GaussianEnsemble, RandomMatrixPSpace
from sympy.testing.pytest import raises
def test_GaussianEnsemble():
G = GaussianEnsemble('G', 3)
assert density(G) == G.pspace.model
raises(ValueError, lambda: GaussianEnsemble('G', 3.5))
def test_GaussianUnitaryEnsemble():
H = RandomMatrixSymbol('H', 3, 3)
G = GUE('U', 3)
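    # the GUE density is proportional to exp(-n*Trace(H**2)/2) for an n x n Hermitian matrix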
assert density(G)(H) == sqrt(2)*exp(-3*Trace(H**2)/2)/(4*pi**Rational(9, 2))
i, j = (Dummy('i', integer=True, positive=True),
Dummy('j', integer=True, positive=True))
l = IndexedBase('l')
assert joint_eigen_distribution(G).dummy_eq(
Lambda((l[1], l[2], l[3]),
27*sqrt(6)*exp(-3*(l[1]**2)/2 - 3*(l[2]**2)/2 - 3*(l[3]**2)/2)*
Product(Abs(l[i] - l[j])**2, (j, i + 1, 3), (i, 1, 2))/(16*pi**Rational(3, 2))))
s = Dummy('s')
assert level_spacing_distribution(G).dummy_eq(Lambda(s, 32*s**2*exp(-4*s**2/pi)/pi**2))
def test_GaussianOrthogonalEnsemble():
H = RandomMatrixSymbol('H', 3, 3)
_H = MatrixSymbol('_H', 3, 3)
G = GOE('O', 3)
assert density(G)(H) == exp(-3*Trace(H**2)/4)/Integral(exp(-3*Trace(_H**2)/4), _H)
i, j = (Dummy('i', integer=True, positive=True),
Dummy('j', integer=True, positive=True))
l = IndexedBase('l')
assert joint_eigen_distribution(G).dummy_eq(
Lambda((l[1], l[2], l[3]),
9*sqrt(2)*exp(-3*l[1]**2/2 - 3*l[2]**2/2 - 3*l[3]**2/2)*
Product(Abs(l[i] - l[j]), (j, i + 1, 3), (i, 1, 2))/(32*pi)))
s = Dummy('s')
assert level_spacing_distribution(G).dummy_eq(Lambda(s, s*pi*exp(-s**2*pi/4)/2))
def test_GaussianSymplecticEnsemble():
H = RandomMatrixSymbol('H', 3, 3)
_H = MatrixSymbol('_H', 3, 3)
G = GSE('O', 3)
assert density(G)(H) == exp(-3*Trace(H**2))/Integral(exp(-3*Trace(_H**2)), _H)
i, j = (Dummy('i', integer=True, positive=True),
Dummy('j', integer=True, positive=True))
l = IndexedBase('l')
assert joint_eigen_distribution(G).dummy_eq(
Lambda((l[1], l[2], l[3]),
162*sqrt(3)*exp(-3*l[1]**2/2 - 3*l[2]**2/2 - 3*l[3]**2/2)*
Product(Abs(l[i] - l[j])**4, (j, i + 1, 3), (i, 1, 2))/(5*pi**Rational(3, 2))))
s = Dummy('s')
assert level_spacing_distribution(G).dummy_eq(Lambda(s, S(262144)*s**4*exp(-64*s**2/(9*pi))/(729*pi**3)))
def test_CircularUnitaryEnsemble():
CU = CUE('U', 3)
j, k = (Dummy('j', integer=True, positive=True),
Dummy('k', integer=True, positive=True))
t = IndexedBase('t')
assert joint_eigen_distribution(CU).dummy_eq(
Lambda((t[1], t[2], t[3]),
Product(Abs(exp(I*t[j]) - exp(I*t[k]))**2,
(j, k + 1, 3), (k, 1, 2))/(48*pi**3))
)
def test_CircularOrthogonalEnsemble():
CO = COE('U', 3)
j, k = (Dummy('j', integer=True, positive=True),
Dummy('k', integer=True, positive=True))
t = IndexedBase('t')
assert joint_eigen_distribution(CO).dummy_eq(
Lambda((t[1], t[2], t[3]),
Product(Abs(exp(I*t[j]) - exp(I*t[k])),
(j, k + 1, 3), (k, 1, 2))/(48*pi**2))
)
def test_CircularSymplecticEnsemble():
CS = CSE('U', 3)
j, k = (Dummy('j', integer=True, positive=True),
Dummy('k', integer=True, positive=True))
t = IndexedBase('t')
assert joint_eigen_distribution(CS).dummy_eq(
Lambda((t[1], t[2], t[3]),
Product(Abs(exp(I*t[j]) - exp(I*t[k]))**4,
(j, k + 1, 3), (k, 1, 2))/(720*pi**3))
)
def test_JointEigenDistribution():
A = Matrix([[Normal('A00', 0, 1), Normal('A01', 1, 1)],
[Beta('A10', 1, 1), Beta('A11', 1, 1)]])
    assert JointEigenDistribution(A) == \
        JointDistributionHandmade(-sqrt(A[0, 0]**2 - 2*A[0, 0]*A[1, 1] + 4*A[0, 1]*A[1, 0] + A[1, 1]**2)/2 +
        A[0, 0]/2 + A[1, 1]/2, sqrt(A[0, 0]**2 - 2*A[0, 0]*A[1, 1] + 4*A[0, 1]*A[1, 0] + A[1, 1]**2)/2 + A[0, 0]/2 + A[1, 1]/2)
raises(ValueError, lambda: JointEigenDistribution(Matrix([[1, 0], [2, 1]])))
def test_issue_19841():
G1 = GUE('U', 2)
G2 = G1.xreplace({2: 2})
assert G1.args == G2.args
X = MatrixSymbol('X', 2, 2)
G = GSE('U', 2)
h_pspace = RandomMatrixPSpace('P', model=density(G))
H = RandomMatrixSymbol('H', 2, 2, pspace=h_pspace)
H2 = RandomMatrixSymbol('H', 2, 2, pspace=None)
assert H.doit() == H
assert (2*H).xreplace({H: X}) == 2*X
assert (2*H).xreplace({H2: X}) == 2*H
assert (2*H2).xreplace({H: X}) == 2*H2
assert (2*H2).xreplace({H2: X}) == 2*X
|
4e842f2f314ae37c49e13cab7157f0dc6469185f3bea6c5d9714edd38ecab727 | from sympy.concrete.summations import Sum
from sympy.core.function import (Lambda, diff, expand_func)
from sympy.core.mul import Mul
from sympy.core import EulerGamma
from sympy.core.numbers import (E as e, I, Rational, pi)
from sympy.core.relational import (Eq, Ne)
from sympy.core.singleton import S
from sympy.core.symbol import (Dummy, Symbol, symbols)
from sympy.functions.combinatorial.factorials import (binomial, factorial)
from sympy.functions.elementary.complexes import (Abs, im, re, sign)
from sympy.functions.elementary.exponential import (exp, log)
from sympy.functions.elementary.hyperbolic import (cosh, sinh)
from sympy.functions.elementary.integers import floor
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.elementary.trigonometric import (asin, atan, cos, sin, tan)
from sympy.functions.special.bessel import (besseli, besselj, besselk)
from sympy.functions.special.beta_functions import beta
from sympy.functions.special.error_functions import (erf, erfc, erfi, expint)
from sympy.functions.special.gamma_functions import (gamma, lowergamma, uppergamma)
from sympy.functions.special.hyper import hyper
from sympy.integrals.integrals import Integral
from sympy.logic.boolalg import (And, Or)
from sympy.sets.sets import Interval
from sympy.simplify.simplify import simplify
from sympy.utilities.lambdify import lambdify
from sympy.functions.special.error_functions import erfinv
from sympy.functions.special.hyper import meijerg
from sympy.sets.sets import FiniteSet, Complement, Intersection
from sympy.stats import (P, E, where, density, variance, covariance, skewness, kurtosis, median,
given, pspace, cdf, characteristic_function, moment_generating_function,
ContinuousRV, Arcsin, Benini, Beta, BetaNoncentral, BetaPrime,
Cauchy, Chi, ChiSquared, ChiNoncentral, Dagum, Erlang, ExGaussian,
Exponential, ExponentialPower, FDistribution, FisherZ, Frechet, Gamma,
GammaInverse, Gompertz, Gumbel, Kumaraswamy, Laplace, Levy, Logistic, LogCauchy,
LogLogistic, LogitNormal, LogNormal, Maxwell, Moyal, Nakagami, Normal, GaussianInverse,
Pareto, PowerFunction, QuadraticU, RaisedCosine, Rayleigh, Reciprocal, ShiftedGompertz, StudentT,
Trapezoidal, Triangular, Uniform, UniformSum, VonMises, Weibull, coskewness,
WignerSemicircle, Wald, correlation, moment, cmoment, smoment, quantile,
Lomax, BoundedPareto)
from sympy.stats.crv_types import NormalDistribution, ExponentialDistribution, ContinuousDistributionHandmade
from sympy.stats.joint_rv_types import MultivariateLaplaceDistribution, MultivariateNormalDistribution
from sympy.stats.crv import SingleContinuousPSpace, SingleContinuousDomain
from sympy.stats.compound_rv import CompoundPSpace
from sympy.stats.symbolic_probability import Probability
from sympy.testing.pytest import raises, XFAIL, slow, ignore_warnings
from sympy.testing.randtest import verify_numerically as tn
oo = S.Infinity
x, y, z = map(Symbol, 'xyz')
def test_single_normal():
mu = Symbol('mu', real=True)
sigma = Symbol('sigma', positive=True)
X = Normal('x', 0, 1)
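    # an affine transform of a standard normal, Y = sigma*X + mu, is Normal(mu, sigma)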
Y = X*sigma + mu
assert E(Y) == mu
assert variance(Y) == sigma**2
pdf = density(Y)
x = Symbol('x', real=True)
assert (pdf(x) ==
2**S.Half*exp(-(x - mu)**2/(2*sigma**2))/(2*pi**S.Half*sigma))
assert P(X**2 < 1) == erf(2**S.Half/2)
ans = quantile(Y)(x)
assert ans == Complement(Intersection(FiniteSet(
sqrt(2)*sigma*(sqrt(2)*mu/(2*sigma)+ erfinv(2*x - 1))),
Interval(-oo, oo)), FiniteSet(mu))
assert E(X, Eq(X, mu)) == mu
assert median(X) == FiniteSet(0)
# issue 8248
assert X.pspace.compute_expectation(1).doit() == 1
def test_conditional_1d():
X = Normal('x', 0, 1)
Y = given(X, X >= 0)
z = Symbol('z')
assert density(Y)(z) == 2 * density(X)(z)
assert Y.pspace.domain.set == Interval(0, oo)
assert E(Y) == sqrt(2) / sqrt(pi)
assert E(X**2) == E(Y**2)
def test_ContinuousDomain():
X = Normal('x', 0, 1)
assert where(X**2 <= 1).set == Interval(-1, 1)
assert where(X**2 <= 1).symbol == X.symbol
    assert where(And(X**2 <= 1, X >= 0)).set == Interval(0, 1)
raises(ValueError, lambda: where(sin(X) > 1))
Y = given(X, X >= 0)
assert Y.pspace.domain.set == Interval(0, oo)
@slow
def test_multiple_normal():
X, Y = Normal('x', 0, 1), Normal('y', 0, 1)
p = Symbol("p", positive=True)
assert E(X + Y) == 0
assert variance(X + Y) == 2
assert variance(X + X) == 4
assert covariance(X, Y) == 0
assert covariance(2*X + Y, -X) == -2*variance(X)
assert skewness(X) == 0
assert skewness(X + Y) == 0
assert kurtosis(X) == 3
assert kurtosis(X+Y) == 3
assert correlation(X, Y) == 0
assert correlation(X, X + Y) == correlation(X, X - Y)
assert moment(X, 2) == 1
assert cmoment(X, 3) == 0
assert moment(X + Y, 4) == 12
assert cmoment(X, 2) == variance(X)
assert smoment(X*X, 2) == 1
assert smoment(X + Y, 3) == skewness(X + Y)
assert smoment(X + Y, 4) == kurtosis(X + Y)
assert E(X, Eq(X + Y, 0)) == 0
assert variance(X, Eq(X + Y, 0)) == S.Half
assert quantile(X)(p) == sqrt(2)*erfinv(2*p - S.One)
def test_symbolic():
mu1, mu2 = symbols('mu1 mu2', real=True)
s1, s2 = symbols('sigma1 sigma2', positive=True)
rate = Symbol('lambda', positive=True)
X = Normal('x', mu1, s1)
Y = Normal('y', mu2, s2)
Z = Exponential('z', rate)
a, b, c = symbols('a b c', real=True)
assert E(X) == mu1
assert E(X + Y) == mu1 + mu2
assert E(a*X + b) == a*E(X) + b
assert variance(X) == s1**2
assert variance(X + a*Y + b) == variance(X) + a**2*variance(Y)
assert E(Z) == 1/rate
assert E(a*Z + b) == a*E(Z) + b
assert E(X + a*Z + b) == mu1 + a/rate + b
assert median(X) == FiniteSet(mu1)
def test_cdf():
X = Normal('x', 0, 1)
d = cdf(X)
assert P(X < 1) == d(1).rewrite(erfc)
assert d(0) == S.Half
d = cdf(X, X > 0) # given X>0
assert d(0) == 0
Y = Exponential('y', 10)
d = cdf(Y)
assert d(-5) == 0
assert P(Y > 3) == 1 - d(3)
raises(ValueError, lambda: cdf(X + Y))
Z = Exponential('z', 1)
f = cdf(Z)
assert f(z) == Piecewise((1 - exp(-z), z >= 0), (0, True))
def test_characteristic_function():
X = Uniform('x', 0, 1)
cf = characteristic_function(X)
assert cf(1) == -I*(-1 + exp(I))
Y = Normal('y', 1, 1)
cf = characteristic_function(Y)
assert cf(0) == 1
assert cf(1) == exp(I - S.Half)
Z = Exponential('z', 5)
cf = characteristic_function(Z)
assert cf(0) == 1
assert cf(1).expand() == Rational(25, 26) + I*Rational(5, 26)
X = GaussianInverse('x', 1, 1)
cf = characteristic_function(X)
assert cf(0) == 1
assert cf(1) == exp(1 - sqrt(1 - 2*I))
X = ExGaussian('x', 0, 1, 1)
cf = characteristic_function(X)
assert cf(0) == 1
assert cf(1) == (1 + I)*exp(Rational(-1, 2))/2
L = Levy('x', 0, 1)
cf = characteristic_function(L)
assert cf(0) == 1
assert cf(1) == exp(-sqrt(2)*sqrt(-I))
def test_moment_generating_function():
t = symbols('t', positive=True)
# Symbolic tests
a, b, c = symbols('a b c')
mgf = moment_generating_function(Beta('x', a, b))(t)
assert mgf == hyper((a,), (a + b,), t)
mgf = moment_generating_function(Chi('x', a))(t)
assert mgf == sqrt(2)*t*gamma(a/2 + S.Half)*\
hyper((a/2 + S.Half,), (Rational(3, 2),), t**2/2)/gamma(a/2) +\
hyper((a/2,), (S.Half,), t**2/2)
mgf = moment_generating_function(ChiSquared('x', a))(t)
assert mgf == (1 - 2*t)**(-a/2)
mgf = moment_generating_function(Erlang('x', a, b))(t)
assert mgf == (1 - t/b)**(-a)
mgf = moment_generating_function(ExGaussian("x", a, b, c))(t)
assert mgf == exp(a*t + b**2*t**2/2)/(1 - t/c)
mgf = moment_generating_function(Exponential('x', a))(t)
assert mgf == a/(a - t)
mgf = moment_generating_function(Gamma('x', a, b))(t)
assert mgf == (-b*t + 1)**(-a)
mgf = moment_generating_function(Gumbel('x', a, b))(t)
assert mgf == exp(b*t)*gamma(-a*t + 1)
mgf = moment_generating_function(Gompertz('x', a, b))(t)
assert mgf == b*exp(b)*expint(t/a, b)
mgf = moment_generating_function(Laplace('x', a, b))(t)
assert mgf == exp(a*t)/(-b**2*t**2 + 1)
mgf = moment_generating_function(Logistic('x', a, b))(t)
assert mgf == exp(a*t)*beta(-b*t + 1, b*t + 1)
mgf = moment_generating_function(Normal('x', a, b))(t)
assert mgf == exp(a*t + b**2*t**2/2)
mgf = moment_generating_function(Pareto('x', a, b))(t)
assert mgf == b*(-a*t)**b*uppergamma(-b, -a*t)
mgf = moment_generating_function(QuadraticU('x', a, b))(t)
assert str(mgf) == ("(3*(t*(-4*b + (a + b)**2) + 4)*exp(b*t) - "
"3*(t*(a**2 + 2*a*(b - 2) + b**2) + 4)*exp(a*t))/(t**2*(a - b)**3)")
mgf = moment_generating_function(RaisedCosine('x', a, b))(t)
assert mgf == pi**2*exp(a*t)*sinh(b*t)/(b*t*(b**2*t**2 + pi**2))
mgf = moment_generating_function(Rayleigh('x', a))(t)
assert mgf == sqrt(2)*sqrt(pi)*a*t*(erf(sqrt(2)*a*t/2) + 1)\
*exp(a**2*t**2/2)/2 + 1
mgf = moment_generating_function(Triangular('x', a, b, c))(t)
assert str(mgf) == ("(-2*(-a + b)*exp(c*t) + 2*(-a + c)*exp(b*t) + "
"2*(b - c)*exp(a*t))/(t**2*(-a + b)*(-a + c)*(b - c))")
mgf = moment_generating_function(Uniform('x', a, b))(t)
assert mgf == (-exp(a*t) + exp(b*t))/(t*(-a + b))
mgf = moment_generating_function(UniformSum('x', a))(t)
assert mgf == ((exp(t) - 1)/t)**a
mgf = moment_generating_function(WignerSemicircle('x', a))(t)
assert mgf == 2*besseli(1, a*t)/(a*t)
# Numeric tests
mgf = moment_generating_function(Beta('x', 1, 1))(t)
assert mgf.diff(t).subs(t, 1) == hyper((2,), (3,), 1)/2
mgf = moment_generating_function(Chi('x', 1))(t)
assert mgf.diff(t).subs(t, 1) == sqrt(2)*hyper((1,), (Rational(3, 2),), S.Half
)/sqrt(pi) + hyper((Rational(3, 2),), (Rational(3, 2),), S.Half) + 2*sqrt(2)*hyper((2,),
(Rational(5, 2),), S.Half)/(3*sqrt(pi))
mgf = moment_generating_function(ChiSquared('x', 1))(t)
assert mgf.diff(t).subs(t, 1) == I
mgf = moment_generating_function(Erlang('x', 1, 1))(t)
assert mgf.diff(t).subs(t, 0) == 1
mgf = moment_generating_function(ExGaussian("x", 0, 1, 1))(t)
assert mgf.diff(t).subs(t, 2) == -exp(2)
mgf = moment_generating_function(Exponential('x', 1))(t)
assert mgf.diff(t).subs(t, 0) == 1
mgf = moment_generating_function(Gamma('x', 1, 1))(t)
assert mgf.diff(t).subs(t, 0) == 1
mgf = moment_generating_function(Gumbel('x', 1, 1))(t)
assert mgf.diff(t).subs(t, 0) == EulerGamma + 1
mgf = moment_generating_function(Gompertz('x', 1, 1))(t)
assert mgf.diff(t).subs(t, 1) == -e*meijerg(((), (1, 1)),
((0, 0, 0), ()), 1)
mgf = moment_generating_function(Laplace('x', 1, 1))(t)
assert mgf.diff(t).subs(t, 0) == 1
mgf = moment_generating_function(Logistic('x', 1, 1))(t)
assert mgf.diff(t).subs(t, 0) == beta(1, 1)
mgf = moment_generating_function(Normal('x', 0, 1))(t)
assert mgf.diff(t).subs(t, 1) == exp(S.Half)
mgf = moment_generating_function(Pareto('x', 1, 1))(t)
assert mgf.diff(t).subs(t, 0) == expint(1, 0)
mgf = moment_generating_function(QuadraticU('x', 1, 2))(t)
assert mgf.diff(t).subs(t, 1) == -12*e - 3*exp(2)
mgf = moment_generating_function(RaisedCosine('x', 1, 1))(t)
assert mgf.diff(t).subs(t, 1) == -2*e*pi**2*sinh(1)/\
(1 + pi**2)**2 + e*pi**2*cosh(1)/(1 + pi**2)
mgf = moment_generating_function(Rayleigh('x', 1))(t)
assert mgf.diff(t).subs(t, 0) == sqrt(2)*sqrt(pi)/2
mgf = moment_generating_function(Triangular('x', 1, 3, 2))(t)
assert mgf.diff(t).subs(t, 1) == -e + exp(3)
mgf = moment_generating_function(Uniform('x', 0, 1))(t)
assert mgf.diff(t).subs(t, 1) == 1
mgf = moment_generating_function(UniformSum('x', 1))(t)
assert mgf.diff(t).subs(t, 1) == 1
mgf = moment_generating_function(WignerSemicircle('x', 1))(t)
assert mgf.diff(t).subs(t, 1) == -2*besseli(1, 1) + besseli(2, 1) +\
besseli(0, 1)
def test_ContinuousRV():
pdf = sqrt(2)*exp(-x**2/2)/(2*sqrt(pi)) # Normal distribution
# X and Y should be equivalent
X = ContinuousRV(x, pdf, check=True)
Y = Normal('y', 0, 1)
assert variance(X) == variance(Y)
assert P(X > 0) == P(Y > 0)
Z = ContinuousRV(z, exp(-z), set=Interval(0, oo))
assert Z.pspace.domain.set == Interval(0, oo)
assert E(Z) == 1
assert P(Z > 5) == exp(-5)
raises(ValueError, lambda: ContinuousRV(z, exp(-z), set=Interval(0, 10), check=True))
# This is the correct pdf for Gamma(k, theta), but the integral in `check`
# only evaluates to something equivalent to 1, not to 1 exactly, so check is not used here
_x, k, theta = symbols("x k theta", positive=True)
pdf = 1/(gamma(k)*theta**k)*_x**(k-1)*exp(-_x/theta)
X = ContinuousRV(_x, pdf, set=Interval(0, oo))
Y = Gamma('y', k, theta)
assert (E(X) - E(Y)).simplify() == 0
assert (variance(X) - variance(Y)).simplify() == 0
def test_arcsin():
a = Symbol("a", real=True)
b = Symbol("b", real=True)
X = Arcsin('x', a, b)
assert density(X)(x) == 1/(pi*sqrt((-x + b)*(x - a)))
assert cdf(X)(x) == Piecewise((0, a > x),
(2*asin(sqrt((-a + x)/(-a + b)))/pi, b >= x),
(1, True))
assert pspace(X).domain.set == Interval(a, b)
def test_benini():
alpha = Symbol("alpha", positive=True)
beta = Symbol("beta", positive=True)
sigma = Symbol("sigma", positive=True)
X = Benini('x', alpha, beta, sigma)
assert density(X)(x) == ((alpha/x + 2*beta*log(x/sigma)/x)
*exp(-alpha*log(x/sigma) - beta*log(x/sigma)**2))
assert pspace(X).domain.set == Interval(sigma, oo)
raises(NotImplementedError, lambda: moment_generating_function(X))
alpha = Symbol("alpha", nonpositive=True)
raises(ValueError, lambda: Benini('x', alpha, beta, sigma))
beta = Symbol("beta", nonpositive=True)
raises(ValueError, lambda: Benini('x', alpha, beta, sigma))
alpha = Symbol("alpha", positive=True)
raises(ValueError, lambda: Benini('x', alpha, beta, sigma))
beta = Symbol("beta", positive=True)
sigma = Symbol("sigma", nonpositive=True)
raises(ValueError, lambda: Benini('x', alpha, beta, sigma))
def test_beta():
a, b = symbols('alpha beta', positive=True)
B = Beta('x', a, b)
assert pspace(B).domain.set == Interval(0, 1)
assert characteristic_function(B)(x) == hyper((a,), (a + b,), I*x)
assert density(B)(x) == x**(a - 1)*(1 - x)**(b - 1)/beta(a, b)
assert simplify(E(B)) == a / (a + b)
assert simplify(variance(B)) == a*b / (a**3 + 3*a**2*b + a**2 + 3*a*b**2 + 2*a*b + b**3 + b**2)
# Full symbolic solution is too much, test with numeric version
a, b = 1, 2
B = Beta('x', a, b)
assert expand_func(E(B)) == a / S(a + b)
assert expand_func(variance(B)) == (a*b) / S((a + b)**2 * (a + b + 1))
assert median(B) == FiniteSet(1 - 1/sqrt(2))
def test_beta_noncentral():
a, b = symbols('a b', positive=True)
c = Symbol('c', nonnegative=True)
_k = Dummy('k')
X = BetaNoncentral('x', a, b, c)
assert pspace(X).domain.set == Interval(0, 1)
dens = density(X)
z = Symbol('z')
res = Sum( z**(_k + a - 1)*(c/2)**_k*(1 - z)**(b - 1)*exp(-c/2)/
(beta(_k + a, b)*factorial(_k)), (_k, 0, oo))
assert dens(z).dummy_eq(res)
# BetaNoncentral should not raise if the assumptions
# on the symbols cannot be determined
a, b, c = symbols('a b c')
assert BetaNoncentral('x', a, b, c)
a = Symbol('a', positive=False, real=True)
raises(ValueError, lambda: BetaNoncentral('x', a, b, c))
a = Symbol('a', positive=True)
b = Symbol('b', positive=False, real=True)
raises(ValueError, lambda: BetaNoncentral('x', a, b, c))
a = Symbol('a', positive=True)
b = Symbol('b', positive=True)
c = Symbol('c', nonnegative=False, real=True)
raises(ValueError, lambda: BetaNoncentral('x', a, b, c))
def test_betaprime():
alpha = Symbol("alpha", positive=True)
betap = Symbol("beta", positive=True)
X = BetaPrime('x', alpha, betap)
assert density(X)(x) == x**(alpha - 1)*(x + 1)**(-alpha - betap)/beta(alpha, betap)
alpha = Symbol("alpha", nonpositive=True)
raises(ValueError, lambda: BetaPrime('x', alpha, betap))
alpha = Symbol("alpha", positive=True)
betap = Symbol("beta", nonpositive=True)
raises(ValueError, lambda: BetaPrime('x', alpha, betap))
X = BetaPrime('x', 1, 1)
assert median(X) == FiniteSet(1)
def test_BoundedPareto():
L, H = symbols('L, H', negative=True)
raises(ValueError, lambda: BoundedPareto('X', 1, L, H))
L, H = symbols('L, H', real=False)
raises(ValueError, lambda: BoundedPareto('X', 1, L, H))
L, H = symbols('L, H', positive=True)
raises(ValueError, lambda: BoundedPareto('X', -1, L, H))
X = BoundedPareto('X', 2, L, H)
assert X.pspace.domain.set == Interval(L, H)
assert density(X)(x) == 2*L**2/(x**3*(1 - L**2/H**2))
assert cdf(X)(x) == Piecewise((-H**2*L**2/(x**2*(H**2 - L**2)) \
+ H**2/(H**2 - L**2), L <= x), (0, True))
assert E(X).simplify() == 2*H*L/(H + L)
X = BoundedPareto('X', 1, 2, 4)
assert E(X).simplify() == log(16)
assert median(X) == FiniteSet(Rational(8, 3))
assert variance(X).simplify() == 8 - 16*log(2)**2
def test_cauchy():
x0 = Symbol("x0", real=True)
gamma = Symbol("gamma", positive=True)
p = Symbol("p", positive=True)
X = Cauchy('x', x0, gamma)
# Tests the characteristic function
assert characteristic_function(X)(x) == exp(-gamma*Abs(x) + I*x*x0)
raises(NotImplementedError, lambda: moment_generating_function(X))
assert density(X)(x) == 1/(pi*gamma*(1 + (x - x0)**2/gamma**2))
assert diff(cdf(X)(x), x) == density(X)(x)
assert quantile(X)(p) == gamma*tan(pi*(p - S.Half)) + x0
x1 = Symbol("x1", real=False)
raises(ValueError, lambda: Cauchy('x', x1, gamma))
gamma = Symbol("gamma", nonpositive=True)
raises(ValueError, lambda: Cauchy('x', x0, gamma))
assert median(X) == FiniteSet(x0)
def test_chi():
from sympy.core.numbers import I
k = Symbol("k", integer=True)
X = Chi('x', k)
assert density(X)(x) == 2**(-k/2 + 1)*x**(k - 1)*exp(-x**2/2)/gamma(k/2)
# Tests the characteristic function
assert characteristic_function(X)(x) == sqrt(2)*I*x*gamma(k/2 + S(1)/2)*hyper((k/2 + S(1)/2,),
(S(3)/2,), -x**2/2)/gamma(k/2) + hyper((k/2,), (S(1)/2,), -x**2/2)
# Tests the moment generating function
assert moment_generating_function(X)(x) == sqrt(2)*x*gamma(k/2 + S(1)/2)*hyper((k/2 + S(1)/2,),
(S(3)/2,), x**2/2)/gamma(k/2) + hyper((k/2,), (S(1)/2,), x**2/2)
k = Symbol("k", integer=True, positive=False)
raises(ValueError, lambda: Chi('x', k))
k = Symbol("k", integer=False, positive=True)
raises(ValueError, lambda: Chi('x', k))
def test_chi_noncentral():
k = Symbol("k", integer=True)
l = Symbol("l")
X = ChiNoncentral("x", k, l)
assert density(X)(x) == (x**k*l*(x*l)**(-k/2)*
exp(-x**2/2 - l**2/2)*besseli(k/2 - 1, x*l))
k = Symbol("k", integer=True, positive=False)
raises(ValueError, lambda: ChiNoncentral('x', k, l))
k = Symbol("k", integer=True, positive=True)
l = Symbol("l", nonpositive=True)
raises(ValueError, lambda: ChiNoncentral('x', k, l))
k = Symbol("k", integer=False)
l = Symbol("l", positive=True)
raises(ValueError, lambda: ChiNoncentral('x', k, l))
def test_chi_squared():
k = Symbol("k", integer=True)
X = ChiSquared('x', k)
# Tests the characteristic function
assert characteristic_function(X)(x) == ((-2*I*x + 1)**(-k/2))
assert density(X)(x) == 2**(-k/2)*x**(k/2 - 1)*exp(-x/2)/gamma(k/2)
assert cdf(X)(x) == Piecewise((lowergamma(k/2, x/2)/gamma(k/2), x >= 0), (0, True))
assert E(X) == k
assert variance(X) == 2*k
X = ChiSquared('x', 15)
assert cdf(X)(3) == -14873*sqrt(6)*exp(Rational(-3, 2))/(5005*sqrt(pi)) + erf(sqrt(6)/2)
k = Symbol("k", integer=True, positive=False)
raises(ValueError, lambda: ChiSquared('x', k))
k = Symbol("k", integer=False, positive=True)
raises(ValueError, lambda: ChiSquared('x', k))
def test_dagum():
p = Symbol("p", positive=True)
b = Symbol("b", positive=True)
a = Symbol("a", positive=True)
X = Dagum('x', p, a, b)
assert density(X)(x) == a*p*(x/b)**(a*p)*((x/b)**a + 1)**(-p - 1)/x
assert cdf(X)(x) == Piecewise(((1 + (x/b)**(-a))**(-p), x >= 0),
(0, True))
p = Symbol("p", nonpositive=True)
raises(ValueError, lambda: Dagum('x', p, a, b))
p = Symbol("p", positive=True)
b = Symbol("b", nonpositive=True)
raises(ValueError, lambda: Dagum('x', p, a, b))
b = Symbol("b", positive=True)
a = Symbol("a", nonpositive=True)
raises(ValueError, lambda: Dagum('x', p, a, b))
X = Dagum('x', 1, 1, 1)
assert median(X) == FiniteSet(1)
def test_erlang():
k = Symbol("k", integer=True, positive=True)
l = Symbol("l", positive=True)
X = Erlang("x", k, l)
assert density(X)(x) == x**(k - 1)*l**k*exp(-x*l)/gamma(k)
assert cdf(X)(x) == Piecewise((lowergamma(k, l*x)/gamma(k), x > 0),
(0, True))
def test_exgaussian():
m, z = symbols("m, z")
s, l = symbols("s, l", positive=True)
X = ExGaussian("x", m, s, l)
assert density(X)(z) == l*exp(l*(l*s**2 + 2*m - 2*z)/2) *\
erfc(sqrt(2)*(l*s**2 + m - z)/(2*s))/2
# Note: actual_output simplifies to expected_output.
# Ideally cdf(X)(z) would return expected_output
# expected_output = (erf(sqrt(2)*(l*s**2 + m - z)/(2*s)) - 1)*exp(l*(l*s**2 + 2*m - 2*z)/2)/2 - erf(sqrt(2)*(m - z)/(2*s))/2 + S.Half
u = l*(z - m)
v = l*s
GaussianCDF1 = cdf(Normal('x', 0, v))(u)
GaussianCDF2 = cdf(Normal('x', v**2, v))(u)
actual_output = GaussianCDF1 - exp(-u + (v**2/2) + log(GaussianCDF2))
assert cdf(X)(z) == actual_output
# assert simplify(actual_output) == expected_output
assert variance(X).expand() == s**2 + l**(-2)
assert skewness(X).expand() == 2/(l**3*s**2*sqrt(s**2 + l**(-2)) + l *
sqrt(s**2 + l**(-2)))
def test_exponential():
rate = Symbol('lambda', positive=True)
X = Exponential('x', rate)
p = Symbol("p", positive=True, real=True, finite=True)
assert E(X) == 1/rate
assert variance(X) == 1/rate**2
assert skewness(X) == 2
assert skewness(X) == smoment(X, 3)
assert kurtosis(X) == 9
assert kurtosis(X) == smoment(X, 4)
assert smoment(2*X, 4) == smoment(X, 4)
assert moment(X, 3) == 3*2*1/rate**3
assert P(X > 0) is S.One
assert P(X > 1) == exp(-rate)
assert P(X > 10) == exp(-10*rate)
assert quantile(X)(p) == -log(1-p)/rate
assert where(X <= 1).set == Interval(0, 1)
Y = Exponential('y', 1)
assert median(Y) == FiniteSet(log(2))
#Test issue 9970
z = Dummy('z')
assert P(X > z) == exp(-z*rate)
assert P(X < z) == 0
#Test issue 10076 (Distribution with interval(0,oo))
x = Symbol('x')
_z = Dummy('_z')
b = SingleContinuousPSpace(x, ExponentialDistribution(2))
with ignore_warnings(UserWarning): ### TODO: Restore tests once warnings are removed
expected1 = Integral(2*exp(-2*_z), (_z, 3, oo))
assert b.probability(x > 3, evaluate=False).rewrite(Integral).dummy_eq(expected1)
expected2 = Integral(2*exp(-2*_z), (_z, 0, 4))
assert b.probability(x < 4, evaluate=False).rewrite(Integral).dummy_eq(expected2)
Y = Exponential('y', 2*rate)
assert coskewness(X, X, X) == skewness(X)
assert coskewness(X, Y + rate*X, Y + 2*rate*X) == \
4/(sqrt(1 + 1/(4*rate**2))*sqrt(4 + 1/(4*rate**2)))
assert coskewness(X + 2*Y, Y + X, Y + 2*X, X > 3) == \
sqrt(170)*Rational(9, 85)
def test_exponential_power():
mu = Symbol('mu')
z = Symbol('z')
alpha = Symbol('alpha', positive=True)
beta = Symbol('beta', positive=True)
X = ExponentialPower('x', mu, alpha, beta)
assert density(X)(z) == beta*exp(-(Abs(mu - z)/alpha)
** beta)/(2*alpha*gamma(1/beta))
assert cdf(X)(z) == S.Half + lowergamma(1/beta,
(Abs(mu - z)/alpha)**beta)*sign(-mu + z)/\
(2*gamma(1/beta))
def test_f_distribution():
d1 = Symbol("d1", positive=True)
d2 = Symbol("d2", positive=True)
X = FDistribution("x", d1, d2)
assert density(X)(x) == (d2**(d2/2)*sqrt((d1*x)**d1*(d1*x + d2)**(-d1 - d2))
/(x*beta(d1/2, d2/2)))
raises(NotImplementedError, lambda: moment_generating_function(X))
d1 = Symbol("d1", nonpositive=True)
raises(ValueError, lambda: FDistribution('x', d1, d1))
d1 = Symbol("d1", positive=True, integer=False)
raises(ValueError, lambda: FDistribution('x', d1, d1))
d1 = Symbol("d1", positive=True)
d2 = Symbol("d2", nonpositive=True)
raises(ValueError, lambda: FDistribution('x', d1, d2))
d2 = Symbol("d2", positive=True, integer=False)
raises(ValueError, lambda: FDistribution('x', d1, d2))
def test_fisher_z():
d1 = Symbol("d1", positive=True)
d2 = Symbol("d2", positive=True)
X = FisherZ("x", d1, d2)
assert density(X)(x) == (2*d1**(d1/2)*d2**(d2/2)*(d1*exp(2*x) + d2)
**(-d1/2 - d2/2)*exp(d1*x)/beta(d1/2, d2/2))
def test_frechet():
a = Symbol("a", positive=True)
s = Symbol("s", positive=True)
m = Symbol("m", real=True)
X = Frechet("x", a, s=s, m=m)
assert density(X)(x) == a*((x - m)/s)**(-a - 1)*exp(-((x - m)/s)**(-a))/s
assert cdf(X)(x) == Piecewise((exp(-((-m + x)/s)**(-a)), m <= x), (0, True))
@slow
def test_gamma():
k = Symbol("k", positive=True)
theta = Symbol("theta", positive=True)
X = Gamma('x', k, theta)
# Tests characteristic function
assert characteristic_function(X)(x) == ((-I*theta*x + 1)**(-k))
assert density(X)(x) == x**(k - 1)*theta**(-k)*exp(-x/theta)/gamma(k)
assert cdf(X, meijerg=True)(z) == Piecewise(
(-k*lowergamma(k, 0)/gamma(k + 1) +
k*lowergamma(k, z/theta)/gamma(k + 1), z >= 0),
(0, True))
# assert simplify(variance(X)) == k*theta**2 # handled numerically below
assert E(X) == moment(X, 1)
k, theta = symbols('k theta', positive=True)
X = Gamma('x', k, theta)
assert E(X) == k*theta
assert variance(X) == k*theta**2
assert skewness(X).expand() == 2/sqrt(k)
assert kurtosis(X).expand() == 3 + 6/k
Y = Gamma('y', 2*k, 3*theta)
assert coskewness(X, theta*X + Y, k*X + Y).simplify() == \
2*531441**(-k)*sqrt(k)*theta*(3*3**(12*k) - 2*531441**k) \
/(sqrt(k**2 + 18)*sqrt(theta**2 + 18))
def test_gamma_inverse():
a = Symbol("a", positive=True)
b = Symbol("b", positive=True)
X = GammaInverse("x", a, b)
assert density(X)(x) == x**(-a - 1)*b**a*exp(-b/x)/gamma(a)
assert cdf(X)(x) == Piecewise((uppergamma(a, b/x)/gamma(a), x > 0), (0, True))
assert characteristic_function(X)(x) == 2 * (-I*b*x)**(a/2) \
* besselk(a, 2*sqrt(b)*sqrt(-I*x))/gamma(a)
raises(NotImplementedError, lambda: moment_generating_function(X))
def test_gompertz():
b = Symbol("b", positive=True)
eta = Symbol("eta", positive=True)
X = Gompertz("x", b, eta)
assert density(X)(x) == b*eta*exp(eta)*exp(b*x)*exp(-eta*exp(b*x))
assert cdf(X)(x) == 1 - exp(eta)*exp(-eta*exp(b*x))
assert diff(cdf(X)(x), x) == density(X)(x)
def test_gumbel():
beta = Symbol("beta", positive=True)
mu = Symbol("mu")
x = Symbol("x")
y = Symbol("y")
X = Gumbel("x", beta, mu)
Y = Gumbel("y", beta, mu, minimum=True)
assert density(X)(x).expand() == \
exp(mu/beta)*exp(-x/beta)*exp(-exp(mu/beta)*exp(-x/beta))/beta
assert density(Y)(y).expand() == \
exp(-mu/beta)*exp(y/beta)*exp(-exp(-mu/beta)*exp(y/beta))/beta
assert cdf(X)(x).expand() == \
exp(-exp(mu/beta)*exp(-x/beta))
assert characteristic_function(X)(x) == exp(I*mu*x)*gamma(-I*beta*x + 1)
def test_kumaraswamy():
a = Symbol("a", positive=True)
b = Symbol("b", positive=True)
X = Kumaraswamy("x", a, b)
assert density(X)(x) == x**(a - 1)*a*b*(-x**a + 1)**(b - 1)
assert cdf(X)(x) == Piecewise((0, x < 0),
(-(-x**a + 1)**b + 1, x <= 1),
(1, True))
def test_laplace():
mu = Symbol("mu")
b = Symbol("b", positive=True)
X = Laplace('x', mu, b)
#Tests characteristic_function
assert characteristic_function(X)(x) == (exp(I*mu*x)/(b**2*x**2 + 1))
assert density(X)(x) == exp(-Abs(x - mu)/b)/(2*b)
assert cdf(X)(x) == Piecewise((exp((-mu + x)/b)/2, mu > x),
(-exp((mu - x)/b)/2 + 1, True))
X = Laplace('x', [1, 2], [[1, 0], [0, 1]])
assert isinstance(pspace(X).distribution, MultivariateLaplaceDistribution)
def test_levy():
mu = Symbol("mu", real=True)
c = Symbol("c", positive=True)
X = Levy('x', mu, c)
assert X.pspace.domain.set == Interval(mu, oo)
assert density(X)(x) == sqrt(c/(2*pi))*exp(-c/(2*(x - mu)))/((x - mu)**(S.One + S.Half))
assert cdf(X)(x) == erfc(sqrt(c/(2*(x - mu))))
raises(NotImplementedError, lambda: moment_generating_function(X))
mu = Symbol("mu", real=False)
raises(ValueError, lambda: Levy('x',mu,c))
c = Symbol("c", nonpositive=True)
raises(ValueError, lambda: Levy('x',mu,c))
mu = Symbol("mu", real=True)
raises(ValueError, lambda: Levy('x',mu,c))
def test_logcauchy():
mu = Symbol("mu", positive=True)
sigma = Symbol("sigma", positive=True)
X = LogCauchy("x", mu, sigma)
assert density(X)(x) == sigma/(x*pi*(sigma**2 + (-mu + log(x))**2))
assert cdf(X)(x) == atan((log(x) - mu)/sigma)/pi + S.Half
def test_logistic():
mu = Symbol("mu", real=True)
s = Symbol("s", positive=True)
p = Symbol("p", positive=True)
X = Logistic('x', mu, s)
#Tests characteristic_function
assert characteristic_function(X)(x) == \
(Piecewise((pi*s*x*exp(I*mu*x)/sinh(pi*s*x), Ne(x, 0)), (1, True)))
assert density(X)(x) == exp((-x + mu)/s)/(s*(exp((-x + mu)/s) + 1)**2)
assert cdf(X)(x) == 1/(exp((mu - x)/s) + 1)
assert quantile(X)(p) == mu - s*log(-S.One + 1/p)
def test_loglogistic():
a, b = symbols('a b')
assert LogLogistic('x', a, b)
a = Symbol('a', negative=True)
b = Symbol('b', positive=True)
raises(ValueError, lambda: LogLogistic('x', a, b))
a = Symbol('a', positive=True)
b = Symbol('b', negative=True)
raises(ValueError, lambda: LogLogistic('x', a, b))
a, b, z, p = symbols('a b z p', positive=True)
X = LogLogistic('x', a, b)
assert density(X)(z) == b*(z/a)**(b - 1)/(a*((z/a)**b + 1)**2)
assert cdf(X)(z) == 1/(1 + (z/a)**(-b))
assert quantile(X)(p) == a*(p/(1 - p))**(1/b)
# Expectation
assert E(X) == Piecewise((S.NaN, b <= 1), (pi*a/(b*sin(pi/b)), True))
b = symbols('b', prime=True) # b > 1
X = LogLogistic('x', a, b)
assert E(X) == pi*a/(b*sin(pi/b))
X = LogLogistic('x', 1, 2)
assert median(X) == FiniteSet(1)
def test_logitnormal():
mu = Symbol('mu', real=True)
s = Symbol('s', positive=True)
X = LogitNormal('x', mu, s)
x = Symbol('x')
assert density(X)(x) == sqrt(2)*exp(-(-mu + log(x/(1 - x)))**2/(2*s**2))/(2*sqrt(pi)*s*x*(1 - x))
assert cdf(X)(x) == erf(sqrt(2)*(-mu + log(x/(1 - x)))/(2*s))/2 + S(1)/2
def test_lognormal():
mean = Symbol('mu', real=True)
std = Symbol('sigma', positive=True)
X = LogNormal('x', mean, std)
# The sympy integrator can't do this too well
#assert E(X) == exp(mean+std**2/2)
#assert variance(X) == (exp(std**2)-1) * exp(2*mean + std**2)
# The sympy integrator can't do this too well
#assert E(X) ==
raises(NotImplementedError, lambda: moment_generating_function(X))
mu = Symbol("mu", real=True)
sigma = Symbol("sigma", positive=True)
X = LogNormal('x', mu, sigma)
assert density(X)(x) == (sqrt(2)*exp(-(-mu + log(x))**2
/(2*sigma**2))/(2*x*sqrt(pi)*sigma))
# Tests cdf
assert cdf(X)(x) == Piecewise(
(erf(sqrt(2)*(-mu + log(x))/(2*sigma))/2
+ S(1)/2, x > 0), (0, True))
X = LogNormal('x', 0, 1) # Mean 0, standard deviation 1
assert density(X)(x) == sqrt(2)*exp(-log(x)**2/2)/(2*x*sqrt(pi))
def test_Lomax():
a, l = symbols('a, l', negative=True)
raises(ValueError, lambda: Lomax('X', a, l))
a, l = symbols('a, l', real=False)
raises(ValueError, lambda: Lomax('X', a, l))
a, l = symbols('a, l', positive=True)
X = Lomax('X', a, l)
assert X.pspace.domain.set == Interval(0, oo)
assert density(X)(x) == a*(1 + x/l)**(-a - 1)/l
assert cdf(X)(x) == Piecewise((1 - (1 + x/l)**(-a), x >= 0), (0, True))
a = 3
X = Lomax('X', a, l)
assert E(X) == l/2
assert median(X) == FiniteSet(l*(-1 + 2**Rational(1, 3)))
assert variance(X) == 3*l**2/4
def test_maxwell():
a = Symbol("a", positive=True)
X = Maxwell('x', a)
assert density(X)(x) == (sqrt(2)*x**2*exp(-x**2/(2*a**2))/
(sqrt(pi)*a**3))
assert E(X) == 2*sqrt(2)*a/sqrt(pi)
assert variance(X) == -8*a**2/pi + 3*a**2
assert cdf(X)(x) == erf(sqrt(2)*x/(2*a)) - sqrt(2)*x*exp(-x**2/(2*a**2))/(sqrt(pi)*a)
assert diff(cdf(X)(x), x) == density(X)(x)
def test_Moyal():
mu = Symbol('mu',real=False)
sigma = Symbol('sigma', real=True, positive=True)
raises(ValueError, lambda: Moyal('M',mu, sigma))
mu = Symbol('mu', real=True)
sigma = Symbol('sigma', real=True, negative=True)
raises(ValueError, lambda: Moyal('M',mu, sigma))
sigma = Symbol('sigma', real=True, positive=True)
M = Moyal('M', mu, sigma)
assert density(M)(z) == sqrt(2)*exp(-exp((mu - z)/sigma)/2
- (-mu + z)/(2*sigma))/(2*sqrt(pi)*sigma)
assert cdf(M)(z).simplify() == 1 - erf(sqrt(2)*exp((mu - z)/(2*sigma))/2)
assert characteristic_function(M)(z) == 2**(-I*sigma*z)*exp(I*mu*z) \
*gamma(-I*sigma*z + Rational(1, 2))/sqrt(pi)
assert E(M) == mu + EulerGamma*sigma + sigma*log(2)
assert moment_generating_function(M)(z) == 2**(-sigma*z)*exp(mu*z) \
*gamma(-sigma*z + Rational(1, 2))/sqrt(pi)
def test_nakagami():
mu = Symbol("mu", positive=True)
omega = Symbol("omega", positive=True)
X = Nakagami('x', mu, omega)
assert density(X)(x) == (2*x**(2*mu - 1)*mu**mu*omega**(-mu)
*exp(-x**2*mu/omega)/gamma(mu))
assert simplify(E(X)) == (sqrt(mu)*sqrt(omega)
*gamma(mu + S.Half)/gamma(mu + 1))
assert simplify(variance(X)) == (
omega - omega*gamma(mu + S.Half)**2/(gamma(mu)*gamma(mu + 1)))
assert cdf(X)(x) == Piecewise(
(lowergamma(mu, mu*x**2/omega)/gamma(mu), x > 0),
(0, True))
X = Nakagami('x', 1, 1)
assert median(X) == FiniteSet(sqrt(log(2)))
def test_gaussian_inverse():
# test for symbolic parameters
a, b = symbols('a b')
assert GaussianInverse('x', a, b)
# Inverse Gaussian distribution is also known as Wald distribution
# `GaussianInverse` can also be referred by the name `Wald`
a, b, z = symbols('a b z')
X = Wald('x', a, b)
assert density(X)(z) == sqrt(2)*sqrt(b/z**3)*exp(-b*(-a + z)**2/(2*a**2*z))/(2*sqrt(pi))
a, b = symbols('a b', positive=True)
z = Symbol('z', positive=True)
X = GaussianInverse('x', a, b)
assert density(X)(z) == sqrt(2)*sqrt(b)*sqrt(z**(-3))*exp(-b*(-a + z)**2/(2*a**2*z))/(2*sqrt(pi))
assert E(X) == a
assert variance(X).expand() == a**3/b
assert cdf(X)(z) == (S.Half - erf(sqrt(2)*sqrt(b)*(1 + z/a)/(2*sqrt(z)))/2)*exp(2*b/a) +\
erf(sqrt(2)*sqrt(b)*(-1 + z/a)/(2*sqrt(z)))/2 + S.Half
a = symbols('a', nonpositive=True)
raises(ValueError, lambda: GaussianInverse('x', a, b))
a = symbols('a', positive=True)
b = symbols('b', nonpositive=True)
raises(ValueError, lambda: GaussianInverse('x', a, b))
def test_pareto():
xm, beta = symbols('xm beta', positive=True)
alpha = beta + 5
X = Pareto('x', xm, alpha)
dens = density(X)
#Tests cdf function
assert cdf(X)(x) == \
Piecewise((-x**(-beta - 5)*xm**(beta + 5) + 1, x >= xm), (0, True))
#Tests characteristic_function
assert characteristic_function(X)(x) == \
((-I*x*xm)**(beta + 5)*(beta + 5)*uppergamma(-beta - 5, -I*x*xm))
assert dens(x) == x**(-(alpha + 1))*xm**(alpha)*(alpha)
assert simplify(E(X)) == alpha*xm/(alpha-1)
# computation of taylor series for MGF still too slow
#assert simplify(variance(X)) == xm**2*alpha / ((alpha-1)**2*(alpha-2))
def test_pareto_numeric():
xm, beta = 3, 2
alpha = beta + 5
X = Pareto('x', xm, alpha)
assert E(X) == alpha*xm/S(alpha - 1)
assert variance(X) == xm**2*alpha / S((alpha - 1)**2*(alpha - 2))
assert median(X) == FiniteSet(3*2**Rational(1, 7))
# Skewness tests too slow. Try shortcutting function?
def test_PowerFunction():
alpha = Symbol("alpha", nonpositive=True)
a, b = symbols('a, b', real=True)
raises (ValueError, lambda: PowerFunction('x', alpha, a, b))
a, b = symbols('a, b', real=False)
raises (ValueError, lambda: PowerFunction('x', alpha, a, b))
alpha = Symbol("alpha", positive=True)
a, b = symbols('a, b', real=True)
raises (ValueError, lambda: PowerFunction('x', alpha, 5, 2))
X = PowerFunction('X', 2, a, b)
assert density(X)(z) == (-2*a + 2*z)/(-a + b)**2
assert cdf(X)(z) == Piecewise((a**2/(a**2 - 2*a*b + b**2) -
2*a*z/(a**2 - 2*a*b + b**2) + z**2/(a**2 - 2*a*b + b**2), a <= z), (0, True))
X = PowerFunction('X', 2, 0, 1)
assert density(X)(z) == 2*z
assert cdf(X)(z) == Piecewise((z**2, z >= 0), (0,True))
assert E(X) == Rational(2,3)
assert P(X < 0) == 0
assert P(X < 1) == 1
assert median(X) == FiniteSet(1/sqrt(2))
def test_raised_cosine():
mu = Symbol("mu", real=True)
s = Symbol("s", positive=True)
X = RaisedCosine("x", mu, s)
assert pspace(X).domain.set == Interval(mu - s, mu + s)
#Tests characteristic_function
assert characteristic_function(X)(x) == \
Piecewise((exp(-I*pi*mu/s)/2, Eq(x, -pi/s)), (exp(I*pi*mu/s)/2, Eq(x, pi/s)), (pi**2*exp(I*mu*x)*sin(s*x)/(s*x*(-s**2*x**2 + pi**2)), True))
assert density(X)(x) == (Piecewise(((cos(pi*(x - mu)/s) + 1)/(2*s),
And(x <= mu + s, mu - s <= x)), (0, True)))
def test_rayleigh():
sigma = Symbol("sigma", positive=True)
X = Rayleigh('x', sigma)
#Tests characteristic_function
assert characteristic_function(X)(x) == (-sqrt(2)*sqrt(pi)*sigma*x*(erfi(sqrt(2)*sigma*x/2) - I)*exp(-sigma**2*x**2/2)/2 + 1)
assert density(X)(x) == x*exp(-x**2/(2*sigma**2))/sigma**2
assert E(X) == sqrt(2)*sqrt(pi)*sigma/2
assert variance(X) == -pi*sigma**2/2 + 2*sigma**2
assert cdf(X)(x) == 1 - exp(-x**2/(2*sigma**2))
assert diff(cdf(X)(x), x) == density(X)(x)
def test_reciprocal():
a = Symbol("a", real=True)
b = Symbol("b", real=True)
X = Reciprocal('x', a, b)
assert density(X)(x) == 1/(x*(-log(a) + log(b)))
assert cdf(X)(x) == Piecewise((log(a)/(log(a) - log(b)) - log(x)/(log(a) - log(b)), a <= x), (0, True))
X = Reciprocal('x', 5, 30)
assert E(X) == 25/(log(30) - log(5))
assert P(X < 4) == S.Zero
assert P(X < 20) == log(20) / (log(30) - log(5)) - log(5) / (log(30) - log(5))
assert cdf(X)(10) == log(10) / (log(30) - log(5)) - log(5) / (log(30) - log(5))
a = symbols('a', nonpositive=True)
raises(ValueError, lambda: Reciprocal('x', a, b))
a = symbols('a', positive=True)
b = symbols('b', positive=True)
raises(ValueError, lambda: Reciprocal('x', a + b, a))
def test_shiftedgompertz():
b = Symbol("b", positive=True)
eta = Symbol("eta", positive=True)
X = ShiftedGompertz("x", b, eta)
assert density(X)(x) == b*(eta*(1 - exp(-b*x)) + 1)*exp(-b*x)*exp(-eta*exp(-b*x))
def test_studentt():
nu = Symbol("nu", positive=True)
X = StudentT('x', nu)
assert density(X)(x) == (1 + x**2/nu)**(-nu/2 - S.Half)/(sqrt(nu)*beta(S.Half, nu/2))
assert cdf(X)(x) == S.Half + x*gamma(nu/2 + S.Half)*hyper((S.Half, nu/2 + S.Half),
(Rational(3, 2),), -x**2/nu)/(sqrt(pi)*sqrt(nu)*gamma(nu/2))
raises(NotImplementedError, lambda: moment_generating_function(X))
def test_trapezoidal():
a = Symbol("a", real=True)
b = Symbol("b", real=True)
c = Symbol("c", real=True)
d = Symbol("d", real=True)
X = Trapezoidal('x', a, b, c, d)
assert density(X)(x) == Piecewise(((-2*a + 2*x)/((-a + b)*(-a - b + c + d)), (a <= x) & (x < b)),
(2/(-a - b + c + d), (b <= x) & (x < c)),
((2*d - 2*x)/((-c + d)*(-a - b + c + d)), (c <= x) & (x <= d)),
(0, True))
X = Trapezoidal('x', 0, 1, 2, 3)
assert E(X) == Rational(3, 2)
assert variance(X) == Rational(5, 12)
assert P(X < 2) == Rational(3, 4)
assert median(X) == FiniteSet(Rational(3, 2))
def test_triangular():
a = Symbol("a")
b = Symbol("b")
c = Symbol("c")
X = Triangular('x', a, b, c)
assert pspace(X).domain.set == Interval(a, b)
assert str(density(X)(x)) == ("Piecewise(((-2*a + 2*x)/((-a + b)*(-a + c)), (a <= x) & (c > x)), "
"(2/(-a + b), Eq(c, x)), ((2*b - 2*x)/((-a + b)*(b - c)), (b >= x) & (c < x)), (0, True))")
#Tests moment_generating_function
assert moment_generating_function(X)(x).expand() == \
((-2*(-a + b)*exp(c*x) + 2*(-a + c)*exp(b*x) + 2*(b - c)*exp(a*x))/(x**2*(-a + b)*(-a + c)*(b - c))).expand()
assert str(characteristic_function(X)(x)) == \
'(2*(-a + b)*exp(I*c*x) - 2*(-a + c)*exp(I*b*x) - 2*(b - c)*exp(I*a*x))/(x**2*(-a + b)*(-a + c)*(b - c))'
def test_quadratic_u():
a = Symbol("a", real=True)
b = Symbol("b", real=True)
X = QuadraticU("x", a, b)
Y = QuadraticU("x", 1, 2)
assert pspace(X).domain.set == Interval(a, b)
# Tests _moment_generating_function
assert moment_generating_function(Y)(1) == -15*exp(2) + 27*exp(1)
assert moment_generating_function(Y)(2) == -9*exp(4)/2 + 21*exp(2)/2
assert characteristic_function(Y)(1) == 3*I*(-1 + 4*I)*exp(I*exp(2*I))
assert density(X)(x) == (Piecewise((12*(x - a/2 - b/2)**2/(-a + b)**3,
And(x <= b, a <= x)), (0, True)))
def test_uniform():
l = Symbol('l', real=True)
w = Symbol('w', positive=True)
X = Uniform('x', l, l + w)
assert E(X) == l + w/2
assert variance(X).expand() == w**2/12
# With numbers all is well
X = Uniform('x', 3, 5)
assert P(X < 3) == 0 and P(X > 5) == 0
assert P(X < 4) == P(X > 4) == S.Half
assert median(X) == FiniteSet(4)
z = Symbol('z')
p = density(X)(z)
assert p.subs(z, 3.7) == S.Half
assert p.subs(z, -1) == 0
assert p.subs(z, 6) == 0
c = cdf(X)
assert c(2) == 0 and c(3) == 0
assert c(Rational(7, 2)) == Rational(1, 4)
assert c(5) == 1 and c(6) == 1
@XFAIL
def test_uniform_P():
""" This stopped working because SingleContinuousPSpace.compute_density no
longer calls integrate on a DiracDelta but rather just solves directly.
integrate used to call UniformDistribution.expectation, which special-cased
and subbed out the Min and Max terms that Uniform produces.
I decided to regress on this class for the general cleanliness (and, I suspect,
speed) of the algorithm.
"""
l = Symbol('l', real=True)
w = Symbol('w', positive=True)
X = Uniform('x', l, l + w)
assert P(X < l) == 0 and P(X > l + w) == 0
def test_uniformsum():
n = Symbol("n", integer=True)
_k = Dummy("k")
x = Symbol("x")
X = UniformSum('x', n)
res = Sum((-1)**_k*(-_k + x)**(n - 1)*binomial(n, _k), (_k, 0, floor(x)))/factorial(n - 1)
assert density(X)(x).dummy_eq(res)
#Tests set functions
assert X.pspace.domain.set == Interval(0, n)
#Tests the characteristic_function
assert characteristic_function(X)(x) == (-I*(exp(I*x) - 1)/x)**n
#Tests the moment_generating_function
assert moment_generating_function(X)(x) == ((exp(x) - 1)/x)**n
def test_von_mises():
mu = Symbol("mu")
k = Symbol("k", positive=True)
X = VonMises("x", mu, k)
assert density(X)(x) == exp(k*cos(x - mu))/(2*pi*besseli(0, k))
def test_weibull():
a, b = symbols('a b', positive=True)
# FIXME: simplify(E(X)) seems to hang without extended_positive=True
# On a Linux machine this had a rapid memory leak...
# a, b = symbols('a b', positive=True)
X = Weibull('x', a, b)
assert E(X).expand() == a * gamma(1 + 1/b)
assert variance(X).expand() == (a**2 * gamma(1 + 2/b) - E(X)**2).expand()
assert simplify(skewness(X)) == (2*gamma(1 + 1/b)**3 - 3*gamma(1 + 1/b)*gamma(1 + 2/b) + gamma(1 + 3/b))/(-gamma(1 + 1/b)**2 + gamma(1 + 2/b))**Rational(3, 2)
assert simplify(kurtosis(X)) == (-3*gamma(1 + 1/b)**4 +\
6*gamma(1 + 1/b)**2*gamma(1 + 2/b) - 4*gamma(1 + 1/b)*gamma(1 + 3/b) + gamma(1 + 4/b))/(gamma(1 + 1/b)**2 - gamma(1 + 2/b))**2
def test_weibull_numeric():
# Test for integers and rationals
a = 1
bvals = [S.Half, 1, Rational(3, 2), 5]
for b in bvals:
X = Weibull('x', a, b)
assert simplify(E(X)) == expand_func(a * gamma(1 + 1/S(b)))
assert simplify(variance(X)) == simplify(
a**2 * gamma(1 + 2/S(b)) - E(X)**2)
# Not testing Skew... it's slow with int/frac values > 3/2
def test_wignersemicircle():
R = Symbol("R", positive=True)
X = WignerSemicircle('x', R)
assert pspace(X).domain.set == Interval(-R, R)
assert density(X)(x) == 2*sqrt(-x**2 + R**2)/(pi*R**2)
assert E(X) == 0
#Tests characteristic_function
assert characteristic_function(X)(x) == \
Piecewise((2*besselj(1, R*x)/(R*x), Ne(x, 0)), (1, True))
def test_input_value_assertions():
a, b = symbols('a b')
p, q = symbols('p q', positive=True)
m, n = symbols('m n', positive=False, real=True)
raises(ValueError, lambda: Normal('x', 3, 0))
raises(ValueError, lambda: Normal('x', m, n))
Normal('X', a, p) # No error raised
raises(ValueError, lambda: Exponential('x', m))
Exponential('Ex', p) # No error raised
for fn in [Pareto, Weibull, Beta, Gamma]:
raises(ValueError, lambda: fn('x', m, p))
raises(ValueError, lambda: fn('x', p, n))
fn('x', p, q) # No error raised
def test_unevaluated():
X = Normal('x', 0, 1)
k = Dummy('k')
expr1 = Integral(sqrt(2)*k*exp(-k**2/2)/(2*sqrt(pi)), (k, -oo, oo))
expr2 = Integral(sqrt(2)*exp(-k**2/2)/(2*sqrt(pi)), (k, 0, oo))
with ignore_warnings(UserWarning): ### TODO: Restore tests once warnings are removed
assert E(X, evaluate=False).rewrite(Integral).dummy_eq(expr1)
assert E(X + 1, evaluate=False).rewrite(Integral).dummy_eq(expr1 + 1)
assert P(X > 0, evaluate=False).rewrite(Integral).dummy_eq(expr2)
assert P(X > 0, X**2 < 1) == S.Half
def test_probability_unevaluated():
T = Normal('T', 30, 3)
with ignore_warnings(UserWarning): ### TODO: Restore tests once warnings are removed
assert type(P(T > 33, evaluate=False)) == Probability
def test_density_unevaluated():
X = Normal('X', 0, 1)
Y = Normal('Y', 0, 2)
assert isinstance(density(X+Y, evaluate=False)(z), Integral)
def test_NormalDistribution():
nd = NormalDistribution(0, 1)
x = Symbol('x')
assert nd.cdf(x) == erf(sqrt(2)*x/2)/2 + S.Half
assert nd.expectation(1, x) == 1
assert nd.expectation(x, x) == 0
assert nd.expectation(x**2, x) == 1
#Test issue 10076
a = SingleContinuousPSpace(x, NormalDistribution(2, 4))
_z = Dummy('_z')
expected1 = Integral(sqrt(2)*exp(-(_z - 2)**2/32)/(8*sqrt(pi)),(_z, -oo, 1))
assert a.probability(x < 1, evaluate=False).dummy_eq(expected1) is True
expected2 = Integral(sqrt(2)*exp(-(_z - 2)**2/32)/(8*sqrt(pi)),(_z, 1, oo))
assert a.probability(x > 1, evaluate=False).dummy_eq(expected2) is True
b = SingleContinuousPSpace(x, NormalDistribution(1, 9))
expected3 = Integral(sqrt(2)*exp(-(_z - 1)**2/162)/(18*sqrt(pi)),(_z, 6, oo))
assert b.probability(x > 6, evaluate=False).dummy_eq(expected3) is True
expected4 = Integral(sqrt(2)*exp(-(_z - 1)**2/162)/(18*sqrt(pi)),(_z, -oo, 6))
assert b.probability(x < 6, evaluate=False).dummy_eq(expected4) is True
def test_random_parameters():
mu = Normal('mu', 2, 3)
meas = Normal('T', mu, 1)
assert density(meas, evaluate=False)(z)
assert isinstance(pspace(meas), CompoundPSpace)
X = Normal('x', [1, 2], [[1, 0], [0, 1]])
assert isinstance(pspace(X).distribution, MultivariateNormalDistribution)
assert density(meas)(z).simplify() == sqrt(5)*exp(-z**2/20 + z/5 - S(1)/5)/(10*sqrt(pi))
def test_random_parameters_given():
mu = Normal('mu', 2, 3)
meas = Normal('T', mu, 1)
assert given(meas, Eq(mu, 5)) == Normal('T', 5, 1)
def test_conjugate_priors():
mu = Normal('mu', 2, 3)
x = Normal('x', mu, 1)
assert isinstance(simplify(density(mu, Eq(x, y), evaluate=False)(z)),
Mul)
def test_difficult_univariate():
""" Since using solve in place of deltaintegrate we're able to perform
substantially more complex density computations on single continuous random
variables """
x = Normal('x', 0, 1)
assert density(x**3)
assert density(exp(x**2))
assert density(log(x))
def test_issue_10003():
X = Exponential('x', 3)
G = Gamma('g', 1, 2)
assert P(X < -1) is S.Zero
assert P(G < -1) is S.Zero
@slow
def test_precomputed_cdf():
x = symbols("x", real=True)
mu = symbols("mu", real=True)
sigma, xm, alpha = symbols("sigma xm alpha", positive=True)
n = symbols("n", integer=True, positive=True)
distribs = [
Normal("X", mu, sigma),
Pareto("P", xm, alpha),
ChiSquared("C", n),
Exponential("E", sigma),
# LogNormal("L", mu, sigma),
]
for X in distribs:
compdiff = cdf(X)(x) - simplify(X.pspace.density.compute_cdf()(x))
compdiff = simplify(compdiff.rewrite(erfc))
assert compdiff == 0
@slow
def test_precomputed_characteristic_functions():
import mpmath
def test_cf(dist, support_lower_limit, support_upper_limit):
pdf = density(dist)
t = Symbol('t')
# first function is the hardcoded CF of the distribution
cf1 = lambdify([t], characteristic_function(dist)(t), 'mpmath')
# second function is the Fourier transform of the density function
f = lambdify([x, t], pdf(x)*exp(I*x*t), 'mpmath')
cf2 = lambda t: mpmath.quad(lambda x: f(x, t), [support_lower_limit, support_upper_limit], maxdegree=10)
# compare the two functions at various points
for test_point in [2, 5, 8, 11]:
n1 = cf1(test_point)
n2 = cf2(test_point)
assert abs(re(n1) - re(n2)) < 1e-12
assert abs(im(n1) - im(n2)) < 1e-12
test_cf(Beta('b', 1, 2), 0, 1)
test_cf(Chi('c', 3), 0, mpmath.inf)
test_cf(ChiSquared('c', 2), 0, mpmath.inf)
test_cf(Exponential('e', 6), 0, mpmath.inf)
test_cf(Logistic('l', 1, 2), -mpmath.inf, mpmath.inf)
test_cf(Normal('n', -1, 5), -mpmath.inf, mpmath.inf)
test_cf(RaisedCosine('r', 3, 1), 2, 4)
test_cf(Rayleigh('r', 0.5), 0, mpmath.inf)
test_cf(Uniform('u', -1, 1), -1, 1)
test_cf(WignerSemicircle('w', 3), -3, 3)
def test_long_precomputed_cdf():
x = symbols("x", real=True)
distribs = [
Arcsin("A", -5, 9),
Dagum("D", 4, 10, 3),
Erlang("E", 14, 5),
Frechet("F", 2, 6, -3),
Gamma("G", 2, 7),
GammaInverse("GI", 3, 5),
Kumaraswamy("K", 6, 8),
Laplace("LA", -5, 4),
Logistic("L", -6, 7),
Nakagami("N", 2, 7),
StudentT("S", 4)
]
for distr in distribs:
for _ in range(5):
assert tn(diff(cdf(distr)(x), x), density(distr)(x), x, a=0, b=0, c=1, d=0)
US = UniformSum("US", 5)
pdf01 = density(US)(x).subs(floor(x), 0).doit() # pdf on (0, 1)
cdf01 = cdf(US, evaluate=False)(x).subs(floor(x), 0).doit() # cdf on (0, 1)
assert tn(diff(cdf01, x), pdf01, x, a=0, b=0, c=1, d=0)
def test_issue_13324():
X = Uniform('X', 0, 1)
assert E(X, X > S.Half) == Rational(3, 4)
assert E(X, X > 0) == S.Half
def test_issue_20756():
X = Uniform('X', -1, +1)
Y = Uniform('Y', -1, +1)
assert E(X * Y) == S.Zero
assert E(X * ((Y + 1) - 1)) == S.Zero
assert E(Y * (X*(X + 1) - X*X)) == S.Zero
def test_FiniteSet_prob():
E = Exponential('E', 3)
N = Normal('N', 5, 7)
assert P(Eq(E, 1)) is S.Zero
assert P(Eq(N, 2)) is S.Zero
assert P(Eq(N, x)) is S.Zero
def test_prob_neq():
E = Exponential('E', 4)
X = ChiSquared('X', 4)
assert P(Ne(E, 2)) == 1
assert P(Ne(X, 4)) == 1
assert P(Ne(X, 4)) == 1
assert P(Ne(X, 5)) == 1
assert P(Ne(E, x)) == 1
def test_union():
N = Normal('N', 3, 2)
assert simplify(P(N**2 - N > 2)) == \
-erf(sqrt(2))/2 - erfc(sqrt(2)/4)/2 + Rational(3, 2)
assert simplify(P(N**2 - 4 > 0)) == \
-erf(5*sqrt(2)/4)/2 - erfc(sqrt(2)/4)/2 + Rational(3, 2)
def test_Or():
N = Normal('N', 0, 1)
assert simplify(P(Or(N > 2, N < 1))) == \
-erf(sqrt(2))/2 - erfc(sqrt(2)/2)/2 + Rational(3, 2)
assert P(Or(N < 0, N < 1)) == P(N < 1)
assert P(Or(N > 0, N < 0)) == 1
def test_conditional_eq():
E = Exponential('E', 1)
assert P(Eq(E, 1), Eq(E, 1)) == 1
assert P(Eq(E, 1), Eq(E, 2)) == 0
assert P(E > 1, Eq(E, 2)) == 1
assert P(E < 1, Eq(E, 2)) == 0
def test_ContinuousDistributionHandmade():
x = Symbol('x')
z = Dummy('z')
dens = Lambda(x, Piecewise((S.Half, (0<=x)&(x<1)), (0, (x>=1)&(x<2)),
(S.Half, (x>=2)&(x<3)), (0, True)))
dens = ContinuousDistributionHandmade(dens, set=Interval(0, 3))
space = SingleContinuousPSpace(z, dens)
assert dens.pdf == Lambda(x, Piecewise((1/2, (x >= 0) & (x < 1)),
(0, (x >= 1) & (x < 2)), (1/2, (x >= 2) & (x < 3)), (0, True)))
assert median(space.value) == Interval(1, 2)
assert E(space.value) == Rational(3, 2)
assert variance(space.value) == Rational(13, 12)
def test_issue_16318():
# test compute_expectation function of the SingleContinuousDomain
N = SingleContinuousDomain(x, Interval(0, 1))
raises(ValueError, lambda: SingleContinuousDomain.compute_expectation(N, x+1, {x, y}))
|
dbddbb7216c74d3840df82a2fa38b7d7d413eda8d0d6e782eb8d0c81afb5f190 | from sympy.concrete.summations import Sum
from sympy.core.numbers import (oo, pi)
from sympy.core.relational import Eq
from sympy.core.singleton import S
from sympy.core.symbol import symbols
from sympy.functions.combinatorial.factorials import factorial
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.special.beta_functions import beta
from sympy.functions.special.error_functions import erf
from sympy.functions.special.gamma_functions import gamma
from sympy.integrals.integrals import Integral
from sympy.sets.sets import Interval
from sympy.stats import (Normal, P, E, density, Gamma, Poisson, Rayleigh,
variance, Bernoulli, Beta, Uniform, cdf)
from sympy.stats.compound_rv import CompoundDistribution, CompoundPSpace
from sympy.stats.crv_types import NormalDistribution
from sympy.stats.drv_types import PoissonDistribution
from sympy.stats.frv_types import BernoulliDistribution
from sympy.testing.pytest import raises, ignore_warnings
from sympy.stats.joint_rv_types import MultivariateNormalDistribution
from sympy.abc import x
# helpers for testing troublesome unevaluated expressions
flat = lambda s: ''.join(str(s).split())
streq = lambda *a: len(set(map(flat, a))) == 1
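# flat/streq compare expressions by their whitespace-stripped string form,
# which sidesteps structural comparison of large unevaluated Integrals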
assert streq(x, x)
assert streq(x, 'x')
assert not streq(x, x + 1)
def test_normal_CompoundDist():
X = Normal('X', 1, 2)
Y = Normal('X', X, 4)
assert density(Y)(x).simplify() == sqrt(10)*exp(-x**2/40 + x/20 - S(1)/40)/(20*sqrt(pi))
assert E(Y) == 1 # it is always equal to the mean of X
assert P(Y > 1) == S(1)/2 # as 1 is the mean
assert P(Y > 5).simplify() == S(1)/2 - erf(sqrt(10)/5)/2
assert variance(Y) == variance(X) + 4**2 # 2**2 + 4**2
# https://math.stackexchange.com/questions/1484451/
# (Contains proof of E and variance computation)
def test_poisson_CompoundDist():
k, t, y = symbols('k t y', positive=True, real=True)
G = Gamma('G', k, t)
D = Poisson('P', G)
assert density(D)(y).simplify() == t**y*(t + 1)**(-k - y)*gamma(k + y)/(gamma(k)*gamma(y + 1))
# https://en.wikipedia.org/wiki/Negative_binomial_distribution#Gamma%E2%80%93Poisson_mixture
assert E(D).simplify() == k*t # mean of NegativeBinomialDistribution
def test_bernoulli_CompoundDist():
X = Beta('X', 1, 2)
Y = Bernoulli('Y', X)
assert density(Y).dict == {0: S(2)/3, 1: S(1)/3}
assert E(Y) == P(Eq(Y, 1)) == S(1)/3
assert variance(Y) == S(2)/9
assert cdf(Y) == {0: S(2)/3, 1: 1}
# test issue 8128
a = Bernoulli('a', S(1)/2)
b = Bernoulli('b', a)
assert density(b).dict == {0: S(1)/2, 1: S(1)/2}
assert P(b > 0.5) == S(1)/2
X = Uniform('X', 0, 1)
Y = Bernoulli('Y', X)
assert E(Y) == S(1)/2
assert P(Eq(Y, 1)) == E(Y)
def test_unevaluated_CompoundDist():
# these tests should be removed once the expressions evaluate fully; they are
# currently not evaluated completely in sympy.
R = Rayleigh('R', 4)
X = Normal('X', 3, R)
ans = '''
Piecewise(((-sqrt(pi)*sinh(x/4 - 3/4) + sqrt(pi)*cosh(x/4 - 3/4))/(
8*sqrt(pi)), Abs(arg(x - 3)) <= pi/4), (Integral(sqrt(2)*exp(-(x - 3)
**2/(2*R**2))*exp(-R**2/32)/(32*sqrt(pi)), (R, 0, oo)), True))'''
assert streq(density(X)(x), ans)
expre = '''
Integral(X*Integral(sqrt(2)*exp(-(X-3)**2/(2*R**2))*exp(-R**2/32)/(32*
sqrt(pi)),(R,0,oo)),(X,-oo,oo))'''
with ignore_warnings(UserWarning): ### TODO: Restore tests once warnings are removed
assert streq(E(X, evaluate=False).rewrite(Integral), expre)
X = Poisson('X', 1)
Y = Poisson('Y', X)
Z = Poisson('Z', Y)
exprd = Sum(exp(-Y)*Y**x*Sum(exp(-1)*exp(-X)*X**Y/(factorial(X)*factorial(Y)
), (X, 0, oo))/factorial(x), (Y, 0, oo))
assert density(Z)(x) == exprd
N = Normal('N', 1, 2)
M = Normal('M', 3, 4)
D = Normal('D', M, N)
exprd = '''
Integral(sqrt(2)*exp(-(N-1)**2/8)*Integral(exp(-(x-M)**2/(2*N**2))*exp
(-(M-3)**2/32)/(8*pi*N),(M,-oo,oo))/(4*sqrt(pi)),(N,-oo,oo))'''
assert streq(density(D, evaluate=False)(x), exprd)
def test_Compound_Distribution():
X = Normal('X', 2, 4)
N = NormalDistribution(X, 4)
C = CompoundDistribution(N)
assert C.is_Continuous
assert C.set == Interval(-oo, oo)
assert C.pdf(x, evaluate=True).simplify() == exp(-x**2/64 + x/16 - S(1)/16)/(8*sqrt(pi))
assert not isinstance(CompoundDistribution(NormalDistribution(2, 3)),
CompoundDistribution)
M = MultivariateNormalDistribution([1, 2], [[2, 1], [1, 2]])
raises(NotImplementedError, lambda: CompoundDistribution(M))
X = Beta('X', 2, 4)
B = BernoulliDistribution(X, 1, 0)
C = CompoundDistribution(B)
assert C.is_Finite
assert C.set == {0, 1}
y = symbols('y', negative=False, integer=True)
assert C.pdf(y, evaluate=True) == Piecewise((S(1)/(30*beta(2, 4)), Eq(y, 0)),
(S(1)/(60*beta(2, 4)), Eq(y, 1)), (0, True))
k, t, z = symbols('k t z', positive=True, real=True)
G = Gamma('G', k, t)
X = PoissonDistribution(G)
C = CompoundDistribution(X)
assert C.is_Discrete
assert C.set == S.Naturals0
assert C.pdf(z, evaluate=True).simplify() == t**z*(t + 1)**(-k - z)*gamma(k \
+ z)/(gamma(k)*gamma(z + 1))
def test_compound_pspace():
X = Normal('X', 2, 4)
Y = Normal('Y', 3, 6)
assert not isinstance(Y.pspace, CompoundPSpace)
N = NormalDistribution(1, 2)
D = PoissonDistribution(3)
B = BernoulliDistribution(0.2, 1, 0)
pspace1 = CompoundPSpace('N', N)
pspace2 = CompoundPSpace('D', D)
pspace3 = CompoundPSpace('B', B)
assert not isinstance(pspace1, CompoundPSpace)
assert not isinstance(pspace2, CompoundPSpace)
assert not isinstance(pspace3, CompoundPSpace)
M = MultivariateNormalDistribution([1, 2], [[2, 1], [1, 2]])
raises(ValueError, lambda: CompoundPSpace('M', M))
Y = Normal('Y', X, 6)
assert isinstance(Y.pspace, CompoundPSpace)
assert Y.pspace.distribution == CompoundDistribution(NormalDistribution(X, 6))
assert Y.pspace.domain.set == Interval(-oo, oo)
|
15ee3ff414c471ed0c412c7a5ad85af57b90f1890117dd0793c7e946ebb4541d | from functools import singledispatch
from sympy.external import import_module
from sympy.stats.crv_types import BetaDistribution, ChiSquaredDistribution, ExponentialDistribution, GammaDistribution, \
LogNormalDistribution, NormalDistribution, ParetoDistribution, UniformDistribution
from sympy.stats.drv_types import GeometricDistribution, PoissonDistribution, ZetaDistribution
from sympy.stats.frv_types import BinomialDistribution
numpy = import_module('numpy')
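# Dispatcher for NumPy-backed sampling: each supported distribution registers a
# handler below; the default implementation returns None, which callers treat as
# "sampling not implemented for this distribution with NumPy".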
@singledispatch
def do_sample_numpy(dist, size, rand_state):
return None
# CRV:
@do_sample_numpy.register(BetaDistribution) # type: ignore
def _(dist: BetaDistribution, size, rand_state):
return rand_state.beta(a=float(dist.alpha), b=float(dist.beta), size=size)
@do_sample_numpy.register(ChiSquaredDistribution) # type: ignore
def _(dist: ChiSquaredDistribution, size, rand_state):
return rand_state.chisquare(df=float(dist.k), size=size)
@do_sample_numpy.register(ExponentialDistribution) # type: ignore
def _(dist: ExponentialDistribution, size, rand_state):
return rand_state.exponential(1 / float(dist.rate), size=size)
@do_sample_numpy.register(GammaDistribution) # type: ignore
def _(dist: GammaDistribution, size, rand_state):
return rand_state.gamma(float(dist.k), float(dist.theta), size=size)
@do_sample_numpy.register(LogNormalDistribution) # type: ignore
def _(dist: LogNormalDistribution, size, rand_state):
return rand_state.lognormal(float(dist.mean), float(dist.std), size=size)
@do_sample_numpy.register(NormalDistribution) # type: ignore
def _(dist: NormalDistribution, size, rand_state):
return rand_state.normal(float(dist.mean), float(dist.std), size=size)
@do_sample_numpy.register(ParetoDistribution) # type: ignore
def _(dist: ParetoDistribution, size, rand_state):
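# shift-and-scale the standard Pareto sample to scale xm; note this uses the
# global numpy.random generator rather than the supplied rand_state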
return (numpy.random.pareto(a=float(dist.alpha), size=size) + 1) * float(dist.xm)
@do_sample_numpy.register(UniformDistribution) # type: ignore
def _(dist: UniformDistribution, size, rand_state):
return rand_state.uniform(low=float(dist.left), high=float(dist.right), size=size)
# DRV:
@do_sample_numpy.register(GeometricDistribution) # type: ignore
def _(dist: GeometricDistribution, size, rand_state):
return rand_state.geometric(p=float(dist.p), size=size)
@do_sample_numpy.register(PoissonDistribution) # type: ignore
def _(dist: PoissonDistribution, size, rand_state):
return rand_state.poisson(lam=float(dist.lamda), size=size)
@do_sample_numpy.register(ZetaDistribution) # type: ignore
def _(dist: ZetaDistribution, size, rand_state):
return rand_state.zipf(a=float(dist.s), size=size)
# FRV:
@do_sample_numpy.register(BinomialDistribution) # type: ignore
def _(dist: BinomialDistribution, size, rand_state):
return rand_state.binomial(n=int(dist.n), p=float(dist.p), size=size)
|
659f3b8ee42499af841cfa1142da2ea31874b853e5e14f9193e680c0208d79b6 | from functools import singledispatch
from sympy.core.symbol import Dummy
from sympy.functions.elementary.exponential import exp
from sympy.utilities.lambdify import lambdify
from sympy.external import import_module
from sympy.stats import DiscreteDistributionHandmade
from sympy.stats.crv import SingleContinuousDistribution
from sympy.stats.crv_types import ChiSquaredDistribution, ExponentialDistribution, GammaDistribution, \
LogNormalDistribution, NormalDistribution, ParetoDistribution, UniformDistribution, BetaDistribution, \
StudentTDistribution, CauchyDistribution
from sympy.stats.drv_types import GeometricDistribution, LogarithmicDistribution, NegativeBinomialDistribution, \
PoissonDistribution, SkellamDistribution, YuleSimonDistribution, ZetaDistribution
from sympy.stats.frv import SingleFiniteDistribution
scipy = import_module("scipy", import_kwargs={'fromlist':['stats']})
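# Dispatcher for SciPy-backed sampling: distribution-specific handlers are
# registered below, plus a generic handler for SingleContinuousDistribution that
# builds a custom rv_continuous from the symbolic pdf; the default returns None
# when no handler applies.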
@singledispatch
def do_sample_scipy(dist, size, seed):
return None
# CRV
@do_sample_scipy.register(SingleContinuousDistribution) # type: ignore
def _(dist: SingleContinuousDistribution, size, seed):
# generic fallback: build a handmade pdf with lambdify when no specialised
# scipy distribution is registered for this type
import scipy.stats
z = Dummy('z')
handmade_pdf = lambdify(z, dist.pdf(z), ['numpy', 'scipy'])
class scipy_pdf(scipy.stats.rv_continuous):
def _pdf(dist, x):
return handmade_pdf(x)
scipy_rv = scipy_pdf(a=float(dist.set._inf),
b=float(dist.set._sup), name='scipy_pdf')
return scipy_rv.rvs(size=size, random_state=seed)
@do_sample_scipy.register(ChiSquaredDistribution) # type: ignore
def _(dist: ChiSquaredDistribution, size, seed):
# same parametrisation
return scipy.stats.chi2.rvs(df=float(dist.k), size=size, random_state=seed)
@do_sample_scipy.register(ExponentialDistribution) # type: ignore
def _(dist: ExponentialDistribution, size, seed):
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.expon.html#scipy.stats.expon
return scipy.stats.expon.rvs(scale=1 / float(dist.rate), size=size, random_state=seed)
@do_sample_scipy.register(GammaDistribution) # type: ignore
def _(dist: GammaDistribution, size, seed):
# https://stackoverflow.com/questions/42150965/how-to-plot-gamma-distribution-with-alpha-and-beta-parameters-in-python
return scipy.stats.gamma.rvs(a=float(dist.k), scale=float(dist.theta), size=size, random_state=seed)
@do_sample_scipy.register(LogNormalDistribution) # type: ignore
def _(dist: LogNormalDistribution, size, seed):
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.lognorm.html
return scipy.stats.lognorm.rvs(scale=float(exp(dist.mean)), s=float(dist.std), size=size, random_state=seed)
@do_sample_scipy.register(NormalDistribution) # type: ignore
def _(dist: NormalDistribution, size, seed):
return scipy.stats.norm.rvs(loc=float(dist.mean), scale=float(dist.std), size=size, random_state=seed)
@do_sample_scipy.register(ParetoDistribution) # type: ignore
def _(dist: ParetoDistribution, size, seed):
# https://stackoverflow.com/questions/42260519/defining-pareto-distribution-in-python-scipy
return scipy.stats.pareto.rvs(b=float(dist.alpha), scale=float(dist.xm), size=size, random_state=seed)
@do_sample_scipy.register(StudentTDistribution) # type: ignore
def _(dist: StudentTDistribution, size, seed):
return scipy.stats.t.rvs(df=float(dist.nu), size=size, random_state=seed)
@do_sample_scipy.register(UniformDistribution) # type: ignore
def _(dist: UniformDistribution, size, seed):
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.uniform.html
return scipy.stats.uniform.rvs(loc=float(dist.left), scale=float(dist.right - dist.left), size=size, random_state=seed)
@do_sample_scipy.register(BetaDistribution) # type: ignore
def _(dist: BetaDistribution, size, seed):
# same parametrisation
return scipy.stats.beta.rvs(a=float(dist.alpha), b=float(dist.beta), size=size, random_state=seed)
@do_sample_scipy.register(CauchyDistribution) # type: ignore
def _(dist: CauchyDistribution, size, seed):
return scipy.stats.cauchy.rvs(loc=float(dist.x0), scale=float(dist.gamma), size=size, random_state=seed)
# DRV:
@do_sample_scipy.register(DiscreteDistributionHandmade) # type: ignore
def _(dist: DiscreteDistributionHandmade, size, seed):
from scipy.stats import rv_discrete
z = Dummy('z')
handmade_pmf = lambdify(z, dist.pdf(z), ['numpy', 'scipy'])
class scipy_pmf(rv_discrete):
        def _pmf(self, x):
return handmade_pmf(x)
scipy_rv = scipy_pmf(a=float(dist.set._inf), b=float(dist.set._sup),
name='scipy_pmf')
return scipy_rv.rvs(size=size, random_state=seed)
@do_sample_scipy.register(GeometricDistribution) # type: ignore
def _(dist: GeometricDistribution, size, seed):
return scipy.stats.geom.rvs(p=float(dist.p), size=size, random_state=seed)
@do_sample_scipy.register(LogarithmicDistribution) # type: ignore
def _(dist: LogarithmicDistribution, size, seed):
return scipy.stats.logser.rvs(p=float(dist.p), size=size, random_state=seed)
@do_sample_scipy.register(NegativeBinomialDistribution) # type: ignore
def _(dist: NegativeBinomialDistribution, size, seed):
return scipy.stats.nbinom.rvs(n=float(dist.r), p=float(dist.p), size=size, random_state=seed)
@do_sample_scipy.register(PoissonDistribution) # type: ignore
def _(dist: PoissonDistribution, size, seed):
return scipy.stats.poisson.rvs(mu=float(dist.lamda), size=size, random_state=seed)
@do_sample_scipy.register(SkellamDistribution) # type: ignore
def _(dist: SkellamDistribution, size, seed):
return scipy.stats.skellam.rvs(mu1=float(dist.mu1), mu2=float(dist.mu2), size=size, random_state=seed)
@do_sample_scipy.register(YuleSimonDistribution) # type: ignore
def _(dist: YuleSimonDistribution, size, seed):
return scipy.stats.yulesimon.rvs(alpha=float(dist.rho), size=size, random_state=seed)
@do_sample_scipy.register(ZetaDistribution) # type: ignore
def _(dist: ZetaDistribution, size, seed):
return scipy.stats.zipf.rvs(a=float(dist.s), size=size, random_state=seed)
# FRV:
@do_sample_scipy.register(SingleFiniteDistribution) # type: ignore
def _(dist: SingleFiniteDistribution, size, seed):
    # scipy can handle custom finite distributions via rv_discrete
from scipy.stats import rv_discrete
density_ = dist.dict
x, y = [], []
for k, v in density_.items():
x.append(int(k))
y.append(float(v))
scipy_rv = rv_discrete(name='scipy_rv', values=(x, y))
return scipy_rv.rvs(size=size, random_state=seed)
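# Finite distributions are handed to scipy as an explicit table of support
# points and probabilities via ``rv_discrete(values=(xk, pk))``.  A standalone
# sketch of the same idea (illustrative only, assuming scipy is installed):
#     from scipy.stats import rv_discrete
#     coin = rv_discrete(name='coin', values=([0, 1], [0.5, 0.5]))
#     flips = coin.rvs(size=10, random_state=0)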
|
8460e7a59735dafa3842671cf5caaa544004bf6d3f7f20292074a551bd67f4a6 | from functools import singledispatch
from sympy.external import import_module
from sympy.stats.crv_types import BetaDistribution, CauchyDistribution, ChiSquaredDistribution, ExponentialDistribution, \
GammaDistribution, LogNormalDistribution, NormalDistribution, ParetoDistribution, UniformDistribution, \
GaussianInverseDistribution
from sympy.stats.drv_types import PoissonDistribution, GeometricDistribution, NegativeBinomialDistribution
from sympy.stats.frv_types import BinomialDistribution, BernoulliDistribution
pymc3 = import_module('pymc3')
@singledispatch
def do_sample_pymc3(dist):
return None
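# Unlike the scipy and numpy backends, these handlers do not draw samples
# themselves: each registers a named pymc3 random variable ('X') and returns
# it, and the caller is expected to build it inside a ``pymc3.Model()``
# context and then sample from the prior, roughly as follows (illustrative
# sketch, assuming pymc3 is installed):
#     with pymc3.Model():
#         if do_sample_pymc3(dist) is None:
#             raise NotImplementedError
#         draws = pymc3.sample_prior_predictive(samples=10)['X']
# Returning None from the base case again signals an unsupported distribution.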
# CRV:
@do_sample_pymc3.register(BetaDistribution) # type: ignore
def _(dist: BetaDistribution):
return pymc3.Beta('X', alpha=float(dist.alpha), beta=float(dist.beta))
@do_sample_pymc3.register(CauchyDistribution) # type: ignore
def _(dist: CauchyDistribution):
return pymc3.Cauchy('X', alpha=float(dist.x0), beta=float(dist.gamma))
@do_sample_pymc3.register(ChiSquaredDistribution) # type: ignore
def _(dist: ChiSquaredDistribution):
return pymc3.ChiSquared('X', nu=float(dist.k))
@do_sample_pymc3.register(ExponentialDistribution) # type: ignore
def _(dist: ExponentialDistribution):
return pymc3.Exponential('X', lam=float(dist.rate))
@do_sample_pymc3.register(GammaDistribution) # type: ignore
def _(dist: GammaDistribution):
return pymc3.Gamma('X', alpha=float(dist.k), beta=1 / float(dist.theta))
@do_sample_pymc3.register(LogNormalDistribution) # type: ignore
def _(dist: LogNormalDistribution):
return pymc3.Lognormal('X', mu=float(dist.mean), sigma=float(dist.std))
@do_sample_pymc3.register(NormalDistribution) # type: ignore
def _(dist: NormalDistribution):
return pymc3.Normal('X', float(dist.mean), float(dist.std))
@do_sample_pymc3.register(GaussianInverseDistribution) # type: ignore
def _(dist: GaussianInverseDistribution):
return pymc3.Wald('X', mu=float(dist.mean), lam=float(dist.shape))
@do_sample_pymc3.register(ParetoDistribution) # type: ignore
def _(dist: ParetoDistribution):
return pymc3.Pareto('X', alpha=float(dist.alpha), m=float(dist.xm))
@do_sample_pymc3.register(UniformDistribution) # type: ignore
def _(dist: UniformDistribution):
return pymc3.Uniform('X', lower=float(dist.left), upper=float(dist.right))
# DRV:
@do_sample_pymc3.register(GeometricDistribution) # type: ignore
def _(dist: GeometricDistribution):
return pymc3.Geometric('X', p=float(dist.p))
@do_sample_pymc3.register(NegativeBinomialDistribution) # type: ignore
def _(dist: NegativeBinomialDistribution):
return pymc3.NegativeBinomial('X', mu=float((dist.p * dist.r) / (1 - dist.p)),
alpha=float(dist.r))
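# pymc3 parametrises the negative binomial by its mean ``mu`` and shape
# ``alpha`` rather than by (r, p); for SymPy's convention the mean is
# r*p/(1 - p) and the shape is r, which is the conversion used above.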
@do_sample_pymc3.register(PoissonDistribution) # type: ignore
def _(dist: PoissonDistribution):
return pymc3.Poisson('X', mu=float(dist.lamda))
# FRV:
@do_sample_pymc3.register(BernoulliDistribution) # type: ignore
def _(dist: BernoulliDistribution):
return pymc3.Bernoulli('X', p=float(dist.p))
@do_sample_pymc3.register(BinomialDistribution) # type: ignore
def _(dist: BinomialDistribution):
return pymc3.Binomial('X', n=int(dist.n), p=float(dist.p))
|
237deded31957a948a63ef49efe6ac02c443bae10b18dd800629dc69c8f4b6c7 | from sympy.core.numbers import Rational
from sympy.core.singleton import S
from sympy.external import import_module
from sympy.stats import Binomial, sample, Die, FiniteRV, DiscreteUniform, Bernoulli, BetaBinomial, Hypergeometric, \
Rademacher
from sympy.testing.pytest import skip, raises
def test_given_sample():
X = Die('X', 6)
scipy = import_module('scipy')
if not scipy:
skip('Scipy is not installed. Abort tests')
assert sample(X, X > 5) == 6
def test_sample_numpy():
distribs_numpy = [
Binomial("B", 5, 0.4),
]
size = 3
numpy = import_module('numpy')
if not numpy:
skip('Numpy is not installed. Abort tests for _sample_numpy.')
else:
for X in distribs_numpy:
samps = sample(X, size=size, library='numpy')
for sam in samps:
assert sam in X.pspace.domain.set
raises(NotImplementedError,
lambda: sample(Die("D"), library='numpy'))
raises(NotImplementedError,
lambda: Die("D").pspace.sample(library='tensorflow'))
def test_sample_scipy():
distribs_scipy = [
FiniteRV('F', {1: S.Half, 2: Rational(1, 4), 3: Rational(1, 4)}),
DiscreteUniform("Y", list(range(5))),
Die("D"),
Bernoulli("Be", 0.3),
Binomial("Bi", 5, 0.4),
BetaBinomial("Bb", 2, 1, 1),
Hypergeometric("H", 1, 1, 1),
Rademacher("R")
]
size = 3
scipy = import_module('scipy')
if not scipy:
skip('Scipy not installed. Abort tests for _sample_scipy.')
else:
for X in distribs_scipy:
samps = sample(X, size=size)
samps2 = sample(X, size=(2, 2))
for sam in samps:
assert sam in X.pspace.domain.set
for i in range(2):
for j in range(2):
assert samps2[i][j] in X.pspace.domain.set
def test_sample_pymc3():
distribs_pymc3 = [
Bernoulli('B', 0.2),
Binomial('N', 5, 0.4)
]
size = 3
pymc3 = import_module('pymc3')
if not pymc3:
skip('PyMC3 is not installed. Abort tests for _sample_pymc3.')
else:
for X in distribs_pymc3:
samps = sample(X, size=size, library='pymc3')
for sam in samps:
assert sam in X.pspace.domain.set
raises(NotImplementedError,
lambda: (sample(Die("D"), library='pymc3')))
def test_sample_seed():
F = FiniteRV('F', {1: S.Half, 2: Rational(1, 4), 3: Rational(1, 4)})
size = 10
libraries = ['scipy', 'numpy', 'pymc3']
for lib in libraries:
try:
imported_lib = import_module(lib)
if imported_lib:
s0 = sample(F, size=size, library=lib, seed=0)
s1 = sample(F, size=size, library=lib, seed=0)
s2 = sample(F, size=size, library=lib, seed=1)
assert all(s0 == s1)
assert not all(s1 == s2)
except NotImplementedError:
continue
|
56080b7a2f6167f939a9d6d8675dc52ddf5defbef32ba6e4b39f2d9d2aefe123 | from sympy.core.numbers import oo
from sympy.core.symbol import Symbol
from sympy.functions.elementary.exponential import exp
from sympy.sets.sets import Interval
from sympy.external import import_module
from sympy.stats import Beta, Chi, Normal, Gamma, Exponential, LogNormal, Pareto, ChiSquared, Uniform, sample, \
BetaPrime, Cauchy, GammaInverse, GaussianInverse, StudentT, Weibull, density, ContinuousRV
from sympy.testing.pytest import skip, raises
def test_sample_numpy():
distribs_numpy = [
Beta("B", 1, 1),
Normal("N", 0, 1),
Gamma("G", 2, 7),
Exponential("E", 2),
LogNormal("LN", 0, 1),
Pareto("P", 1, 1),
ChiSquared("CS", 2),
Uniform("U", 0, 1)
]
size = 3
numpy = import_module('numpy')
if not numpy:
skip('Numpy is not installed. Abort tests for _sample_numpy.')
else:
for X in distribs_numpy:
samps = sample(X, size=size, library='numpy')
for sam in samps:
assert sam in X.pspace.domain.set
raises(NotImplementedError,
lambda: sample(Chi("C", 1), library='numpy'))
raises(NotImplementedError,
lambda: Chi("C", 1).pspace.distribution.sample(library='tensorflow'))
def test_sample_scipy():
distribs_scipy = [
Beta("B", 1, 1),
BetaPrime("BP", 1, 1),
Cauchy("C", 1, 1),
Chi("C", 1),
Normal("N", 0, 1),
Gamma("G", 2, 7),
GammaInverse("GI", 1, 1),
GaussianInverse("GUI", 1, 1),
Exponential("E", 2),
LogNormal("LN", 0, 1),
Pareto("P", 1, 1),
StudentT("S", 2),
ChiSquared("CS", 2),
Uniform("U", 0, 1)
]
size = 3
scipy = import_module('scipy')
if not scipy:
skip('Scipy is not installed. Abort tests for _sample_scipy.')
else:
for X in distribs_scipy:
samps = sample(X, size=size, library='scipy')
samps2 = sample(X, size=(2, 2), library='scipy')
for sam in samps:
assert sam in X.pspace.domain.set
for i in range(2):
for j in range(2):
assert samps2[i][j] in X.pspace.domain.set
def test_sample_pymc3():
distribs_pymc3 = [
Beta("B", 1, 1),
Cauchy("C", 1, 1),
Normal("N", 0, 1),
Gamma("G", 2, 7),
GaussianInverse("GI", 1, 1),
Exponential("E", 2),
LogNormal("LN", 0, 1),
Pareto("P", 1, 1),
ChiSquared("CS", 2),
Uniform("U", 0, 1)
]
size = 3
pymc3 = import_module('pymc3')
if not pymc3:
skip('PyMC3 is not installed. Abort tests for _sample_pymc3.')
else:
for X in distribs_pymc3:
samps = sample(X, size=size, library='pymc3')
for sam in samps:
assert sam in X.pspace.domain.set
raises(NotImplementedError,
lambda: sample(Chi("C", 1), library='pymc3'))
def test_sampling_gamma_inverse():
scipy = import_module('scipy')
if not scipy:
skip('Scipy not installed. Abort tests for sampling of gamma inverse.')
X = GammaInverse("x", 1, 1)
assert sample(X) in X.pspace.domain.set
def test_lognormal_sampling():
    # Right now, only the density function and sampling work
scipy = import_module('scipy')
if not scipy:
skip('Scipy is not installed. Abort tests')
for i in range(3):
X = LogNormal('x', i, 1)
assert sample(X) in X.pspace.domain.set
size = 5
samps = sample(X, size=size)
for samp in samps:
assert samp in X.pspace.domain.set
def test_sampling_gaussian_inverse():
scipy = import_module('scipy')
if not scipy:
skip('Scipy not installed. Abort tests for sampling of Gaussian inverse.')
X = GaussianInverse("x", 1, 1)
assert sample(X, library='scipy') in X.pspace.domain.set
def test_prefab_sampling():
scipy = import_module('scipy')
if not scipy:
skip('Scipy is not installed. Abort tests')
N = Normal('X', 0, 1)
L = LogNormal('L', 0, 1)
E = Exponential('Ex', 1)
P = Pareto('P', 1, 3)
W = Weibull('W', 1, 1)
U = Uniform('U', 0, 1)
B = Beta('B', 2, 5)
G = Gamma('G', 1, 3)
variables = [N, L, E, P, W, U, B, G]
niter = 10
size = 5
for var in variables:
for _ in range(niter):
assert sample(var) in var.pspace.domain.set
samps = sample(var, size=size)
for samp in samps:
assert samp in var.pspace.domain.set
def test_sample_continuous():
z = Symbol('z')
Z = ContinuousRV(z, exp(-z), set=Interval(0, oo))
assert density(Z)(-1) == 0
scipy = import_module('scipy')
if not scipy:
skip('Scipy is not installed. Abort tests')
assert sample(Z) in Z.pspace.domain.set
sym, val = list(Z.pspace.sample().items())[0]
assert sym == Z and val in Interval(0, oo)
libraries = ['scipy', 'numpy', 'pymc3']
for lib in libraries:
try:
imported_lib = import_module(lib)
if imported_lib:
s0 = sample(Z, size=10, library=lib, seed=0)
s1 = sample(Z, size=10, library=lib, seed=0)
s2 = sample(Z, size=10, library=lib, seed=1)
assert all(s0 == s1)
assert all(s1 != s2)
except NotImplementedError:
continue
|
6d0a95fbc5899243c79b50a7c12cc6821a88628f3624f4697ceefcb0817c380b | from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.external import import_module
from sympy.stats import Geometric, Poisson, Zeta, sample, Skellam, DiscreteRV, Logarithmic, NegativeBinomial, YuleSimon
from sympy.testing.pytest import skip, raises, slow
def test_sample_numpy():
distribs_numpy = [
Geometric('G', 0.5),
Poisson('P', 1),
Zeta('Z', 2)
]
size = 3
numpy = import_module('numpy')
if not numpy:
skip('Numpy is not installed. Abort tests for _sample_numpy.')
else:
for X in distribs_numpy:
samps = sample(X, size=size, library='numpy')
for sam in samps:
assert sam in X.pspace.domain.set
raises(NotImplementedError,
lambda: sample(Skellam('S', 1, 1), library='numpy'))
raises(NotImplementedError,
lambda: Skellam('S', 1, 1).pspace.distribution.sample(library='tensorflow'))
def test_sample_scipy():
p = S(2)/3
x = Symbol('x', integer=True, positive=True)
    pdf = p*(1 - p)**(x - 1)  # pmf of the Geometric distribution
distribs_scipy = [
DiscreteRV(x, pdf, set=S.Naturals),
Geometric('G', 0.5),
Logarithmic('L', 0.5),
NegativeBinomial('N', 5, 0.4),
Poisson('P', 1),
Skellam('S', 1, 1),
YuleSimon('Y', 1),
Zeta('Z', 2)
]
size = 3
scipy = import_module('scipy')
if not scipy:
skip('Scipy is not installed. Abort tests for _sample_scipy.')
else:
for X in distribs_scipy:
samps = sample(X, size=size, library='scipy')
samps2 = sample(X, size=(2, 2), library='scipy')
for sam in samps:
assert sam in X.pspace.domain.set
for i in range(2):
for j in range(2):
assert samps2[i][j] in X.pspace.domain.set
def test_sample_pymc3():
distribs_pymc3 = [
Geometric('G', 0.5),
Poisson('P', 1),
NegativeBinomial('N', 5, 0.4)
]
size = 3
pymc3 = import_module('pymc3')
if not pymc3:
skip('PyMC3 is not installed. Abort tests for _sample_pymc3.')
else:
for X in distribs_pymc3:
samps = sample(X, size=size, library='pymc3')
for sam in samps:
assert sam in X.pspace.domain.set
raises(NotImplementedError,
lambda: sample(Skellam('S', 1, 1), library='pymc3'))
@slow
def test_sample_discrete():
X = Geometric('X', S.Half)
scipy = import_module('scipy')
if not scipy:
skip('Scipy not installed. Abort tests')
assert sample(X) in X.pspace.domain.set
    samps = sample(X, size=2)  # This takes a long time if run without scipy
for samp in samps:
assert samp in X.pspace.domain.set
libraries = ['scipy', 'numpy', 'pymc3']
for lib in libraries:
try:
imported_lib = import_module(lib)
if imported_lib:
s0 = sample(X, size=10, library=lib, seed=0)
s1 = sample(X, size=10, library=lib, seed=0)
s2 = sample(X, size=10, library=lib, seed=1)
assert all(s0 == s1)
assert not all(s1 == s2)
except NotImplementedError:
continue
|
81f41f2ee2d5167f9b9843c17209d4f9fd1589b26263c840125d053b85778955 | from sympy.core import GoldenRatio as phi
from sympy.core.numbers import (Rational, pi)
from sympy.core.singleton import S
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.ntheory.continued_fraction import \
(continued_fraction_periodic as cf_p,
continued_fraction_iterator as cf_i,
continued_fraction_convergents as cf_c,
continued_fraction_reduce as cf_r,
continued_fraction as cf)
from sympy.testing.pytest import raises
def test_continued_fraction():
assert cf_p(1, 1, 10, 0) == cf_p(1, 1, 0, 1)
assert cf_p(1, -1, 10, 1) == cf_p(-1, 1, 10, -1)
t = sqrt(2)
assert cf((1 + t)*(1 - t)) == cf(-1)
for n in [0, 2, Rational(2, 3), sqrt(2), 3*sqrt(2), 1 + 2*sqrt(3)/5,
(2 - 3*sqrt(5))/7, 1 + sqrt(2), (-5 + sqrt(17))/4]:
assert (cf_r(cf(n)) - n).expand() == 0
assert (cf_r(cf(-n)) + n).expand() == 0
raises(ValueError, lambda: cf(sqrt(2 + sqrt(3))))
raises(ValueError, lambda: cf(sqrt(2) + sqrt(3)))
raises(ValueError, lambda: cf(pi))
raises(ValueError, lambda: cf(.1))
raises(ValueError, lambda: cf_p(1, 0, 0))
raises(ValueError, lambda: cf_p(1, 1, -1))
assert cf_p(4, 3, 0) == [1, 3]
assert cf_p(0, 3, 5) == [0, 1, [2, 1, 12, 1, 2, 2]]
assert cf_p(1, 1, 0) == [1]
assert cf_p(3, 4, 0) == [0, 1, 3]
assert cf_p(4, 5, 0) == [0, 1, 4]
assert cf_p(5, 6, 0) == [0, 1, 5]
assert cf_p(11, 13, 0) == [0, 1, 5, 2]
assert cf_p(16, 19, 0) == [0, 1, 5, 3]
assert cf_p(27, 32, 0) == [0, 1, 5, 2, 2]
assert cf_p(1, 2, 5) == [[1]]
assert cf_p(0, 1, 2) == [1, [2]]
assert cf_p(6, 7, 49) == [1, 1, 6]
assert cf_p(3796, 1387, 0) == [2, 1, 2, 1, 4]
assert cf_p(3245, 10000) == [0, 3, 12, 4, 13]
assert cf_p(1932, 2568) == [0, 1, 3, 26, 2]
assert cf_p(6589, 2569) == [2, 1, 1, 3, 2, 1, 3, 1, 23]
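    # For example, cf_p(3796, 1387, 0) == [2, 1, 2, 1, 4] encodes the finite
    # continued fraction 3796/1387 = 2 + 1/(1 + 1/(2 + 1/(1 + 1/4))).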
def take(iterator, n=7):
res = []
for i, t in enumerate(cf_i(iterator)):
if i >= n:
break
res.append(t)
return res
assert take(phi) == [1, 1, 1, 1, 1, 1, 1]
assert take(pi) == [3, 7, 15, 1, 292, 1, 1]
assert list(cf_i(Rational(17, 12))) == [1, 2, 2, 2]
assert list(cf_i(Rational(-17, 12))) == [-2, 1, 1, 2, 2]
assert list(cf_c([1, 6, 1, 8])) == [S.One, Rational(7, 6), Rational(8, 7), Rational(71, 62)]
assert list(cf_c([2])) == [S(2)]
assert list(cf_c([1, 1, 1, 1, 1, 1, 1])) == [S.One, S(2), Rational(3, 2), Rational(5, 3),
Rational(8, 5), Rational(13, 8), Rational(21, 13)]
assert list(cf_c([1, 6, Rational(-1, 2), 4])) == [S.One, Rational(7, 6), Rational(5, 4), Rational(3, 2)]
assert cf_r([1, 6, 1, 8]) == Rational(71, 62)
assert cf_r([3]) == S(3)
assert cf_r([-1, 5, 1, 4]) == Rational(-24, 29)
assert (cf_r([0, 1, 1, 7, [24, 8]]) - (sqrt(3) + 2)/7).expand() == 0
assert cf_r([1, 5, 9]) == Rational(55, 46)
assert (cf_r([[1]]) - (sqrt(5) + 1)/2).expand() == 0
assert cf_r([-3, 1, 1, [2]]) == -1 - sqrt(2)
|