# --- sympy.core.cache ---
""" Caching facility for SymPy """
from __future__ import print_function, division
from distutils.version import LooseVersion as V
class _cache(list):
""" List of cached functions """
def print_cache(self):
"""print cache info"""
for item in self:
name = item.__name__
myfunc = item
while hasattr(myfunc, '__wrapped__'):
if hasattr(myfunc, 'cache_info'):
info = myfunc.cache_info()
break
else:
myfunc = myfunc.__wrapped__
else:
info = None
print(name, info)
def clear_cache(self):
"""clear cache content"""
for item in self:
myfunc = item
while hasattr(myfunc, '__wrapped__'):
if hasattr(myfunc, 'cache_clear'):
myfunc.cache_clear()
break
else:
myfunc = myfunc.__wrapped__
# global cache registry:
CACHE = _cache()
# make clear and print methods available
print_cache = CACHE.print_cache
clear_cache = CACHE.clear_cache
from functools import update_wrapper
try:
import fastcache
from warnings import warn
# the version attribute __version__ is not present for all versions
if not hasattr(fastcache, '__version__'):
warn("fastcache version >= 0.4.0 required", UserWarning)
raise ImportError
# ensure minimum required version of fastcache is present
if V(fastcache.__version__) < '0.4.0':
warn("fastcache version >= 0.4.0 required, detected {}"\
.format(fastcache.__version__), UserWarning)
raise ImportError
# Do not use fastcache if running under pypy
import platform
if platform.python_implementation() == 'PyPy':
raise ImportError
lru_cache = fastcache.clru_cache
except ImportError:
from sympy.core.compatibility import lru_cache
def __cacheit(maxsize):
"""caching decorator.
important: the result of cached function must be *immutable*
Examples
========
>>> from sympy.core.cache import cacheit
>>> @cacheit
... def f(a, b):
... return a+b
>>> @cacheit
... def f(a, b):
... return [a, b] # <-- WRONG, returns mutable object
to force cacheit to check returned results mutability and consistency,
set environment variable SYMPY_USE_CACHE to 'debug'
"""
def func_wrapper(func):
cfunc = lru_cache(maxsize, typed=True)(func)
# wraps here does not propagate all the necessary info
# for py2.7, use update_wrapper below
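# lru_cache raises TypeError when the arguments are unhashable (e.g. lists);
# the wrapper below falls back to calling the undecorated function in that case.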
def wrapper(*args, **kwargs):
try:
retval = cfunc(*args, **kwargs)
except TypeError:
retval = func(*args, **kwargs)
return retval
wrapper.cache_info = cfunc.cache_info
wrapper.cache_clear = cfunc.cache_clear
# Some versions of update_wrapper erroneously assign the final
# function of the wrapper chain to __wrapped__, see
# https://bugs.python.org/issue17482 .
# To work around this, we need to call update_wrapper first, then
# assign to wrapper.__wrapped__.
update_wrapper(wrapper, func)
wrapper.__wrapped__ = cfunc.__wrapped__
CACHE.append(wrapper)
return wrapper
return func_wrapper
else:
def __cacheit(maxsize):
"""caching decorator.
important: the result of cached function must be *immutable*
Examples
========
>>> from sympy.core.cache import cacheit
>>> @cacheit
... def f(a, b):
... return a+b
>>> @cacheit
... def f(a, b):
... return [a, b] # <-- WRONG, returns mutable object
to force cacheit to check returned results mutability and consistency,
set environment variable SYMPY_USE_CACHE to 'debug'
"""
def func_wrapper(func):
cfunc = fastcache.clru_cache(maxsize, typed=True, unhashable='ignore')(func)
CACHE.append(cfunc)
return cfunc
return func_wrapper
########################################
def __cacheit_nocache(func):
return func
def __cacheit_debug(maxsize):
"""cacheit + code to check cache consistency"""
def func_wrapper(func):
from .decorators import wraps
cfunc = __cacheit(maxsize)(func)
@wraps(func)
def wrapper(*args, **kw_args):
# always call function itself and compare it with cached version
r1 = func(*args, **kw_args)
r2 = cfunc(*args, **kw_args)
# try to see if the result is immutable
#
# this works because:
#
# hash([1,2,3]) -> raise TypeError
# hash({'a':1, 'b':2}) -> raise TypeError
# hash((1,[2,3])) -> raise TypeError
#
# hash((1,2,3)) -> just computes the hash
hash(r1), hash(r2)
# also see if returned values are the same
if r1 != r2:
raise RuntimeError("Returned values are not the same")
return r1
return wrapper
return func_wrapper
def _getenv(key, default=None):
from os import getenv
return getenv(key, default)
# SYMPY_USE_CACHE=yes/no/debug
USE_CACHE = _getenv('SYMPY_USE_CACHE', 'yes').lower()
# SYMPY_CACHE_SIZE=some_integer/None
# special cases :
# SYMPY_CACHE_SIZE=0 -> No caching
# SYMPY_CACHE_SIZE=None -> Unbounded caching
scs = _getenv('SYMPY_CACHE_SIZE', '1000')
if scs.lower() == 'none':
SYMPY_CACHE_SIZE = None
else:
try:
SYMPY_CACHE_SIZE = int(scs)
except ValueError:
raise RuntimeError(
'SYMPY_CACHE_SIZE must be a valid integer or None. ' + \
'Got: %s' % scs)
if USE_CACHE == 'no':
cacheit = __cacheit_nocache
elif USE_CACHE == 'yes':
cacheit = __cacheit(SYMPY_CACHE_SIZE)
elif USE_CACHE == 'debug':
cacheit = __cacheit_debug(SYMPY_CACHE_SIZE) # a lot slower
else:
raise RuntimeError(
'unrecognized value for SYMPY_USE_CACHE: %s' % USE_CACHE)
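# Illustrative configuration (shell), reflecting the handling above:
#   SYMPY_USE_CACHE=no      -> caching disabled (cacheit is a no-op)
#   SYMPY_USE_CACHE=debug   -> every call is recomputed and checked (much slower)
#   SYMPY_CACHE_SIZE=None   -> unbounded LRU cache
#   SYMPY_CACHE_SIZE=4000   -> LRU cache bounded to 4000 entries (example value)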
# --- sympy.core.containers ---
"""Module for SymPy containers
(SymPy objects that store other SymPy objects)
The containers implemented in this module are subclassed to Basic.
They are supposed to work seamlessly within the SymPy framework.
"""
from __future__ import print_function, division
from collections import OrderedDict
from sympy.core import S
from sympy.core.basic import Basic
from sympy.core.compatibility import as_int, MutableSet
from sympy.core.sympify import sympify, converter
from sympy.utilities.iterables import iterable
class Tuple(Basic):
"""
Wrapper around the builtin tuple object
The Tuple is a subclass of Basic, so that it works well in the
SymPy framework. The wrapped tuple is available as self.args, but
you can also access elements or slices with [:] syntax.
Parameters
==========
sympify : bool
If ``False``, ``sympify`` is not called on ``args``. This
can be used for speedups for very large tuples where the
elements are known to already be sympy objects.
Example
=======
>>> from sympy import symbols
>>> from sympy.core.containers import Tuple
>>> a, b, c, d = symbols('a b c d')
>>> Tuple(a, b, c)[1:]
(b, c)
>>> Tuple(a, b, c).subs(a, d)
(d, b, c)
"""
def __new__(cls, *args, **kwargs):
if kwargs.get('sympify', True):
args = ( sympify(arg) for arg in args )
obj = Basic.__new__(cls, *args)
return obj
def __getitem__(self, i):
if isinstance(i, slice):
indices = i.indices(len(self))
return Tuple(*(self.args[j] for j in range(*indices)))
return self.args[i]
def __len__(self):
return len(self.args)
def __contains__(self, item):
return item in self.args
def __iter__(self):
return iter(self.args)
def __add__(self, other):
if isinstance(other, Tuple):
return Tuple(*(self.args + other.args))
elif isinstance(other, tuple):
return Tuple(*(self.args + other))
else:
return NotImplemented
def __radd__(self, other):
if isinstance(other, Tuple):
return Tuple(*(other.args + self.args))
elif isinstance(other, tuple):
return Tuple(*(other + self.args))
else:
return NotImplemented
def __mul__(self, other):
try:
n = as_int(other)
except ValueError:
raise TypeError("Can't multiply sequence by non-integer of type '%s'" % type(other))
return self.func(*(self.args*n))
__rmul__ = __mul__
def __eq__(self, other):
if isinstance(other, Basic):
return super(Tuple, self).__eq__(other)
return self.args == other
def __ne__(self, other):
if isinstance(other, Basic):
return super(Tuple, self).__ne__(other)
return self.args != other
def __hash__(self):
return hash(self.args)
def _to_mpmath(self, prec):
return tuple(a._to_mpmath(prec) for a in self.args)
def __lt__(self, other):
return sympify(self.args < other.args)
def __le__(self, other):
return sympify(self.args <= other.args)
# XXX: Basic defines count() as something different, so we can't
# redefine it here. Originally this led to cse() test failure.
def tuple_count(self, value):
"""T.count(value) -> integer -- return number of occurrences of value"""
return self.args.count(value)
def index(self, value, start=None, stop=None):
"""T.index(value, [start, [stop]]) -> integer -- return first index of value.
Raises ValueError if the value is not present."""
# XXX: One would expect:
#
# return self.args.index(value, start, stop)
#
# here. Any trouble with that? Yes:
#
# >>> (1,).index(1, None, None)
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# TypeError: slice indices must be integers or None or have an __index__ method
#
# See: http://bugs.python.org/issue13340
if start is None and stop is None:
return self.args.index(value)
elif stop is None:
return self.args.index(value, start)
else:
return self.args.index(value, start, stop)
def _eval_Eq(self, other):
from sympy.core.function import AppliedUndef
from sympy.core.logic import fuzzy_and, fuzzy_bool
from sympy.core.relational import Eq
if other.is_Symbol or isinstance(other, AppliedUndef):
return None
if not isinstance(other, Tuple) or len(self) != len(other):
return S.false
r = fuzzy_and(fuzzy_bool(Eq(s, o)) for s, o in zip(self, other))
if r is True:
return S.true
elif r is False:
return S.false
converter[tuple] = lambda tup: Tuple(*tup)
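# With this converter registered, sympify of a builtin tuple is expected to
# produce a Tuple, e.g. sympify((1, 2)) -> Tuple(1, 2) (illustrative).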
def tuple_wrapper(method):
"""
Decorator that converts any tuple in the function arguments into a Tuple.
The motivation for this is to provide simple user interfaces. The user can
call a function with regular tuples in the argument, and the wrapper will
convert them to Tuples before handing them to the function.
>>> from sympy.core.containers import tuple_wrapper
>>> def f(*args):
... return args
>>> g = tuple_wrapper(f)
The decorated function g sees only the Tuple argument:
>>> g(0, (1, 2), 3)
(0, (1, 2), 3)
"""
def wrap_tuples(*args, **kw_args):
newargs = []
for arg in args:
if type(arg) is tuple:
newargs.append(Tuple(*arg))
else:
newargs.append(arg)
return method(*newargs, **kw_args)
return wrap_tuples
class Dict(Basic):
"""
Wrapper around the builtin dict object
The Dict is a subclass of Basic, so that it works well in the
SymPy framework. Because it is immutable, it may be included
in sets, but its values must all be given at instantiation and
cannot be changed afterwards. Otherwise it behaves identically
to the Python dict.
>>> from sympy.core.containers import Dict
>>> D = Dict({1: 'one', 2: 'two'})
>>> for key in D:
... if key == 1:
... print('%s %s' % (key, D[key]))
1 one
The args are sympified so the 1 and 2 are Integers and the values
are Symbols. Queries automatically sympify args so the following work:
>>> 1 in D
True
>>> D.has('one') # searches keys and values
True
>>> 'one' in D # not in the keys
False
>>> D[1]
one
"""
def __new__(cls, *args):
if len(args) == 1 and isinstance(args[0], (dict, Dict)):
items = [Tuple(k, v) for k, v in args[0].items()]
elif iterable(args) and all(len(arg) == 2 for arg in args):
items = [Tuple(k, v) for k, v in args]
else:
raise TypeError('Pass Dict args as Dict((k1, v1), ...) or Dict({k1: v1, ...})')
elements = frozenset(items)
obj = Basic.__new__(cls, elements)
obj.elements = elements
obj._dict = dict(items) # In case Tuple decides it wants to sympify
return obj
def __getitem__(self, key):
"""x.__getitem__(y) <==> x[y]"""
return self._dict[sympify(key)]
def __setitem__(self, key, value):
raise NotImplementedError("SymPy Dicts are Immutable")
@property
def args(self):
"""Returns a tuple of arguments of 'self'.
See Also
========
sympy.core.basic.Basic.args
"""
return tuple(self.elements)
def items(self):
'''D.items() -> list of D's (key, value) pairs, as 2-tuples'''
return self._dict.items()
def keys(self):
'''D.keys() -> list of D's keys'''
return self._dict.keys()
def values(self):
'''D.values() -> list of D's values'''
return self._dict.values()
def __iter__(self):
'''x.__iter__() <==> iter(x)'''
return iter(self._dict)
def __len__(self):
'''x.__len__() <==> len(x)'''
return self._dict.__len__()
def get(self, key, default=None):
'''D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.'''
return self._dict.get(sympify(key), default)
def __contains__(self, key):
'''D.__contains__(k) -> True if D has a key k, else False'''
return sympify(key) in self._dict
def __lt__(self, other):
return sympify(self.args < other.args)
@property
def _sorted_args(self):
from sympy.utilities import default_sort_key
return tuple(sorted(self.args, key=default_sort_key))
# this handles dict, defaultdict, OrderedDict
converter[dict] = lambda d: Dict(*d.items())
class OrderedSet(MutableSet):
def __init__(self, iterable=None):
if iterable:
self.map = OrderedDict((item, None) for item in iterable)
else:
self.map = OrderedDict()
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
self.map[key] = None
def discard(self, key):
self.map.pop(key, None)  # discard removes the key if present; it should not raise
def pop(self, last=True):
return self.map.popitem(last=last)[0]
def __iter__(self):
for key in self.map.keys():
yield key
def __repr__(self):
if not self.map:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self.map.keys()))
def intersection(self, other):
result = []
for val in self:
if val in other:
result.append(val)
return self.__class__(result)
def difference(self, other):
result = []
for val in self:
if val not in other:
result.append(val)
return self.__class__(result)
def update(self, iterable):
for val in iterable:
self.add(val)
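# Illustrative OrderedSet behaviour (a sketch based on the class above):
#   s = OrderedSet([3, 1, 2])
#   list(s)                    -> [3, 1, 2]   (insertion order preserved)
#   s.add(1); list(s)          -> [3, 1, 2]   (re-adding keeps the old position)
#   s.intersection([2, 5])     -> OrderedSet([2])
#   s.pop()                    -> 2           (last item by default)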
# --- sympy.core.logic ---
"""Logic expressions handling
NOTE
----
at present this is mainly needed for facts.py, but feel free to improve
this stuff for general purposes.
"""
from __future__ import print_function, division
from typing import Dict, Type, Union
# Type of a fuzzy bool
FuzzyBool = Union[bool, None]
def _torf(args):
"""Return True if all args are True, False if they
are all False, else None.
>>> from sympy.core.logic import _torf
>>> _torf((True, True))
True
>>> _torf((False, False))
False
>>> _torf((True, False))
"""
sawT = sawF = False
for a in args:
if a is True:
if sawF:
return
sawT = True
elif a is False:
if sawT:
return
sawF = True
else:
return
return sawT
def _fuzzy_group(args, quick_exit=False):
"""Return True if all args are True, None if there is any None else False
unless ``quick_exit`` is True (then return None as soon as a second False
is seen).
``_fuzzy_group`` is like ``fuzzy_and`` except that it is more
conservative in returning a False, waiting to make sure that all
arguments are True or False and returning None if any arguments are
None. It also has the capability of permitting only a single False and
returning None if more than one is seen. For example, the presence of a
single transcendental amongst rationals would indicate that the group is
no longer rational; but a second transcendental in the group would make the
determination impossible.
Examples
========
>>> from sympy.core.logic import _fuzzy_group
By default, multiple Falses mean the group is broken:
>>> _fuzzy_group([False, False, True])
False
If multiple Falses mean the group status is unknown then set
`quick_exit` to True so None can be returned when the 2nd False is seen:
>>> _fuzzy_group([False, False, True], quick_exit=True)
But if only a single False is seen then the group is known to
be broken:
>>> _fuzzy_group([False, True, True], quick_exit=True)
False
"""
saw_other = False
for a in args:
if a is True:
continue
if a is None:
return
if quick_exit and saw_other:
return
saw_other = True
return not saw_other
def fuzzy_bool(x):
"""Return True, False or None according to x.
Whereas bool(x) returns True or False, fuzzy_bool allows
for the None value; anything other than True, False or None (e.g. a Symbol) becomes None.
Examples
========
>>> from sympy.core.logic import fuzzy_bool
>>> from sympy.abc import x
>>> fuzzy_bool(x), fuzzy_bool(None)
(None, None)
>>> bool(x), bool(None)
(True, False)
"""
if x is None:
return None
if x in (True, False):
return bool(x)
def fuzzy_and(args):
"""Return True (all True), False (any False) or None.
Examples
========
>>> from sympy.core.logic import fuzzy_and
>>> from sympy import Dummy
If you had a list of objects to test the commutativity of
and you want the fuzzy_and logic applied, passing an
iterator will allow the commutativity to only be computed
as many times as necessary. With this list, False can be
returned after analyzing the first symbol:
>>> syms = [Dummy(commutative=False), Dummy()]
>>> fuzzy_and(s.is_commutative for s in syms)
False
That False would require less work than if a list of pre-computed
items was sent:
>>> fuzzy_and([s.is_commutative for s in syms])
False
"""
rv = True
for ai in args:
ai = fuzzy_bool(ai)
if ai is False:
return False
if rv: # this will stop updating if a None is ever trapped
rv = ai
return rv
def fuzzy_not(v):
"""
Not in fuzzy logic
Return None if `v` is None else `not v`.
Examples
========
>>> from sympy.core.logic import fuzzy_not
>>> fuzzy_not(True)
False
>>> fuzzy_not(None)
>>> fuzzy_not(False)
True
"""
if v is None:
return v
else:
return not v
def fuzzy_or(args):
"""
Or in fuzzy logic. Returns True (any True), False (all False), or None
See the docstrings of fuzzy_and and fuzzy_not for more info. fuzzy_or is
related to the two by the standard De Morgan's law.
>>> from sympy.core.logic import fuzzy_or
>>> fuzzy_or([True, False])
True
>>> fuzzy_or([True, None])
True
>>> fuzzy_or([False, False])
False
>>> print(fuzzy_or([False, None]))
None
"""
rv = False
for ai in args:
ai = fuzzy_bool(ai)
if ai is True:
return True
if rv is False: # this will stop updating if a None is ever trapped
rv = ai
return rv
def fuzzy_xor(args):
"""Return None if any element of args is not True or False, else
True (if there are an odd number of True elements), else False."""
t = f = 0
for a in args:
ai = fuzzy_bool(a)
if ai:
t += 1
elif ai is False:
f += 1
else:
return
return t % 2 == 1
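# Illustrative values for fuzzy_xor (derived from the definition above):
#   fuzzy_xor([True, False, False]) -> True    (odd number of Trues)
#   fuzzy_xor([True, True, False])  -> False   (even number of Trues)
#   fuzzy_xor([True, None])         -> None    (an unknown member)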
def fuzzy_nand(args):
"""Return False if all args are True, True if they are all False,
else None."""
return fuzzy_not(fuzzy_and(args))
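# Illustrative values for fuzzy_nand:
#   fuzzy_nand([True, True])  -> False
#   fuzzy_nand([True, False]) -> True
#   fuzzy_nand([True, None])  -> None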
class Logic(object):
"""Logical expression"""
# {} 'op' -> LogicClass
op_2class = {} # type: Dict[str, Type[Logic]]
def __new__(cls, *args):
obj = object.__new__(cls)
obj.args = args
return obj
def __getnewargs__(self):
return self.args
def __hash__(self):
return hash((type(self).__name__,) + tuple(self.args))
def __eq__(a, b):
if not isinstance(b, type(a)):
return False
else:
return a.args == b.args
def __ne__(a, b):
if not isinstance(b, type(a)):
return True
else:
return a.args != b.args
def __lt__(self, other):
if self.__cmp__(other) == -1:
return True
return False
def __cmp__(self, other):
if type(self) is not type(other):
a = str(type(self))
b = str(type(other))
else:
a = self.args
b = other.args
return (a > b) - (a < b)
def __str__(self):
return '%s(%s)' % (self.__class__.__name__,
', '.join(str(a) for a in self.args))
__repr__ = __str__
@staticmethod
def fromstring(text):
"""Logic from string with space around & and | but none after !.
e.g.
!a & b | c
"""
lexpr = None # current logical expression
schedop = None # scheduled operation
for term in text.split():
# operation symbol
if term in '&|':
if schedop is not None:
raise ValueError(
'double op forbidden: "%s %s"' % (term, schedop))
if lexpr is None:
raise ValueError(
'%s cannot be in the beginning of expression' % term)
schedop = term
continue
if '&' in term or '|' in term:
raise ValueError('& and | must have space around them')
if term[0] == '!':
if len(term) == 1:
raise ValueError('do not include space after "!"')
term = Not(term[1:])
# already scheduled operation, e.g. '&'
if schedop:
lexpr = Logic.op_2class[schedop](lexpr, term)
schedop = None
continue
# this should be atom
if lexpr is not None:
raise ValueError(
'missing op between "%s" and "%s"' % (lexpr, term))
lexpr = term
# let's check that we ended up in correct state
if schedop is not None:
raise ValueError('premature end-of-expression in "%s"' % text)
if lexpr is None:
raise ValueError('"%s" is empty' % text)
# everything looks good now
return lexpr
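# Illustrative parses (a sketch based on the loop above; '&' and '|' are applied
# left to right, so '!a & b | c' groups as ((!a & b) | c)):
#   Logic.fromstring('a & !b')     == And('a', Not('b'))
#   Logic.fromstring('!a & b | c') == Or(And(Not('a'), 'b'), 'c')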
class AndOr_Base(Logic):
def __new__(cls, *args):
bargs = []
for a in args:
if a == cls.op_x_notx:
return a
elif a == (not cls.op_x_notx):
continue # skip this argument
bargs.append(a)
args = sorted(set(cls.flatten(bargs)), key=hash)
for a in args:
if Not(a) in args:
return cls.op_x_notx
if len(args) == 1:
return args.pop()
elif len(args) == 0:
return not cls.op_x_notx
return Logic.__new__(cls, *args)
@classmethod
def flatten(cls, args):
# quick-n-dirty flattening for And and Or
args_queue = list(args)
res = []
while True:
try:
arg = args_queue.pop(0)
except IndexError:
break
if isinstance(arg, Logic):
if isinstance(arg, cls):
args_queue.extend(arg.args)
continue
res.append(arg)
args = tuple(res)
return args
class And(AndOr_Base):
op_x_notx = False
def _eval_propagate_not(self):
# !(a&b&c ...) == !a | !b | !c ...
return Or(*[Not(a) for a in self.args])
# (a|b|...) & c == (a&c) | (b&c) | ...
def expand(self):
# first locate Or
for i in range(len(self.args)):
arg = self.args[i]
if isinstance(arg, Or):
arest = self.args[:i] + self.args[i + 1:]
orterms = [And(*(arest + (a,))) for a in arg.args]
for j in range(len(orterms)):
if isinstance(orterms[j], Logic):
orterms[j] = orterms[j].expand()
res = Or(*orterms)
return res
return self
class Or(AndOr_Base):
op_x_notx = True
def _eval_propagate_not(self):
# !(a|b|c ...) == !a & !b & !c ...
return And(*[Not(a) for a in self.args])
class Not(Logic):
def __new__(cls, arg):
if isinstance(arg, str):
return Logic.__new__(cls, arg)
elif isinstance(arg, bool):
return not arg
elif isinstance(arg, Not):
return arg.args[0]
elif isinstance(arg, Logic):
# XXX this is a hack to expand right from the beginning
arg = arg._eval_propagate_not()
return arg
else:
raise ValueError('Not: unknown argument %r' % (arg,))
@property
def arg(self):
return self.args[0]
Logic.op_2class['&'] = And
Logic.op_2class['|'] = Or
Logic.op_2class['!'] = Not
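# Note: Not over a compound expression expands immediately via
# _eval_propagate_not, e.g. Not(And('a', 'b')) -> Or(Not('a'), Not('b')).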
# --- sympy.core.mul ---
from __future__ import print_function, division
from collections import defaultdict
from functools import cmp_to_key
import operator
from .sympify import sympify
from .basic import Basic
from .singleton import S
from .operations import AssocOp
from .cache import cacheit
from .logic import fuzzy_not, _fuzzy_group
from .compatibility import reduce
from .expr import Expr
from .parameters import global_parameters
# internal marker to indicate:
# "there are still non-commutative objects -- don't forget to process them"
class NC_Marker:
is_Order = False
is_Mul = False
is_Number = False
is_Poly = False
is_commutative = False
# Key for sorting commutative args in canonical order
_args_sortkey = cmp_to_key(Basic.compare)
def _mulsort(args):
# in-place sorting of args
args.sort(key=_args_sortkey)
def _unevaluated_Mul(*args):
"""Return a well-formed unevaluated Mul: Numbers are collected and
put in slot 0, any arguments that are Muls will be flattened, and args
are sorted. Use this when args have changed but you still want to return
an unevaluated Mul.
Examples
========
>>> from sympy.core.mul import _unevaluated_Mul as uMul
>>> from sympy import S, sqrt, Mul
>>> from sympy.abc import x
>>> a = uMul(*[S(3.0), x, S(2)])
>>> a.args[0]
6.00000000000000
>>> a.args[1]
x
Two unevaluated Muls with the same arguments will
always compare as equal during testing:
>>> m = uMul(sqrt(2), sqrt(3))
>>> m == uMul(sqrt(3), sqrt(2))
True
>>> u = Mul(sqrt(3), sqrt(2), evaluate=False)
>>> m == uMul(u)
True
>>> m == Mul(*m.args)
False
"""
args = list(args)
newargs = []
ncargs = []
co = S.One
while args:
a = args.pop()
if a.is_Mul:
c, nc = a.args_cnc()
args.extend(c)
if nc:
ncargs.append(Mul._from_args(nc))
elif a.is_Number:
co *= a
else:
newargs.append(a)
_mulsort(newargs)
if co is not S.One:
newargs.insert(0, co)
if ncargs:
newargs.append(Mul._from_args(ncargs))
return Mul._from_args(newargs)
class Mul(Expr, AssocOp):
__slots__ = ()
is_Mul = True
def __neg__(self):
c, args = self.as_coeff_mul()
c = -c
if c is not S.One:
if args[0].is_Number:
args = list(args)
if c is S.NegativeOne:
args[0] = -args[0]
else:
args[0] *= c
else:
args = (c,) + args
return self._from_args(args, self.is_commutative)
@classmethod
def flatten(cls, seq):
"""Return commutative, noncommutative and order arguments by
combining related terms.
Notes
=====
* In an expression like ``a*b*c``, Python processes this through SymPy
as ``Mul(Mul(a, b), c)``. This can have undesirable consequences.
- Sometimes terms are not combined as one would like:
{c.f. https://github.com/sympy/sympy/issues/4596}
>>> from sympy import Mul, sqrt
>>> from sympy.abc import x, y, z
>>> 2*(x + 1) # this is the 2-arg Mul behavior
2*x + 2
>>> y*(x + 1)*2
2*y*(x + 1)
>>> 2*(x + 1)*y # 2-arg result will be obtained first
y*(2*x + 2)
>>> Mul(2, x + 1, y) # all 3 args simultaneously processed
2*y*(x + 1)
>>> 2*((x + 1)*y) # parentheses can control this behavior
2*y*(x + 1)
Powers with compound bases may not find a single base to
combine with unless all arguments are processed at once.
Post-processing may be necessary in such cases.
{c.f. https://github.com/sympy/sympy/issues/5728}
>>> a = sqrt(x*sqrt(y))
>>> a**3
(x*sqrt(y))**(3/2)
>>> Mul(a,a,a)
(x*sqrt(y))**(3/2)
>>> a*a*a
x*sqrt(y)*sqrt(x*sqrt(y))
>>> _.subs(a.base, z).subs(z, a.base)
(x*sqrt(y))**(3/2)
- If more than two terms are being multiplied then all the
previous terms will be re-processed for each new argument.
So if each of ``a``, ``b`` and ``c`` were :class:`Mul`
expression, then ``a*b*c`` (or building up the product
with ``*=``) will process all the arguments of ``a`` and
``b`` twice: once when ``a*b`` is computed and again when
``c`` is multiplied.
Using ``Mul(a, b, c)`` will process all arguments once.
* The results of Mul are cached according to arguments, so flatten
will only be called once for ``Mul(a, b, c)``. If you can
structure a calculation so the arguments are most likely to be
repeats then this can save time in computing the answer. For
example, say you had a Mul, M, that you wished to divide by ``d[i]``
and multiply by ``n[i]`` and you suspect there are many repeats
in ``n``. It would be better to compute ``M*n[i]/d[i]`` rather
than ``M/d[i]*n[i]`` since every time n[i] is a repeat, the
product, ``M*n[i]`` will be returned without flattening -- the
cached value will be returned. If you divide by the ``d[i]``
first (and those are more unique than the ``n[i]``) then that will
create a new Mul, ``M/d[i]`` the args of which will be traversed
again when it is multiplied by ``n[i]``.
{c.f. https://github.com/sympy/sympy/issues/5706}
This consideration is moot if the cache is turned off.
NB
--
The validity of the above notes depends on the implementation
details of Mul and flatten which may change at any time. Therefore,
you should only consider them when your code is highly performance
sensitive.
Removal of 1 from the sequence is already handled by AssocOp.__new__.
"""
from sympy.calculus.util import AccumBounds
from sympy.matrices.expressions import MatrixExpr
rv = None
if len(seq) == 2:
a, b = seq
if b.is_Rational:
a, b = b, a
seq = [a, b]
assert a is not S.One
if not a.is_zero and a.is_Rational:
r, b = b.as_coeff_Mul()
if b.is_Add:
if r is not S.One: # 2-arg hack
# leave the Mul as a Mul
rv = [cls(a*r, b, evaluate=False)], [], None
elif global_parameters.distribute and b.is_commutative:
r, b = b.as_coeff_Add()
bargs = [_keep_coeff(a, bi) for bi in Add.make_args(b)]
_addsort(bargs)
ar = a*r
if ar:
bargs.insert(0, ar)
bargs = [Add._from_args(bargs)]
rv = bargs, [], None
if rv:
return rv
# apply associativity, separate commutative part of seq
c_part = [] # out: commutative factors
nc_part = [] # out: non-commutative factors
nc_seq = []
coeff = S.One # standalone term
# e.g. 3 * ...
c_powers = []   # (base, exp), e.g. (x, n) for x**n
num_exp = []    # (num-base, exp), e.g. (3, y) for ... * 3**y * ...
neg1e = S.Zero  # exponent on -1 extracted from Number-based Pow and I
pnum_rat = {}   # (num-base, Rat-exp), e.g. (3, 1/2) for ... * 3**(1/2) * ...
order_symbols = None
# --- PART 1 ---
#
# "collect powers and coeff":
#
# o coeff
# o c_powers
# o num_exp
# o neg1e
# o pnum_rat
#
# NOTE: this is optimized for all-objects-are-commutative case
for o in seq:
# O(x)
if o.is_Order:
o, order_symbols = o.as_expr_variables(order_symbols)
# Mul([...])
if o.is_Mul:
if o.is_commutative:
seq.extend(o.args) # XXX zerocopy?
else:
# NCMul can have commutative parts as well
for q in o.args:
if q.is_commutative:
seq.append(q)
else:
nc_seq.append(q)
# append non-commutative marker, so we don't forget to
# process scheduled non-commutative objects
seq.append(NC_Marker)
continue
# 3
elif o.is_Number:
if o is S.NaN or coeff is S.ComplexInfinity and o.is_zero:
# we know for sure the result will be nan
return [S.NaN], [], None
elif coeff.is_Number or isinstance(coeff, AccumBounds): # it could be zoo
coeff *= o
if coeff is S.NaN:
# we know for sure the result will be nan
return [S.NaN], [], None
continue
elif isinstance(o, AccumBounds):
coeff = o.__mul__(coeff)
continue
elif o is S.ComplexInfinity:
if not coeff:
# 0 * zoo = NaN
return [S.NaN], [], None
coeff = S.ComplexInfinity
continue
elif o is S.ImaginaryUnit:
neg1e += S.Half
continue
elif o.is_commutative:
# o = b**e
b, e = o.as_base_exp()
# e.g. o = 3**y
if o.is_Pow:
if b.is_Number:
# get all the factors with numeric base so they can be
# combined below, but don't combine negatives unless
# the exponent is an integer
if e.is_Rational:
if e.is_Integer:
coeff *= Pow(b, e) # it is an unevaluated power
continue
elif e.is_negative: # also a sign of an unevaluated power
seq.append(Pow(b, e))
continue
elif b.is_negative:
neg1e += e
b = -b
if b is not S.One:
pnum_rat.setdefault(b, []).append(e)
continue
elif b.is_positive or e.is_integer:
num_exp.append((b, e))
continue
c_powers.append((b, e))
# NON-COMMUTATIVE
# TODO: Make non-commutative exponents not combine automatically
else:
if o is not NC_Marker:
nc_seq.append(o)
# process nc_seq (if any)
while nc_seq:
o = nc_seq.pop(0)
if not nc_part:
nc_part.append(o)
continue
# try to combine last terms: a**b * a**c -> a**(b+c)
o1 = nc_part.pop()
b1, e1 = o1.as_base_exp()
b2, e2 = o.as_base_exp()
new_exp = e1 + e2
# Only allow powers to combine if the new exponent is
# not an Add. This allows things like a**2*a**3 == a**5
# if a.is_commutative == False, but prohibits
# a**x*a**y and x**a*x**b from combining (x,y commute).
if b1 == b2 and (not new_exp.is_Add):
o12 = b1 ** new_exp
# now o12 could be a commutative object
if o12.is_commutative:
seq.append(o12)
continue
else:
nc_seq.insert(0, o12)
else:
nc_part.append(o1)
nc_part.append(o)
# We do want a combined exponent if it would not be an Add, such as
# x**y * x**(2*y) -> x**(3*y)
# We determine if two exponents have the same term by using
# as_coeff_Mul.
#
# Unfortunately, this isn't smart enough to consider combining into
# exponents that might already be adds, so things like:
# x**(z - y) * x**y will be left alone. This is because checking every possible
# combination can slow things down.
# gather exponents of common bases...
def _gather(c_powers):
common_b = {} # b:e
for b, e in c_powers:
co = e.as_coeff_Mul()
common_b.setdefault(b, {}).setdefault(
co[1], []).append(co[0])
for b, d in common_b.items():
for di, li in d.items():
d[di] = Add(*li)
new_c_powers = []
for b, e in common_b.items():
new_c_powers.extend([(b, c*t) for t, c in e.items()])
return new_c_powers
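# e.g. _gather([(x, 2*y), (x, 3*y)]) collects to [(x, 5*y)] since both
# exponents share the term y (illustrative).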
# in c_powers
c_powers = _gather(c_powers)
# and in num_exp
num_exp = _gather(num_exp)
# --- PART 2 ---
#
# o process collected powers (x**0 -> 1; x**1 -> x; otherwise Pow)
# o combine collected powers (2**x * 3**x -> 6**x)
# with numeric base
# ................................
# now we have:
# - coeff:
# - c_powers: (b, e)
# - num_exp: (2, e)
# - pnum_rat: {(1/3, [1/3, 2/3, 1/4])}
# x**0 -> 1, x**1 -> x
# this should only need to run twice; if it fails because
# it needs to be run more times, perhaps this should be
# changed to a "while True" loop -- the only reason it
# isn't such now is to allow a less-than-perfect result to
# be obtained rather than raising an error or entering an
# infinite loop
for i in range(2):
new_c_powers = []
changed = False
for b, e in c_powers:
if e.is_zero:
# canceling out infinities yields NaN
if (b.is_Add or b.is_Mul) and any(infty in b.args
for infty in (S.ComplexInfinity, S.Infinity,
S.NegativeInfinity)):
return [S.NaN], [], None
continue
if e is S.One:
if b.is_Number:
coeff *= b
continue
p = b
if e is not S.One:
p = Pow(b, e)
# check to make sure that the base doesn't change
# after exponentiation; to allow for unevaluated
# Pow, we only do so if b is not already a Pow
if p.is_Pow and not b.is_Pow:
bi = b
b, e = p.as_base_exp()
if b != bi:
changed = True
c_part.append(p)
new_c_powers.append((b, e))
# there might have been a change, but unless the base
# matches some other base, there is nothing to do
if changed and len(set(
b for b, e in new_c_powers)) != len(new_c_powers):
# start over again
c_part = []
c_powers = _gather(new_c_powers)
else:
break
# 2**x * 3**x -> 6**x
inv_exp_dict = {}  # exp: Mul(num-bases), e.g. x: 6 for ... * 2**x * 3**x * ...
for b, e in num_exp:
inv_exp_dict.setdefault(e, []).append(b)
for e, b in inv_exp_dict.items():
inv_exp_dict[e] = cls(*b)
c_part.extend([Pow(b, e) for e, b in inv_exp_dict.items() if e])
# b, e -> e' = sum(e), b
# {(1/5, [1/3]), (1/2, [1/12, 1/4])} -> {(1/3, [1/5, 1/2])}
comb_e = {}
for b, e in pnum_rat.items():
comb_e.setdefault(Add(*e), []).append(b)
del pnum_rat
# process them, reducing exponents to values less than 1
# and updating coeff if necessary else adding them to
# num_rat for further processing
num_rat = []
for e, b in comb_e.items():
b = cls(*b)
if e.q == 1:
coeff *= Pow(b, e)
continue
if e.p > e.q:
e_i, ep = divmod(e.p, e.q)
coeff *= Pow(b, e_i)
e = Rational(ep, e.q)
num_rat.append((b, e))
del comb_e
# extract gcd of bases in num_rat
# 2**(1/3)*6**(1/4) -> 2**(1/3+1/4)*3**(1/4)
pnew = defaultdict(list)
i = 0 # steps through num_rat which may grow
while i < len(num_rat):
bi, ei = num_rat[i]
grow = []
for j in range(i + 1, len(num_rat)):
bj, ej = num_rat[j]
g = bi.gcd(bj)
if g is not S.One:
# 4**r1*6**r2 -> 2**(r1+r2) * 2**r1 * 3**r2
# this might have a gcd with something else
e = ei + ej
if e.q == 1:
coeff *= Pow(g, e)
else:
if e.p > e.q:
e_i, ep = divmod(e.p, e.q) # change e in place
coeff *= Pow(g, e_i)
e = Rational(ep, e.q)
grow.append((g, e))
# update the jth item
num_rat[j] = (bj/g, ej)
# update bi that we are checking with
bi = bi/g
if bi is S.One:
break
if bi is not S.One:
obj = Pow(bi, ei)
if obj.is_Number:
coeff *= obj
else:
# changes like sqrt(12) -> 2*sqrt(3)
for obj in Mul.make_args(obj):
if obj.is_Number:
coeff *= obj
else:
assert obj.is_Pow
bi, ei = obj.args
pnew[ei].append(bi)
num_rat.extend(grow)
i += 1
# combine bases of the new powers
for e, b in pnew.items():
pnew[e] = cls(*b)
# handle -1 and I
if neg1e:
# treat I as (-1)**(1/2) and compute -1's total exponent
p, q = neg1e.as_numer_denom()
# if the integer part is odd, extract -1
n, p = divmod(p, q)
if n % 2:
coeff = -coeff
# if it's a multiple of 1/2 extract I
if q == 2:
c_part.append(S.ImaginaryUnit)
elif p:
# see if there is any positive base this power of
# -1 can join
neg1e = Rational(p, q)
for e, b in pnew.items():
if e == neg1e and b.is_positive:
pnew[e] = -b
break
else:
# keep it separate; we've already evaluated it as
# much as possible so evaluate=False
c_part.append(Pow(S.NegativeOne, neg1e, evaluate=False))
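# e.g. for I*I: neg1e ends up as 1, so n = 1 (odd) flips the sign of
# coeff and nothing else is appended, giving -1 (illustrative).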
# add all the pnew powers
c_part.extend([Pow(b, e) for e, b in pnew.items()])
# oo, -oo
if (coeff is S.Infinity) or (coeff is S.NegativeInfinity):
def _handle_for_oo(c_part, coeff_sign):
new_c_part = []
for t in c_part:
if t.is_extended_positive:
continue
if t.is_extended_negative:
coeff_sign *= -1
continue
new_c_part.append(t)
return new_c_part, coeff_sign
c_part, coeff_sign = _handle_for_oo(c_part, 1)
nc_part, coeff_sign = _handle_for_oo(nc_part, coeff_sign)
coeff *= coeff_sign
# zoo
if coeff is S.ComplexInfinity:
# zoo might be
# infinite_real + bounded_im
# bounded_real + infinite_im
# infinite_real + infinite_im
# and non-zero real or imaginary will not change that status.
c_part = [c for c in c_part if not (fuzzy_not(c.is_zero) and
c.is_extended_real is not None)]
nc_part = [c for c in nc_part if not (fuzzy_not(c.is_zero) and
c.is_extended_real is not None)]
# 0
elif coeff.is_zero:
# we know for sure the result will be 0 unless the multiplicand
# is infinite or a matrix
if any(isinstance(c, MatrixExpr) for c in nc_part):
return [coeff], nc_part, order_symbols
if any(c.is_finite == False for c in c_part):
return [S.NaN], [], order_symbols
return [coeff], [], order_symbols
# check for straggling Numbers that were produced
_new = []
for i in c_part:
if i.is_Number:
coeff *= i
else:
_new.append(i)
c_part = _new
# order commutative part canonically
_mulsort(c_part)
# current code expects coeff to be always in slot-0
if coeff is not S.One:
c_part.insert(0, coeff)
# we are done
if (global_parameters.distribute and not nc_part and len(c_part) == 2 and
c_part[0].is_Number and c_part[0].is_finite and c_part[1].is_Add):
# 2*(1+a) -> 2 + 2 * a
coeff = c_part[0]
c_part = [Add(*[coeff*f for f in c_part[1].args])]
return c_part, nc_part, order_symbols
def _eval_power(b, e):
# don't break up NC terms: (A*B)**3 != A**3*B**3, it is A*B*A*B*A*B
cargs, nc = b.args_cnc(split_1=False)
if e.is_Integer:
return Mul(*[Pow(b, e, evaluate=False) for b in cargs]) * \
Pow(Mul._from_args(nc), e, evaluate=False)
if e.is_Rational and e.q == 2:
from sympy.core.power import integer_nthroot
from sympy.functions.elementary.complexes import sign
if b.is_imaginary:
a = b.as_real_imag()[1]
if a.is_Rational:
n, d = abs(a/2).as_numer_denom()
n, t = integer_nthroot(n, 2)
if t:
d, t = integer_nthroot(d, 2)
if t:
r = sympify(n)/d
return _unevaluated_Mul(r**e.p, (1 + sign(a)*S.ImaginaryUnit)**e.p)
p = Pow(b, e, evaluate=False)
if e.is_Rational or e.is_Float:
return p._eval_expand_power_base()
return p
@classmethod
def class_key(cls):
return 3, 0, cls.__name__
def _eval_evalf(self, prec):
c, m = self.as_coeff_Mul()
if c is S.NegativeOne:
if m.is_Mul:
rv = -AssocOp._eval_evalf(m, prec)
else:
mnew = m._eval_evalf(prec)
if mnew is not None:
m = mnew
rv = -m
else:
rv = AssocOp._eval_evalf(self, prec)
if rv.is_number:
return rv.expand()
return rv
@property
def _mpc_(self):
"""
Convert self to an mpmath mpc if possible
"""
from sympy.core.numbers import I, Float
im_part, imag_unit = self.as_coeff_Mul()
if not imag_unit == I:
# ValueError may seem more reasonable but since it's a @property,
# we need to use AttributeError to keep from confusing things like
# hasattr.
raise AttributeError("Cannot convert Mul to mpc. Must be of the form Number*I")
return (Float(0)._mpf_, Float(im_part)._mpf_)
@cacheit
def as_two_terms(self):
"""Return head and tail of self.
This is the most efficient way to get the head and tail of an
expression.
- if you want only the head, use self.args[0];
- if you want to process the arguments of the tail then use
self.as_coeff_mul() which gives the head and a tuple containing
the arguments of the tail when treated as a Mul.
- if you want the coefficient when self is treated as an Add
then use self.as_coeff_add()[0]
>>> from sympy.abc import x, y
>>> (3*x*y).as_two_terms()
(3, x*y)
"""
args = self.args
if len(args) == 1:
return S.One, self
elif len(args) == 2:
return args
else:
return args[0], self._new_rawargs(*args[1:])
@cacheit
def as_coefficients_dict(self):
"""Return a dictionary mapping terms to their coefficient.
Since the dictionary is a defaultdict, inquiries about terms which
were not present will return a coefficient of 0. The dictionary
is considered to have a single term.
Examples
========
>>> from sympy.abc import a, x
>>> (3*a*x).as_coefficients_dict()
{a*x: 3}
>>> _[a]
0
"""
d = defaultdict(int)
args = self.args
if len(args) == 1 or not args[0].is_Number:
d[self] = S.One
else:
d[self._new_rawargs(*args[1:])] = args[0]
return d
@cacheit
def as_coeff_mul(self, *deps, **kwargs):
if deps:
from sympy.utilities.iterables import sift
l1, l2 = sift(self.args, lambda x: x.has(*deps), binary=True)
return self._new_rawargs(*l2), tuple(l1)
rational = kwargs.pop('rational', True)
args = self.args
if args[0].is_Number:
if not rational or args[0].is_Rational:
return args[0], args[1:]
elif args[0].is_extended_negative:
return S.NegativeOne, (-args[0],) + args[1:]
return S.One, args
def as_coeff_Mul(self, rational=False):
"""
Efficiently extract the coefficient of a product.
"""
coeff, args = self.args[0], self.args[1:]
if coeff.is_Number:
if not rational or coeff.is_Rational:
if len(args) == 1:
return coeff, args[0]
else:
return coeff, self._new_rawargs(*args)
elif coeff.is_extended_negative:
return S.NegativeOne, self._new_rawargs(*((-coeff,) + args))
return S.One, self
def as_real_imag(self, deep=True, **hints):
from sympy import Abs, expand_mul, im, re
other = []
coeffr = []
coeffi = []
addterms = S.One
for a in self.args:
r, i = a.as_real_imag()
if i.is_zero:
coeffr.append(r)
elif r.is_zero:
coeffi.append(i*S.ImaginaryUnit)
elif a.is_commutative:
# search for complex conjugate pairs:
for i, x in enumerate(other):
if x == a.conjugate():
coeffr.append(Abs(x)**2)
del other[i]
break
else:
if a.is_Add:
addterms *= a
else:
other.append(a)
else:
other.append(a)
m = self.func(*other)
if hints.get('ignore') == m:
return
if len(coeffi) % 2:
imco = im(coeffi.pop(0))
# all other pairs make a real factor; they will be
# put into reco below
else:
imco = S.Zero
reco = self.func(*(coeffr + coeffi))
r, i = (reco*re(m), reco*im(m))
if addterms == 1:
if m == 1:
if imco.is_zero:
return (reco, S.Zero)
else:
return (S.Zero, reco*imco)
if imco is S.Zero:
return (r, i)
return (-imco*i, imco*r)
addre, addim = expand_mul(addterms, deep=False).as_real_imag()
if imco is S.Zero:
return (r*addre - i*addim, i*addre + r*addim)
else:
r, i = -imco*i, imco*r
return (r*addre - i*addim, r*addim + i*addre)
@staticmethod
def _expandsums(sums):
"""
Helper function for _eval_expand_mul.
sums must be a list of instances of Basic.
"""
L = len(sums)
if L == 1:
return sums[0].args
terms = []
left = Mul._expandsums(sums[:L//2])
right = Mul._expandsums(sums[L//2:])
terms = [Mul(a, b) for a in left for b in right]
added = Add(*terms)
return Add.make_args(added) # it may have collapsed down to one term
def _eval_expand_mul(self, **hints):
from sympy import fraction
# Handle things like 1/(x*(x + 1)), which are automatically converted
# to 1/x*1/(x + 1)
expr = self
n, d = fraction(expr)
if d.is_Mul:
n, d = [i._eval_expand_mul(**hints) if i.is_Mul else i
for i in (n, d)]
expr = n/d
if not expr.is_Mul:
return expr
plain, sums, rewrite = [], [], False
for factor in expr.args:
if factor.is_Add:
sums.append(factor)
rewrite = True
else:
if factor.is_commutative:
plain.append(factor)
else:
sums.append(Basic(factor)) # Wrapper
if not rewrite:
return expr
else:
plain = self.func(*plain)
if sums:
deep = hints.get("deep", False)
terms = self.func._expandsums(sums)
args = []
for term in terms:
t = self.func(plain, term)
if t.is_Mul and any(a.is_Add for a in t.args) and deep:
t = t._eval_expand_mul()
args.append(t)
return Add(*args)
else:
return plain
@cacheit
def _eval_derivative(self, s):
args = list(self.args)
terms = []
for i in range(len(args)):
d = args[i].diff(s)
if d:
# Note: reduce is used in step of Mul as Mul is unable to
# handle subtypes and operation priority:
terms.append(reduce(lambda x, y: x*y, (args[:i] + [d] + args[i + 1:]), S.One))
return Add.fromiter(terms)
@cacheit
def _eval_derivative_n_times(self, s, n):
from sympy import Integer, factorial, prod, Sum, Max
from sympy.ntheory.multinomial import multinomial_coefficients_iterator
from .function import AppliedUndef
from .symbol import Symbol, symbols, Dummy
if not isinstance(s, AppliedUndef) and not isinstance(s, Symbol):
# other types of s may not be well behaved, e.g.
# (cos(x)*sin(y)).diff([[x, y, z]])
return super(Mul, self)._eval_derivative_n_times(s, n)
args = self.args
m = len(args)
if isinstance(n, (int, Integer)):
# https://en.wikipedia.org/wiki/General_Leibniz_rule#More_than_two_factors
terms = []
for kvals, c in multinomial_coefficients_iterator(m, n):
p = prod([arg.diff((s, k)) for k, arg in zip(kvals, args)])
terms.append(c * p)
return Add(*terms)
kvals = symbols("k1:%i" % m, cls=Dummy)
klast = n - sum(kvals)
nfact = factorial(n)
e, l = (# better to use the multinomial?
nfact/prod(map(factorial, kvals))/factorial(klast)*\
prod([args[t].diff((s, kvals[t])) for t in range(m-1)])*\
args[-1].diff((s, Max(0, klast))),
[(k, 0, n) for k in kvals])
return Sum(e, *l)
def _eval_difference_delta(self, n, step):
from sympy.series.limitseq import difference_delta as dd
arg0 = self.args[0]
rest = Mul(*self.args[1:])
return (arg0.subs(n, n + step) * dd(rest, n, step) + dd(arg0, n, step) *
rest)
def _matches_simple(self, expr, repl_dict):
# handle (w*3).matches('x*5') -> {w: x*5/3}
coeff, terms = self.as_coeff_Mul()
terms = Mul.make_args(terms)
if len(terms) == 1:
newexpr = self.__class__._combine_inverse(expr, coeff)
return terms[0].matches(newexpr, repl_dict)
return
def matches(self, expr, repl_dict={}, old=False):
expr = sympify(expr)
if self.is_commutative and expr.is_commutative:
return AssocOp._matches_commutative(self, expr, repl_dict, old)
elif self.is_commutative is not expr.is_commutative:
return None
# Proceed only if both expressions are non-commutative
c1, nc1 = self.args_cnc()
c2, nc2 = expr.args_cnc()
c1, c2 = [c or [1] for c in [c1, c2]]
# TODO: Should these be self.func?
comm_mul_self = Mul(*c1)
comm_mul_expr = Mul(*c2)
repl_dict = comm_mul_self.matches(comm_mul_expr, repl_dict, old)
# If the commutative arguments didn't match and aren't equal, then
# the expression as a whole doesn't match
if repl_dict is None and c1 != c2:
return None
# Now match the non-commutative arguments, expanding powers to
# multiplications
nc1 = Mul._matches_expand_pows(nc1)
nc2 = Mul._matches_expand_pows(nc2)
repl_dict = Mul._matches_noncomm(nc1, nc2, repl_dict)
return repl_dict or None
@staticmethod
def _matches_expand_pows(arg_list):
new_args = []
for arg in arg_list:
if arg.is_Pow and arg.exp > 0:
new_args.extend([arg.base] * arg.exp)
else:
new_args.append(arg)
return new_args
@staticmethod
def _matches_noncomm(nodes, targets, repl_dict={}):
"""Non-commutative multiplication matcher.
`nodes` is a list of symbols within the matcher multiplication
expression, while `targets` is a list of arguments in the
multiplication expression being matched against.
"""
# List of possible future states to be considered
agenda = []
# The current matching state, storing index in nodes and targets
state = (0, 0)
node_ind, target_ind = state
# Mapping between wildcard indices and the index ranges they match
wildcard_dict = {}
repl_dict = repl_dict.copy()
while target_ind < len(targets) and node_ind < len(nodes):
node = nodes[node_ind]
if node.is_Wild:
Mul._matches_add_wildcard(wildcard_dict, state)
states_matches = Mul._matches_new_states(wildcard_dict, state,
nodes, targets)
if states_matches:
new_states, new_matches = states_matches
agenda.extend(new_states)
if new_matches:
for match in new_matches:
repl_dict[match] = new_matches[match]
if not agenda:
return None
else:
state = agenda.pop()
node_ind, target_ind = state
return repl_dict
@staticmethod
def _matches_add_wildcard(dictionary, state):
node_ind, target_ind = state
if node_ind in dictionary:
begin, end = dictionary[node_ind]
dictionary[node_ind] = (begin, target_ind)
else:
dictionary[node_ind] = (target_ind, target_ind)
@staticmethod
def _matches_new_states(dictionary, state, nodes, targets):
node_ind, target_ind = state
node = nodes[node_ind]
target = targets[target_ind]
# Don't advance at all if we've exhausted the targets but not the nodes
if target_ind >= len(targets) - 1 and node_ind < len(nodes) - 1:
return None
if node.is_Wild:
match_attempt = Mul._matches_match_wilds(dictionary, node_ind,
nodes, targets)
if match_attempt:
# If the same node has been matched before, don't return
# anything if the current match is diverging from the previous
# match
other_node_inds = Mul._matches_get_other_nodes(dictionary,
nodes, node_ind)
for ind in other_node_inds:
other_begin, other_end = dictionary[ind]
curr_begin, curr_end = dictionary[node_ind]
other_targets = targets[other_begin:other_end + 1]
current_targets = targets[curr_begin:curr_end + 1]
for curr, other in zip(current_targets, other_targets):
if curr != other:
return None
# A wildcard node can match more than one target, so only the
# target index is advanced
new_state = [(node_ind, target_ind + 1)]
# Only move on to the next node if there is one
if node_ind < len(nodes) - 1:
new_state.append((node_ind + 1, target_ind + 1))
return new_state, match_attempt
else:
# If we're not at a wildcard, then make sure we haven't exhausted
# nodes but not targets, since in this case one node can only match
# one target
if node_ind >= len(nodes) - 1 and target_ind < len(targets) - 1:
return None
match_attempt = node.matches(target)
if match_attempt:
return [(node_ind + 1, target_ind + 1)], match_attempt
elif node == target:
return [(node_ind + 1, target_ind + 1)], None
else:
return None
@staticmethod
def _matches_match_wilds(dictionary, wildcard_ind, nodes, targets):
"""Determine matches of a wildcard with sub-expression in `target`."""
wildcard = nodes[wildcard_ind]
begin, end = dictionary[wildcard_ind]
terms = targets[begin:end + 1]
# TODO: Should this be self.func?
mul = Mul(*terms) if len(terms) > 1 else terms[0]
return wildcard.matches(mul)
@staticmethod
def _matches_get_other_nodes(dictionary, nodes, node_ind):
"""Find other wildcards that may have already been matched."""
other_node_inds = []
for ind in dictionary:
if nodes[ind] == nodes[node_ind]:
other_node_inds.append(ind)
return other_node_inds
@staticmethod
def _combine_inverse(lhs, rhs):
"""
Returns lhs/rhs, but treats arguments like symbols, so things
like oo/oo return 1 (instead of a nan) and ``I`` behaves like
a symbol instead of sqrt(-1).
"""
from .symbol import Dummy
if lhs == rhs:
return S.One
def check(l, r):
if l.is_Float and r.is_comparable:
# if both objects are added to 0 they will share the same "normalization"
# and are more likely to compare the same. Since Add(foo, 0) will not allow
# the 0 to pass, we use __add__ directly.
return l.__add__(0) == r.evalf().__add__(0)
return False
if check(lhs, rhs) or check(rhs, lhs):
return S.One
if any(i.is_Pow or i.is_Mul for i in (lhs, rhs)):
# gruntz and limit wants a literal I to not combine
# with a power of -1
d = Dummy('I')
_i = {S.ImaginaryUnit: d}
i_ = {d: S.ImaginaryUnit}
a = lhs.xreplace(_i).as_powers_dict()
b = rhs.xreplace(_i).as_powers_dict()
blen = len(b)
for bi in tuple(b.keys()):
if bi in a:
a[bi] -= b.pop(bi)
if not a[bi]:
a.pop(bi)
if len(b) != blen:
lhs = Mul(*[k**v for k, v in a.items()]).xreplace(i_)
rhs = Mul(*[k**v for k, v in b.items()]).xreplace(i_)
return lhs/rhs
def as_powers_dict(self):
d = defaultdict(int)
for term in self.args:
for b, e in term.as_powers_dict().items():
d[b] += e
return d
def as_numer_denom(self):
# don't use _from_args to rebuild the numerators and denominators
# as the order is not guaranteed to be the same once they have
# been separated from each other
numers, denoms = list(zip(*[f.as_numer_denom() for f in self.args]))
return self.func(*numers), self.func(*denoms)
def as_base_exp(self):
e1 = None
bases = []
nc = 0
for m in self.args:
b, e = m.as_base_exp()
if not b.is_commutative:
nc += 1
if e1 is None:
e1 = e
elif e != e1 or nc > 1:
return self, S.One
bases.append(b)
return self.func(*bases), e1
def _eval_is_polynomial(self, syms):
return all(term._eval_is_polynomial(syms) for term in self.args)
def _eval_is_rational_function(self, syms):
return all(term._eval_is_rational_function(syms) for term in self.args)
def _eval_is_algebraic_expr(self, syms):
return all(term._eval_is_algebraic_expr(syms) for term in self.args)
_eval_is_commutative = lambda self: _fuzzy_group(
a.is_commutative for a in self.args)
def _eval_is_complex(self):
comp = _fuzzy_group((a.is_complex for a in self.args))
if comp is False:
if any(a.is_infinite for a in self.args):
if any(a.is_zero is not False for a in self.args):
return None
return False
return comp
def _eval_is_finite(self):
if all(a.is_finite for a in self.args):
return True
if any(a.is_infinite for a in self.args):
if all(a.is_zero is False for a in self.args):
return False
def _eval_is_infinite(self):
if any(a.is_infinite for a in self.args):
if any(a.is_zero for a in self.args):
return S.NaN.is_infinite
if any(a.is_zero is None for a in self.args):
return None
return True
def _eval_is_rational(self):
r = _fuzzy_group((a.is_rational for a in self.args), quick_exit=True)
if r:
return r
elif r is False:
return self.is_zero
def _eval_is_algebraic(self):
r = _fuzzy_group((a.is_algebraic for a in self.args), quick_exit=True)
if r:
return r
elif r is False:
return self.is_zero
def _eval_is_zero(self):
zero = infinite = False
for a in self.args:
z = a.is_zero
if z:
if infinite:
return # 0*oo is nan and nan.is_zero is None
zero = True
else:
if not a.is_finite:
if zero:
return # 0*oo is nan and nan.is_zero is None
infinite = True
if zero is False and z is None: # trap None
zero = None
return zero
def _eval_is_integer(self):
is_rational = self.is_rational
if is_rational:
n, d = self.as_numer_denom()
if d is S.One:
return True
elif d == S(2):
return n.is_even
elif is_rational is False:
return False
def _eval_is_polar(self):
has_polar = any(arg.is_polar for arg in self.args)
return has_polar and \
all(arg.is_polar or arg.is_positive for arg in self.args)
def _eval_is_extended_real(self):
return self._eval_real_imag(True)
def _eval_real_imag(self, real):
zero = False
t_not_re_im = None
for t in self.args:
if (t.is_complex or t.is_infinite) is False and t.is_extended_real is False:
return False
elif t.is_imaginary: # I
real = not real
elif t.is_extended_real: # 2
if not zero:
z = t.is_zero
if not z and zero is False:
zero = z
elif z:
if all(a.is_finite for a in self.args):
return True
return
elif t.is_extended_real is False:
# symbolic or literal like `2 + I` or symbolic imaginary
if t_not_re_im:
return # complex terms might cancel
t_not_re_im = t
elif t.is_imaginary is False: # symbolic like `2` or `2 + I`
if t_not_re_im:
return # complex terms might cancel
t_not_re_im = t
else:
return
if t_not_re_im:
if t_not_re_im.is_extended_real is False:
if real: # like 3
return zero # 3*(smthng like 2 + I or i) is not real
if t_not_re_im.is_imaginary is False: # symbolic 2 or 2 + I
if not real: # like I
return zero # I*(smthng like 2 or 2 + I) is not real
elif zero is False:
return real # can't be trumped by 0
elif real:
return real # doesn't matter what zero is
def _eval_is_imaginary(self):
z = self.is_zero
if z:
return False
if self.is_finite is False:
return False
elif z is False and self.is_finite is True:
return self._eval_real_imag(False)
def _eval_is_hermitian(self):
return self._eval_herm_antiherm(True)
def _eval_herm_antiherm(self, real):
one_nc = zero = one_neither = False
for t in self.args:
if not t.is_commutative:
if one_nc:
return
one_nc = True
if t.is_antihermitian:
real = not real
elif t.is_hermitian:
if not zero:
z = t.is_zero
if not z and zero is False:
zero = z
elif z:
if all(a.is_finite for a in self.args):
return True
return
elif t.is_hermitian is False:
if one_neither:
return
one_neither = True
else:
return
if one_neither:
if real:
return zero
elif zero is False or real:
return real
def _eval_is_antihermitian(self):
z = self.is_zero
if z:
return False
elif z is False:
return self._eval_herm_antiherm(False)
def _eval_is_irrational(self):
for t in self.args:
a = t.is_irrational
if a:
others = list(self.args)
others.remove(t)
if all((x.is_rational and fuzzy_not(x.is_zero)) is True for x in others):
return True
return
if a is None:
return
if all(x.is_real for x in self.args):
return False
def _eval_is_extended_positive(self):
"""Return True if self is positive, False if not, and None if it
cannot be determined.
This algorithm is non-recursive and works by keeping track of the
sign which changes when a negative or nonpositive is encountered.
Whether a nonpositive or nonnegative is seen is also tracked since
the presence of these makes it impossible to return True, but
possible to return False if the end result is nonpositive. e.g.
pos * neg * nonpositive -> pos or zero -> None is returned
pos * neg * nonnegative -> neg or zero -> False is returned
"""
return self._eval_pos_neg(1)
def _eval_pos_neg(self, sign):
saw_NON = saw_NOT = False
for t in self.args:
if t.is_extended_positive:
continue
elif t.is_extended_negative:
sign = -sign
elif t.is_zero:
if all(a.is_finite for a in self.args):
return False
return
elif t.is_extended_nonpositive:
sign = -sign
saw_NON = True
elif t.is_extended_nonnegative:
saw_NON = True
# FIXME: is_positive/is_negative is False doesn't take account of
# Symbol('x', infinite=True, extended_real=True) which has
# e.g. is_positive is False but has uncertain sign.
elif t.is_positive is False:
sign = -sign
if saw_NOT:
return
saw_NOT = True
elif t.is_negative is False:
if saw_NOT:
return
saw_NOT = True
else:
return
if sign == 1 and saw_NON is False and saw_NOT is False:
return True
if sign < 0:
return False
def _eval_is_extended_negative(self):
return self._eval_pos_neg(-1)
def _eval_is_odd(self):
is_integer = self.is_integer
if is_integer:
r, acc = True, 1
for t in self.args:
if not t.is_integer:
return None
elif t.is_even:
r = False
elif t.is_integer:
if r is False:
pass
elif acc != 1 and (acc + t).is_odd:
r = False
elif t.is_odd is None:
r = None
acc = t
return r
# !integer -> !odd
elif is_integer is False:
return False
def _eval_is_even(self):
is_integer = self.is_integer
if is_integer:
return fuzzy_not(self.is_odd)
elif is_integer is False:
return False
def _eval_is_composite(self):
"""
        Here we count the number of arguments whose minimum value is
        greater than one.
        If there is more than one such argument then the result is composite.
        Else, the result cannot be determined.
"""
number_of_args = 0 # count of symbols with minimum value greater than one
for arg in self.args:
if not (arg.is_integer and arg.is_positive):
return None
if (arg-1).is_positive:
number_of_args += 1
if number_of_args > 1:
return True
def _eval_subs(self, old, new):
from sympy.functions.elementary.complexes import sign
from sympy.ntheory.factor_ import multiplicity
from sympy.simplify.powsimp import powdenest
from sympy.simplify.radsimp import fraction
if not old.is_Mul:
return None
# try keep replacement literal so -2*x doesn't replace 4*x
if old.args[0].is_Number and old.args[0] < 0:
if self.args[0].is_Number:
if self.args[0] < 0:
return self._subs(-old, -new)
return None
def base_exp(a):
            # if I and -1 are in a Mul, they both end up with
# a -1 base (see issue 6421); all we want here are the
# true Pow or exp separated into base and exponent
from sympy import exp
if a.is_Pow or isinstance(a, exp):
return a.as_base_exp()
return a, S.One
def breakup(eq):
"""break up powers of eq when treated as a Mul:
b**(Rational*e) -> b**e, Rational
commutatives come back as a dictionary {b**e: Rational}
noncommutatives come back as a list [(b**e, Rational)]
"""
(c, nc) = (defaultdict(int), list())
for a in Mul.make_args(eq):
a = powdenest(a)
(b, e) = base_exp(a)
if e is not S.One:
(co, _) = e.as_coeff_mul()
b = Pow(b, e/co)
e = co
if a.is_commutative:
c[b] += e
else:
nc.append([b, e])
return (c, nc)
def rejoin(b, co):
"""
Put rational back with exponent; in general this is not ok, but
since we took it from the exponent for analysis, it's ok to put
it back.
"""
(b, e) = base_exp(b)
return Pow(b, e*co)
def ndiv(a, b):
"""if b divides a in an extractive way (like 1/4 divides 1/2
but not vice versa, and 2/5 does not divide 1/3) then return
the integer number of times it divides, else return 0.
"""
if not b.q % a.q or not a.q % b.q:
return int(a/b)
return 0
# give Muls in the denominator a chance to be changed (see issue 5651)
# rv will be the default return value
rv = None
n, d = fraction(self)
self2 = self
if d is not S.One:
self2 = n._subs(old, new)/d._subs(old, new)
if not self2.is_Mul:
return self2._subs(old, new)
if self2 != self:
rv = self2
# Now continue with regular substitution.
# handle the leading coefficient and use it to decide if anything
# should even be started; we always know where to find the Rational
# so it's a quick test
co_self = self2.args[0]
co_old = old.args[0]
co_xmul = None
if co_old.is_Rational and co_self.is_Rational:
# if coeffs are the same there will be no updating to do
# below after breakup() step; so skip (and keep co_xmul=None)
if co_old != co_self:
co_xmul = co_self.extract_multiplicatively(co_old)
elif co_old.is_Rational:
return rv
# break self and old into factors
(c, nc) = breakup(self2)
(old_c, old_nc) = breakup(old)
# update the coefficients if we had an extraction
# e.g. if co_self were 2*(3/35*x)**2 and co_old = 3/5
# then co_self in c is replaced by (3/5)**2 and co_residual
# is 2*(1/7)**2
if co_xmul and co_xmul.is_Rational and abs(co_old) != 1:
mult = S(multiplicity(abs(co_old), co_self))
c.pop(co_self)
if co_old in c:
c[co_old] += mult
else:
c[co_old] = mult
co_residual = co_self/co_old**mult
else:
co_residual = 1
# do quick tests to see if we can't succeed
ok = True
if len(old_nc) > len(nc):
# more non-commutative terms
ok = False
elif len(old_c) > len(c):
# more commutative terms
ok = False
elif set(i[0] for i in old_nc).difference(set(i[0] for i in nc)):
# unmatched non-commutative bases
ok = False
elif set(old_c).difference(set(c)):
# unmatched commutative terms
ok = False
elif any(sign(c[b]) != sign(old_c[b]) for b in old_c):
# differences in sign
ok = False
if not ok:
return rv
if not old_c:
cdid = None
else:
rat = []
for (b, old_e) in old_c.items():
c_e = c[b]
rat.append(ndiv(c_e, old_e))
if not rat[-1]:
return rv
cdid = min(rat)
if not old_nc:
ncdid = None
for i in range(len(nc)):
nc[i] = rejoin(*nc[i])
else:
ncdid = 0 # number of nc replacements we did
take = len(old_nc) # how much to look at each time
limit = cdid or S.Infinity # max number that we can take
failed = [] # failed terms will need subs if other terms pass
i = 0
while limit and i + take <= len(nc):
hit = False
# the bases must be equivalent in succession, and
# the powers must be extractively compatible on the
# first and last factor but equal in between.
rat = []
for j in range(take):
if nc[i + j][0] != old_nc[j][0]:
break
elif j == 0:
rat.append(ndiv(nc[i + j][1], old_nc[j][1]))
elif j == take - 1:
rat.append(ndiv(nc[i + j][1], old_nc[j][1]))
elif nc[i + j][1] != old_nc[j][1]:
break
else:
rat.append(1)
j += 1
else:
ndo = min(rat)
if ndo:
if take == 1:
if cdid:
ndo = min(cdid, ndo)
nc[i] = Pow(new, ndo)*rejoin(nc[i][0],
nc[i][1] - ndo*old_nc[0][1])
else:
ndo = 1
# the left residual
l = rejoin(nc[i][0], nc[i][1] - ndo*
old_nc[0][1])
# eliminate all middle terms
mid = new
# the right residual (which may be the same as the middle if take == 2)
ir = i + take - 1
r = (nc[ir][0], nc[ir][1] - ndo*
old_nc[-1][1])
if r[1]:
if i + take < len(nc):
nc[i:i + take] = [l*mid, r]
else:
r = rejoin(*r)
nc[i:i + take] = [l*mid*r]
else:
# there was nothing left on the right
nc[i:i + take] = [l*mid]
limit -= ndo
ncdid += ndo
hit = True
if not hit:
# do the subs on this failing factor
failed.append(i)
i += 1
else:
if not ncdid:
return rv
# although we didn't fail, certain nc terms may have
# failed so we rebuild them after attempting a partial
# subs on them
failed.extend(range(i, len(nc)))
for i in failed:
nc[i] = rejoin(*nc[i]).subs(old, new)
# rebuild the expression
if cdid is None:
do = ncdid
elif ncdid is None:
do = cdid
else:
do = min(ncdid, cdid)
margs = []
for b in c:
if b in old_c:
# calculate the new exponent
e = c[b] - old_c[b]*do
margs.append(rejoin(b, e))
else:
margs.append(rejoin(b.subs(old, new), c[b]))
if cdid and not ncdid:
# in case we are replacing commutative with non-commutative,
# we want the new term to come at the front just like the
# rest of this routine
margs = [Pow(new, cdid)] + margs
return co_residual*self2.func(*margs)*self2.func(*nc)
def _eval_nseries(self, x, n, logx):
from sympy import Order, powsimp
terms = [t.nseries(x, n=n, logx=logx) for t in self.args]
res = powsimp(self.func(*terms).expand(), combine='exp', deep=True)
if res.has(Order):
res += Order(x**n, x)
return res
def _eval_as_leading_term(self, x):
return self.func(*[t.as_leading_term(x) for t in self.args])
def _eval_conjugate(self):
return self.func(*[t.conjugate() for t in self.args])
def _eval_transpose(self):
return self.func(*[t.transpose() for t in self.args[::-1]])
def _eval_adjoint(self):
return self.func(*[t.adjoint() for t in self.args[::-1]])
def _sage_(self):
s = 1
for x in self.args:
s *= x._sage_()
return s
def as_content_primitive(self, radical=False, clear=True):
"""Return the tuple (R, self/R) where R is the positive Rational
extracted from self.
Examples
========
>>> from sympy import sqrt
>>> (-3*sqrt(2)*(2 - 2*sqrt(2))).as_content_primitive()
(6, -sqrt(2)*(1 - sqrt(2)))
See docstring of Expr.as_content_primitive for more examples.
"""
coef = S.One
args = []
for i, a in enumerate(self.args):
c, p = a.as_content_primitive(radical=radical, clear=clear)
coef *= c
if p is not S.One:
args.append(p)
# don't use self._from_args here to reconstruct args
# since there may be identical args now that should be combined
# e.g. (2+2*x)*(3+3*x) should be (6, (1 + x)**2) not (6, (1+x)*(1+x))
return coef, self.func(*args)
def as_ordered_factors(self, order=None):
"""Transform an expression into an ordered list of factors.
Examples
========
>>> from sympy import sin, cos
>>> from sympy.abc import x, y
>>> (2*x*y*sin(x)*cos(x)).as_ordered_factors()
[2, x, y, sin(x), cos(x)]
"""
cpart, ncpart = self.args_cnc()
cpart.sort(key=lambda expr: expr.sort_key(order=order))
return cpart + ncpart
@property
def _sorted_args(self):
return tuple(self.as_ordered_factors())
def prod(a, start=1):
"""Return product of elements of a. Start with int 1 so if only
ints are included then an int result is returned.
Examples
========
>>> from sympy import prod, S
>>> prod(range(3))
0
>>> type(_) is int
True
>>> prod([S(2), 3])
6
>>> _.is_Integer
True
You can start the product at something other than 1:
>>> prod([1, 2], 3)
6
"""
return reduce(operator.mul, a, start)
def _keep_coeff(coeff, factors, clear=True, sign=False):
"""Return ``coeff*factors`` unevaluated if necessary.
If ``clear`` is False, do not keep the coefficient as a factor
if it can be distributed on a single factor such that one or
more terms will still have integer coefficients.
If ``sign`` is True, allow a coefficient of -1 to remain factored out.
Examples
========
>>> from sympy.core.mul import _keep_coeff
>>> from sympy.abc import x, y
>>> from sympy import S
>>> _keep_coeff(S.Half, x + 2)
(x + 2)/2
>>> _keep_coeff(S.Half, x + 2, clear=False)
x/2 + 1
>>> _keep_coeff(S.Half, (x + 2)*y, clear=False)
y*(x + 2)/2
>>> _keep_coeff(S(-1), x + y)
-x - y
>>> _keep_coeff(S(-1), x + y, sign=True)
-(x + y)
"""
if not coeff.is_Number:
if factors.is_Number:
factors, coeff = coeff, factors
else:
return coeff*factors
if coeff is S.One:
return factors
elif coeff is S.NegativeOne and not sign:
return -factors
elif factors.is_Add:
if not clear and coeff.is_Rational and coeff.q != 1:
q = S(coeff.q)
for i in factors.args:
c, t = i.as_coeff_Mul()
r = c/q
if r == int(r):
return coeff*factors
return Mul(coeff, factors, evaluate=False)
elif factors.is_Mul:
margs = list(factors.args)
if margs[0].is_Number:
margs[0] *= coeff
if margs[0] == 1:
margs.pop(0)
else:
margs.insert(0, coeff)
return Mul._from_args(margs)
else:
return coeff*factors
def expand_2arg(e):
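    # A sketch of the intent: distribute a Number coefficient over a 2-term
    # Mul whose other factor is an Add, e.g. 2*(x + 1) -> 2*x + 2, rebuilding
    # the result as an unevaluated Add.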
from sympy.simplify.simplify import bottom_up
def do(e):
if e.is_Mul:
c, r = e.as_coeff_Mul()
if c.is_Number and r.is_Add:
return _unevaluated_Add(*[c*ri for ri in r.args])
return e
return bottom_up(e, do)
from .numbers import Rational
from .power import Pow
from .add import Add, _addsort, _unevaluated_Add
|
e27b9b213529f2e1607a864132fb69e9931ff56419774b5e8e55ed4fea521f6c | """Tools for setting up printing in interactive sessions. """
from __future__ import print_function, division
import sys
from distutils.version import LooseVersion as V
from io import BytesIO
from sympy import latex as default_latex
from sympy import preview
from sympy.utilities.misc import debug
def _init_python_printing(stringify_func, **settings):
"""Setup printing in Python interactive session. """
import sys
from sympy.core.compatibility import builtins
def _displayhook(arg):
"""Python's pretty-printer display hook.
This function was adapted from:
http://www.python.org/dev/peps/pep-0217/
"""
if arg is not None:
builtins._ = None
print(stringify_func(arg, **settings))
builtins._ = arg
sys.displayhook = _displayhook
def _init_ipython_printing(ip, stringify_func, use_latex, euler, forecolor,
backcolor, fontsize, latex_mode, print_builtin,
latex_printer, scale, **settings):
"""Setup printing in IPython interactive session. """
try:
from IPython.lib.latextools import latex_to_png
except ImportError:
pass
# Guess best font color if none was given based on the ip.colors string.
# From the IPython documentation:
# It has four case-insensitive values: 'nocolor', 'neutral', 'linux',
# 'lightbg'. The default is neutral, which should be legible on either
# dark or light terminal backgrounds. linux is optimised for dark
# backgrounds and lightbg for light ones.
if forecolor is None:
color = ip.colors.lower()
if color == 'lightbg':
forecolor = 'Black'
elif color == 'linux':
forecolor = 'White'
else:
# No idea, go with gray.
forecolor = 'Gray'
debug("init_printing: Automatic foreground color:", forecolor)
preamble = "\\documentclass[varwidth,%s]{standalone}\n" \
"\\usepackage{amsmath,amsfonts}%s\\begin{document}"
if euler:
addpackages = '\\usepackage{euler}'
else:
addpackages = ''
if use_latex == "svg":
addpackages = addpackages + "\n\\special{color %s}" % forecolor
preamble = preamble % (fontsize, addpackages)
imagesize = 'tight'
offset = "0cm,0cm"
resolution = round(150*scale)
dvi = r"-T %s -D %d -bg %s -fg %s -O %s" % (
imagesize, resolution, backcolor, forecolor, offset)
dvioptions = dvi.split()
svg_scale = 150/72*scale
dvioptions_svg = ["--no-fonts", "--scale={}".format(svg_scale)]
debug("init_printing: DVIOPTIONS:", dvioptions)
debug("init_printing: DVIOPTIONS_SVG:", dvioptions_svg)
debug("init_printing: PREAMBLE:", preamble)
latex = latex_printer or default_latex
def _print_plain(arg, p, cycle):
"""caller for pretty, for use in IPython 0.11"""
if _can_print_latex(arg):
p.text(stringify_func(arg))
else:
p.text(IPython.lib.pretty.pretty(arg))
def _preview_wrapper(o):
exprbuffer = BytesIO()
try:
preview(o, output='png', viewer='BytesIO',
outputbuffer=exprbuffer, preamble=preamble,
dvioptions=dvioptions)
except Exception as e:
# IPython swallows exceptions
debug("png printing:", "_preview_wrapper exception raised:",
repr(e))
raise
return exprbuffer.getvalue()
def _svg_wrapper(o):
exprbuffer = BytesIO()
try:
preview(o, output='svg', viewer='BytesIO',
outputbuffer=exprbuffer, preamble=preamble,
dvioptions=dvioptions_svg)
except Exception as e:
# IPython swallows exceptions
debug("svg printing:", "_preview_wrapper exception raised:",
repr(e))
raise
return exprbuffer.getvalue().decode('utf-8')
def _matplotlib_wrapper(o):
# mathtext does not understand certain latex flags, so we try to
# replace them with suitable subs
o = o.replace(r'\operatorname', '')
o = o.replace(r'\overline', r'\bar')
# mathtext can't render some LaTeX commands. For example, it can't
# render any LaTeX environments such as array or matrix. So here we
# ensure that if mathtext fails to render, we return None.
try:
try:
return latex_to_png(o, color=forecolor, scale=scale)
except TypeError: # Old IPython version without color and scale
return latex_to_png(o)
except ValueError as e:
debug('matplotlib exception caught:', repr(e))
return None
from sympy import Basic
from sympy.matrices import MatrixBase
from sympy.physics.vector import Vector, Dyadic
from sympy.tensor.array import NDimArray
# These should all have _repr_latex_ and _repr_latex_orig. If you update
# this also update printable_types below.
sympy_latex_types = (Basic, MatrixBase, Vector, Dyadic, NDimArray)
def _can_print_latex(o):
"""Return True if type o can be printed with LaTeX.
If o is a container type, this is True if and only if every element of
o can be printed with LaTeX.
"""
try:
# If you're adding another type, make sure you add it to printable_types
# later in this file as well
builtin_types = (list, tuple, set, frozenset)
if isinstance(o, builtin_types):
# If the object is a custom subclass with a custom str or
# repr, use that instead.
if (type(o).__str__ not in (i.__str__ for i in builtin_types) or
type(o).__repr__ not in (i.__repr__ for i in builtin_types)):
return False
return all(_can_print_latex(i) for i in o)
elif isinstance(o, dict):
return all(_can_print_latex(i) and _can_print_latex(o[i]) for i in o)
elif isinstance(o, bool):
return False
# TODO : Investigate if "elif hasattr(o, '_latex')" is more useful
# to use here, than these explicit imports.
elif isinstance(o, sympy_latex_types):
return True
elif isinstance(o, (float, int)) and print_builtin:
return True
return False
except RuntimeError:
return False
# This is in case maximum recursion depth is reached.
            # Since RecursionError only exists in Python 3.5+, catching
            # RuntimeError here also guards against exceeding the recursion
            # limit on older Python versions.
def _print_latex_png(o):
"""
A function that returns a png rendered by an external latex
distribution, falling back to matplotlib rendering
"""
if _can_print_latex(o):
s = latex(o, mode=latex_mode, **settings)
if latex_mode == 'plain':
s = '$\\displaystyle %s$' % s
try:
return _preview_wrapper(s)
except RuntimeError as e:
debug('preview failed with:', repr(e),
' Falling back to matplotlib backend')
if latex_mode != 'inline':
s = latex(o, mode='inline', **settings)
return _matplotlib_wrapper(s)
def _print_latex_svg(o):
"""
        A function that returns an svg rendered by an external latex
distribution, no fallback available.
"""
if _can_print_latex(o):
s = latex(o, mode=latex_mode, **settings)
if latex_mode == 'plain':
s = '$\\displaystyle %s$' % s
try:
return _svg_wrapper(s)
except RuntimeError as e:
debug('preview failed with:', repr(e),
' No fallback available.')
def _print_latex_matplotlib(o):
"""
A function that returns a png rendered by mathtext
"""
if _can_print_latex(o):
s = latex(o, mode='inline', **settings)
return _matplotlib_wrapper(s)
def _print_latex_text(o):
"""
A function to generate the latex representation of sympy expressions.
"""
if _can_print_latex(o):
s = latex(o, mode=latex_mode, **settings)
if latex_mode == 'plain':
return '$\\displaystyle %s$' % s
return s
def _result_display(self, arg):
"""IPython's pretty-printer display hook, for use in IPython 0.10
This function was adapted from:
ipython/IPython/hooks.py:155
"""
if self.rc.pprint:
out = stringify_func(arg)
if '\n' in out:
                print()
print(out)
else:
print(repr(arg))
import IPython
if V(IPython.__version__) >= '0.11':
from sympy.core.basic import Basic
from sympy.matrices.matrices import MatrixBase
from sympy.physics.vector import Vector, Dyadic
from sympy.tensor.array import NDimArray
printable_types = [Basic, MatrixBase, float, tuple, list, set,
frozenset, dict, Vector, Dyadic, NDimArray, int]
plaintext_formatter = ip.display_formatter.formatters['text/plain']
for cls in printable_types:
plaintext_formatter.for_type(cls, _print_plain)
svg_formatter = ip.display_formatter.formatters['image/svg+xml']
if use_latex in ('svg', ):
debug("init_printing: using svg formatter")
for cls in printable_types:
svg_formatter.for_type(cls, _print_latex_svg)
else:
debug("init_printing: not using any svg formatter")
for cls in printable_types:
# Better way to set this, but currently does not work in IPython
#png_formatter.for_type(cls, None)
if cls in svg_formatter.type_printers:
svg_formatter.type_printers.pop(cls)
png_formatter = ip.display_formatter.formatters['image/png']
if use_latex in (True, 'png'):
debug("init_printing: using png formatter")
for cls in printable_types:
png_formatter.for_type(cls, _print_latex_png)
elif use_latex == 'matplotlib':
debug("init_printing: using matplotlib formatter")
for cls in printable_types:
png_formatter.for_type(cls, _print_latex_matplotlib)
else:
debug("init_printing: not using any png formatter")
for cls in printable_types:
# Better way to set this, but currently does not work in IPython
#png_formatter.for_type(cls, None)
if cls in png_formatter.type_printers:
png_formatter.type_printers.pop(cls)
latex_formatter = ip.display_formatter.formatters['text/latex']
if use_latex in (True, 'mathjax'):
debug("init_printing: using mathjax formatter")
for cls in printable_types:
latex_formatter.for_type(cls, _print_latex_text)
for typ in sympy_latex_types:
typ._repr_latex_ = typ._repr_latex_orig
else:
debug("init_printing: not using text/latex formatter")
for cls in printable_types:
# Better way to set this, but currently does not work in IPython
#latex_formatter.for_type(cls, None)
if cls in latex_formatter.type_printers:
latex_formatter.type_printers.pop(cls)
for typ in sympy_latex_types:
typ._repr_latex_ = None
else:
ip.set_hook('result_display', _result_display)
def _is_ipython(shell):
"""Is a shell instance an IPython shell?"""
# shortcut, so we don't import IPython if we don't have to
if 'IPython' not in sys.modules:
return False
try:
from IPython.core.interactiveshell import InteractiveShell
except ImportError:
# IPython < 0.11
try:
from IPython.iplib import InteractiveShell
except ImportError:
            # Reaching this point means IPython has changed in a backward-incompatible way
# that we don't know about. Warn?
return False
return isinstance(shell, InteractiveShell)
# Used by the doctester to override the default for no_global
NO_GLOBAL = False
def init_printing(pretty_print=True, order=None, use_unicode=None,
use_latex=None, wrap_line=None, num_columns=None,
no_global=False, ip=None, euler=False, forecolor=None,
backcolor='Transparent', fontsize='10pt',
latex_mode='plain', print_builtin=True,
str_printer=None, pretty_printer=None,
latex_printer=None, scale=1.0, **settings):
r"""
Initializes pretty-printer depending on the environment.
Parameters
==========
pretty_print : boolean, default=True
If True, use pretty_print to stringify or the provided pretty
printer; if False, use sstrrepr to stringify or the provided string
printer.
order : string or None, default='lex'
There are a few different settings for this parameter:
        lex (default), which is lexicographic order;
        grlex, which is graded lexicographic order;
        grevlex, which is reversed graded lexicographic order;
old, which is used for compatibility reasons and for long expressions;
None, which sets it to lex.
use_unicode : boolean or None, default=None
If True, use unicode characters;
if False, do not use unicode characters;
if None, make a guess based on the environment.
use_latex : string, boolean, or None, default=None
If True, use default LaTeX rendering in GUI interfaces (png and
mathjax);
if False, do not use LaTeX rendering;
if None, make a guess based on the environment;
if 'png', enable latex rendering with an external latex compiler,
falling back to matplotlib if external compilation fails;
if 'matplotlib', enable LaTeX rendering with matplotlib;
if 'mathjax', enable LaTeX text generation, for example MathJax
rendering in IPython notebook or text rendering in LaTeX documents;
if 'svg', enable LaTeX rendering with an external latex compiler,
no fallback
wrap_line : boolean
If True, lines will wrap at the end; if False, they will not wrap
but continue as one line. This is only relevant if ``pretty_print`` is
True.
num_columns : int or None, default=None
If int, number of columns before wrapping is set to num_columns; if
None, number of columns before wrapping is set to terminal width.
This is only relevant if ``pretty_print`` is True.
no_global : boolean, default=False
If True, the settings become system wide;
if False, use just for this console/session.
ip : An interactive console
This can either be an instance of IPython,
or a class that derives from code.InteractiveConsole.
euler : boolean, optional, default=False
Loads the euler package in the LaTeX preamble for handwritten style
fonts (http://www.ctan.org/pkg/euler).
forecolor : string or None, optional, default=None
DVI setting for foreground color. None means that either 'Black',
'White', or 'Gray' will be selected based on a guess of the IPython
terminal color setting. See notes.
backcolor : string, optional, default='Transparent'
DVI setting for background color. See notes.
fontsize : string, optional, default='10pt'
A font size to pass to the LaTeX documentclass function in the
preamble. Note that the options are limited by the documentclass.
Consider using scale instead.
latex_mode : string, optional, default='plain'
The mode used in the LaTeX printer. Can be one of:
{'inline'|'plain'|'equation'|'equation*'}.
print_builtin : boolean, optional, default=True
If ``True`` then floats and integers will be printed. If ``False`` the
printer will only print SymPy types.
str_printer : function, optional, default=None
A custom string printer function. This should mimic
sympy.printing.sstrrepr().
pretty_printer : function, optional, default=None
A custom pretty printer. This should mimic sympy.printing.pretty().
latex_printer : function, optional, default=None
A custom LaTeX printer. This should mimic sympy.printing.latex().
scale : float, optional, default=1.0
Scale the LaTeX output when using the ``png`` or ``svg`` backends.
Useful for high dpi screens.
settings :
Any additional settings for the ``latex`` and ``pretty`` commands can
be used to fine-tune the output.
Examples
========
>>> from sympy.interactive import init_printing
>>> from sympy import Symbol, sqrt
>>> from sympy.abc import x, y
>>> sqrt(5)
sqrt(5)
>>> init_printing(pretty_print=True) # doctest: +SKIP
>>> sqrt(5) # doctest: +SKIP
___
\/ 5
>>> theta = Symbol('theta') # doctest: +SKIP
>>> init_printing(use_unicode=True) # doctest: +SKIP
>>> theta # doctest: +SKIP
\u03b8
>>> init_printing(use_unicode=False) # doctest: +SKIP
>>> theta # doctest: +SKIP
theta
>>> init_printing(order='lex') # doctest: +SKIP
>>> str(y + x + y**2 + x**2) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(order='grlex') # doctest: +SKIP
>>> str(y + x + y**2 + x**2) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(order='grevlex') # doctest: +SKIP
>>> str(y * x**2 + x * y**2) # doctest: +SKIP
x**2*y + x*y**2
>>> init_printing(order='old') # doctest: +SKIP
>>> str(x**2 + y**2 + x + y) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(num_columns=10) # doctest: +SKIP
>>> x**2 + x + y**2 + y # doctest: +SKIP
x + y +
x**2 + y**2
Notes
=====
The foreground and background colors can be selected when using 'png' or
'svg' LaTeX rendering. Note that before the ``init_printing`` command is
executed, the LaTeX rendering is handled by the IPython console and not SymPy.
The colors can be selected among the 68 standard colors known to ``dvips``,
for a list see [1]_. In addition, the background color can be
set to 'Transparent' (which is the default value).
    When the ``forecolor`` is None (automatic selection), the guess is based
    on the ``colors`` variable in the IPython console, see [2]_. Hence, if
that variable is set correctly in your IPython console, there is a high
chance that the output will be readable, although manual settings may be
needed.
References
==========
.. [1] https://en.wikibooks.org/wiki/LaTeX/Colors#The_68_standard_colors_known_to_dvips
.. [2] https://ipython.readthedocs.io/en/stable/config/details.html#terminal-colors
See Also
========
sympy.printing.latex
sympy.printing.pretty
"""
import sys
from sympy.printing.printer import Printer
if pretty_print:
if pretty_printer is not None:
stringify_func = pretty_printer
else:
from sympy.printing import pretty as stringify_func
else:
if str_printer is not None:
stringify_func = str_printer
else:
from sympy.printing import sstrrepr as stringify_func
# Even if ip is not passed, double check that not in IPython shell
in_ipython = False
if ip is None:
try:
ip = get_ipython()
except NameError:
pass
else:
in_ipython = (ip is not None)
if ip and not in_ipython:
in_ipython = _is_ipython(ip)
if in_ipython and pretty_print:
try:
import IPython
# IPython 1.0 deprecates the frontend module, so we import directly
# from the terminal module to prevent a deprecation message from being
# shown.
if V(IPython.__version__) >= '1.0':
from IPython.terminal.interactiveshell import TerminalInteractiveShell
else:
from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
from code import InteractiveConsole
except ImportError:
pass
else:
# This will be True if we are in the qtconsole or notebook
if not isinstance(ip, (InteractiveConsole, TerminalInteractiveShell)) \
and 'ipython-console' not in ''.join(sys.argv):
if use_unicode is None:
debug("init_printing: Setting use_unicode to True")
use_unicode = True
if use_latex is None:
debug("init_printing: Setting use_latex to True")
use_latex = True
if not NO_GLOBAL and not no_global:
Printer.set_global_settings(order=order, use_unicode=use_unicode,
wrap_line=wrap_line, num_columns=num_columns)
else:
_stringify_func = stringify_func
if pretty_print:
stringify_func = lambda expr, **settings: \
_stringify_func(expr, order=order,
use_unicode=use_unicode,
wrap_line=wrap_line,
num_columns=num_columns,
**settings)
else:
stringify_func = \
lambda expr, **settings: _stringify_func(
expr, order=order, **settings)
if in_ipython:
mode_in_settings = settings.pop("mode", None)
if mode_in_settings:
debug("init_printing: Mode is not able to be set due to internals"
"of IPython printing")
_init_ipython_printing(ip, stringify_func, use_latex, euler,
forecolor, backcolor, fontsize, latex_mode,
print_builtin, latex_printer, scale,
**settings)
else:
_init_python_printing(stringify_func, **settings)
|
e4025b99cdc81855c907ffe88a211285081dbf28a3aca7666df8f0a31dd66b5f | from __future__ import print_function
from sympy.matrices.dense import MutableDenseMatrix
from sympy.polys.polytools import Poly
from sympy.polys.domains import EX
class MutablePolyDenseMatrix(MutableDenseMatrix):
"""
    A mutable matrix of objects from the poly module, or of objects that can
    operate with them.
Examples
========
>>> from sympy.polys.polymatrix import PolyMatrix
>>> from sympy import Symbol, Poly, ZZ
>>> x = Symbol('x')
>>> pm1 = PolyMatrix([[Poly(x**2, x), Poly(-x, x)], [Poly(x**3, x), Poly(-1 + x, x)]])
>>> v1 = PolyMatrix([[1, 0], [-1, 0]])
>>> pm1*v1
Matrix([
[ Poly(x**2 + x, x, domain='ZZ'), Poly(0, x, domain='ZZ')],
[Poly(x**3 - x + 1, x, domain='ZZ'), Poly(0, x, domain='ZZ')]])
>>> pm1.ring
ZZ[x]
>>> v1*pm1
Matrix([
[ Poly(x**2, x, domain='ZZ'), Poly(-x, x, domain='ZZ')],
[Poly(-x**2, x, domain='ZZ'), Poly(x, x, domain='ZZ')]])
>>> pm2 = PolyMatrix([[Poly(x**2, x, domain='QQ'), Poly(0, x, domain='QQ'), Poly(1, x, domain='QQ'), \
Poly(x**3, x, domain='QQ'), Poly(0, x, domain='QQ'), Poly(-x**3, x, domain='QQ')]])
>>> v2 = PolyMatrix([1, 0, 0, 0, 0, 0], ring=ZZ)
>>> v2.ring
ZZ
>>> pm2*v2
Matrix([[Poly(x**2, x, domain='QQ')]])
"""
_class_priority = 10
# we don't want to sympify the elements of PolyMatrix
_sympify = staticmethod(lambda x: x)
def __init__(self, *args, **kwargs):
# if any non-Poly element is given as input then
        # 'ring' defaults to 'EX'
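        # otherwise the domains of the Poly entries are unified below, e.g.
        # mixing entries over ZZ[x] and QQ[x] gives the ring QQ[x]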
ring = kwargs.get('ring', EX)
if all(isinstance(p, Poly) for p in self._mat) and self._mat:
domain = tuple([p.domain[p.gens] for p in self._mat])
ring = domain[0]
for i in range(1, len(domain)):
ring = ring.unify(domain[i])
self.ring = ring
def _eval_matrix_mul(self, other):
self_cols = self.cols
other_rows, other_cols = other.rows, other.cols
other_len = other_rows*other_cols
new_mat_rows = self.rows
new_mat_cols = other.cols
new_mat = [0]*new_mat_rows*new_mat_cols
if self.cols != 0 and other.rows != 0:
mat = self._mat
other_mat = other._mat
for i in range(len(new_mat)):
row, col = i // new_mat_cols, i % new_mat_cols
row_indices = range(self_cols*row, self_cols*(row+1))
col_indices = range(col, other_len, other_cols)
vec = (mat[a]*other_mat[b] for a,b in zip(row_indices, col_indices))
# 'Add' shouldn't be used here
new_mat[i] = sum(vec)
return self.__class__(new_mat_rows, new_mat_cols, new_mat, copy=False)
def _eval_scalar_mul(self, other):
mat = [Poly(a.as_expr()*other, *a.gens) if isinstance(a, Poly) else a*other for a in self._mat]
return self.__class__(self.rows, self.cols, mat, copy=False)
def _eval_scalar_rmul(self, other):
mat = [Poly(other*a.as_expr(), *a.gens) if isinstance(a, Poly) else other*a for a in self._mat]
return self.__class__(self.rows, self.cols, mat, copy=False)
MutablePolyMatrix = PolyMatrix = MutablePolyDenseMatrix
|
69316b6202f94c610c5bda0d1d6826b082cfbacd7e1c4292d769b9903036ec8c | """Definitions of monomial orderings. """
from __future__ import print_function, division
from typing import Optional
__all__ = ["lex", "grlex", "grevlex", "ilex", "igrlex", "igrevlex"]
from sympy.core import Symbol
from sympy.core.compatibility import iterable
class MonomialOrder(object):
"""Base class for monomial orderings. """
alias = None # type: Optional[str]
is_global = None # type: Optional[bool]
is_default = False
def __repr__(self):
return self.__class__.__name__ + "()"
def __str__(self):
return self.alias
def __call__(self, monomial):
raise NotImplementedError
def __eq__(self, other):
return self.__class__ == other.__class__
def __hash__(self):
return hash(self.__class__)
def __ne__(self, other):
return not (self == other)
class LexOrder(MonomialOrder):
"""Lexicographic order of monomials. """
alias = 'lex'
is_global = True
is_default = True
def __call__(self, monomial):
return monomial
class GradedLexOrder(MonomialOrder):
"""Graded lexicographic order of monomials. """
alias = 'grlex'
is_global = True
def __call__(self, monomial):
return (sum(monomial), monomial)
class ReversedGradedLexOrder(MonomialOrder):
"""Reversed graded lexicographic order of monomials. """
alias = 'grevlex'
is_global = True
def __call__(self, monomial):
return (sum(monomial), tuple(reversed([-m for m in monomial])))
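        # e.g. for the monomial (1, 2, 0): grlex gives the key (3, (1, 2, 0)),
        # while grevlex gives (3, (0, -2, -1))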
class ProductOrder(MonomialOrder):
"""
A product order built from other monomial orders.
Given (not necessarily total) orders O1, O2, ..., On, their product order
    P is defined as M1 > M2 iff there exists i such that O1(M1) = O1(M2),
..., Oi(M1) = Oi(M2), O{i+1}(M1) > O{i+1}(M2).
Product orders are typically built from monomial orders on different sets
of variables.
ProductOrder is constructed by passing a list of pairs
[(O1, L1), (O2, L2), ...] where Oi are MonomialOrders and Li are callables.
Upon comparison, the Li are passed the total monomial, and should filter
out the part of the monomial to pass to Oi.
Examples
========
We can use a lexicographic order on x_1, x_2 and also on
y_1, y_2, y_3, and their product on {x_i, y_i} as follows:
>>> from sympy.polys.orderings import lex, grlex, ProductOrder
>>> P = ProductOrder(
... (lex, lambda m: m[:2]), # lex order on x_1 and x_2 of monomial
... (grlex, lambda m: m[2:]) # grlex on y_1, y_2, y_3
... )
>>> P((2, 1, 1, 0, 0)) > P((1, 10, 0, 2, 0))
True
Here the exponent `2` of `x_1` in the first monomial
(`x_1^2 x_2 y_1`) is bigger than the exponent `1` of `x_1` in the
second monomial (`x_1 x_2^10 y_2^2`), so the first monomial is greater
in the product ordering.
>>> P((2, 1, 1, 0, 0)) < P((2, 1, 0, 2, 0))
True
Here the exponents of `x_1` and `x_2` agree, so the grlex order on
`y_1, y_2, y_3` is used to decide the ordering. In this case the monomial
`y_2^2` is ordered larger than `y_1`, since for the grlex order the degree
of the monomial is most important.
"""
def __init__(self, *args):
self.args = args
def __call__(self, monomial):
return tuple(O(lamda(monomial)) for (O, lamda) in self.args)
def __repr__(self):
from sympy.core import Tuple
return self.__class__.__name__ + repr(Tuple(*[x[0] for x in self.args]))
def __str__(self):
from sympy.core import Tuple
return self.__class__.__name__ + str(Tuple(*[x[0] for x in self.args]))
def __eq__(self, other):
if not isinstance(other, ProductOrder):
return False
return self.args == other.args
def __hash__(self):
return hash((self.__class__, self.args))
@property
def is_global(self):
if all(o.is_global is True for o, _ in self.args):
return True
if all(o.is_global is False for o, _ in self.args):
return False
return None
class InverseOrder(MonomialOrder):
"""
The "inverse" of another monomial order.
If O is any monomial order, we can construct another monomial order iO
such that `A >_{iO} B` if and only if `B >_O A`. This is useful for
constructing local orders.
Note that many algorithms only work with *global* orders.
For example, in the inverse lexicographic order on a single variable `x`,
high powers of `x` count as small:
>>> from sympy.polys.orderings import lex, InverseOrder
>>> ilex = InverseOrder(lex)
>>> ilex((5,)) < ilex((0,))
True
"""
def __init__(self, O):
self.O = O
def __str__(self):
return "i" + str(self.O)
def __call__(self, monomial):
def inv(l):
if iterable(l):
return tuple(inv(x) for x in l)
return -l
return inv(self.O(monomial))
@property
def is_global(self):
if self.O.is_global is True:
return False
if self.O.is_global is False:
return True
return None
def __eq__(self, other):
return isinstance(other, InverseOrder) and other.O == self.O
def __hash__(self):
return hash((self.__class__, self.O))
lex = LexOrder()
grlex = GradedLexOrder()
grevlex = ReversedGradedLexOrder()
ilex = InverseOrder(lex)
igrlex = InverseOrder(grlex)
igrevlex = InverseOrder(grevlex)
_monomial_key = {
'lex': lex,
'grlex': grlex,
'grevlex': grevlex,
'ilex': ilex,
'igrlex': igrlex,
'igrevlex': igrevlex
}
def monomial_key(order=None, gens=None):
"""
Return a function defining admissible order on monomials.
The result of a call to :func:`monomial_key` is a function which should
be used as a key to :func:`sorted` built-in function, to provide order
in a set of monomials of the same length.
Currently supported monomial orderings are:
1. lex - lexicographic order (default)
2. grlex - graded lexicographic order
3. grevlex - reversed graded lexicographic order
4. ilex, igrlex, igrevlex - the corresponding inverse orders
If the ``order`` input argument is not a string but has ``__call__``
attribute, then it will pass through with an assumption that the
callable object defines an admissible order on monomials.
If the ``gens`` input argument contains a list of generators, the
resulting key function can be used to sort SymPy ``Expr`` objects.
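    An illustrative sketch (using symbols from ``sympy.abc``):
    >>> from sympy.polys.orderings import monomial_key
    >>> from sympy.abc import x, y
    >>> sorted([x, x**2*y, x*y], key=monomial_key('grlex', [x, y]))
    [x, x*y, x**2*y]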
"""
if order is None:
order = lex
if isinstance(order, Symbol):
order = str(order)
if isinstance(order, str):
try:
order = _monomial_key[order]
except KeyError:
raise ValueError("supported monomial orderings are 'lex', 'grlex' and 'grevlex', got %r" % order)
if hasattr(order, '__call__'):
if gens is not None:
def _order(expr):
return order(expr.as_poly(*gens).degree_list())
return _order
return order
else:
raise ValueError("monomial ordering specification must be a string or a callable, got %s" % order)
class _ItemGetter(object):
"""Helper class to return a subsequence of values."""
def __init__(self, seq):
self.seq = tuple(seq)
def __call__(self, m):
return tuple(m[idx] for idx in self.seq)
def __eq__(self, other):
if not isinstance(other, _ItemGetter):
return False
return self.seq == other.seq
def build_product_order(arg, gens):
"""
Build a monomial order on ``gens``.
``arg`` should be a tuple of iterables. The first element of each iterable
should be a string or monomial order (will be passed to monomial_key),
the others should be subsets of the generators. This function will build
the corresponding product order.
For example, build a product of two grlex orders:
>>> from sympy.polys.orderings import grlex, build_product_order
>>> from sympy.abc import x, y, z, t
>>> O = build_product_order((("grlex", x, y), ("grlex", z, t)), [x, y, z, t])
>>> O((1, 2, 3, 4))
((3, (1, 2)), (7, (3, 4)))
"""
gens2idx = {}
for i, g in enumerate(gens):
gens2idx[g] = i
order = []
for expr in arg:
name = expr[0]
var = expr[1:]
def makelambda(var):
return _ItemGetter(gens2idx[g] for g in var)
order.append((monomial_key(name), makelambda(var)))
return ProductOrder(*order)
|
aea255ba0dd1c8e3aaf4775a6f776f1712092cd4689fe928137a98d4e594c641 | """Power series evaluation and manipulation using sparse Polynomials
Implementing a new function
---------------------------
There are a few things to be kept in mind when adding a new function here::
- The implementation should work on all possible input domains/rings.
Special cases include the ``EX`` ring and a constant term in the series
to be expanded. There can be two types of constant terms in the series:
+ A constant value or symbol.
+ A term of a multivariate series not involving the generator, with
respect to which the series is to expanded.
Strictly speaking, a generator of a ring should not be considered a
constant. However, for series expansion both the cases need similar
      treatment (as the user doesn't care about inner details), i.e., use an
addition formula to separate the constant part and the variable part (see
rs_sin for reference).
- All the algorithms used here are primarily designed to work for Taylor
series (number of iterations in the algo equals the required order).
Hence, it becomes tricky to get the series of the right order if a
Puiseux series is input. Use rs_puiseux? in your function if your
algorithm is not designed to handle fractional powers.
Extending rs_series
-------------------
To make a function work with rs_series you need to do two things::
    - Make sure it works with a constant term (as explained above).
- If the series contains constant terms, you might need to extend its ring.
You do so by adding the new terms to the rings as generators.
``PolyRing.compose`` and ``PolyRing.add_gens`` are two functions that do
so and need to be called every time you expand a series containing a
constant term.
Look at rs_sin and rs_series for further reference.
"""
from sympy.polys.domains import QQ, EX
from sympy.polys.rings import PolyElement, ring, sring
from sympy.polys.polyerrors import DomainError
from sympy.polys.monomials import (monomial_min, monomial_mul, monomial_div,
monomial_ldiv)
from mpmath.libmp.libintmath import ifac
from sympy.core import PoleError, Function, Expr
from sympy.core.numbers import Rational, igcd
from sympy.core.compatibility import as_int
from sympy.functions import sin, cos, tan, atan, exp, atanh, tanh, log, ceiling
from mpmath.libmp.libintmath import giant_steps
import math
def _invert_monoms(p1):
"""
Compute ``x**n * p1(1/x)`` for a univariate polynomial ``p1`` in ``x``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import _invert_monoms
>>> R, x = ring('x', ZZ)
>>> p = x**2 + 2*x + 3
>>> _invert_monoms(p)
3*x**2 + 2*x + 1
See Also
========
sympy.polys.densebasic.dup_reverse
"""
terms = list(p1.items())
terms.sort()
deg = p1.degree()
R = p1.ring
p = R.zero
cv = p1.listcoeffs()
mv = p1.listmonoms()
for i in range(len(mv)):
p[(deg - mv[i][0],)] = cv[i]
return p
def _giant_steps(target):
"""Return a list of precision steps for the Newton's method"""
res = giant_steps(2, target)
if res[0] != 2:
res = [2] + res
return res
def rs_trunc(p1, x, prec):
"""
Truncate the series in the ``x`` variable with precision ``prec``,
that is, modulo ``O(x**prec)``
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_trunc
>>> R, x = ring('x', QQ)
>>> p = x**10 + x**5 + x + 1
>>> rs_trunc(p, x, 12)
x**10 + x**5 + x + 1
>>> rs_trunc(p, x, 10)
x**5 + x + 1
"""
R = p1.ring
p = R.zero
i = R.gens.index(x)
for exp1 in p1:
if exp1[i] >= prec:
continue
p[exp1] = p1[exp1]
return p
def rs_is_puiseux(p, x):
"""
Test if ``p`` is Puiseux series in ``x``.
Raise an exception if it has a negative power in ``x``.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_is_puiseux
>>> R, x = ring('x', QQ)
>>> p = x**QQ(2,5) + x**QQ(2,3) + x
>>> rs_is_puiseux(p, x)
True
"""
index = p.ring.gens.index(x)
for k in p:
if k[index] != int(k[index]):
return True
if k[index] < 0:
raise ValueError('The series is not regular in %s' % x)
return False
def rs_puiseux(f, p, x, prec):
"""
Return the puiseux series for `f(p, x, prec)`.
To be used when function ``f`` is implemented only for regular series.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_puiseux, rs_exp
>>> R, x = ring('x', QQ)
>>> p = x**QQ(2,5) + x**QQ(2,3) + x
>>> rs_puiseux(rs_exp,p, x, 1)
1/2*x**(4/5) + x**(2/3) + x**(2/5) + 1
"""
index = p.ring.gens.index(x)
n = 1
for k in p:
power = k[index]
if isinstance(power, Rational):
num, den = power.as_numer_denom()
n = int(n*den // igcd(n, den))
elif power != int(power):
den = power.denominator
n = int(n*den // igcd(n, den))
if n != 1:
p1 = pow_xin(p, index, n)
r = f(p1, x, prec*n)
n1 = QQ(1, n)
if isinstance(r, tuple):
r = tuple([pow_xin(rx, index, n1) for rx in r])
else:
r = pow_xin(r, index, n1)
else:
r = f(p, x, prec)
return r
def rs_puiseux2(f, p, q, x, prec):
"""
Return the puiseux series for `f(p, q, x, prec)`.
To be used when function ``f`` is implemented only for regular series.
"""
index = p.ring.gens.index(x)
n = 1
for k in p:
power = k[index]
if isinstance(power, Rational):
num, den = power.as_numer_denom()
n = n*den // igcd(n, den)
elif power != int(power):
den = power.denominator
n = n*den // igcd(n, den)
if n != 1:
p1 = pow_xin(p, index, n)
r = f(p1, q, x, prec*n)
n1 = QQ(1, n)
r = pow_xin(r, index, n1)
else:
r = f(p, q, x, prec)
return r
def rs_mul(p1, p2, x, prec):
"""
Return the product of the given two series, modulo ``O(x**prec)``.
``x`` is the series variable or its position in the generators.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_mul
>>> R, x = ring('x', QQ)
>>> p1 = x**2 + 2*x + 1
>>> p2 = x + 1
>>> rs_mul(p1, p2, x, 3)
3*x**2 + 3*x + 1
"""
R = p1.ring
p = R.zero
if R.__class__ != p2.ring.__class__ or R != p2.ring:
raise ValueError('p1 and p2 must have the same ring')
iv = R.gens.index(x)
if not isinstance(p2, PolyElement):
raise ValueError('p1 and p2 must have the same ring')
if R == p2.ring:
get = p.get
items2 = list(p2.items())
items2.sort(key=lambda e: e[0][iv])
if R.ngens == 1:
for exp1, v1 in p1.items():
for exp2, v2 in items2:
exp = exp1[0] + exp2[0]
if exp < prec:
exp = (exp, )
p[exp] = get(exp, 0) + v1*v2
else:
break
else:
monomial_mul = R.monomial_mul
for exp1, v1 in p1.items():
for exp2, v2 in items2:
if exp1[iv] + exp2[iv] < prec:
exp = monomial_mul(exp1, exp2)
p[exp] = get(exp, 0) + v1*v2
else:
break
p.strip_zero()
return p
def rs_square(p1, x, prec):
"""
Square the series modulo ``O(x**prec)``
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_square
>>> R, x = ring('x', QQ)
>>> p = x**2 + 2*x + 1
>>> rs_square(p, x, 3)
6*x**2 + 4*x + 1
"""
R = p1.ring
p = R.zero
iv = R.gens.index(x)
get = p.get
items = list(p1.items())
items.sort(key=lambda e: e[0][iv])
monomial_mul = R.monomial_mul
for i in range(len(items)):
exp1, v1 = items[i]
for j in range(i):
exp2, v2 = items[j]
if exp1[iv] + exp2[iv] < prec:
exp = monomial_mul(exp1, exp2)
p[exp] = get(exp, 0) + v1*v2
else:
break
p = p.imul_num(2)
get = p.get
for expv, v in p1.items():
if 2*expv[iv] < prec:
e2 = monomial_mul(expv, expv)
p[e2] = get(e2, 0) + v**2
p.strip_zero()
return p
def rs_pow(p1, n, x, prec):
"""
Return ``p1**n`` modulo ``O(x**prec)``
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_pow
>>> R, x = ring('x', QQ)
>>> p = x + 1
>>> rs_pow(p, 4, x, 3)
6*x**2 + 4*x + 1
"""
R = p1.ring
if isinstance(n, Rational):
np = int(n.p)
nq = int(n.q)
if nq != 1:
res = rs_nth_root(p1, nq, x, prec)
if np != 1:
res = rs_pow(res, np, x, prec)
else:
res = rs_pow(p1, np, x, prec)
return res
n = as_int(n)
if n == 0:
if p1:
return R(1)
else:
raise ValueError('0**0 is undefined')
if n < 0:
p1 = rs_pow(p1, -n, x, prec)
return rs_series_inversion(p1, x, prec)
if n == 1:
return rs_trunc(p1, x, prec)
if n == 2:
return rs_square(p1, x, prec)
if n == 3:
p2 = rs_square(p1, x, prec)
return rs_mul(p1, p2, x, prec)
p = R(1)
while 1:
if n & 1:
p = rs_mul(p1, p, x, prec)
n -= 1
if not n:
break
p1 = rs_square(p1, x, prec)
n = n // 2
return p
def rs_subs(p, rules, x, prec):
"""
Substitution with truncation according to the mapping in ``rules``.
Return a series with precision ``prec`` in the generator ``x``
Note that substitutions are not done one after the other
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_subs
>>> R, x, y = ring('x, y', QQ)
>>> p = x**2 + y**2
>>> rs_subs(p, {x: x+ y, y: x+ 2*y}, x, 3)
2*x**2 + 6*x*y + 5*y**2
>>> (x + y)**2 + (x + 2*y)**2
2*x**2 + 6*x*y + 5*y**2
which differs from
>>> rs_subs(rs_subs(p, {x: x+ y}, x, 3), {y: x+ 2*y}, x, 3)
5*x**2 + 12*x*y + 8*y**2
Parameters
    ==========
p : :class:`~.PolyElement` Input series.
rules : ``dict`` with substitution mappings.
x : :class:`~.PolyElement` in which the series truncation is to be done.
prec : :class:`~.Integer` order of the series after truncation.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_subs
>>> R, x, y = ring('x, y', QQ)
>>> rs_subs(x**2+y**2, {y: (x+y)**2}, x, 3)
6*x**2*y**2 + x**2 + 4*x*y**3 + y**4
"""
R = p.ring
ngens = R.ngens
d = R(0)
for i in range(ngens):
d[(i, 1)] = R.gens[i]
for var in rules:
d[(R.index(var), 1)] = rules[var]
p1 = R(0)
p_keys = sorted(p.keys())
for expv in p_keys:
p2 = R(1)
for i in range(ngens):
power = expv[i]
if power == 0:
continue
if (i, power) not in d:
q, r = divmod(power, 2)
if r == 0 and (i, q) in d:
d[(i, power)] = rs_square(d[(i, q)], x, prec)
elif (i, power - 1) in d:
d[(i, power)] = rs_mul(d[(i, power - 1)], d[(i, 1)],
x, prec)
else:
d[(i, power)] = rs_pow(d[(i, 1)], power, x, prec)
p2 = rs_mul(p2, d[(i, power)], x, prec)
p1 += p2*p[expv]
return p1
def _has_constant_term(p, x):
"""
Check if ``p`` has a constant term in ``x``
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import _has_constant_term
>>> R, x = ring('x', QQ)
>>> p = x**2 + x + 1
>>> _has_constant_term(p, x)
True
"""
R = p.ring
iv = R.gens.index(x)
zm = R.zero_monom
a = [0]*R.ngens
a[iv] = 1
miv = tuple(a)
for expv in p:
if monomial_min(expv, miv) == zm:
return True
return False
def _get_constant_term(p, x):
"""Return constant term in p with respect to x
Note that it is not simply `p[R.zero_monom]` as there might be multiple
generators in the ring R. We want the `x`-free term which can contain other
generators.
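    Examples
    ========
    An illustrative sketch over ``QQ``:
    >>> from sympy.polys.domains import QQ
    >>> from sympy.polys.rings import ring
    >>> from sympy.polys.ring_series import _get_constant_term
    >>> R, x, y = ring('x, y', QQ)
    >>> _get_constant_term(x**2 + x*y + y + 3, x)
    y + 3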
"""
R = p.ring
i = R.gens.index(x)
zm = R.zero_monom
a = [0]*R.ngens
a[i] = 1
miv = tuple(a)
c = 0
for expv in p:
if monomial_min(expv, miv) == zm:
c += R({expv: p[expv]})
return c
def _check_series_var(p, x, name):
index = p.ring.gens.index(x)
m = min(p, key=lambda k: k[index])[index]
if m < 0:
raise PoleError("Asymptotic expansion of %s around [oo] not "
"implemented." % name)
return index, m
def _series_inversion1(p, x, prec):
"""
Univariate series inversion ``1/p`` modulo ``O(x**prec)``.
The Newton method is used.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import _series_inversion1
>>> R, x = ring('x', QQ)
>>> p = x + 1
>>> _series_inversion1(p, x, 4)
-x**3 + x**2 - x + 1
"""
if rs_is_puiseux(p, x):
return rs_puiseux(_series_inversion1, p, x, prec)
R = p.ring
zm = R.zero_monom
c = p[zm]
# giant_steps does not seem to work with PythonRational numbers with 1 as
# denominator. This makes sure such a number is converted to integer.
if prec == int(prec):
prec = int(prec)
if zm not in p:
raise ValueError("No constant term in series")
if _has_constant_term(p - c, x):
raise ValueError("p cannot contain a constant term depending on "
"parameters")
one = R(1)
if R.domain is EX:
one = 1
if c != one:
# TODO add check that it is a unit
p1 = R(1)/c
else:
p1 = R(1)
for precx in _giant_steps(prec):
t = 1 - rs_mul(p1, p, x, precx)
p1 = p1 + rs_mul(p1, t, x, precx)
return p1
def rs_series_inversion(p, x, prec):
"""
Multivariate series inversion ``1/p`` modulo ``O(x**prec)``.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_series_inversion
>>> R, x, y = ring('x, y', QQ)
>>> rs_series_inversion(1 + x*y**2, x, 4)
-x**3*y**6 + x**2*y**4 - x*y**2 + 1
>>> rs_series_inversion(1 + x*y**2, y, 4)
-x*y**2 + 1
>>> rs_series_inversion(x + x**2, x, 4)
x**3 - x**2 + x - 1 + x**(-1)
"""
R = p.ring
if p == R.zero:
raise ZeroDivisionError
zm = R.zero_monom
index = R.gens.index(x)
m = min(p, key=lambda k: k[index])[index]
if m:
p = mul_xin(p, index, -m)
prec = prec + m
if zm not in p:
raise NotImplementedError("No constant term in series")
if _has_constant_term(p - p[zm], x):
raise NotImplementedError("p - p[0] must not have a constant term in "
"the series variables")
r = _series_inversion1(p, x, prec)
if m != 0:
r = mul_xin(r, index, -m)
return r
def _coefficient_t(p, t):
r"""Coefficient of `x_i**j` in p, where ``t`` = (i, j)"""
i, j = t
R = p.ring
expv1 = [0]*R.ngens
expv1[i] = j
expv1 = tuple(expv1)
p1 = R(0)
for expv in p:
if expv[i] == j:
p1[monomial_div(expv, expv1)] = p[expv]
return p1
def rs_series_reversion(p, x, n, y):
r"""
Reversion of a series.
``p`` is a series with ``O(x**n)`` of the form $p = ax + f(x)$
where $a$ is a number different from 0.
$f(x) = \sum_{k=2}^{n-1} a_kx_k$
Parameters
==========
a_k : Can depend polynomially on other variables, not indicated.
x : Variable with name x.
y : Variable with name y.
Returns
=======
Solve $p = y$, that is, given $ax + f(x) - y = 0$,
find the solution $x = r(y)$ up to $O(y^n)$.
Algorithm
=========
If $r_i$ is the solution at order $i$, then:
$ar_i + f(r_i) - y = O\left(y^{i + 1}\right)$
and if $r_{i + 1}$ is the solution at order $i + 1$, then:
$ar_{i + 1} + f(r_{i + 1}) - y = O\left(y^{i + 2}\right)$
We have, $r_{i + 1} = r_i + e$, such that,
$ae + f(r_i) = O\left(y^{i + 2}\right)$
or $e = -f(r_i)/a$
So we use the recursion relation:
$r_{i + 1} = r_i - f(r_i)/a$
with the boundary condition: $r_1 = y$
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_series_reversion, rs_trunc
>>> R, x, y, a, b = ring('x, y, a, b', QQ)
>>> p = x - x**2 - 2*b*x**2 + 2*a*b*x**2
>>> p1 = rs_series_reversion(p, x, 3, y); p1
-2*y**2*a*b + 2*y**2*b + y**2 + y
>>> rs_trunc(p.compose(x, p1), y, 3)
y
"""
if rs_is_puiseux(p, x):
raise NotImplementedError
R = p.ring
nx = R.gens.index(x)
y = R(y)
ny = R.gens.index(y)
if _has_constant_term(p, x):
raise ValueError("p must not contain a constant term in the series "
"variable")
a = _coefficient_t(p, (nx, 1))
zm = R.zero_monom
assert zm in a and len(a) == 1
a = a[zm]
r = y/a
for i in range(2, n):
sp = rs_subs(p, {x: r}, y, i + 1)
sp = _coefficient_t(sp, (ny, i))*y**i
r -= sp/a
return r
def rs_series_from_list(p, c, x, prec, concur=1):
"""
Return a series `sum c[n]*p**n` modulo `O(x**prec)`.
It reduces the number of multiplications by summing concurrently.
`ax = [1, p, p**2, .., p**(J - 1)]`
`s = sum(c[i]*ax[i]` for i in `range(r, (r + 1)*J))*p**((K - 1)*J)`
with `K >= (n + 1)/J`
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_series_from_list, rs_trunc
>>> R, x = ring('x', QQ)
>>> p = x**2 + x + 1
>>> c = [1, 2, 3]
>>> rs_series_from_list(p, c, x, 4)
6*x**3 + 11*x**2 + 8*x + 6
>>> rs_trunc(1 + 2*p + 3*p**2, x, 4)
6*x**3 + 11*x**2 + 8*x + 6
>>> pc = R.from_list(list(reversed(c)))
>>> rs_trunc(pc.compose(x, p), x, 4)
6*x**3 + 11*x**2 + 8*x + 6
"""
# TODO: Add this when it is documented in Sphinx
"""
See Also
========
sympy.polys.rings.PolyRing.compose
"""
R = p.ring
n = len(c)
if not concur:
q = R(1)
s = c[0]*q
for i in range(1, n):
q = rs_mul(q, p, x, prec)
s += c[i]*q
return s
J = int(math.sqrt(n) + 1)
K, r = divmod(n, J)
if r:
K += 1
ax = [R(1)]
q = R(1)
if len(p) < 20:
for i in range(1, J):
q = rs_mul(q, p, x, prec)
ax.append(q)
else:
for i in range(1, J):
if i % 2 == 0:
q = rs_square(ax[i//2], x, prec)
else:
q = rs_mul(q, p, x, prec)
ax.append(q)
# optimize using rs_square
pj = rs_mul(ax[-1], p, x, prec)
b = R(1)
s = R(0)
for k in range(K - 1):
r = J*k
s1 = c[r]
for j in range(1, J):
s1 += c[r + j]*ax[j]
s1 = rs_mul(s1, b, x, prec)
s += s1
b = rs_mul(b, pj, x, prec)
if not b:
break
k = K - 1
r = J*k
if r < n:
s1 = c[r]*R(1)
for j in range(1, J):
if r + j >= n:
break
s1 += c[r + j]*ax[j]
s1 = rs_mul(s1, b, x, prec)
s += s1
return s
def rs_diff(p, x):
"""
Return partial derivative of ``p`` with respect to ``x``.
Parameters
==========
x : :class:`~.PolyElement` with respect to which ``p`` is differentiated.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_diff
>>> R, x, y = ring('x, y', QQ)
>>> p = x + x**2*y**3
>>> rs_diff(p, x)
2*x*y**3 + 1
"""
R = p.ring
n = R.gens.index(x)
p1 = R.zero
mn = [0]*R.ngens
mn[n] = 1
mn = tuple(mn)
for expv in p:
if expv[n]:
e = monomial_ldiv(expv, mn)
p1[e] = R.domain_new(p[expv]*expv[n])
return p1
def rs_integrate(p, x):
"""
Integrate ``p`` with respect to ``x``.
Parameters
==========
x : :class:`~.PolyElement` with respect to which ``p`` is integrated.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_integrate
>>> R, x, y = ring('x, y', QQ)
>>> p = x + x**2*y**3
>>> rs_integrate(p, x)
1/3*x**3*y**3 + 1/2*x**2
"""
R = p.ring
p1 = R.zero
n = R.gens.index(x)
mn = [0]*R.ngens
mn[n] = 1
mn = tuple(mn)
for expv in p:
e = monomial_mul(expv, mn)
p1[e] = R.domain_new(p[expv]/(expv[n] + 1))
return p1
def rs_fun(p, f, *args):
r"""
Function of a multivariate series computed by substitution.
The case in which ``f`` is a method name is used to compute `rs\_tan` and
`rs\_nth\_root` of a multivariate series:
`rs\_fun(p, tan, iv, prec)`
The tan series is first computed for a dummy variable _x,
i.e. `rs\_tan(\_x, iv, prec)`. Then _x is substituted with p to get the
desired series.
Parameters
==========
p : :class:`~.PolyElement` The multivariate series to be expanded.
f : `ring\_series` function to be applied on `p`.
args[-2] : :class:`~.PolyElement` with respect to which the series is to be expanded.
args[-1] : Required order of the expanded series.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_fun, _tan1
>>> R, x, y = ring('x, y', QQ)
>>> p = x + x*y + x**2*y + x**3*y**2
>>> rs_fun(p, _tan1, x, 4)
1/3*x**3*y**3 + 2*x**3*y**2 + x**3*y + 1/3*x**3 + x**2*y + x*y + x
"""
_R = p.ring
R1, _x = ring('_x', _R.domain)
h = int(args[-1])
args1 = args[:-2] + (_x, h)
zm = _R.zero_monom
# separate the constant term of the series
# compute the univariate series f(_x, .., 'x', sum(nv))
if zm in p:
x1 = _x + p[zm]
p1 = p - p[zm]
else:
x1 = _x
p1 = p
if isinstance(f, str):
q = getattr(x1, f)(*args1)
else:
q = f(x1, *args1)
a = sorted(q.items())
c = [0]*h
for x in a:
c[x[0][0]] = x[1]
p1 = rs_series_from_list(p1, c, args[-2], args[-1])
return p1
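# Informal sketch of the mechanism above (names below mirror the body of
# rs_fun): for a ring_series function such as _tan1 the call
# rs_fun(p, _tan1, x, prec) essentially does
#
#   R1, _x = ring('_x', p.ring.domain)
#   q = _tan1(_x, _x, prec)              # univariate expansion in a dummy var
#   c = [0]*prec
#   for monom, coeff in q.items():
#       c[monom[0]] = coeff              # coefficient list of q
#   result = rs_series_from_list(p, c, x, prec)   # substitute _x -> p
#
# with any constant term of ``p`` split off first, as handled above.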
def mul_xin(p, i, n):
r"""
Return `p*x_i**n`.
`x_i` is the i-th variable in ``p``.
"""
R = p.ring
q = R(0)
for k, v in p.items():
k1 = list(k)
k1[i] += n
q[tuple(k1)] = v
return q
def pow_xin(p, i, n):
"""
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import pow_xin
>>> R, x, y = ring('x, y', QQ)
>>> p = x**QQ(2,5) + x + x**QQ(2,3)
>>> index = p.ring.gens.index(x)
>>> pow_xin(p, index, 15)
x**15 + x**10 + x**6
"""
R = p.ring
q = R(0)
for k, v in p.items():
k1 = list(k)
k1[i] *= n
q[tuple(k1)] = v
return q
def _nth_root1(p, n, x, prec):
"""
Univariate series expansion of the nth root of ``p``.
The Newton method is used.
"""
if rs_is_puiseux(p, x):
return rs_puiseux2(_nth_root1, p, n, x, prec)
R = p.ring
zm = R.zero_monom
if zm not in p:
raise NotImplementedError('No constant term in series')
n = as_int(n)
assert p[zm] == 1
p1 = R(1)
if p == 1:
return p
if n == 0:
return R(1)
if n == 1:
return p
if n < 0:
n = -n
sign = 1
else:
sign = 0
for precx in _giant_steps(prec):
tmp = rs_pow(p1, n + 1, x, precx)
tmp = rs_mul(tmp, p, x, precx)
p1 += p1/n - tmp/n
if sign:
return p1
else:
return _series_inversion1(p1, x, prec)
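# Informal note on the Newton update above: for g(q) = q**(-n) - p one has
# g'(q) = -n*q**(-n - 1), so the Newton step
#   q  <-  q - g(q)/g'(q)  =  q + (q - q**(n + 1)*p)/n
# is exactly `p1 += p1/n - tmp/n`, and p1 converges to p**(-1/n).  The final
# _series_inversion1 call then returns p**(1/n) when the requested exponent
# was positive.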
def rs_nth_root(p, n, x, prec):
"""
Multivariate series expansion of the nth root of ``p``.
Parameters
==========
p : Expr
The polynomial to compute the root of.
n : integer
The order of the root to be computed.
x : :class:`~.PolyElement`
prec : integer
Order of the expanded series.
Notes
=====
The result of this function is dependent on the ring over which the
polynomial has been defined. If the answer involves a root of a constant,
make sure that the polynomial is over a real field. It cannot yet handle
roots of symbols.
Examples
========
>>> from sympy.polys.domains import QQ, RR
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_nth_root
>>> R, x, y = ring('x, y', QQ)
>>> rs_nth_root(1 + x + x*y, -3, x, 3)
2/9*x**2*y**2 + 4/9*x**2*y + 2/9*x**2 - 1/3*x*y - 1/3*x + 1
>>> R, x, y = ring('x, y', RR)
>>> rs_nth_root(3 + x + x*y, 3, x, 2)
0.160249952256379*x*y + 0.160249952256379*x + 1.44224957030741
"""
if n == 0:
if p == 0:
raise ValueError('0**0 expression')
else:
return p.ring(1)
if n == 1:
return rs_trunc(p, x, prec)
R = p.ring
index = R.gens.index(x)
m = min(p, key=lambda k: k[index])[index]
p = mul_xin(p, index, -m)
prec -= m
if _has_constant_term(p - 1, x):
zm = R.zero_monom
c = p[zm]
if R.domain is EX:
c_expr = c.as_expr()
const = c_expr**QQ(1, n)
elif isinstance(c, PolyElement):
try:
c_expr = c.as_expr()
const = R(c_expr**(QQ(1, n)))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
else:
try: # RealElement doesn't support
const = R(c**Rational(1, n)) # exponentiation with mpq object
except ValueError: # as exponent
raise DomainError("The given series can't be expanded in "
"this domain.")
res = rs_nth_root(p/c, n, x, prec)*const
else:
res = _nth_root1(p, n, x, prec)
if m:
m = QQ(m, n)
res = mul_xin(res, index, m)
return res
def rs_log(p, x, prec):
"""
The logarithm of ``p`` modulo ``O(x**prec)``.
Notes
=====
Truncation of the integral of ``p**(-1)*dp/dx`` is used.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_log
>>> R, x = ring('x', QQ)
>>> rs_log(1 + x, x, 8)
1/7*x**7 - 1/6*x**6 + 1/5*x**5 - 1/4*x**4 + 1/3*x**3 - 1/2*x**2 + x
>>> rs_log(x**QQ(3, 2) + 1, x, 5)
1/3*x**(9/2) - 1/2*x**3 + x**(3/2)
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_log, p, x, prec)
R = p.ring
if p == 1:
return R.zero
c = _get_constant_term(p, x)
if c:
const = 0
if c == 1:
pass
else:
c_expr = c.as_expr()
if R.domain is EX:
const = log(c_expr)
elif isinstance(c, PolyElement):
try:
const = R(log(c_expr))
except ValueError:
R = R.add_gens([log(c_expr)])
p = p.set_ring(R)
x = x.set_ring(R)
c = c.set_ring(R)
const = R(log(c_expr))
else:
try:
const = R(log(c))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
dlog = p.diff(x)
dlog = rs_mul(dlog, _series_inversion1(p, x, prec), x, prec - 1)
return rs_integrate(dlog, x) + const
else:
raise NotImplementedError
def rs_LambertW(p, x, prec):
"""
Calculate the series expansion of the principal branch of the Lambert W
function.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_LambertW
>>> R, x, y = ring('x, y', QQ)
>>> rs_LambertW(x + x*y, x, 3)
-x**2*y**2 - 2*x**2*y - x**2 + x*y + x
See Also
========
LambertW
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_LambertW, p, x, prec)
R = p.ring
p1 = R(0)
if _has_constant_term(p, x):
raise NotImplementedError("Polynomial must not have constant term in "
"the series variables")
if x in R.gens:
for precx in _giant_steps(prec):
e = rs_exp(p1, x, precx)
p2 = rs_mul(e, p1, x, precx) - p
p3 = rs_mul(e, p1 + 1, x, precx)
p3 = rs_series_inversion(p3, x, precx)
tmp = rs_mul(p2, p3, x, precx)
p1 -= tmp
return p1
else:
raise NotImplementedError
def _exp1(p, x, prec):
r"""Helper function for `rs\_exp`. """
R = p.ring
p1 = R(1)
for precx in _giant_steps(prec):
pt = p - rs_log(p1, x, precx)
tmp = rs_mul(pt, p1, x, precx)
p1 += tmp
return p1
def rs_exp(p, x, prec):
"""
Exponentiation of a series modulo ``O(x**prec)``
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_exp
>>> R, x = ring('x', QQ)
>>> rs_exp(x**2, x, 7)
1/6*x**6 + 1/2*x**4 + x**2 + 1
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_exp, p, x, prec)
R = p.ring
c = _get_constant_term(p, x)
if c:
if R.domain is EX:
c_expr = c.as_expr()
const = exp(c_expr)
elif isinstance(c, PolyElement):
try:
c_expr = c.as_expr()
const = R(exp(c_expr))
except ValueError:
R = R.add_gens([exp(c_expr)])
p = p.set_ring(R)
x = x.set_ring(R)
c = c.set_ring(R)
const = R(exp(c_expr))
else:
try:
const = R(exp(c))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
p1 = p - c
# Makes use of SymPy functions to evaluate the exp of the constant
# term.
return const*rs_exp(p1, x, prec)
if len(p) > 20:
return _exp1(p, x, prec)
one = R(1)
n = 1
c = []
for k in range(prec):
c.append(one/n)
k += 1
n *= k
r = rs_series_from_list(p, c, x, prec)
return r
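# Informal note: the coefficient list built above is c[k] = 1/k!, i.e. the
# Maclaurin coefficients of exp, which rs_series_from_list then composes with
# ``p``.  For instance, matching the docstring example:
#
#   >>> rs_exp(x**2, x, 7)
#   1/6*x**6 + 1/2*x**4 + x**2 + 1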
def _atan(p, iv, prec):
"""
Expansion using the series formula.
Faster for very small and univariate series.
"""
R = p.ring
mo = R(-1)
c = [-mo]
p2 = rs_square(p, iv, prec)
for k in range(1, prec):
c.append(mo**k/(2*k + 1))
s = rs_series_from_list(p2, c, iv, prec)
s = rs_mul(s, p, iv, prec)
return s
def rs_atan(p, x, prec):
"""
The arctangent of a series
Return the series expansion of the atan of ``p``, about 0.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_atan
>>> R, x, y = ring('x, y', QQ)
>>> rs_atan(x + x*y, x, 4)
-1/3*x**3*y**3 - x**3*y**2 - x**3*y - 1/3*x**3 + x*y + x
See Also
========
atan
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_atan, p, x, prec)
R = p.ring
const = 0
if _has_constant_term(p, x):
zm = R.zero_monom
c = p[zm]
if R.domain is EX:
c_expr = c.as_expr()
const = atan(c_expr)
elif isinstance(c, PolyElement):
try:
c_expr = c.as_expr()
const = R(atan(c_expr))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
else:
try:
const = R(atan(c))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
# Instead of using a closed form formula, we differentiate atan(p) to get
# `1/(1+p**2) * dp`, whose series expansion is much easier to calculate.
# Finally we integrate to get back atan
dp = p.diff(x)
p1 = rs_square(p, x, prec) + R(1)
p1 = rs_series_inversion(p1, x, prec - 1)
p1 = rs_mul(dp, p1, x, prec - 1)
return rs_integrate(p1, x) + const
def rs_asin(p, x, prec):
"""
Arcsine of a series
Return the series expansion of the asin of ``p``, about 0.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_asin
>>> R, x, y = ring('x, y', QQ)
>>> rs_asin(x, x, 8)
5/112*x**7 + 3/40*x**5 + 1/6*x**3 + x
See Also
========
asin
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_asin, p, x, prec)
if _has_constant_term(p, x):
raise NotImplementedError("Polynomial must not have constant term in "
"series variables")
R = p.ring
if x in R.gens:
# get a good value
if len(p) > 20:
dp = rs_diff(p, x)
p1 = 1 - rs_square(p, x, prec - 1)
p1 = rs_nth_root(p1, -2, x, prec - 1)
p1 = rs_mul(dp, p1, x, prec - 1)
return rs_integrate(p1, x)
one = R(1)
c = [0, one, 0]
for k in range(3, prec, 2):
c.append((k - 2)**2*c[-2]/(k*(k - 1)))
c.append(0)
return rs_series_from_list(p, c, x, prec)
else:
raise NotImplementedError
def _tan1(p, x, prec):
r"""
Helper function of :func:`rs_tan`.
Return the series expansion of tan of a univariate series using Newton's
method. It takes advantage of the fact that series expansion of atan is
easier than that of tan.
Consider `f(x) = y - \arctan(x)`
Let r be a root of f(x) found using Newton's method.
Then `f(r) = 0`,
i.e. `y = \arctan(r)`, so `r = \tan(y)` as required.
"""
R = p.ring
p1 = R(0)
for precx in _giant_steps(prec):
tmp = p - rs_atan(p1, x, precx)
tmp = rs_mul(tmp, 1 + rs_square(p1, x, precx), x, precx)
p1 += tmp
return p1
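# Informal note on the iteration above: it is the Newton step for
# f(q) = p - atan(q).  Since f'(q) = -1/(1 + q**2), the update
#   q  <-  q - f(q)/f'(q)  =  q + (p - atan(q))*(1 + q**2)
# is exactly `p1 += tmp`, and _giant_steps doubles the working precision at
# every pass, so each step roughly doubles the number of correct terms.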
def rs_tan(p, x, prec):
"""
Tangent of a series.
Return the series expansion of the tan of ``p``, about 0.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_tan
>>> R, x, y = ring('x, y', QQ)
>>> rs_tan(x + x*y, x, 4)
1/3*x**3*y**3 + x**3*y**2 + x**3*y + 1/3*x**3 + x*y + x
See Also
========
_tan1, tan
"""
if rs_is_puiseux(p, x):
r = rs_puiseux(rs_tan, p, x, prec)
return r
R = p.ring
const = 0
c = _get_constant_term(p, x)
if c:
if R.domain is EX:
c_expr = c.as_expr()
const = tan(c_expr)
elif isinstance(c, PolyElement):
try:
c_expr = c.as_expr()
const = R(tan(c_expr))
except ValueError:
R = R.add_gens([tan(c_expr, )])
p = p.set_ring(R)
x = x.set_ring(R)
c = c.set_ring(R)
const = R(tan(c_expr))
else:
try:
const = R(tan(c))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
p1 = p - c
# Makes use of SymPy functions to evaluate the tan of the constant
# term.
t2 = rs_tan(p1, x, prec)
t = rs_series_inversion(1 - const*t2, x, prec)
return rs_mul(const + t2, t, x, prec)
if R.ngens == 1:
return _tan1(p, x, prec)
else:
return rs_fun(p, rs_tan, x, prec)
def rs_cot(p, x, prec):
"""
Cotangent of a series
Return the series expansion of the cot of ``p``, about 0.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_cot
>>> R, x, y = ring('x, y', QQ)
>>> rs_cot(x, x, 6)
-2/945*x**5 - 1/45*x**3 - 1/3*x + x**(-1)
See Also
========
cot
"""
# It cannot handle series like `p = x + x*y` where the coefficient of the
# linear term in the series variable is symbolic.
if rs_is_puiseux(p, x):
r = rs_puiseux(rs_cot, p, x, prec)
return r
i, m = _check_series_var(p, x, 'cot')
prec1 = prec + 2*m
c, s = rs_cos_sin(p, x, prec1)
s = mul_xin(s, i, -m)
s = rs_series_inversion(s, x, prec1)
res = rs_mul(c, s, x, prec1)
res = mul_xin(res, i, -m)
res = rs_trunc(res, x, prec)
return res
def rs_sin(p, x, prec):
"""
Sine of a series
Return the series expansion of the sin of ``p``, about 0.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_sin
>>> R, x, y = ring('x, y', QQ)
>>> rs_sin(x + x*y, x, 4)
-1/6*x**3*y**3 - 1/2*x**3*y**2 - 1/2*x**3*y - 1/6*x**3 + x*y + x
>>> rs_sin(x**QQ(3, 2) + x*y**QQ(7, 5), x, 4)
-1/2*x**(7/2)*y**(14/5) - 1/6*x**3*y**(21/5) + x**(3/2) + x*y**(7/5)
See Also
========
sin
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_sin, p, x, prec)
R = x.ring
if not p:
return R(0)
c = _get_constant_term(p, x)
if c:
if R.domain is EX:
c_expr = c.as_expr()
t1, t2 = sin(c_expr), cos(c_expr)
elif isinstance(c, PolyElement):
try:
c_expr = c.as_expr()
t1, t2 = R(sin(c_expr)), R(cos(c_expr))
except ValueError:
R = R.add_gens([sin(c_expr), cos(c_expr)])
p = p.set_ring(R)
x = x.set_ring(R)
c = c.set_ring(R)
t1, t2 = R(sin(c_expr)), R(cos(c_expr))
else:
try:
t1, t2 = R(sin(c)), R(cos(c))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
p1 = p - c
# Makes use of sympy cos, sin functions to evaluate the values of the
# cos/sin of the constant term.
return rs_sin(p1, x, prec)*t2 + rs_cos(p1, x, prec)*t1
# Series is calculated in terms of tan as its evaluation is fast.
if len(p) > 20 and R.ngens == 1:
t = rs_tan(p/2, x, prec)
t2 = rs_square(t, x, prec)
p1 = rs_series_inversion(1 + t2, x, prec)
return rs_mul(p1, 2*t, x, prec)
one = R(1)
n = 1
c = [0]
for k in range(2, prec + 2, 2):
c.append(one/n)
c.append(0)
n *= -k*(k + 1)
return rs_series_from_list(p, c, x, prec)
def rs_cos(p, x, prec):
"""
Cosine of a series
Return the series expansion of the cos of ``p``, about 0.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_cos
>>> R, x, y = ring('x, y', QQ)
>>> rs_cos(x + x*y, x, 4)
-1/2*x**2*y**2 - x**2*y - 1/2*x**2 + 1
>>> rs_cos(x + x*y, x, 4)/x**QQ(7, 5)
-1/2*x**(3/5)*y**2 - x**(3/5)*y - 1/2*x**(3/5) + x**(-7/5)
See Also
========
cos
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_cos, p, x, prec)
R = p.ring
c = _get_constant_term(p, x)
if c:
if R.domain is EX:
c_expr = c.as_expr()
_, _ = sin(c_expr), cos(c_expr)
elif isinstance(c, PolyElement):
try:
c_expr = c.as_expr()
_, _ = R(sin(c_expr)), R(cos(c_expr))
except ValueError:
R = R.add_gens([sin(c_expr), cos(c_expr)])
p = p.set_ring(R)
x = x.set_ring(R)
c = c.set_ring(R)
else:
try:
_, _ = R(sin(c)), R(cos(c))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
p1 = p - c
# Makes use of sympy cos, sin functions to evaluate the values of the
# cos/sin of the constant term.
p_cos = rs_cos(p1, x, prec)
p_sin = rs_sin(p1, x, prec)
R = R.compose(p_cos.ring).compose(p_sin.ring)
p_cos.set_ring(R)
p_sin.set_ring(R)
t1, t2 = R(sin(c_expr)), R(cos(c_expr))
return p_cos*t2 - p_sin*t1
# Series is calculated in terms of tan as its evaluation is fast.
if len(p) > 20 and R.ngens == 1:
t = rs_tan(p/2, x, prec)
t2 = rs_square(t, x, prec)
p1 = rs_series_inversion(1+t2, x, prec)
return rs_mul(p1, 1 - t2, x, prec)
one = R(1)
n = 1
c = []
for k in range(2, prec + 2, 2):
c.append(one/n)
c.append(0)
n *= -k*(k - 1)
return rs_series_from_list(p, c, x, prec)
def rs_cos_sin(p, x, prec):
r"""
Return the tuple ``(rs_cos(p, x, prec), rs_sin(p, x, prec))``.
It is faster than calling rs_cos and rs_sin separately.
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_cos_sin, p, x, prec)
t = rs_tan(p/2, x, prec)
t2 = rs_square(t, x, prec)
p1 = rs_series_inversion(1 + t2, x, prec)
return (rs_mul(p1, 1 - t2, x, prec), rs_mul(p1, 2*t, x, prec))
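# Informal note: with t = rs_tan(p/2, x, prec) the tuple returned above is the
# tangent half-angle rewrite
#   cos(p) = (1 - t**2)/(1 + t**2),   sin(p) = 2*t/(1 + t**2),
# computed with a single series inversion of 1 + t**2.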
def _atanh(p, x, prec):
"""
Expansion using the series formula.
Faster for very small and univariate series.
"""
R = p.ring
one = R(1)
c = [one]
p2 = rs_square(p, x, prec)
for k in range(1, prec):
c.append(one/(2*k + 1))
s = rs_series_from_list(p2, c, x, prec)
s = rs_mul(s, p, x, prec)
return s
def rs_atanh(p, x, prec):
"""
Hyperbolic arctangent of a series
Return the series expansion of the atanh of ``p``, about 0.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_atanh
>>> R, x, y = ring('x, y', QQ)
>>> rs_atanh(x + x*y, x, 4)
1/3*x**3*y**3 + x**3*y**2 + x**3*y + 1/3*x**3 + x*y + x
See Also
========
atanh
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_atanh, p, x, prec)
R = p.ring
const = 0
if _has_constant_term(p, x):
zm = R.zero_monom
c = p[zm]
if R.domain is EX:
c_expr = c.as_expr()
const = atanh(c_expr)
elif isinstance(c, PolyElement):
try:
c_expr = c.as_expr()
const = R(atanh(c_expr))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
else:
try:
const = R(atanh(c))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
# Instead of using a closed form formula, we differentiate atanh(p) to get
# `1/(1-p**2) * dp`, whose series expansion is much easier to calculate.
# Finally we integrate to get back atanh
dp = rs_diff(p, x)
p1 = - rs_square(p, x, prec) + 1
p1 = rs_series_inversion(p1, x, prec - 1)
p1 = rs_mul(dp, p1, x, prec - 1)
return rs_integrate(p1, x) + const
def rs_sinh(p, x, prec):
"""
Hyperbolic sine of a series
Return the series expansion of the sinh of ``p``, about 0.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_sinh
>>> R, x, y = ring('x, y', QQ)
>>> rs_sinh(x + x*y, x, 4)
1/6*x**3*y**3 + 1/2*x**3*y**2 + 1/2*x**3*y + 1/6*x**3 + x*y + x
See Also
========
sinh
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_sinh, p, x, prec)
t = rs_exp(p, x, prec)
t1 = rs_series_inversion(t, x, prec)
return (t - t1)/2
def rs_cosh(p, x, prec):
"""
Hyperbolic cosine of a series
Return the series expansion of the cosh of ``p``, about 0.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_cosh
>>> R, x, y = ring('x, y', QQ)
>>> rs_cosh(x + x*y, x, 4)
1/2*x**2*y**2 + x**2*y + 1/2*x**2 + 1
See Also
========
cosh
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_cosh, p, x, prec)
t = rs_exp(p, x, prec)
t1 = rs_series_inversion(t, x, prec)
return (t + t1)/2
def _tanh(p, x, prec):
r"""
Helper function of :func:`rs_tanh`
Return the series expansion of tanh of a univariate series using Newton's
method. It takes advantage of the fact that series expansion of atanh is
easier than that of tanh.
See Also
========
rs_tanh
"""
R = p.ring
p1 = R(0)
for precx in _giant_steps(prec):
tmp = p - rs_atanh(p1, x, precx)
tmp = rs_mul(tmp, 1 - rs_square(p1, x, prec), x, precx)
p1 += tmp
return p1
def rs_tanh(p, x, prec):
"""
Hyperbolic tangent of a series
Return the series expansion of the tanh of ``p``, about 0.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_tanh
>>> R, x, y = ring('x, y', QQ)
>>> rs_tanh(x + x*y, x, 4)
-1/3*x**3*y**3 - x**3*y**2 - x**3*y - 1/3*x**3 + x*y + x
See Also
========
tanh
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_tanh, p, x, prec)
R = p.ring
const = 0
if _has_constant_term(p, x):
zm = R.zero_monom
c = p[zm]
if R.domain is EX:
c_expr = c.as_expr()
const = tanh(c_expr)
elif isinstance(c, PolyElement):
try:
c_expr = c.as_expr()
const = R(tanh(c_expr))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
else:
try:
const = R(tanh(c))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
p1 = p - c
t1 = rs_tanh(p1, x, prec)
t = rs_series_inversion(1 + const*t1, x, prec)
return rs_mul(const + t1, t, x, prec)
if R.ngens == 1:
return _tanh(p, x, prec)
else:
return rs_fun(p, _tanh, x, prec)
def rs_newton(p, x, prec):
"""
Compute the truncated Newton sum of the polynomial ``p``
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_newton
>>> R, x = ring('x', QQ)
>>> p = x**2 - 2
>>> rs_newton(p, x, 5)
8*x**4 + 4*x**2 + 2
"""
deg = p.degree()
p1 = _invert_monoms(p)
p2 = rs_series_inversion(p1, x, prec)
p3 = rs_mul(p1.diff(x), p2, x, prec)
res = deg - p3*x
return res
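# Informal derivation of the formula used above: writing rev(p) for
# _invert_monoms(p) and a_i for the roots of p, one has
# rev(p)(x) = LC(p)*prod(1 - a_i*x), so
#   rev(p)'(x)/rev(p)(x) = -sum(a_i/(1 - a_i*x)) = -sum_{k>=1} s_k*x**(k-1),
# where s_k = sum(a_i**k) is the k-th Newton sum.  Hence
#   deg(p) - x*rev(p)'(x)/rev(p)(x) = s_0 + s_1*x + s_2*x**2 + ...
# which is exactly `res`.  For p = x**2 - 2 (roots +-sqrt(2)) this gives
# 2 + 4*x**2 + 8*x**4, matching the docstring example.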
def rs_hadamard_exp(p1, inverse=False):
"""
Return ``sum f_i/i!*x**i`` from ``sum f_i*x**i``,
where ``x`` is the first variable.
If ``inverse=True`` return ``sum f_i*i!*x**i``.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_hadamard_exp
>>> R, x = ring('x', QQ)
>>> p = 1 + x + x**2 + x**3
>>> rs_hadamard_exp(p)
1/6*x**3 + 1/2*x**2 + x + 1
"""
R = p1.ring
if R.domain != QQ:
raise NotImplementedError
p = R.zero
if not inverse:
for exp1, v1 in p1.items():
p[exp1] = v1/int(ifac(exp1[0]))
else:
for exp1, v1 in p1.items():
p[exp1] = v1*int(ifac(exp1[0]))
return p
def rs_compose_add(p1, p2):
"""
Compute the composed sum ``prod(p2(x - beta) for beta a root of p1)``.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_compose_add
>>> R, x = ring('x', QQ)
>>> f = x**2 - 2
>>> g = x**2 - 3
>>> rs_compose_add(f, g)
x**4 - 10*x**2 + 1
References
==========
.. [1] A. Bostan, P. Flajolet, B. Salvy and E. Schost
"Fast Computation with Two Algebraic Numbers",
(2002) Research Report 4579, Institut
National de Recherche en Informatique et en Automatique
"""
R = p1.ring
x = R.gens[0]
prec = p1.degree()*p2.degree() + 1
np1 = rs_newton(p1, x, prec)
np1e = rs_hadamard_exp(np1)
np2 = rs_newton(p2, x, prec)
np2e = rs_hadamard_exp(np2)
np3e = rs_mul(np1e, np2e, x, prec)
np3 = rs_hadamard_exp(np3e, True)
np3a = (np3[(0,)] - np3)/x
q = rs_integrate(np3a, x)
q = rs_exp(q, x, prec)
q = _invert_monoms(q)
q = q.primitive()[1]
dp = p1.degree()*p2.degree() - q.degree()
# `dp` is the multiplicity of the zeroes of the resultant;
# these zeroes are missed in this computation so they are put here.
# if p1 and p2 are monic irreducible polynomials,
# there are zeroes in the resultant
# if and only if p1 = p2; in fact in that case p1 and p2 have a
# root in common, so gcd(p1, p2) != 1; since p1 and p2 are irreducible
# this means p1 = p2.
if dp:
q = q*x**dp
return q
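# Informal outline (following the reference cited above): rs_newton gives the
# Newton-sum series of p1 and p2; rs_hadamard_exp turns them into the
# exponential generating series sum_i exp(a_i*x) and sum_j exp(b_j*x); their
# product is sum_{i,j} exp((a_i + b_j)*x), i.e. the exponential Newton-sum
# series of the composed sum.  Converting back with the inverse Hadamard
# transform, integrating (s_0 - s(x))/x and exponentiating reconstructs the
# reversed polynomial, which _invert_monoms finally turns into the answer.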
_convert_func = {
'sin': 'rs_sin',
'cos': 'rs_cos',
'exp': 'rs_exp',
'tan': 'rs_tan',
'log': 'rs_log'
}
def rs_min_pow(expr, series_rs, a):
"""Find the minimum power of `a` in the series expansion of expr"""
series = 0
n = 2
while series == 0:
series = _rs_series(expr, series_rs, a, n)
n *= 2
R = series.ring
a = R(a)
i = R.gens.index(a)
return min(series, key=lambda t: t[i])[i]
def _rs_series(expr, series_rs, a, prec):
# TODO Use _parallel_dict_from_expr instead of sring as sring is
# inefficient. For details, read the todo in sring.
args = expr.args
R = series_rs.ring
# expr does not contain any function to be expanded
if not any(arg.has(Function) for arg in args) and not expr.is_Function:
return series_rs
if not expr.has(a):
return series_rs
elif expr.is_Function:
arg = args[0]
if len(args) > 1:
raise NotImplementedError
R1, series = sring(arg, domain=QQ, expand=False, series=True)
series_inner = _rs_series(arg, series, a, prec)
# Why do we need to compose these three rings?
#
# We want to use a simple domain (like ``QQ`` or ``RR``) but they don't
# support symbolic coefficients. We need a ring that for example lets
# us have `sin(1)` and `cos(1)` as coefficients if we are expanding
# `sin(x + 1)`. The ``EX`` domain allows all symbolic coefficients, but
# that makes it very complex and hence slow.
#
# To solve this problem, we add only those symbolic elements as
# generators to our ring, that we need. Here, series_inner might
# involve terms like `sin(4)`, `exp(a)`, etc, which are not there in
# R1 or R. Hence, we compose these three rings to create one that has
# the generators of all three.
R = R.compose(R1).compose(series_inner.ring)
series_inner = series_inner.set_ring(R)
series = eval(_convert_func[str(expr.func)])(series_inner,
R(a), prec)
return series
elif expr.is_Mul:
n = len(args)
for arg in args: # XXX Looks redundant
if not arg.is_Number:
R1, _ = sring(arg, expand=False, series=True)
R = R.compose(R1)
min_pows = list(map(rs_min_pow, args, [R(arg) for arg in args],
[a]*len(args)))
sum_pows = sum(min_pows)
series = R(1)
for i in range(n):
_series = _rs_series(args[i], R(args[i]), a, prec - sum_pows +
min_pows[i])
R = R.compose(_series.ring)
_series = _series.set_ring(R)
series = series.set_ring(R)
series *= _series
series = rs_trunc(series, R(a), prec)
return series
elif expr.is_Add:
n = len(args)
series = R(0)
for i in range(n):
_series = _rs_series(args[i], R(args[i]), a, prec)
R = R.compose(_series.ring)
_series = _series.set_ring(R)
series = series.set_ring(R)
series += _series
return series
elif expr.is_Pow:
R1, _ = sring(expr.base, domain=QQ, expand=False, series=True)
R = R.compose(R1)
series_inner = _rs_series(expr.base, R(expr.base), a, prec)
return rs_pow(series_inner, expr.exp, series_inner.ring(a), prec)
# The `is_constant` method is buggy hence we check it at the end.
# See issue #9786 for details.
elif isinstance(expr, Expr) and expr.is_constant():
return sring(expr, domain=QQ, expand=False, series=True)[1]
else:
raise NotImplementedError
def rs_series(expr, a, prec):
"""Return the series expansion of an expression about 0.
Parameters
==========
expr : :class:`Expr`
a : :class:`Symbol` with respect to which expr is to be expanded
prec : order of the series expansion
Currently supports multivariate Taylor series expansion. This is much
faster than SymPy's series method as it uses sparse polynomial operations.
It automatically creates the simplest ring required to represent the series
expansion through repeated calls to sring.
Examples
========
>>> from sympy.polys.ring_series import rs_series
>>> from sympy.functions import sin, cos, exp, tan
>>> from sympy.core import symbols
>>> from sympy.polys.domains import QQ
>>> a, b, c = symbols('a, b, c')
>>> rs_series(sin(a) + exp(a), a, 5)
1/24*a**4 + 1/2*a**2 + 2*a + 1
>>> series = rs_series(tan(a + b)*cos(a + c), a, 2)
>>> series.as_expr()
-a*sin(c)*tan(b) + a*cos(c)*tan(b)**2 + a*cos(c) + cos(c)*tan(b)
>>> series = rs_series(exp(a**QQ(1,3) + a**QQ(2, 5)), a, 1)
>>> series.as_expr()
a**(11/15) + a**(4/5)/2 + a**(2/5) + a**(2/3)/2 + a**(1/3) + 1
"""
R, series = sring(expr, domain=QQ, expand=False, series=True)
if a not in R.symbols:
R = R.add_gens([a, ])
series = series.set_ring(R)
series = _rs_series(expr, series, a, prec)
R = series.ring
gen = R(a)
prec_got = series.degree(gen) + 1
if prec_got >= prec:
return rs_trunc(series, gen, prec)
else:
# increase the requested number of terms to get the desired
# number; keep increasing (up to 9) until the received order
# is different from the original order, and then predict how
# many additional terms are needed
for more in range(1, 9):
p1 = _rs_series(expr, series, a, prec=prec + more)
gen = gen.set_ring(p1.ring)
new_prec = p1.degree(gen) + 1
if new_prec != prec_got:
prec_do = ceiling(prec + (prec - prec_got)*more/(new_prec -
prec_got))
p1 = _rs_series(expr, series, a, prec=prec_do)
while p1.degree(gen) + 1 < prec:
p1 = _rs_series(expr, series, a, prec=prec_do)
gen = gen.set_ring(p1.ring)
prec_do *= 2
break
else:
break
else:
raise ValueError('Could not calculate %s terms for %s'
% (str(prec), expr))
return rs_trunc(p1, gen, prec)
|
d482a05ab18fa8641a1f2a1ff1e54508d903f96f43b1f7dc789b681086c3db38 | """OO layer for several polynomial representations. """
from __future__ import print_function, division
from sympy import oo
from sympy.core.sympify import CantSympify
from sympy.polys.polyerrors import CoercionFailed, NotReversible, NotInvertible
from sympy.polys.polyutils import PicklableWithSlots
class GenericPoly(PicklableWithSlots):
"""Base class for low-level polynomial representations. """
def ground_to_ring(f):
"""Make the ground domain a ring. """
return f.set_domain(f.dom.get_ring())
def ground_to_field(f):
"""Make the ground domain a field. """
return f.set_domain(f.dom.get_field())
def ground_to_exact(f):
"""Make the ground domain exact. """
return f.set_domain(f.dom.get_exact())
@classmethod
def _perify_factors(per, result, include):
if include:
coeff, factors = result
else:
coeff = result
factors = [ (per(g), k) for g, k in factors ]
if include:
return coeff, factors
else:
return factors
from sympy.polys.densebasic import (
dmp_validate,
dup_normal, dmp_normal,
dup_convert, dmp_convert,
dmp_from_sympy,
dup_strip,
dup_degree, dmp_degree_in,
dmp_degree_list,
dmp_negative_p,
dup_LC, dmp_ground_LC,
dup_TC, dmp_ground_TC,
dmp_ground_nth,
dmp_one, dmp_ground,
dmp_zero_p, dmp_one_p, dmp_ground_p,
dup_from_dict, dmp_from_dict,
dmp_to_dict,
dmp_deflate,
dmp_inject, dmp_eject,
dmp_terms_gcd,
dmp_list_terms, dmp_exclude,
dmp_slice_in, dmp_permute,
dmp_to_tuple,)
from sympy.polys.densearith import (
dmp_add_ground,
dmp_sub_ground,
dmp_mul_ground,
dmp_quo_ground,
dmp_exquo_ground,
dmp_abs,
dup_neg, dmp_neg,
dup_add, dmp_add,
dup_sub, dmp_sub,
dup_mul, dmp_mul,
dmp_sqr,
dup_pow, dmp_pow,
dmp_pdiv,
dmp_prem,
dmp_pquo,
dmp_pexquo,
dmp_div,
dup_rem, dmp_rem,
dmp_quo,
dmp_exquo,
dmp_add_mul, dmp_sub_mul,
dmp_max_norm,
dmp_l1_norm)
from sympy.polys.densetools import (
dmp_clear_denoms,
dmp_integrate_in,
dmp_diff_in,
dmp_eval_in,
dup_revert,
dmp_ground_trunc,
dmp_ground_content,
dmp_ground_primitive,
dmp_ground_monic,
dmp_compose,
dup_decompose,
dup_shift,
dup_transform,
dmp_lift)
from sympy.polys.euclidtools import (
dup_half_gcdex, dup_gcdex, dup_invert,
dmp_subresultants,
dmp_resultant,
dmp_discriminant,
dmp_inner_gcd,
dmp_gcd,
dmp_lcm,
dmp_cancel)
from sympy.polys.sqfreetools import (
dup_gff_list,
dmp_norm,
dmp_sqf_p,
dmp_sqf_norm,
dmp_sqf_part,
dmp_sqf_list, dmp_sqf_list_include)
from sympy.polys.factortools import (
dup_cyclotomic_p, dmp_irreducible_p,
dmp_factor_list, dmp_factor_list_include)
from sympy.polys.rootisolation import (
dup_isolate_real_roots_sqf,
dup_isolate_real_roots,
dup_isolate_all_roots_sqf,
dup_isolate_all_roots,
dup_refine_real_root,
dup_count_real_roots,
dup_count_complex_roots,
dup_sturm)
from sympy.polys.polyerrors import (
UnificationFailed,
PolynomialError)
def init_normal_DMP(rep, lev, dom):
return DMP(dmp_normal(rep, lev, dom), dom, lev)
class DMP(PicklableWithSlots, CantSympify):
"""Dense Multivariate Polynomials over `K`. """
__slots__ = ('rep', 'lev', 'dom', 'ring')
def __init__(self, rep, dom, lev=None, ring=None):
if lev is not None:
if type(rep) is dict:
rep = dmp_from_dict(rep, lev, dom)
elif type(rep) is not list:
rep = dmp_ground(dom.convert(rep), lev)
else:
rep, lev = dmp_validate(rep)
self.rep = rep
self.lev = lev
self.dom = dom
self.ring = ring
def __repr__(f):
return "%s(%s, %s, %s)" % (f.__class__.__name__, f.rep, f.dom, f.ring)
def __hash__(f):
return hash((f.__class__.__name__, f.to_tuple(), f.lev, f.dom, f.ring))
def unify(f, g):
"""Unify representations of two multivariate polynomials. """
if not isinstance(g, DMP) or f.lev != g.lev:
raise UnificationFailed("can't unify %s with %s" % (f, g))
if f.dom == g.dom and f.ring == g.ring:
return f.lev, f.dom, f.per, f.rep, g.rep
else:
lev, dom = f.lev, f.dom.unify(g.dom)
ring = f.ring
if g.ring is not None:
if ring is not None:
ring = ring.unify(g.ring)
else:
ring = g.ring
F = dmp_convert(f.rep, lev, f.dom, dom)
G = dmp_convert(g.rep, lev, g.dom, dom)
def per(rep, dom=dom, lev=lev, kill=False):
if kill:
if not lev:
return rep
else:
lev -= 1
return DMP(rep, dom, lev, ring)
return lev, dom, per, F, G
def per(f, rep, dom=None, kill=False, ring=None):
"""Create a DMP out of the given representation. """
lev = f.lev
if kill:
if not lev:
return rep
else:
lev -= 1
if dom is None:
dom = f.dom
if ring is None:
ring = f.ring
return DMP(rep, dom, lev, ring)
@classmethod
def zero(cls, lev, dom, ring=None):
return DMP(0, dom, lev, ring)
@classmethod
def one(cls, lev, dom, ring=None):
return DMP(1, dom, lev, ring)
@classmethod
def from_list(cls, rep, lev, dom):
"""Create an instance of ``cls`` given a list of native coefficients. """
return cls(dmp_convert(rep, lev, None, dom), dom, lev)
@classmethod
def from_sympy_list(cls, rep, lev, dom):
"""Create an instance of ``cls`` given a list of SymPy coefficients. """
return cls(dmp_from_sympy(rep, lev, dom), dom, lev)
def to_dict(f, zero=False):
"""Convert ``f`` to a dict representation with native coefficients. """
return dmp_to_dict(f.rep, f.lev, f.dom, zero=zero)
def to_sympy_dict(f, zero=False):
"""Convert ``f`` to a dict representation with SymPy coefficients. """
rep = dmp_to_dict(f.rep, f.lev, f.dom, zero=zero)
for k, v in rep.items():
rep[k] = f.dom.to_sympy(v)
return rep
def to_list(f):
"""Convert ``f`` to a list representation with native coefficients. """
return f.rep
def to_sympy_list(f):
"""Convert ``f`` to a list representation with SymPy coefficients. """
def sympify_nested_list(rep):
out = []
for val in rep:
if isinstance(val, list):
out.append(sympify_nested_list(val))
else:
out.append(f.dom.to_sympy(val))
return out
return sympify_nested_list(f.rep)
def to_tuple(f):
"""
Convert ``f`` to a tuple representation with native coefficients.
This is needed for hashing.
"""
return dmp_to_tuple(f.rep, f.lev)
@classmethod
def from_dict(cls, rep, lev, dom):
"""Construct and instance of ``cls`` from a ``dict`` representation. """
return cls(dmp_from_dict(rep, lev, dom), dom, lev)
@classmethod
def from_monoms_coeffs(cls, monoms, coeffs, lev, dom, ring=None):
return DMP(dict(list(zip(monoms, coeffs))), dom, lev, ring)
def to_ring(f):
"""Make the ground domain a ring. """
return f.convert(f.dom.get_ring())
def to_field(f):
"""Make the ground domain a field. """
return f.convert(f.dom.get_field())
def to_exact(f):
"""Make the ground domain exact. """
return f.convert(f.dom.get_exact())
def convert(f, dom):
"""Convert the ground domain of ``f``. """
if f.dom == dom:
return f
else:
return DMP(dmp_convert(f.rep, f.lev, f.dom, dom), dom, f.lev)
def slice(f, m, n, j=0):
"""Take a continuous subsequence of terms of ``f``. """
return f.per(dmp_slice_in(f.rep, m, n, j, f.lev, f.dom))
def coeffs(f, order=None):
"""Returns all non-zero coefficients from ``f`` in lex order. """
return [ c for _, c in dmp_list_terms(f.rep, f.lev, f.dom, order=order) ]
def monoms(f, order=None):
"""Returns all non-zero monomials from ``f`` in lex order. """
return [ m for m, _ in dmp_list_terms(f.rep, f.lev, f.dom, order=order) ]
def terms(f, order=None):
"""Returns all non-zero terms from ``f`` in lex order. """
return dmp_list_terms(f.rep, f.lev, f.dom, order=order)
def all_coeffs(f):
"""Returns all coefficients from ``f``. """
if not f.lev:
if not f:
return [f.dom.zero]
else:
return [ c for c in f.rep ]
else:
raise PolynomialError('multivariate polynomials not supported')
def all_monoms(f):
"""Returns all monomials from ``f``. """
if not f.lev:
n = dup_degree(f.rep)
if n < 0:
return [(0,)]
else:
return [ (n - i,) for i, c in enumerate(f.rep) ]
else:
raise PolynomialError('multivariate polynomials not supported')
def all_terms(f):
"""Returns all terms from a ``f``. """
if not f.lev:
n = dup_degree(f.rep)
if n < 0:
return [((0,), f.dom.zero)]
else:
return [ ((n - i,), c) for i, c in enumerate(f.rep) ]
else:
raise PolynomialError('multivariate polynomials not supported')
def lift(f):
"""Convert algebraic coefficients to rationals. """
return f.per(dmp_lift(f.rep, f.lev, f.dom), dom=f.dom.dom)
def deflate(f):
"""Reduce degree of `f` by mapping `x_i^m` to `y_i`. """
J, F = dmp_deflate(f.rep, f.lev, f.dom)
return J, f.per(F)
def inject(f, front=False):
"""Inject ground domain generators into ``f``. """
F, lev = dmp_inject(f.rep, f.lev, f.dom, front=front)
return f.__class__(F, f.dom.dom, lev)
def eject(f, dom, front=False):
"""Eject selected generators into the ground domain. """
F = dmp_eject(f.rep, f.lev, dom, front=front)
return f.__class__(F, dom, f.lev - len(dom.symbols))
def exclude(f):
r"""
Remove useless generators from ``f``.
Returns the removed generators and the new excluded ``f``.
Examples
========
>>> from sympy.polys.polyclasses import DMP
>>> from sympy.polys.domains import ZZ
>>> DMP([[[ZZ(1)]], [[ZZ(1)], [ZZ(2)]]], ZZ).exclude()
([2], DMP([[1], [1, 2]], ZZ, None))
"""
J, F, u = dmp_exclude(f.rep, f.lev, f.dom)
return J, f.__class__(F, f.dom, u)
def permute(f, P):
r"""
Returns a polynomial in `K[x_{P(1)}, ..., x_{P(n)}]`.
Examples
========
>>> from sympy.polys.polyclasses import DMP
>>> from sympy.polys.domains import ZZ
>>> DMP([[[ZZ(2)], [ZZ(1), ZZ(0)]], [[]]], ZZ).permute([1, 0, 2])
DMP([[[2], []], [[1, 0], []]], ZZ, None)
>>> DMP([[[ZZ(2)], [ZZ(1), ZZ(0)]], [[]]], ZZ).permute([1, 2, 0])
DMP([[[1], []], [[2, 0], []]], ZZ, None)
"""
return f.per(dmp_permute(f.rep, P, f.lev, f.dom))
def terms_gcd(f):
"""Remove GCD of terms from the polynomial ``f``. """
J, F = dmp_terms_gcd(f.rep, f.lev, f.dom)
return J, f.per(F)
def add_ground(f, c):
"""Add an element of the ground domain to ``f``. """
return f.per(dmp_add_ground(f.rep, f.dom.convert(c), f.lev, f.dom))
def sub_ground(f, c):
"""Subtract an element of the ground domain from ``f``. """
return f.per(dmp_sub_ground(f.rep, f.dom.convert(c), f.lev, f.dom))
def mul_ground(f, c):
"""Multiply ``f`` by a an element of the ground domain. """
return f.per(dmp_mul_ground(f.rep, f.dom.convert(c), f.lev, f.dom))
def quo_ground(f, c):
"""Quotient of ``f`` by a an element of the ground domain. """
return f.per(dmp_quo_ground(f.rep, f.dom.convert(c), f.lev, f.dom))
def exquo_ground(f, c):
"""Exact quotient of ``f`` by a an element of the ground domain. """
return f.per(dmp_exquo_ground(f.rep, f.dom.convert(c), f.lev, f.dom))
def abs(f):
"""Make all coefficients in ``f`` positive. """
return f.per(dmp_abs(f.rep, f.lev, f.dom))
def neg(f):
"""Negate all coefficients in ``f``. """
return f.per(dmp_neg(f.rep, f.lev, f.dom))
def add(f, g):
"""Add two multivariate polynomials ``f`` and ``g``. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_add(F, G, lev, dom))
def sub(f, g):
"""Subtract two multivariate polynomials ``f`` and ``g``. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_sub(F, G, lev, dom))
def mul(f, g):
"""Multiply two multivariate polynomials ``f`` and ``g``. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_mul(F, G, lev, dom))
def sqr(f):
"""Square a multivariate polynomial ``f``. """
return f.per(dmp_sqr(f.rep, f.lev, f.dom))
def pow(f, n):
"""Raise ``f`` to a non-negative power ``n``. """
if isinstance(n, int):
return f.per(dmp_pow(f.rep, n, f.lev, f.dom))
else:
raise TypeError("``int`` expected, got %s" % type(n))
def pdiv(f, g):
"""Polynomial pseudo-division of ``f`` and ``g``. """
lev, dom, per, F, G = f.unify(g)
q, r = dmp_pdiv(F, G, lev, dom)
return per(q), per(r)
def prem(f, g):
"""Polynomial pseudo-remainder of ``f`` and ``g``. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_prem(F, G, lev, dom))
def pquo(f, g):
"""Polynomial pseudo-quotient of ``f`` and ``g``. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_pquo(F, G, lev, dom))
def pexquo(f, g):
"""Polynomial exact pseudo-quotient of ``f`` and ``g``. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_pexquo(F, G, lev, dom))
def div(f, g):
"""Polynomial division with remainder of ``f`` and ``g``. """
lev, dom, per, F, G = f.unify(g)
q, r = dmp_div(F, G, lev, dom)
return per(q), per(r)
def rem(f, g):
"""Computes polynomial remainder of ``f`` and ``g``. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_rem(F, G, lev, dom))
def quo(f, g):
"""Computes polynomial quotient of ``f`` and ``g``. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_quo(F, G, lev, dom))
def exquo(f, g):
"""Computes polynomial exact quotient of ``f`` and ``g``. """
lev, dom, per, F, G = f.unify(g)
res = per(dmp_exquo(F, G, lev, dom))
if f.ring and res not in f.ring:
from sympy.polys.polyerrors import ExactQuotientFailed
raise ExactQuotientFailed(f, g, f.ring)
return res
def degree(f, j=0):
"""Returns the leading degree of ``f`` in ``x_j``. """
if isinstance(j, int):
return dmp_degree_in(f.rep, j, f.lev)
else:
raise TypeError("``int`` expected, got %s" % type(j))
def degree_list(f):
"""Returns a list of degrees of ``f``. """
return dmp_degree_list(f.rep, f.lev)
def total_degree(f):
"""Returns the total degree of ``f``. """
return max(sum(m) for m in f.monoms())
def homogenize(f, s):
"""Return homogeneous polynomial of ``f``"""
td = f.total_degree()
result = {}
new_symbol = (s == len(f.terms()[0][0]))
for term in f.terms():
d = sum(term[0])
if d < td:
i = td - d
else:
i = 0
if new_symbol:
result[term[0] + (i,)] = term[1]
else:
l = list(term[0])
l[s] += i
result[tuple(l)] = term[1]
return DMP(result, f.dom, f.lev + int(new_symbol), f.ring)
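# Informal example for homogenize: every monomial is padded up to the total
# degree.  With ``s`` equal to the number of existing generators a new symbol
# is introduced, e.g. (informally) x**2 + y + 1 becomes x**2 + y*z + z**2 for
# a fresh generator z; otherwise the missing degree is added to the s-th
# existing variable.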
def homogeneous_order(f):
"""Returns the homogeneous order of ``f``. """
if f.is_zero:
return -oo
monoms = f.monoms()
tdeg = sum(monoms[0])
for monom in monoms:
_tdeg = sum(monom)
if _tdeg != tdeg:
return None
return tdeg
def LC(f):
"""Returns the leading coefficient of ``f``. """
return dmp_ground_LC(f.rep, f.lev, f.dom)
def TC(f):
"""Returns the trailing coefficient of ``f``. """
return dmp_ground_TC(f.rep, f.lev, f.dom)
def nth(f, *N):
"""Returns the ``n``-th coefficient of ``f``. """
if all(isinstance(n, int) for n in N):
return dmp_ground_nth(f.rep, N, f.lev, f.dom)
else:
raise TypeError("a sequence of integers expected")
def max_norm(f):
"""Returns maximum norm of ``f``. """
return dmp_max_norm(f.rep, f.lev, f.dom)
def l1_norm(f):
"""Returns l1 norm of ``f``. """
return dmp_l1_norm(f.rep, f.lev, f.dom)
def clear_denoms(f):
"""Clear denominators, but keep the ground domain. """
coeff, F = dmp_clear_denoms(f.rep, f.lev, f.dom)
return coeff, f.per(F)
def integrate(f, m=1, j=0):
"""Computes the ``m``-th order indefinite integral of ``f`` in ``x_j``. """
if not isinstance(m, int):
raise TypeError("``int`` expected, got %s" % type(m))
if not isinstance(j, int):
raise TypeError("``int`` expected, got %s" % type(j))
return f.per(dmp_integrate_in(f.rep, m, j, f.lev, f.dom))
def diff(f, m=1, j=0):
"""Computes the ``m``-th order derivative of ``f`` in ``x_j``. """
if not isinstance(m, int):
raise TypeError("``int`` expected, got %s" % type(m))
if not isinstance(j, int):
raise TypeError("``int`` expected, got %s" % type(j))
return f.per(dmp_diff_in(f.rep, m, j, f.lev, f.dom))
def eval(f, a, j=0):
"""Evaluates ``f`` at the given point ``a`` in ``x_j``. """
if not isinstance(j, int):
raise TypeError("``int`` expected, got %s" % type(j))
return f.per(dmp_eval_in(f.rep,
f.dom.convert(a), j, f.lev, f.dom), kill=True)
def half_gcdex(f, g):
"""Half extended Euclidean algorithm, if univariate. """
lev, dom, per, F, G = f.unify(g)
if not lev:
s, h = dup_half_gcdex(F, G, dom)
return per(s), per(h)
else:
raise ValueError('univariate polynomial expected')
def gcdex(f, g):
"""Extended Euclidean algorithm, if univariate. """
lev, dom, per, F, G = f.unify(g)
if not lev:
s, t, h = dup_gcdex(F, G, dom)
return per(s), per(t), per(h)
else:
raise ValueError('univariate polynomial expected')
def invert(f, g):
"""Invert ``f`` modulo ``g``, if possible. """
lev, dom, per, F, G = f.unify(g)
if not lev:
return per(dup_invert(F, G, dom))
else:
raise ValueError('univariate polynomial expected')
def revert(f, n):
"""Compute ``f**(-1)`` mod ``x**n``. """
if not f.lev:
return f.per(dup_revert(f.rep, n, f.dom))
else:
raise ValueError('univariate polynomial expected')
def subresultants(f, g):
"""Computes subresultant PRS sequence of ``f`` and ``g``. """
lev, dom, per, F, G = f.unify(g)
R = dmp_subresultants(F, G, lev, dom)
return list(map(per, R))
def resultant(f, g, includePRS=False):
"""Computes resultant of ``f`` and ``g`` via PRS. """
lev, dom, per, F, G = f.unify(g)
if includePRS:
res, R = dmp_resultant(F, G, lev, dom, includePRS=includePRS)
return per(res, kill=True), list(map(per, R))
return per(dmp_resultant(F, G, lev, dom), kill=True)
def discriminant(f):
"""Computes discriminant of ``f``. """
return f.per(dmp_discriminant(f.rep, f.lev, f.dom), kill=True)
def cofactors(f, g):
"""Returns GCD of ``f`` and ``g`` and their cofactors. """
lev, dom, per, F, G = f.unify(g)
h, cff, cfg = dmp_inner_gcd(F, G, lev, dom)
return per(h), per(cff), per(cfg)
def gcd(f, g):
"""Returns polynomial GCD of ``f`` and ``g``. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_gcd(F, G, lev, dom))
def lcm(f, g):
"""Returns polynomial LCM of ``f`` and ``g``. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_lcm(F, G, lev, dom))
def cancel(f, g, include=True):
"""Cancel common factors in a rational function ``f/g``. """
lev, dom, per, F, G = f.unify(g)
if include:
F, G = dmp_cancel(F, G, lev, dom, include=True)
else:
cF, cG, F, G = dmp_cancel(F, G, lev, dom, include=False)
F, G = per(F), per(G)
if include:
return F, G
else:
return cF, cG, F, G
def trunc(f, p):
"""Reduce ``f`` modulo a constant ``p``. """
return f.per(dmp_ground_trunc(f.rep, f.dom.convert(p), f.lev, f.dom))
def monic(f):
"""Divides all coefficients by ``LC(f)``. """
return f.per(dmp_ground_monic(f.rep, f.lev, f.dom))
def content(f):
"""Returns GCD of polynomial coefficients. """
return dmp_ground_content(f.rep, f.lev, f.dom)
def primitive(f):
"""Returns content and a primitive form of ``f``. """
cont, F = dmp_ground_primitive(f.rep, f.lev, f.dom)
return cont, f.per(F)
def compose(f, g):
"""Computes functional composition of ``f`` and ``g``. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_compose(F, G, lev, dom))
def decompose(f):
"""Computes functional decomposition of ``f``. """
if not f.lev:
return list(map(f.per, dup_decompose(f.rep, f.dom)))
else:
raise ValueError('univariate polynomial expected')
def shift(f, a):
"""Efficiently compute Taylor shift ``f(x + a)``. """
if not f.lev:
return f.per(dup_shift(f.rep, f.dom.convert(a), f.dom))
else:
raise ValueError('univariate polynomial expected')
def transform(f, p, q):
"""Evaluate functional transformation ``q**n * f(p/q)``."""
if f.lev:
raise ValueError('univariate polynomial expected')
lev, dom, per, P, Q = p.unify(q)
lev, dom, per, F, P = f.unify(per(P, dom, lev))
lev, dom, per, F, Q = per(F, dom, lev).unify(per(Q, dom, lev))
if not lev:
return per(dup_transform(F, P, Q, dom))
else:
raise ValueError('univariate polynomial expected')
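# Informal example for transform: with n = deg(f), the value q**n * f(p/q) is
# again a polynomial.  For f = x**2 + 1, p = x + 1 and q = x - 1 this is
# (x + 1)**2 + (x - 1)**2 = 2*x**2 + 2.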
def sturm(f):
"""Computes the Sturm sequence of ``f``. """
if not f.lev:
return list(map(f.per, dup_sturm(f.rep, f.dom)))
else:
raise ValueError('univariate polynomial expected')
def gff_list(f):
"""Computes greatest factorial factorization of ``f``. """
if not f.lev:
return [ (f.per(g), k) for g, k in dup_gff_list(f.rep, f.dom) ]
else:
raise ValueError('univariate polynomial expected')
def norm(f):
"""Computes ``Norm(f)``."""
r = dmp_norm(f.rep, f.lev, f.dom)
return f.per(r, dom=f.dom.dom)
def sqf_norm(f):
"""Computes square-free norm of ``f``. """
s, g, r = dmp_sqf_norm(f.rep, f.lev, f.dom)
return s, f.per(g), f.per(r, dom=f.dom.dom)
def sqf_part(f):
"""Computes square-free part of ``f``. """
return f.per(dmp_sqf_part(f.rep, f.lev, f.dom))
def sqf_list(f, all=False):
"""Returns a list of square-free factors of ``f``. """
coeff, factors = dmp_sqf_list(f.rep, f.lev, f.dom, all)
return coeff, [ (f.per(g), k) for g, k in factors ]
def sqf_list_include(f, all=False):
"""Returns a list of square-free factors of ``f``. """
factors = dmp_sqf_list_include(f.rep, f.lev, f.dom, all)
return [ (f.per(g), k) for g, k in factors ]
def factor_list(f):
"""Returns a list of irreducible factors of ``f``. """
coeff, factors = dmp_factor_list(f.rep, f.lev, f.dom)
return coeff, [ (f.per(g), k) for g, k in factors ]
def factor_list_include(f):
"""Returns a list of irreducible factors of ``f``. """
factors = dmp_factor_list_include(f.rep, f.lev, f.dom)
return [ (f.per(g), k) for g, k in factors ]
def intervals(f, all=False, eps=None, inf=None, sup=None, fast=False, sqf=False):
"""Compute isolating intervals for roots of ``f``. """
if not f.lev:
if not all:
if not sqf:
return dup_isolate_real_roots(f.rep, f.dom, eps=eps, inf=inf, sup=sup, fast=fast)
else:
return dup_isolate_real_roots_sqf(f.rep, f.dom, eps=eps, inf=inf, sup=sup, fast=fast)
else:
if not sqf:
return dup_isolate_all_roots(f.rep, f.dom, eps=eps, inf=inf, sup=sup, fast=fast)
else:
return dup_isolate_all_roots_sqf(f.rep, f.dom, eps=eps, inf=inf, sup=sup, fast=fast)
else:
raise PolynomialError(
"can't isolate roots of a multivariate polynomial")
def refine_root(f, s, t, eps=None, steps=None, fast=False):
"""
Refine an isolating interval to the given precision.
``eps`` should be a rational number.
"""
if not f.lev:
return dup_refine_real_root(f.rep, s, t, f.dom, eps=eps, steps=steps, fast=fast)
else:
raise PolynomialError(
"can't refine a root of a multivariate polynomial")
def count_real_roots(f, inf=None, sup=None):
"""Return the number of real roots of ``f`` in ``[inf, sup]``. """
return dup_count_real_roots(f.rep, f.dom, inf=inf, sup=sup)
def count_complex_roots(f, inf=None, sup=None):
"""Return the number of complex roots of ``f`` in ``[inf, sup]``. """
return dup_count_complex_roots(f.rep, f.dom, inf=inf, sup=sup)
@property
def is_zero(f):
"""Returns ``True`` if ``f`` is a zero polynomial. """
return dmp_zero_p(f.rep, f.lev)
@property
def is_one(f):
"""Returns ``True`` if ``f`` is a unit polynomial. """
return dmp_one_p(f.rep, f.lev, f.dom)
@property
def is_ground(f):
"""Returns ``True`` if ``f`` is an element of the ground domain. """
return dmp_ground_p(f.rep, None, f.lev)
@property
def is_sqf(f):
"""Returns ``True`` if ``f`` is a square-free polynomial. """
return dmp_sqf_p(f.rep, f.lev, f.dom)
@property
def is_monic(f):
"""Returns ``True`` if the leading coefficient of ``f`` is one. """
return f.dom.is_one(dmp_ground_LC(f.rep, f.lev, f.dom))
@property
def is_primitive(f):
"""Returns ``True`` if the GCD of the coefficients of ``f`` is one. """
return f.dom.is_one(dmp_ground_content(f.rep, f.lev, f.dom))
@property
def is_linear(f):
"""Returns ``True`` if ``f`` is linear in all its variables. """
return all(sum(monom) <= 1 for monom in dmp_to_dict(f.rep, f.lev, f.dom).keys())
@property
def is_quadratic(f):
"""Returns ``True`` if ``f`` is quadratic in all its variables. """
return all(sum(monom) <= 2 for monom in dmp_to_dict(f.rep, f.lev, f.dom).keys())
@property
def is_monomial(f):
"""Returns ``True`` if ``f`` is zero or has only one term. """
return len(f.to_dict()) <= 1
@property
def is_homogeneous(f):
"""Returns ``True`` if ``f`` is a homogeneous polynomial. """
return f.homogeneous_order() is not None
@property
def is_irreducible(f):
"""Returns ``True`` if ``f`` has no factors over its domain. """
return dmp_irreducible_p(f.rep, f.lev, f.dom)
@property
def is_cyclotomic(f):
"""Returns ``True`` if ``f`` is a cyclotomic polynomial. """
if not f.lev:
return dup_cyclotomic_p(f.rep, f.dom)
else:
return False
def __abs__(f):
return f.abs()
def __neg__(f):
return f.neg()
def __add__(f, g):
if not isinstance(g, DMP):
try:
g = f.per(dmp_ground(f.dom.convert(g), f.lev))
except TypeError:
return NotImplemented
except (CoercionFailed, NotImplementedError):
if f.ring is not None:
try:
g = f.ring.convert(g)
except (CoercionFailed, NotImplementedError):
return NotImplemented
return f.add(g)
def __radd__(f, g):
return f.__add__(g)
def __sub__(f, g):
if not isinstance(g, DMP):
try:
g = f.per(dmp_ground(f.dom.convert(g), f.lev))
except TypeError:
return NotImplemented
except (CoercionFailed, NotImplementedError):
if f.ring is not None:
try:
g = f.ring.convert(g)
except (CoercionFailed, NotImplementedError):
return NotImplemented
return f.sub(g)
def __rsub__(f, g):
return (-f).__add__(g)
def __mul__(f, g):
if isinstance(g, DMP):
return f.mul(g)
else:
try:
return f.mul_ground(g)
except TypeError:
return NotImplemented
except (CoercionFailed, NotImplementedError):
if f.ring is not None:
try:
return f.mul(f.ring.convert(g))
except (CoercionFailed, NotImplementedError):
pass
return NotImplemented
def __div__(f, g):
if isinstance(g, DMP):
return f.exquo(g)
else:
try:
return f.mul_ground(g)
except TypeError:
return NotImplemented
except (CoercionFailed, NotImplementedError):
if f.ring is not None:
try:
return f.exquo(f.ring.convert(g))
except (CoercionFailed, NotImplementedError):
pass
return NotImplemented
def __rdiv__(f, g):
if isinstance(g, DMP):
return g.exquo(f)
elif f.ring is not None:
try:
return f.ring.convert(g).exquo(f)
except (CoercionFailed, NotImplementedError):
pass
return NotImplemented
__truediv__ = __div__
__rtruediv__ = __rdiv__
def __rmul__(f, g):
return f.__mul__(g)
def __pow__(f, n):
return f.pow(n)
def __divmod__(f, g):
return f.div(g)
def __mod__(f, g):
return f.rem(g)
def __floordiv__(f, g):
if isinstance(g, DMP):
return f.quo(g)
else:
try:
return f.quo_ground(g)
except TypeError:
return NotImplemented
def __eq__(f, g):
try:
_, _, _, F, G = f.unify(g)
if f.lev == g.lev:
return F == G
except UnificationFailed:
pass
return False
def __ne__(f, g):
return not f == g
def eq(f, g, strict=False):
if not strict:
return f == g
else:
return f._strict_eq(g)
def ne(f, g, strict=False):
return not f.eq(g, strict=strict)
def _strict_eq(f, g):
return isinstance(g, f.__class__) and f.lev == g.lev \
and f.dom == g.dom \
and f.rep == g.rep
def __lt__(f, g):
_, _, _, F, G = f.unify(g)
return F < G
def __le__(f, g):
_, _, _, F, G = f.unify(g)
return F <= G
def __gt__(f, g):
_, _, _, F, G = f.unify(g)
return F > G
def __ge__(f, g):
_, _, _, F, G = f.unify(g)
return F >= G
def __nonzero__(f):
return not dmp_zero_p(f.rep, f.lev)
__bool__ = __nonzero__
def init_normal_DMF(num, den, lev, dom):
return DMF(dmp_normal(num, lev, dom),
dmp_normal(den, lev, dom), dom, lev)
class DMF(PicklableWithSlots, CantSympify):
"""Dense Multivariate Fractions over `K`. """
__slots__ = ('num', 'den', 'lev', 'dom', 'ring')
def __init__(self, rep, dom, lev=None, ring=None):
num, den, lev = self._parse(rep, dom, lev)
num, den = dmp_cancel(num, den, lev, dom)
self.num = num
self.den = den
self.lev = lev
self.dom = dom
self.ring = ring
@classmethod
def new(cls, rep, dom, lev=None, ring=None):
num, den, lev = cls._parse(rep, dom, lev)
obj = object.__new__(cls)
obj.num = num
obj.den = den
obj.lev = lev
obj.dom = dom
obj.ring = ring
return obj
@classmethod
def _parse(cls, rep, dom, lev=None):
if type(rep) is tuple:
num, den = rep
if lev is not None:
if type(num) is dict:
num = dmp_from_dict(num, lev, dom)
if type(den) is dict:
den = dmp_from_dict(den, lev, dom)
else:
num, num_lev = dmp_validate(num)
den, den_lev = dmp_validate(den)
if num_lev == den_lev:
lev = num_lev
else:
raise ValueError('inconsistent number of levels')
if dmp_zero_p(den, lev):
raise ZeroDivisionError('fraction denominator')
if dmp_zero_p(num, lev):
den = dmp_one(lev, dom)
else:
if dmp_negative_p(den, lev, dom):
num = dmp_neg(num, lev, dom)
den = dmp_neg(den, lev, dom)
else:
num = rep
if lev is not None:
if type(num) is dict:
num = dmp_from_dict(num, lev, dom)
elif type(num) is not list:
num = dmp_ground(dom.convert(num), lev)
else:
num, lev = dmp_validate(num)
den = dmp_one(lev, dom)
return num, den, lev
def __repr__(f):
return "%s((%s, %s), %s, %s)" % (f.__class__.__name__, f.num, f.den,
f.dom, f.ring)
def __hash__(f):
return hash((f.__class__.__name__, dmp_to_tuple(f.num, f.lev),
dmp_to_tuple(f.den, f.lev), f.lev, f.dom, f.ring))
def poly_unify(f, g):
"""Unify a multivariate fraction and a polynomial. """
if not isinstance(g, DMP) or f.lev != g.lev:
raise UnificationFailed("can't unify %s with %s" % (f, g))
if f.dom == g.dom and f.ring == g.ring:
return (f.lev, f.dom, f.per, (f.num, f.den), g.rep)
else:
lev, dom = f.lev, f.dom.unify(g.dom)
ring = f.ring
if g.ring is not None:
if ring is not None:
ring = ring.unify(g.ring)
else:
ring = g.ring
F = (dmp_convert(f.num, lev, f.dom, dom),
dmp_convert(f.den, lev, f.dom, dom))
G = dmp_convert(g.rep, lev, g.dom, dom)
def per(num, den, cancel=True, kill=False, lev=lev):
if kill:
if not lev:
return num/den
else:
lev = lev - 1
if cancel:
num, den = dmp_cancel(num, den, lev, dom)
return f.__class__.new((num, den), dom, lev, ring=ring)
return lev, dom, per, F, G
def frac_unify(f, g):
"""Unify representations of two multivariate fractions. """
if not isinstance(g, DMF) or f.lev != g.lev:
raise UnificationFailed("can't unify %s with %s" % (f, g))
if f.dom == g.dom and f.ring == g.ring:
return (f.lev, f.dom, f.per, (f.num, f.den),
(g.num, g.den))
else:
lev, dom = f.lev, f.dom.unify(g.dom)
ring = f.ring
if g.ring is not None:
if ring is not None:
ring = ring.unify(g.ring)
else:
ring = g.ring
F = (dmp_convert(f.num, lev, f.dom, dom),
dmp_convert(f.den, lev, f.dom, dom))
G = (dmp_convert(g.num, lev, g.dom, dom),
dmp_convert(g.den, lev, g.dom, dom))
def per(num, den, cancel=True, kill=False, lev=lev):
if kill:
if not lev:
return num/den
else:
lev = lev - 1
if cancel:
num, den = dmp_cancel(num, den, lev, dom)
return f.__class__.new((num, den), dom, lev, ring=ring)
return lev, dom, per, F, G
def per(f, num, den, cancel=True, kill=False, ring=None):
"""Create a DMF out of the given representation. """
lev, dom = f.lev, f.dom
if kill:
if not lev:
return num/den
else:
lev -= 1
if cancel:
num, den = dmp_cancel(num, den, lev, dom)
if ring is None:
ring = f.ring
return f.__class__.new((num, den), dom, lev, ring=ring)
def half_per(f, rep, kill=False):
"""Create a DMP out of the given representation. """
lev = f.lev
if kill:
if not lev:
return rep
else:
lev -= 1
return DMP(rep, f.dom, lev)
@classmethod
def zero(cls, lev, dom, ring=None):
return cls.new(0, dom, lev, ring=ring)
@classmethod
def one(cls, lev, dom, ring=None):
return cls.new(1, dom, lev, ring=ring)
def numer(f):
"""Returns the numerator of ``f``. """
return f.half_per(f.num)
def denom(f):
"""Returns the denominator of ``f``. """
return f.half_per(f.den)
def cancel(f):
"""Remove common factors from ``f.num`` and ``f.den``. """
return f.per(f.num, f.den)
def neg(f):
"""Negate all coefficients in ``f``. """
return f.per(dmp_neg(f.num, f.lev, f.dom), f.den, cancel=False)
def add(f, g):
"""Add two multivariate fractions ``f`` and ``g``. """
if isinstance(g, DMP):
lev, dom, per, (F_num, F_den), G = f.poly_unify(g)
num, den = dmp_add_mul(F_num, F_den, G, lev, dom), F_den
else:
lev, dom, per, F, G = f.frac_unify(g)
(F_num, F_den), (G_num, G_den) = F, G
num = dmp_add(dmp_mul(F_num, G_den, lev, dom),
dmp_mul(F_den, G_num, lev, dom), lev, dom)
den = dmp_mul(F_den, G_den, lev, dom)
return per(num, den)
def sub(f, g):
"""Subtract two multivariate fractions ``f`` and ``g``. """
if isinstance(g, DMP):
lev, dom, per, (F_num, F_den), G = f.poly_unify(g)
num, den = dmp_sub_mul(F_num, F_den, G, lev, dom), F_den
else:
lev, dom, per, F, G = f.frac_unify(g)
(F_num, F_den), (G_num, G_den) = F, G
num = dmp_sub(dmp_mul(F_num, G_den, lev, dom),
dmp_mul(F_den, G_num, lev, dom), lev, dom)
den = dmp_mul(F_den, G_den, lev, dom)
return per(num, den)
def mul(f, g):
"""Multiply two multivariate fractions ``f`` and ``g``. """
if isinstance(g, DMP):
lev, dom, per, (F_num, F_den), G = f.poly_unify(g)
num, den = dmp_mul(F_num, G, lev, dom), F_den
else:
lev, dom, per, F, G = f.frac_unify(g)
(F_num, F_den), (G_num, G_den) = F, G
num = dmp_mul(F_num, G_num, lev, dom)
den = dmp_mul(F_den, G_den, lev, dom)
return per(num, den)
def pow(f, n):
"""Raise ``f`` to a non-negative power ``n``. """
if isinstance(n, int):
return f.per(dmp_pow(f.num, n, f.lev, f.dom),
dmp_pow(f.den, n, f.lev, f.dom), cancel=False)
else:
raise TypeError("``int`` expected, got %s" % type(n))
def quo(f, g):
"""Computes quotient of fractions ``f`` and ``g``. """
if isinstance(g, DMP):
lev, dom, per, (F_num, F_den), G = f.poly_unify(g)
num, den = F_num, dmp_mul(F_den, G, lev, dom)
else:
lev, dom, per, F, G = f.frac_unify(g)
(F_num, F_den), (G_num, G_den) = F, G
num = dmp_mul(F_num, G_den, lev, dom)
den = dmp_mul(F_den, G_num, lev, dom)
res = per(num, den)
if f.ring is not None and res not in f.ring:
from sympy.polys.polyerrors import ExactQuotientFailed
raise ExactQuotientFailed(f, g, f.ring)
return res
exquo = quo
def invert(f, check=True):
"""Computes inverse of a fraction ``f``. """
if check and f.ring is not None and not f.ring.is_unit(f):
raise NotReversible(f, f.ring)
res = f.per(f.den, f.num, cancel=False)
return res
@property
def is_zero(f):
"""Returns ``True`` if ``f`` is a zero fraction. """
return dmp_zero_p(f.num, f.lev)
@property
def is_one(f):
"""Returns ``True`` if ``f`` is a unit fraction. """
return dmp_one_p(f.num, f.lev, f.dom) and \
dmp_one_p(f.den, f.lev, f.dom)
def __neg__(f):
return f.neg()
def __add__(f, g):
if isinstance(g, (DMP, DMF)):
return f.add(g)
try:
return f.add(f.half_per(g))
except TypeError:
return NotImplemented
except (CoercionFailed, NotImplementedError):
if f.ring is not None:
try:
return f.add(f.ring.convert(g))
except (CoercionFailed, NotImplementedError):
pass
return NotImplemented
def __radd__(f, g):
return f.__add__(g)
def __sub__(f, g):
if isinstance(g, (DMP, DMF)):
return f.sub(g)
try:
return f.sub(f.half_per(g))
except TypeError:
return NotImplemented
except (CoercionFailed, NotImplementedError):
if f.ring is not None:
try:
return f.sub(f.ring.convert(g))
except (CoercionFailed, NotImplementedError):
pass
return NotImplemented
def __rsub__(f, g):
return (-f).__add__(g)
def __mul__(f, g):
if isinstance(g, (DMP, DMF)):
return f.mul(g)
try:
return f.mul(f.half_per(g))
except TypeError:
return NotImplemented
except (CoercionFailed, NotImplementedError):
if f.ring is not None:
try:
return f.mul(f.ring.convert(g))
except (CoercionFailed, NotImplementedError):
pass
return NotImplemented
def __rmul__(f, g):
return f.__mul__(g)
def __pow__(f, n):
return f.pow(n)
def __div__(f, g):
if isinstance(g, (DMP, DMF)):
return f.quo(g)
try:
return f.quo(f.half_per(g))
except TypeError:
return NotImplemented
except (CoercionFailed, NotImplementedError):
if f.ring is not None:
try:
return f.quo(f.ring.convert(g))
except (CoercionFailed, NotImplementedError):
pass
return NotImplemented
def __rdiv__(self, g):
r = self.invert(check=False)*g
if self.ring and r not in self.ring:
from sympy.polys.polyerrors import ExactQuotientFailed
raise ExactQuotientFailed(g, self, self.ring)
return r
__truediv__ = __div__
__rtruediv__ = __rdiv__
def __eq__(f, g):
try:
if isinstance(g, DMP):
_, _, _, (F_num, F_den), G = f.poly_unify(g)
if f.lev == g.lev:
return dmp_one_p(F_den, f.lev, f.dom) and F_num == G
else:
_, _, _, F, G = f.frac_unify(g)
if f.lev == g.lev:
return F == G
except UnificationFailed:
pass
return False
def __ne__(f, g):
try:
if isinstance(g, DMP):
_, _, _, (F_num, F_den), G = f.poly_unify(g)
if f.lev == g.lev:
return not (dmp_one_p(F_den, f.lev, f.dom) and F_num == G)
else:
_, _, _, F, G = f.frac_unify(g)
if f.lev == g.lev:
return F != G
except UnificationFailed:
pass
return True
def __lt__(f, g):
_, _, _, F, G = f.frac_unify(g)
return F < G
def __le__(f, g):
_, _, _, F, G = f.frac_unify(g)
return F <= G
def __gt__(f, g):
_, _, _, F, G = f.frac_unify(g)
return F > G
def __ge__(f, g):
_, _, _, F, G = f.frac_unify(g)
return F >= G
def __nonzero__(f):
return not dmp_zero_p(f.num, f.lev)
__bool__ = __nonzero__
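# Illustrative sketch (not part of the original module): DMF cancels common
# factors between numerator and denominator on construction.
def _dmf_cancellation_sketch():
    from sympy.polys.domains import ZZ
    f = DMF(([ZZ(2), ZZ(2)], [ZZ(4)]), ZZ)                    # (2*x + 2)/4
    assert f.num == [ZZ(1), ZZ(1)] and f.den == [ZZ(2)]       # reduced to (x + 1)/2
    assert DMF(([ZZ(1), ZZ(0)], [ZZ(1), ZZ(0)]), ZZ).is_one   # x/x cancels to 1
    return f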
def init_normal_ANP(rep, mod, dom):
return ANP(dup_normal(rep, dom),
dup_normal(mod, dom), dom)
class ANP(PicklableWithSlots, CantSympify):
"""Dense Algebraic Number Polynomials over a field. """
__slots__ = ('rep', 'mod', 'dom')
def __init__(self, rep, mod, dom):
if type(rep) is dict:
self.rep = dup_from_dict(rep, dom)
else:
if type(rep) is not list:
rep = [dom.convert(rep)]
self.rep = dup_strip(rep)
if isinstance(mod, DMP):
self.mod = mod.rep
else:
if type(mod) is dict:
self.mod = dup_from_dict(mod, dom)
else:
self.mod = dup_strip(mod)
self.dom = dom
def __repr__(f):
return "%s(%s, %s, %s)" % (f.__class__.__name__, f.rep, f.mod, f.dom)
def __hash__(f):
return hash((f.__class__.__name__, f.to_tuple(), dmp_to_tuple(f.mod, 0), f.dom))
def unify(f, g):
"""Unify representations of two algebraic numbers. """
if not isinstance(g, ANP) or f.mod != g.mod:
raise UnificationFailed("can't unify %s with %s" % (f, g))
if f.dom == g.dom:
return f.dom, f.per, f.rep, g.rep, f.mod
else:
dom = f.dom.unify(g.dom)
F = dup_convert(f.rep, f.dom, dom)
G = dup_convert(g.rep, g.dom, dom)
if dom != f.dom and dom != g.dom:
mod = dup_convert(f.mod, f.dom, dom)
else:
if dom == f.dom:
mod = f.mod
else:
mod = g.mod
per = lambda rep: ANP(rep, mod, dom)
return dom, per, F, G, mod
def per(f, rep, mod=None, dom=None):
return ANP(rep, mod or f.mod, dom or f.dom)
@classmethod
def zero(cls, mod, dom):
return ANP(0, mod, dom)
@classmethod
def one(cls, mod, dom):
return ANP(1, mod, dom)
def to_dict(f):
"""Convert ``f`` to a dict representation with native coefficients. """
return dmp_to_dict(f.rep, 0, f.dom)
def to_sympy_dict(f):
"""Convert ``f`` to a dict representation with SymPy coefficients. """
rep = dmp_to_dict(f.rep, 0, f.dom)
for k, v in rep.items():
rep[k] = f.dom.to_sympy(v)
return rep
def to_list(f):
"""Convert ``f`` to a list representation with native coefficients. """
return f.rep
def to_sympy_list(f):
"""Convert ``f`` to a list representation with SymPy coefficients. """
return [ f.dom.to_sympy(c) for c in f.rep ]
def to_tuple(f):
"""
Convert ``f`` to a tuple representation with native coefficients.
This is needed for hashing.
"""
return dmp_to_tuple(f.rep, 0)
@classmethod
def from_list(cls, rep, mod, dom):
return ANP(dup_strip(list(map(dom.convert, rep))), mod, dom)
def neg(f):
return f.per(dup_neg(f.rep, f.dom))
def add(f, g):
dom, per, F, G, mod = f.unify(g)
return per(dup_add(F, G, dom))
def sub(f, g):
dom, per, F, G, mod = f.unify(g)
return per(dup_sub(F, G, dom))
def mul(f, g):
dom, per, F, G, mod = f.unify(g)
return per(dup_rem(dup_mul(F, G, dom), mod, dom))
def pow(f, n):
"""Raise ``f`` to a non-negative power ``n``. """
if isinstance(n, int):
if n < 0:
F, n = dup_invert(f.rep, f.mod, f.dom), -n
else:
F = f.rep
return f.per(dup_rem(dup_pow(F, n, f.dom), f.mod, f.dom))
else:
raise TypeError("``int`` expected, got %s" % type(n))
def div(f, g):
dom, per, F, G, mod = f.unify(g)
return (per(dup_rem(dup_mul(F, dup_invert(G, mod, dom), dom), mod, dom)), f.zero(mod, dom))
def rem(f, g):
dom, _, _, G, mod = f.unify(g)
s, h = dup_half_gcdex(G, mod, dom)
if h == [dom.one]:
return f.zero(mod, dom)
else:
raise NotInvertible("zero divisor")
def quo(f, g):
dom, per, F, G, mod = f.unify(g)
return per(dup_rem(dup_mul(F, dup_invert(G, mod, dom), dom), mod, dom))
exquo = quo
def LC(f):
"""Returns the leading coefficient of ``f``. """
return dup_LC(f.rep, f.dom)
def TC(f):
"""Returns the trailing coefficient of ``f``. """
return dup_TC(f.rep, f.dom)
@property
def is_zero(f):
"""Returns ``True`` if ``f`` is a zero algebraic number. """
return not f
@property
def is_one(f):
"""Returns ``True`` if ``f`` is a unit algebraic number. """
return f.rep == [f.dom.one]
@property
def is_ground(f):
"""Returns ``True`` if ``f`` is an element of the ground domain. """
return not f.rep or len(f.rep) == 1
def __neg__(f):
return f.neg()
def __add__(f, g):
if isinstance(g, ANP):
return f.add(g)
else:
try:
return f.add(f.per(g))
except (CoercionFailed, TypeError):
return NotImplemented
def __radd__(f, g):
return f.__add__(g)
def __sub__(f, g):
if isinstance(g, ANP):
return f.sub(g)
else:
try:
return f.sub(f.per(g))
except (CoercionFailed, TypeError):
return NotImplemented
def __rsub__(f, g):
return (-f).__add__(g)
def __mul__(f, g):
if isinstance(g, ANP):
return f.mul(g)
else:
try:
return f.mul(f.per(g))
except (CoercionFailed, TypeError):
return NotImplemented
def __rmul__(f, g):
return f.__mul__(g)
def __pow__(f, n):
return f.pow(n)
def __divmod__(f, g):
return f.div(g)
def __mod__(f, g):
return f.rem(g)
def __div__(f, g):
if isinstance(g, ANP):
return f.quo(g)
else:
try:
return f.quo(f.per(g))
except (CoercionFailed, TypeError):
return NotImplemented
__truediv__ = __div__
def __eq__(f, g):
try:
_, _, F, G, _ = f.unify(g)
return F == G
except UnificationFailed:
return False
def __ne__(f, g):
try:
_, _, F, G, _ = f.unify(g)
return F != G
except UnificationFailed:
return True
def __lt__(f, g):
_, _, F, G, _ = f.unify(g)
return F < G
def __le__(f, g):
_, _, F, G, _ = f.unify(g)
return F <= G
def __gt__(f, g):
_, _, F, G, _ = f.unify(g)
return F > G
def __ge__(f, g):
_, _, F, G, _ = f.unify(g)
return F >= G
def __nonzero__(f):
return bool(f.rep)
__bool__ = __nonzero__
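# Illustrative sketch (not part of the original module): arithmetic in the
# algebraic field QQ(sqrt(2)), where ANP elements are reduced modulo the
# minimal polynomial x**2 - 2 given in dense list form.
def _anp_sqrt2_sketch():
    from sympy.polys.domains import QQ
    mod = [QQ(1), QQ(0), QQ(-2)]                    # x**2 - 2
    sqrt2 = ANP([QQ(1), QQ(0)], mod, QQ)            # residue class of x
    assert (sqrt2*sqrt2).rep == [QQ(2)]             # sqrt(2)**2 == 2
    assert (sqrt2**(-1)).rep == [QQ(1, 2), QQ(0)]   # 1/sqrt(2) == sqrt(2)/2
    return sqrt2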
|
6545882b79159cde3a0f15be5ad45aa150e5d016110fa226b4e46d00f5860e93 | """Implementation of RootOf class and related tools. """
from __future__ import print_function, division
from sympy.core import (S, Expr, Integer, Float, I, oo, Add, Lambda,
symbols, sympify, Rational, Dummy)
from sympy.core.cache import cacheit
from sympy.core.compatibility import ordered
from sympy.polys.domains import QQ
from sympy.polys.polyerrors import (
MultivariatePolynomialError,
GeneratorsNeeded,
PolynomialError,
DomainError)
from sympy.polys.polyfuncs import symmetrize, viete
from sympy.polys.polyroots import (
roots_linear, roots_quadratic, roots_binomial,
preprocess_roots, roots)
from sympy.polys.polytools import Poly, PurePoly, factor
from sympy.polys.rationaltools import together
from sympy.polys.rootisolation import (
dup_isolate_complex_roots_sqf,
dup_isolate_real_roots_sqf)
from sympy.utilities import lambdify, public, sift
from mpmath import mpf, mpc, findroot, workprec
from mpmath.libmp.libmpf import dps_to_prec, prec_to_dps
__all__ = ['CRootOf']
class _pure_key_dict(object):
"""A minimal dictionary that makes sure that the key is a
univariate PurePoly instance.
Examples
========
Only the following actions are guaranteed:
>>> from sympy.polys.rootoftools import _pure_key_dict
>>> from sympy import S, PurePoly
>>> from sympy.abc import x, y
1) creation
>>> P = _pure_key_dict()
2) assignment for a PurePoly or univariate polynomial
>>> P[x] = 1
>>> P[PurePoly(x - y, x)] = 2
3) retrieval based on PurePoly key comparison (use this
instead of the get method)
>>> P[y]
1
4) KeyError when trying to retrieve a nonexisting key
>>> P[y + 1]
Traceback (most recent call last):
...
KeyError: PurePoly(y + 1, y, domain='ZZ')
5) ability to query with ``in``
>>> x + 1 in P
False
    NOTE: this is *not* a dictionary. It is a very basic object
for internal use that makes sure to always address its cache
via PurePoly instances. It does not, for example, implement
``get`` or ``setdefault``.
"""
def __init__(self):
self._dict = {}
def __getitem__(self, k):
if not isinstance(k, PurePoly):
if not (isinstance(k, Expr) and len(k.free_symbols) == 1):
raise KeyError
k = PurePoly(k, expand=False)
return self._dict[k]
def __setitem__(self, k, v):
if not isinstance(k, PurePoly):
if not (isinstance(k, Expr) and len(k.free_symbols) == 1):
raise ValueError('expecting univariate expression')
k = PurePoly(k, expand=False)
self._dict[k] = v
def __contains__(self, k):
try:
self[k]
return True
except KeyError:
return False
_reals_cache = _pure_key_dict()
_complexes_cache = _pure_key_dict()
def _pure_factors(poly):
_, factors = poly.factor_list()
return [(PurePoly(f, expand=False), m) for f, m in factors]
def _imag_count_of_factor(f):
"""Return the number of imaginary roots for irreducible
univariate polynomial ``f``.
"""
terms = [(i, j) for (i,), j in f.terms()]
if any(i % 2 for i, j in terms):
return 0
# update signs
even = [(i, I**i*j) for i, j in terms]
even = Poly.from_dict(dict(even), Dummy('x'))
return int(even.count_roots(-oo, oo))
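# Illustrative check (not part of the original module): x**2 + 2 has two
# purely imaginary roots, while the irreducible x**2 - 2 has two real roots
# and hence an imaginary count of zero.
def _imag_count_sketch():
    from sympy.abc import x
    assert _imag_count_of_factor(PurePoly(x**2 + 2)) == 2
    assert _imag_count_of_factor(PurePoly(x**2 - 2)) == 0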
@public
def rootof(f, x, index=None, radicals=True, expand=True):
"""An indexed root of a univariate polynomial.
Returns either a :obj:`ComplexRootOf` object or an explicit
expression involving radicals.
Parameters
==========
f : Expr
Univariate polynomial.
x : Symbol, optional
Generator for ``f``.
index : int or Integer
radicals : bool
Return a radical expression if possible.
expand : bool
Expand ``f``.
"""
return CRootOf(f, x, index=index, radicals=radicals, expand=expand)
@public
class RootOf(Expr):
"""Represents a root of a univariate polynomial.
Base class for roots of different kinds of polynomials.
Only complex roots are currently supported.
"""
__slots__ = ('poly',)
def __new__(cls, f, x, index=None, radicals=True, expand=True):
"""Construct a new ``CRootOf`` object for ``k``-th root of ``f``."""
return rootof(f, x, index=index, radicals=radicals, expand=expand)
@public
class ComplexRootOf(RootOf):
"""Represents an indexed complex root of a polynomial.
Roots of a univariate polynomial separated into disjoint
real or complex intervals and indexed in a fixed order.
Currently only rational coefficients are allowed.
Can be imported as ``CRootOf``. To avoid confusion, the
generator must be a Symbol.
Examples
========
>>> from sympy import CRootOf, rootof
>>> from sympy.abc import x
CRootOf is a way to reference a particular root of a
polynomial. If there is a rational root, it will be returned:
>>> CRootOf.clear_cache() # for doctest reproducibility
>>> CRootOf(x**2 - 4, 0)
-2
Whether roots involving radicals are returned or not
depends on whether the ``radicals`` flag is true (which is
set to True with rootof):
>>> CRootOf(x**2 - 3, 0)
CRootOf(x**2 - 3, 0)
>>> CRootOf(x**2 - 3, 0, radicals=True)
-sqrt(3)
>>> rootof(x**2 - 3, 0)
-sqrt(3)
The following cannot be expressed in terms of radicals:
>>> r = rootof(4*x**5 + 16*x**3 + 12*x**2 + 7, 0); r
CRootOf(4*x**5 + 16*x**3 + 12*x**2 + 7, 0)
The root bounds can be seen, however, and they are used by the
evaluation methods to get numerical approximations for the root.
>>> interval = r._get_interval(); interval
(-1, 0)
>>> r.evalf(2)
-0.98
The evalf method refines the width of the root bounds until it
guarantees that any decimal approximation within those bounds
will satisfy the desired precision. It then stores the refined
interval so subsequent requests at or below the requested
precision will not have to recompute the root bounds and will
return very quickly.
Before evaluation above, the interval was
>>> interval
(-1, 0)
After evaluation it is now
>>> r._get_interval() # doctest: +SKIP
(-165/169, -206/211)
To reset all intervals for a given polynomial, the :meth:`_reset` method
can be called from any CRootOf instance of the polynomial:
>>> r._reset()
>>> r._get_interval()
(-1, 0)
    The :meth:`eval_approx` method will also find the root to a given
    precision, but the interval is not modified unless the search
    for the root fails to converge within the root bounds; the
    secant method is used to find the root. (The ``evalf``
    method uses bisection and will always update the interval.)
>>> r.eval_approx(2)
-0.98
The interval needed to be slightly updated to find that root:
>>> r._get_interval()
(-1, -1/2)
    The ``eval_rational`` method will compute a rational approximation
of the root to the desired accuracy or precision.
>>> r.eval_rational(n=2)
-69629/71318
>>> t = CRootOf(x**3 + 10*x + 1, 1)
>>> t.eval_rational(1e-1)
15/256 - 805*I/256
>>> t.eval_rational(1e-1, 1e-4)
3275/65536 - 414645*I/131072
>>> t.eval_rational(1e-4, 1e-4)
6545/131072 - 414645*I/131072
>>> t.eval_rational(n=2)
104755/2097152 - 6634255*I/2097152
Notes
=====
    Although a PurePoly can be constructed from a non-symbol generator,
    RootOf instances of non-symbols are disallowed to avoid confusion
over what root is being represented.
>>> from sympy import exp, PurePoly
>>> PurePoly(x) == PurePoly(exp(x))
True
>>> CRootOf(x - 1, 0)
1
>>> CRootOf(exp(x) - 1, 0) # would correspond to x == 0
Traceback (most recent call last):
...
sympy.polys.polyerrors.PolynomialError: generator must be a Symbol
See Also
========
eval_approx
eval_rational
"""
__slots__ = ('index',)
is_complex = True
is_number = True
is_finite = True
def __new__(cls, f, x, index=None, radicals=False, expand=True):
""" Construct an indexed complex root of a polynomial.
See ``rootof`` for the parameters.
The default value of ``radicals`` is ``False`` to satisfy
        ``eval(srepr(expr)) == expr``.
"""
x = sympify(x)
if index is None and x.is_Integer:
x, index = None, x
else:
index = sympify(index)
if index is not None and index.is_Integer:
index = int(index)
else:
raise ValueError("expected an integer root index, got %s" % index)
poly = PurePoly(f, x, greedy=False, expand=expand)
if not poly.is_univariate:
raise PolynomialError("only univariate polynomials are allowed")
if not poly.gen.is_Symbol:
# PurePoly(sin(x) + 1) == PurePoly(x + 1) but the roots of
# x for each are not the same: issue 8617
raise PolynomialError("generator must be a Symbol")
degree = poly.degree()
if degree <= 0:
raise PolynomialError("can't construct CRootOf object for %s" % f)
if index < -degree or index >= degree:
raise IndexError("root index out of [%d, %d] range, got %d" %
(-degree, degree - 1, index))
elif index < 0:
index += degree
dom = poly.get_domain()
if not dom.is_Exact:
poly = poly.to_exact()
roots = cls._roots_trivial(poly, radicals)
if roots is not None:
return roots[index]
coeff, poly = preprocess_roots(poly)
dom = poly.get_domain()
if not dom.is_ZZ:
raise NotImplementedError("CRootOf is not supported over %s" % dom)
root = cls._indexed_root(poly, index)
return coeff * cls._postprocess_root(root, radicals)
@classmethod
def _new(cls, poly, index):
"""Construct new ``CRootOf`` object from raw data. """
obj = Expr.__new__(cls)
obj.poly = PurePoly(poly)
obj.index = index
try:
_reals_cache[obj.poly] = _reals_cache[poly]
_complexes_cache[obj.poly] = _complexes_cache[poly]
except KeyError:
pass
return obj
def _hashable_content(self):
return (self.poly, self.index)
@property
def expr(self):
return self.poly.as_expr()
@property
def args(self):
return (self.expr, Integer(self.index))
@property
def free_symbols(self):
# CRootOf currently only works with univariate expressions
# whose poly attribute should be a PurePoly with no free
# symbols
return set()
def _eval_is_real(self):
"""Return ``True`` if the root is real. """
return self.index < len(_reals_cache[self.poly])
def _eval_is_imaginary(self):
"""Return ``True`` if the root is imaginary. """
if self.index >= len(_reals_cache[self.poly]):
ivl = self._get_interval()
return ivl.ax*ivl.bx <= 0 # all others are on one side or the other
return False # XXX is this necessary?
@classmethod
def real_roots(cls, poly, radicals=True):
"""Get real roots of a polynomial. """
return cls._get_roots("_real_roots", poly, radicals)
@classmethod
def all_roots(cls, poly, radicals=True):
"""Get real and complex roots of a polynomial. """
return cls._get_roots("_all_roots", poly, radicals)
@classmethod
def _get_reals_sqf(cls, currentfactor, use_cache=True):
"""Get real root isolating intervals for a square-free factor."""
if use_cache and currentfactor in _reals_cache:
real_part = _reals_cache[currentfactor]
else:
_reals_cache[currentfactor] = real_part = \
dup_isolate_real_roots_sqf(
currentfactor.rep.rep, currentfactor.rep.dom, blackbox=True)
return real_part
@classmethod
def _get_complexes_sqf(cls, currentfactor, use_cache=True):
"""Get complex root isolating intervals for a square-free factor."""
if use_cache and currentfactor in _complexes_cache:
complex_part = _complexes_cache[currentfactor]
else:
_complexes_cache[currentfactor] = complex_part = \
dup_isolate_complex_roots_sqf(
currentfactor.rep.rep, currentfactor.rep.dom, blackbox=True)
return complex_part
@classmethod
def _get_reals(cls, factors, use_cache=True):
"""Compute real root isolating intervals for a list of factors. """
reals = []
for currentfactor, k in factors:
try:
if not use_cache:
raise KeyError
r = _reals_cache[currentfactor]
reals.extend([(i, currentfactor, k) for i in r])
except KeyError:
real_part = cls._get_reals_sqf(currentfactor, use_cache)
new = [(root, currentfactor, k) for root in real_part]
reals.extend(new)
reals = cls._reals_sorted(reals)
return reals
@classmethod
def _get_complexes(cls, factors, use_cache=True):
"""Compute complex root isolating intervals for a list of factors. """
complexes = []
for currentfactor, k in ordered(factors):
try:
if not use_cache:
raise KeyError
c = _complexes_cache[currentfactor]
complexes.extend([(i, currentfactor, k) for i in c])
except KeyError:
complex_part = cls._get_complexes_sqf(currentfactor, use_cache)
new = [(root, currentfactor, k) for root in complex_part]
complexes.extend(new)
complexes = cls._complexes_sorted(complexes)
return complexes
@classmethod
def _reals_sorted(cls, reals):
"""Make real isolating intervals disjoint and sort roots. """
cache = {}
for i, (u, f, k) in enumerate(reals):
for j, (v, g, m) in enumerate(reals[i + 1:]):
u, v = u.refine_disjoint(v)
reals[i + j + 1] = (v, g, m)
reals[i] = (u, f, k)
reals = sorted(reals, key=lambda r: r[0].a)
for root, currentfactor, _ in reals:
if currentfactor in cache:
cache[currentfactor].append(root)
else:
cache[currentfactor] = [root]
for currentfactor, root in cache.items():
_reals_cache[currentfactor] = root
return reals
@classmethod
def _refine_imaginary(cls, complexes):
sifted = sift(complexes, lambda c: c[1])
complexes = []
for f in ordered(sifted):
nimag = _imag_count_of_factor(f)
if nimag == 0:
# refine until xbounds are neg or pos
for u, f, k in sifted[f]:
while u.ax*u.bx <= 0:
u = u._inner_refine()
complexes.append((u, f, k))
else:
# refine until all but nimag xbounds are neg or pos
potential_imag = list(range(len(sifted[f])))
while True:
assert len(potential_imag) > 1
for i in list(potential_imag):
u, f, k = sifted[f][i]
if u.ax*u.bx > 0:
potential_imag.remove(i)
elif u.ax != u.bx:
u = u._inner_refine()
sifted[f][i] = u, f, k
if len(potential_imag) == nimag:
break
complexes.extend(sifted[f])
return complexes
@classmethod
def _refine_complexes(cls, complexes):
"""return complexes such that no bounding rectangles of non-conjugate
roots would intersect. In addition, assure that neither ay nor by is
0 to guarantee that non-real roots are distinct from real roots in
terms of the y-bounds.
"""
# get the intervals pairwise-disjoint.
# If rectangles were drawn around the coordinates of the bounding
# rectangles, no rectangles would intersect after this procedure.
for i, (u, f, k) in enumerate(complexes):
for j, (v, g, m) in enumerate(complexes[i + 1:]):
u, v = u.refine_disjoint(v)
complexes[i + j + 1] = (v, g, m)
complexes[i] = (u, f, k)
# refine until the x-bounds are unambiguously positive or negative
# for non-imaginary roots
complexes = cls._refine_imaginary(complexes)
# make sure that all y bounds are off the real axis
# and on the same side of the axis
for i, (u, f, k) in enumerate(complexes):
while u.ay*u.by <= 0:
u = u.refine()
complexes[i] = u, f, k
return complexes
@classmethod
def _complexes_sorted(cls, complexes):
"""Make complex isolating intervals disjoint and sort roots. """
complexes = cls._refine_complexes(complexes)
# XXX don't sort until you are sure that it is compatible
# with the indexing method but assert that the desired state
# is not broken
C, F = 0, 1 # location of ComplexInterval and factor
fs = set([i[F] for i in complexes])
for i in range(1, len(complexes)):
if complexes[i][F] != complexes[i - 1][F]:
# if this fails the factors of a root were not
# contiguous because a discontinuity should only
# happen once
fs.remove(complexes[i - 1][F])
for i in range(len(complexes)):
# negative im part (conj=True) comes before
# positive im part (conj=False)
assert complexes[i][C].conj is (i % 2 == 0)
# update cache
cache = {}
# -- collate
for root, currentfactor, _ in complexes:
cache.setdefault(currentfactor, []).append(root)
# -- store
for currentfactor, root in cache.items():
_complexes_cache[currentfactor] = root
return complexes
@classmethod
def _reals_index(cls, reals, index):
"""
Map initial real root index to an index in a factor where
the root belongs.
"""
i = 0
for j, (_, currentfactor, k) in enumerate(reals):
if index < i + k:
poly, index = currentfactor, 0
for _, currentfactor, _ in reals[:j]:
if currentfactor == poly:
index += 1
return poly, index
else:
i += k
@classmethod
def _complexes_index(cls, complexes, index):
"""
Map initial complex root index to an index in a factor where
the root belongs.
"""
i = 0
for j, (_, currentfactor, k) in enumerate(complexes):
if index < i + k:
poly, index = currentfactor, 0
for _, currentfactor, _ in complexes[:j]:
if currentfactor == poly:
index += 1
index += len(_reals_cache[poly])
return poly, index
else:
i += k
@classmethod
def _count_roots(cls, roots):
"""Count the number of real or complex roots with multiplicities."""
return sum([k for _, _, k in roots])
@classmethod
def _indexed_root(cls, poly, index):
"""Get a root of a composite polynomial by index. """
factors = _pure_factors(poly)
reals = cls._get_reals(factors)
reals_count = cls._count_roots(reals)
if index < reals_count:
return cls._reals_index(reals, index)
else:
complexes = cls._get_complexes(factors)
return cls._complexes_index(complexes, index - reals_count)
@classmethod
def _real_roots(cls, poly):
"""Get real roots of a composite polynomial. """
factors = _pure_factors(poly)
reals = cls._get_reals(factors)
reals_count = cls._count_roots(reals)
roots = []
for index in range(0, reals_count):
roots.append(cls._reals_index(reals, index))
return roots
def _reset(self):
"""
Reset all intervals
"""
self._all_roots(self.poly, use_cache=False)
@classmethod
def _all_roots(cls, poly, use_cache=True):
"""Get real and complex roots of a composite polynomial. """
factors = _pure_factors(poly)
reals = cls._get_reals(factors, use_cache=use_cache)
reals_count = cls._count_roots(reals)
roots = []
for index in range(0, reals_count):
roots.append(cls._reals_index(reals, index))
complexes = cls._get_complexes(factors, use_cache=use_cache)
complexes_count = cls._count_roots(complexes)
for index in range(0, complexes_count):
roots.append(cls._complexes_index(complexes, index))
return roots
@classmethod
@cacheit
def _roots_trivial(cls, poly, radicals):
"""Compute roots in linear, quadratic and binomial cases. """
if poly.degree() == 1:
return roots_linear(poly)
if not radicals:
return None
if poly.degree() == 2:
return roots_quadratic(poly)
elif poly.length() == 2 and poly.TC():
return roots_binomial(poly)
else:
return None
@classmethod
def _preprocess_roots(cls, poly):
"""Take heroic measures to make ``poly`` compatible with ``CRootOf``."""
dom = poly.get_domain()
if not dom.is_Exact:
poly = poly.to_exact()
coeff, poly = preprocess_roots(poly)
dom = poly.get_domain()
if not dom.is_ZZ:
raise NotImplementedError(
"sorted roots not supported over %s" % dom)
return coeff, poly
@classmethod
def _postprocess_root(cls, root, radicals):
"""Return the root if it is trivial or a ``CRootOf`` object. """
poly, index = root
roots = cls._roots_trivial(poly, radicals)
if roots is not None:
return roots[index]
else:
return cls._new(poly, index)
@classmethod
def _get_roots(cls, method, poly, radicals):
"""Return postprocessed roots of specified kind. """
if not poly.is_univariate:
raise PolynomialError("only univariate polynomials are allowed")
coeff, poly = cls._preprocess_roots(poly)
roots = []
for root in getattr(cls, method)(poly):
roots.append(coeff*cls._postprocess_root(root, radicals))
return roots
@classmethod
def clear_cache(cls):
"""Reset cache for reals and complexes.
The intervals used to approximate a root instance are updated
as needed. When a request is made to see the intervals, the
most current values are shown. `clear_cache` will reset all
CRootOf instances back to their original state.
See Also
========
_reset
"""
global _reals_cache, _complexes_cache
_reals_cache = _pure_key_dict()
_complexes_cache = _pure_key_dict()
def _get_interval(self):
"""Internal function for retrieving isolation interval from cache. """
if self.is_real:
return _reals_cache[self.poly][self.index]
else:
reals_count = len(_reals_cache[self.poly])
return _complexes_cache[self.poly][self.index - reals_count]
def _set_interval(self, interval):
"""Internal function for updating isolation interval in cache. """
if self.is_real:
_reals_cache[self.poly][self.index] = interval
else:
reals_count = len(_reals_cache[self.poly])
_complexes_cache[self.poly][self.index - reals_count] = interval
def _eval_subs(self, old, new):
# don't allow subs to change anything
return self
def _eval_conjugate(self):
if self.is_real:
return self
expr, i = self.args
return self.func(expr, i + (1 if self._get_interval().conj else -1))
def eval_approx(self, n):
"""Evaluate this complex root to the given precision.
        This uses the secant method, and the root bounds are used both to
        generate an initial guess and to check that the root returned is
        valid. If the method ever converges outside the root bounds, the
        bounds will be made smaller and updated.
"""
prec = dps_to_prec(n)
with workprec(prec):
g = self.poly.gen
if not g.is_Symbol:
d = Dummy('x')
if self.is_imaginary:
d *= I
func = lambdify(d, self.expr.subs(g, d))
else:
expr = self.expr
if self.is_imaginary:
expr = self.expr.subs(g, I*g)
func = lambdify(g, expr)
interval = self._get_interval()
while True:
if self.is_real:
a = mpf(str(interval.a))
b = mpf(str(interval.b))
if a == b:
root = a
break
x0 = mpf(str(interval.center))
x1 = x0 + mpf(str(interval.dx))/4
elif self.is_imaginary:
a = mpf(str(interval.ay))
b = mpf(str(interval.by))
if a == b:
root = mpc(mpf('0'), a)
break
x0 = mpf(str(interval.center[1]))
x1 = x0 + mpf(str(interval.dy))/4
else:
ax = mpf(str(interval.ax))
bx = mpf(str(interval.bx))
ay = mpf(str(interval.ay))
by = mpf(str(interval.by))
if ax == bx and ay == by:
root = mpc(ax, ay)
break
x0 = mpc(*map(str, interval.center))
x1 = x0 + mpc(*map(str, (interval.dx, interval.dy)))/4
try:
# without a tolerance, this will return when (to within
# the given precision) x_i == x_{i-1}
root = findroot(func, (x0, x1))
# If the (real or complex) root is not in the 'interval',
# then keep refining the interval. This happens if findroot
# accidentally finds a different root outside of this
# interval because our initial estimate 'x0' was not close
# enough. It is also possible that the secant method will
# get trapped by a max/min in the interval; the root
# verification by findroot will raise a ValueError in this
# case and the interval will then be tightened -- and
# eventually the root will be found.
#
# It is also possible that findroot will not have any
# successful iterations to process (in which case it
# will fail to initialize a variable that is tested
# after the iterations and raise an UnboundLocalError).
if self.is_real or self.is_imaginary:
if not bool(root.imag) == self.is_real and (
a <= root <= b):
if self.is_imaginary:
root = mpc(mpf('0'), root.real)
break
elif (ax <= root.real <= bx and ay <= root.imag <= by):
break
except (UnboundLocalError, ValueError):
pass
interval = interval.refine()
# update the interval so we at least (for this precision or
# less) don't have much work to do to recompute the root
self._set_interval(interval)
return (Float._new(root.real._mpf_, prec) +
I*Float._new(root.imag._mpf_, prec))
def _eval_evalf(self, prec, **kwargs):
"""Evaluate this complex root to the given precision."""
# all kwargs are ignored
return self.eval_rational(n=prec_to_dps(prec))._evalf(prec)
def eval_rational(self, dx=None, dy=None, n=15):
"""
Return a Rational approximation of ``self`` that has real
and imaginary component approximations that are within ``dx``
and ``dy`` of the true values, respectively. Alternatively,
``n`` digits of precision can be specified.
The interval is refined with bisection and is sure to
converge. The root bounds are updated when the refinement
is complete so recalculation at the same or lesser precision
will not have to repeat the refinement and should be much
faster.
        The following example first obtains a Rational approximation to
1e-8 accuracy for all roots of the 4-th order Legendre
polynomial. Since the roots are all less than 1, this will
ensure the decimal representation of the approximation will be
correct (including rounding) to 6 digits:
>>> from sympy import S, legendre_poly, Symbol
>>> x = Symbol("x")
>>> p = legendre_poly(4, x, polys=True)
>>> r = p.real_roots()[-1]
>>> r.eval_rational(10**-8).n(6)
0.861136
        It is not necessary to do a two-step calculation, however: the
decimal representation can be computed directly:
>>> r.evalf(17)
0.86113631159405258
"""
dy = dy or dx
if dx:
rtol = None
dx = dx if isinstance(dx, Rational) else Rational(str(dx))
dy = dy if isinstance(dy, Rational) else Rational(str(dy))
else:
# 5 binary (or 2 decimal) digits are needed to ensure that
# a given digit is correctly rounded
# prec_to_dps(dps_to_prec(n) + 5) - n <= 2 (tested for
            # n in range(1000000))
rtol = S(10)**-(n + 2) # +2 for guard digits
interval = self._get_interval()
while True:
if self.is_real:
if rtol:
dx = abs(interval.center*rtol)
interval = interval.refine_size(dx=dx)
c = interval.center
real = Rational(c)
imag = S.Zero
if not rtol or interval.dx < abs(c*rtol):
break
elif self.is_imaginary:
if rtol:
dy = abs(interval.center[1]*rtol)
dx = 1
interval = interval.refine_size(dx=dx, dy=dy)
c = interval.center[1]
imag = Rational(c)
real = S.Zero
if not rtol or interval.dy < abs(c*rtol):
break
else:
if rtol:
dx = abs(interval.center[0]*rtol)
dy = abs(interval.center[1]*rtol)
interval = interval.refine_size(dx, dy)
c = interval.center
real, imag = map(Rational, c)
if not rtol or (
interval.dx < abs(c[0]*rtol) and
interval.dy < abs(c[1]*rtol)):
break
# update the interval so we at least (for this precision or
# less) don't have much work to do to recompute the root
self._set_interval(interval)
return real + I*imag
def _eval_Eq(self, other):
# CRootOf represents a Root, so if other is that root, it should set
# the expression to zero *and* it should be in the interval of the
# CRootOf instance. It must also be a number that agrees with the
# is_real value of the CRootOf instance.
if type(self) == type(other):
return sympify(self == other)
if not other.is_number:
return None
if not other.is_finite:
return S.false
z = self.expr.subs(self.expr.free_symbols.pop(), other).is_zero
if z is False: # all roots will make z True but we don't know
# whether this is the right root if z is True
return S.false
o = other.is_real, other.is_imaginary
s = self.is_real, self.is_imaginary
assert None not in s # this is part of initial refinement
if o != s and None not in o:
return S.false
re, im = other.as_real_imag()
if self.is_real:
if im:
return S.false
i = self._get_interval()
a, b = [Rational(str(_)) for _ in (i.a, i.b)]
return sympify(a <= other and other <= b)
i = self._get_interval()
r1, r2, i1, i2 = [Rational(str(j)) for j in (
i.ax, i.bx, i.ay, i.by)]
return sympify((
r1 <= re and re <= r2) and (
i1 <= im and im <= i2))
CRootOf = ComplexRootOf
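# Illustrative sketch (not part of the original module): constructing indexed
# roots and listing all roots of simple cubics. Real roots come first (in
# increasing order), followed by the complex roots.
def _crootof_usage_sketch():
    from sympy.abc import x
    r = CRootOf(x**3 - x - 1, 0)             # the unique real root
    assert r.is_real
    assert CRootOf(x**3 - x - 1, -3) == r    # negative indices count from the end
    rts = CRootOf.all_roots(PurePoly(x**3 - 1))
    assert rts[0] == 1 and len(rts) == 3     # rational root plus two radical roots
    return rts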
@public
class RootSum(Expr):
"""Represents a sum of all roots of a univariate polynomial. """
__slots__ = ('poly', 'fun', 'auto')
def __new__(cls, expr, func=None, x=None, auto=True, quadratic=False):
"""Construct a new ``RootSum`` instance of roots of a polynomial."""
coeff, poly = cls._transform(expr, x)
if not poly.is_univariate:
raise MultivariatePolynomialError(
"only univariate polynomials are allowed")
if func is None:
func = Lambda(poly.gen, poly.gen)
else:
is_func = getattr(func, 'is_Function', False)
if is_func and 1 in func.nargs:
if not isinstance(func, Lambda):
func = Lambda(poly.gen, func(poly.gen))
else:
raise ValueError(
"expected a univariate function, got %s" % func)
var, expr = func.variables[0], func.expr
if coeff is not S.One:
expr = expr.subs(var, coeff*var)
deg = poly.degree()
if not expr.has(var):
return deg*expr
if expr.is_Add:
add_const, expr = expr.as_independent(var)
else:
add_const = S.Zero
if expr.is_Mul:
mul_const, expr = expr.as_independent(var)
else:
mul_const = S.One
func = Lambda(var, expr)
rational = cls._is_func_rational(poly, func)
factors, terms = _pure_factors(poly), []
for poly, k in factors:
if poly.is_linear:
term = func(roots_linear(poly)[0])
elif quadratic and poly.is_quadratic:
term = sum(map(func, roots_quadratic(poly)))
else:
if not rational or not auto:
term = cls._new(poly, func, auto)
else:
term = cls._rational_case(poly, func)
terms.append(k*term)
return mul_const*Add(*terms) + deg*add_const
@classmethod
def _new(cls, poly, func, auto=True):
"""Construct new raw ``RootSum`` instance. """
obj = Expr.__new__(cls)
obj.poly = poly
obj.fun = func
obj.auto = auto
return obj
@classmethod
def new(cls, poly, func, auto=True):
"""Construct new ``RootSum`` instance. """
if not func.expr.has(*func.variables):
return func.expr
rational = cls._is_func_rational(poly, func)
if not rational or not auto:
return cls._new(poly, func, auto)
else:
return cls._rational_case(poly, func)
@classmethod
def _transform(cls, expr, x):
"""Transform an expression to a polynomial. """
poly = PurePoly(expr, x, greedy=False)
return preprocess_roots(poly)
@classmethod
def _is_func_rational(cls, poly, func):
"""Check if a lambda is a rational function. """
var, expr = func.variables[0], func.expr
return expr.is_rational_function(var)
@classmethod
def _rational_case(cls, poly, func):
"""Handle the rational function case. """
roots = symbols('r:%d' % poly.degree())
var, expr = func.variables[0], func.expr
f = sum(expr.subs(var, r) for r in roots)
p, q = together(f).as_numer_denom()
domain = QQ[roots]
p = p.expand()
q = q.expand()
try:
p = Poly(p, domain=domain, expand=False)
except GeneratorsNeeded:
p, p_coeff = None, (p,)
else:
p_monom, p_coeff = zip(*p.terms())
try:
q = Poly(q, domain=domain, expand=False)
except GeneratorsNeeded:
q, q_coeff = None, (q,)
else:
q_monom, q_coeff = zip(*q.terms())
coeffs, mapping = symmetrize(p_coeff + q_coeff, formal=True)
formulas, values = viete(poly, roots), []
for (sym, _), (_, val) in zip(mapping, formulas):
values.append((sym, val))
for i, (coeff, _) in enumerate(coeffs):
coeffs[i] = coeff.subs(values)
n = len(p_coeff)
p_coeff = coeffs[:n]
q_coeff = coeffs[n:]
if p is not None:
p = Poly(dict(zip(p_monom, p_coeff)), *p.gens).as_expr()
else:
(p,) = p_coeff
if q is not None:
q = Poly(dict(zip(q_monom, q_coeff)), *q.gens).as_expr()
else:
(q,) = q_coeff
return factor(p/q)
def _hashable_content(self):
return (self.poly, self.fun)
@property
def expr(self):
return self.poly.as_expr()
@property
def args(self):
return (self.expr, self.fun, self.poly.gen)
@property
def free_symbols(self):
return self.poly.free_symbols | self.fun.free_symbols
@property
def is_commutative(self):
return True
def doit(self, **hints):
if not hints.get('roots', True):
return self
_roots = roots(self.poly, multiple=True)
if len(_roots) < self.poly.degree():
return self
else:
return Add(*[self.fun(r) for r in _roots])
def _eval_evalf(self, prec):
try:
_roots = self.poly.nroots(n=prec_to_dps(prec))
except (DomainError, PolynomialError):
return self
else:
return Add(*[self.fun(r) for r in _roots])
def _eval_derivative(self, x):
var, expr = self.fun.args
func = Lambda(var, expr.diff(x))
return self.new(self.poly, func, self.auto)
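# Illustrative sketch (not part of the original module): RootSum sums a
# function over the roots of a polynomial. For a factorable quadratic the
# explicit roots 1 and 2 are used directly.
def _rootsum_sketch():
    from sympy import Lambda
    from sympy.abc import x
    assert RootSum(x**2 - 3*x + 2) == 3                   # 1 + 2
    assert RootSum(x**2 - 3*x + 2, Lambda(x, x**2)) == 5  # 1**2 + 2**2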
|
5136457609ff65fa791917855cecf1f0830fb3bb6e18fc676a5e96119a1202c3 | """Advanced tools for dense recursive polynomials in ``K[x]`` or ``K[X]``. """
from __future__ import print_function, division
from sympy.polys.densearith import (
dup_add_term, dmp_add_term,
dup_lshift,
dup_add, dmp_add,
dup_sub, dmp_sub,
dup_mul, dmp_mul,
dup_sqr,
dup_div,
dup_rem, dmp_rem,
dmp_expand,
dup_mul_ground, dmp_mul_ground,
dup_quo_ground, dmp_quo_ground,
dup_exquo_ground, dmp_exquo_ground,
)
from sympy.polys.densebasic import (
dup_strip, dmp_strip,
dup_convert, dmp_convert,
dup_degree, dmp_degree,
dmp_to_dict,
dmp_from_dict,
dup_LC, dmp_LC, dmp_ground_LC,
dup_TC, dmp_TC,
dmp_zero, dmp_ground,
dmp_zero_p,
dup_to_raw_dict, dup_from_raw_dict,
dmp_zeros
)
from sympy.polys.polyerrors import (
MultivariatePolynomialError,
DomainError
)
from sympy.utilities import variations
from math import ceil as _ceil, log as _log
def dup_integrate(f, m, K):
"""
Computes the indefinite integral of ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> R.dup_integrate(x**2 + 2*x, 1)
1/3*x**3 + x**2
>>> R.dup_integrate(x**2 + 2*x, 2)
1/12*x**4 + 1/3*x**3
"""
if m <= 0 or not f:
return f
g = [K.zero]*m
for i, c in enumerate(reversed(f)):
n = i + 1
for j in range(1, m):
n *= i + j + 1
g.insert(0, K.exquo(c, K(n)))
return g
def dmp_integrate(f, m, u, K):
"""
Computes the indefinite integral of ``f`` in ``x_0`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x,y = ring("x,y", QQ)
>>> R.dmp_integrate(x + 2*y, 1)
1/2*x**2 + 2*x*y
>>> R.dmp_integrate(x + 2*y, 2)
1/6*x**3 + x**2*y
"""
if not u:
return dup_integrate(f, m, K)
if m <= 0 or dmp_zero_p(f, u):
return f
g, v = dmp_zeros(m, u - 1, K), u - 1
for i, c in enumerate(reversed(f)):
n = i + 1
for j in range(1, m):
n *= i + j + 1
g.insert(0, dmp_quo_ground(c, K(n), v, K))
return g
def _rec_integrate_in(g, m, v, i, j, K):
"""Recursive helper for :func:`dmp_integrate_in`."""
if i == j:
return dmp_integrate(g, m, v, K)
w, i = v - 1, i + 1
return dmp_strip([ _rec_integrate_in(c, m, w, i, j, K) for c in g ], v)
def dmp_integrate_in(f, m, j, u, K):
"""
Computes the indefinite integral of ``f`` in ``x_j`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x,y = ring("x,y", QQ)
>>> R.dmp_integrate_in(x + 2*y, 1, 0)
1/2*x**2 + 2*x*y
>>> R.dmp_integrate_in(x + 2*y, 1, 1)
x*y + y**2
"""
if j < 0 or j > u:
raise IndexError("0 <= j <= u expected, got u = %d, j = %d" % (u, j))
return _rec_integrate_in(f, m, u, 0, j, K)
def dup_diff(f, m, K):
"""
``m``-th order derivative of a polynomial in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_diff(x**3 + 2*x**2 + 3*x + 4, 1)
3*x**2 + 4*x + 3
>>> R.dup_diff(x**3 + 2*x**2 + 3*x + 4, 2)
6*x + 4
"""
if m <= 0:
return f
n = dup_degree(f)
if n < m:
return []
deriv = []
if m == 1:
for coeff in f[:-m]:
deriv.append(K(n)*coeff)
n -= 1
else:
for coeff in f[:-m]:
k = n
for i in range(n - 1, n - m, -1):
k *= i
deriv.append(K(k)*coeff)
n -= 1
return dup_strip(deriv)
def dmp_diff(f, m, u, K):
"""
``m``-th order derivative in ``x_0`` of a polynomial in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = x*y**2 + 2*x*y + 3*x + 2*y**2 + 3*y + 1
>>> R.dmp_diff(f, 1)
y**2 + 2*y + 3
>>> R.dmp_diff(f, 2)
0
"""
if not u:
return dup_diff(f, m, K)
if m <= 0:
return f
n = dmp_degree(f, u)
if n < m:
return dmp_zero(u)
deriv, v = [], u - 1
if m == 1:
for coeff in f[:-m]:
deriv.append(dmp_mul_ground(coeff, K(n), v, K))
n -= 1
else:
for coeff in f[:-m]:
k = n
for i in range(n - 1, n - m, -1):
k *= i
deriv.append(dmp_mul_ground(coeff, K(k), v, K))
n -= 1
return dmp_strip(deriv, u)
def _rec_diff_in(g, m, v, i, j, K):
"""Recursive helper for :func:`dmp_diff_in`."""
if i == j:
return dmp_diff(g, m, v, K)
w, i = v - 1, i + 1
return dmp_strip([ _rec_diff_in(c, m, w, i, j, K) for c in g ], v)
def dmp_diff_in(f, m, j, u, K):
"""
``m``-th order derivative in ``x_j`` of a polynomial in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = x*y**2 + 2*x*y + 3*x + 2*y**2 + 3*y + 1
>>> R.dmp_diff_in(f, 1, 0)
y**2 + 2*y + 3
>>> R.dmp_diff_in(f, 1, 1)
2*x*y + 2*x + 4*y + 3
"""
if j < 0 or j > u:
raise IndexError("0 <= j <= %s expected, got %s" % (u, j))
return _rec_diff_in(f, m, u, 0, j, K)
def dup_eval(f, a, K):
"""
    Evaluate a polynomial at ``x = a`` in ``K[x]`` using the Horner scheme.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_eval(x**2 + 2*x + 3, 2)
11
"""
if not a:
return dup_TC(f, K)
result = K.zero
for c in f:
result *= a
result += c
return result
def dmp_eval(f, a, u, K):
"""
Evaluate a polynomial at ``x_0 = a`` in ``K[X]`` using the Horner scheme.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_eval(2*x*y + 3*x + y + 2, 2)
5*y + 8
"""
if not u:
return dup_eval(f, a, K)
if not a:
return dmp_TC(f, K)
result, v = dmp_LC(f, K), u - 1
for coeff in f[1:]:
result = dmp_mul_ground(result, a, v, K)
result = dmp_add(result, coeff, v, K)
return result
def _rec_eval_in(g, a, v, i, j, K):
"""Recursive helper for :func:`dmp_eval_in`."""
if i == j:
return dmp_eval(g, a, v, K)
v, i = v - 1, i + 1
return dmp_strip([ _rec_eval_in(c, a, v, i, j, K) for c in g ], v)
def dmp_eval_in(f, a, j, u, K):
"""
Evaluate a polynomial at ``x_j = a`` in ``K[X]`` using the Horner scheme.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 2*x*y + 3*x + y + 2
>>> R.dmp_eval_in(f, 2, 0)
5*y + 8
>>> R.dmp_eval_in(f, 2, 1)
7*x + 4
"""
if j < 0 or j > u:
raise IndexError("0 <= j <= %s expected, got %s" % (u, j))
return _rec_eval_in(f, a, u, 0, j, K)
def _rec_eval_tail(g, i, A, u, K):
"""Recursive helper for :func:`dmp_eval_tail`."""
if i == u:
return dup_eval(g, A[-1], K)
else:
h = [ _rec_eval_tail(c, i + 1, A, u, K) for c in g ]
if i < u - len(A) + 1:
return h
else:
return dup_eval(h, A[-u + i - 1], K)
def dmp_eval_tail(f, A, u, K):
"""
Evaluate a polynomial at ``x_j = a_j, ...`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 2*x*y + 3*x + y + 2
>>> R.dmp_eval_tail(f, [2])
7*x + 4
>>> R.dmp_eval_tail(f, [2, 2])
18
"""
if not A:
return f
if dmp_zero_p(f, u):
return dmp_zero(u - len(A))
e = _rec_eval_tail(f, 0, A, u, K)
if u == len(A) - 1:
return e
else:
return dmp_strip(e, u - len(A))
def _rec_diff_eval(g, m, a, v, i, j, K):
"""Recursive helper for :func:`dmp_diff_eval`."""
if i == j:
return dmp_eval(dmp_diff(g, m, v, K), a, v, K)
v, i = v - 1, i + 1
return dmp_strip([ _rec_diff_eval(c, m, a, v, i, j, K) for c in g ], v)
def dmp_diff_eval_in(f, m, a, j, u, K):
"""
Differentiate and evaluate a polynomial in ``x_j`` at ``a`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = x*y**2 + 2*x*y + 3*x + 2*y**2 + 3*y + 1
>>> R.dmp_diff_eval_in(f, 1, 2, 0)
y**2 + 2*y + 3
>>> R.dmp_diff_eval_in(f, 1, 2, 1)
6*x + 11
"""
if j > u:
raise IndexError("-%s <= j < %s expected, got %s" % (u, u, j))
if not j:
return dmp_eval(dmp_diff(f, m, u, K), a, u, K)
return _rec_diff_eval(f, m, a, u, 0, j, K)
def dup_trunc(f, p, K):
"""
Reduce a ``K[x]`` polynomial modulo a constant ``p`` in ``K``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_trunc(2*x**3 + 3*x**2 + 5*x + 7, ZZ(3))
-x**3 - x + 1
"""
if K.is_ZZ:
g = []
for c in f:
c = c % p
if c > p // 2:
g.append(c - p)
else:
g.append(c)
else:
g = [ c % p for c in f ]
return dup_strip(g)
def dmp_trunc(f, p, u, K):
"""
Reduce a ``K[X]`` polynomial modulo a polynomial ``p`` in ``K[Y]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 3*x**2*y + 8*x**2 + 5*x*y + 6*x + 2*y + 3
>>> g = (y - 1).drop(x)
>>> R.dmp_trunc(f, g)
11*x**2 + 11*x + 5
"""
return dmp_strip([ dmp_rem(c, p, u - 1, K) for c in f ], u)
def dmp_ground_trunc(f, p, u, K):
"""
Reduce a ``K[X]`` polynomial modulo a constant ``p`` in ``K``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 3*x**2*y + 8*x**2 + 5*x*y + 6*x + 2*y + 3
>>> R.dmp_ground_trunc(f, ZZ(3))
-x**2 - x*y - y
"""
if not u:
return dup_trunc(f, p, K)
v = u - 1
return dmp_strip([ dmp_ground_trunc(c, p, v, K) for c in f ], u)
def dup_monic(f, K):
"""
Divide all coefficients by ``LC(f)`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x = ring("x", ZZ)
>>> R.dup_monic(3*x**2 + 6*x + 9)
x**2 + 2*x + 3
>>> R, x = ring("x", QQ)
>>> R.dup_monic(3*x**2 + 4*x + 2)
x**2 + 4/3*x + 2/3
"""
if not f:
return f
lc = dup_LC(f, K)
if K.is_one(lc):
return f
else:
return dup_exquo_ground(f, lc, K)
def dmp_ground_monic(f, u, K):
"""
Divide all coefficients by ``LC(f)`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 3*x**2*y + 6*x**2 + 3*x*y + 9*y + 3
>>> R.dmp_ground_monic(f)
x**2*y + 2*x**2 + x*y + 3*y + 1
>>> R, x,y = ring("x,y", QQ)
>>> f = 3*x**2*y + 8*x**2 + 5*x*y + 6*x + 2*y + 3
>>> R.dmp_ground_monic(f)
x**2*y + 8/3*x**2 + 5/3*x*y + 2*x + 2/3*y + 1
"""
if not u:
return dup_monic(f, K)
if dmp_zero_p(f, u):
return f
lc = dmp_ground_LC(f, u, K)
if K.is_one(lc):
return f
else:
return dmp_exquo_ground(f, lc, u, K)
def dup_content(f, K):
"""
Compute the GCD of coefficients of ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x = ring("x", ZZ)
>>> f = 6*x**2 + 8*x + 12
>>> R.dup_content(f)
2
>>> R, x = ring("x", QQ)
>>> f = 6*x**2 + 8*x + 12
>>> R.dup_content(f)
2
"""
from sympy.polys.domains import QQ
if not f:
return K.zero
cont = K.zero
if K == QQ:
for c in f:
cont = K.gcd(cont, c)
else:
for c in f:
cont = K.gcd(cont, c)
if K.is_one(cont):
break
return cont
def dmp_ground_content(f, u, K):
"""
Compute the GCD of coefficients of ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 2*x*y + 6*x + 4*y + 12
>>> R.dmp_ground_content(f)
2
>>> R, x,y = ring("x,y", QQ)
>>> f = 2*x*y + 6*x + 4*y + 12
>>> R.dmp_ground_content(f)
2
"""
from sympy.polys.domains import QQ
if not u:
return dup_content(f, K)
if dmp_zero_p(f, u):
return K.zero
cont, v = K.zero, u - 1
if K == QQ:
for c in f:
cont = K.gcd(cont, dmp_ground_content(c, v, K))
else:
for c in f:
cont = K.gcd(cont, dmp_ground_content(c, v, K))
if K.is_one(cont):
break
return cont
def dup_primitive(f, K):
"""
Compute content and the primitive form of ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x = ring("x", ZZ)
>>> f = 6*x**2 + 8*x + 12
>>> R.dup_primitive(f)
(2, 3*x**2 + 4*x + 6)
>>> R, x = ring("x", QQ)
>>> f = 6*x**2 + 8*x + 12
>>> R.dup_primitive(f)
(2, 3*x**2 + 4*x + 6)
"""
if not f:
return K.zero, f
cont = dup_content(f, K)
if K.is_one(cont):
return cont, f
else:
return cont, dup_quo_ground(f, cont, K)
def dmp_ground_primitive(f, u, K):
"""
Compute content and the primitive form of ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 2*x*y + 6*x + 4*y + 12
>>> R.dmp_ground_primitive(f)
(2, x*y + 3*x + 2*y + 6)
>>> R, x,y = ring("x,y", QQ)
>>> f = 2*x*y + 6*x + 4*y + 12
>>> R.dmp_ground_primitive(f)
(2, x*y + 3*x + 2*y + 6)
"""
if not u:
return dup_primitive(f, K)
if dmp_zero_p(f, u):
return K.zero, f
cont = dmp_ground_content(f, u, K)
if K.is_one(cont):
return cont, f
else:
return cont, dmp_quo_ground(f, cont, u, K)
def dup_extract(f, g, K):
"""
Extract common content from a pair of polynomials in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_extract(6*x**2 + 12*x + 18, 4*x**2 + 8*x + 12)
(2, 3*x**2 + 6*x + 9, 2*x**2 + 4*x + 6)
"""
fc = dup_content(f, K)
gc = dup_content(g, K)
gcd = K.gcd(fc, gc)
if not K.is_one(gcd):
f = dup_quo_ground(f, gcd, K)
g = dup_quo_ground(g, gcd, K)
return gcd, f, g
def dmp_ground_extract(f, g, u, K):
"""
Extract common content from a pair of polynomials in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_ground_extract(6*x*y + 12*x + 18, 4*x*y + 8*x + 12)
(2, 3*x*y + 6*x + 9, 2*x*y + 4*x + 6)
"""
fc = dmp_ground_content(f, u, K)
gc = dmp_ground_content(g, u, K)
gcd = K.gcd(fc, gc)
if not K.is_one(gcd):
f = dmp_quo_ground(f, gcd, u, K)
g = dmp_quo_ground(g, gcd, u, K)
return gcd, f, g
def dup_real_imag(f, K):
"""
Return bivariate polynomials ``f1`` and ``f2``, such that ``f = f1 + f2*I``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dup_real_imag(x**3 + x**2 + x + 1)
(x**3 + x**2 - 3*x*y**2 + x - y**2 + 1, 3*x**2*y + 2*x*y - y**3 + y)
"""
if not K.is_ZZ and not K.is_QQ:
raise DomainError("computing real and imaginary parts is not supported over %s" % K)
f1 = dmp_zero(1)
f2 = dmp_zero(1)
if not f:
return f1, f2
g = [[[K.one, K.zero]], [[K.one], []]]
h = dmp_ground(f[0], 2)
for c in f[1:]:
h = dmp_mul(h, g, 2, K)
h = dmp_add_term(h, dmp_ground(c, 1), 0, 2, K)
H = dup_to_raw_dict(h)
for k, h in H.items():
m = k % 4
if not m:
f1 = dmp_add(f1, h, 1, K)
elif m == 1:
f2 = dmp_add(f2, h, 1, K)
elif m == 2:
f1 = dmp_sub(f1, h, 1, K)
else:
f2 = dmp_sub(f2, h, 1, K)
return f1, f2
def dup_mirror(f, K):
"""
Evaluate efficiently the composition ``f(-x)`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_mirror(x**3 + 2*x**2 - 4*x + 2)
-x**3 + 2*x**2 + 4*x + 2
"""
f = list(f)
for i in range(len(f) - 2, -1, -2):
f[i] = -f[i]
return f
def dup_scale(f, a, K):
"""
Evaluate efficiently composition ``f(a*x)`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_scale(x**2 - 2*x + 1, ZZ(2))
4*x**2 - 4*x + 1
"""
f, n, b = list(f), len(f) - 1, a
for i in range(n - 1, -1, -1):
f[i], b = b*f[i], b*a
return f
def dup_shift(f, a, K):
"""
Evaluate efficiently the Taylor shift ``f(x + a)`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_shift(x**2 - 2*x + 1, ZZ(2))
x**2 + 2*x + 1
"""
f, n = list(f), len(f) - 1
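# Classical Taylor shift via repeated synthetic division (Horner passes): each
# pass folds ``a`` into the coefficients, and after n passes f holds the
# coefficients of f(x + a).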
for i in range(n, 0, -1):
for j in range(0, i):
f[j + 1] += a*f[j]
return f
def dup_transform(f, p, q, K):
"""
Evaluate functional transformation ``q**n * f(p/q)`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_transform(x**2 - 2*x + 1, x**2 + 1, x - 1)
x**4 - 2*x**3 + 5*x**2 - 4*x + 4
"""
if not f:
return []
n = len(f) - 1
h, Q = [f[0]], [[K.one]]
for i in range(0, n):
Q.append(dup_mul(Q[-1], q, K))
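# Q now holds q**0, q**1, ..., q**n. The Horner-style loop below accumulates
# h = f[0]*p**n + f[1]*p**(n-1)*q + ... + f[n]*q**n, i.e. q**n * f(p/q).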
for c, q in zip(f[1:], Q[1:]):
h = dup_mul(h, p, K)
q = dup_mul_ground(q, c, K)
h = dup_add(h, q, K)
return h
def dup_compose(f, g, K):
"""
Evaluate functional composition ``f(g)`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_compose(x**2 + x, x - 1)
x**2 - x
"""
if len(g) <= 1:
return dup_strip([dup_eval(f, dup_LC(g, K), K)])
if not f:
return []
h = [f[0]]
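# Horner's scheme: h = (...((f[0]*g + f[1])*g + f[2])*g + ...) + f[n]
# evaluates f at g.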
for c in f[1:]:
h = dup_mul(h, g, K)
h = dup_add_term(h, c, 0, K)
return h
def dmp_compose(f, g, u, K):
"""
Evaluate functional composition ``f(g)`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_compose(x*y + 2*x + y, y)
y**2 + 3*y
"""
if not u:
return dup_compose(f, g, K)
if dmp_zero_p(f, u):
return f
h = [f[0]]
for c in f[1:]:
h = dmp_mul(h, g, u, K)
h = dmp_add_term(h, c, 0, u, K)
return h
def _dup_right_decompose(f, s, K):
"""Helper function for :func:`_dup_decompose`."""
n = len(f) - 1
lc = dup_LC(f, K)
f = dup_to_raw_dict(f)
g = { s: K.one }
r = n // s
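# Build the monic right composition factor of degree s one coefficient at a
# time, matching coefficients of f against those of the composition
# (Kozen-Landau style decomposition; see [Kozen89]_ cited in dup_decompose).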
for i in range(1, s):
coeff = K.zero
for j in range(0, i):
if not n + j - i in f:
continue
if not s - j in g:
continue
fc, gc = f[n + j - i], g[s - j]
coeff += (i - r*j)*fc*gc
g[s - i] = K.quo(coeff, i*r*lc)
return dup_from_raw_dict(g, K)
def _dup_left_decompose(f, h, K):
"""Helper function for :func:`_dup_decompose`."""
g, i = {}, 0
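# Repeatedly divide f by h; every remainder must be a constant, and these
# constants are the coefficients of the left factor written in powers of h,
# i.e. f = sum g[i]*h**i.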
while f:
q, r = dup_div(f, h, K)
if dup_degree(r) > 0:
return None
else:
g[i] = dup_LC(r, K)
f, i = q, i + 1
return dup_from_raw_dict(g, K)
def _dup_decompose(f, K):
"""Helper function for :func:`dup_decompose`."""
df = len(f) - 1
for s in range(2, df):
if df % s != 0:
continue
h = _dup_right_decompose(f, s, K)
if h is not None:
g = _dup_left_decompose(f, h, K)
if g is not None:
return g, h
return None
def dup_decompose(f, K):
"""
Computes functional decomposition of ``f`` in ``K[x]``.
Given a univariate polynomial ``f`` with coefficients in a field of
characteristic zero, returns list ``[f_1, f_2, ..., f_n]``, where::
f = f_1 o f_2 o ... f_n = f_1(f_2(... f_n))
and ``f_2, ..., f_n`` are monic and homogeneous polynomials of at
least second degree.
Unlike factorization, complete functional decompositions of
polynomials are not unique, consider examples:
1. ``f o g = f(x + b) o (g - b)``
2. ``x**n o x**m = x**m o x**n``
3. ``T_n o T_m = T_m o T_n``
where ``T_n`` and ``T_m`` are Chebyshev polynomials.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_decompose(x**4 - 2*x**3 + x**2)
[x**2, x**2 - x]
References
==========
.. [1] [Kozen89]_
"""
F = []
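# Strip off right composition factors one at a time; once no further
# decomposition exists, what remains of f is the leftmost factor.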
while True:
result = _dup_decompose(f, K)
if result is not None:
f, h = result
F = [h] + F
else:
break
return [f] + F
def dmp_lift(f, u, K):
"""
Convert algebraic coefficients to integers in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> from sympy import I
>>> K = QQ.algebraic_field(I)
>>> R, x = ring("x", K)
>>> f = x**2 + K([QQ(1), QQ(0)])*x + K([QQ(2), QQ(0)])
>>> R.dmp_lift(f)
x**8 + 2*x**6 + 9*x**4 - 8*x**2 + 16
"""
if not K.is_Algebraic:
raise DomainError(
'computation can be done only in an algebraic domain')
F, monoms, polys = dmp_to_dict(f, u), [], []
for monom, coeff in F.items():
if not coeff.is_ground:
monoms.append(monom)
perms = variations([-1, 1], len(monoms), repetition=True)
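# Multiply together every polynomial obtained by flipping the signs of the
# terms with non-ground (algebraic) coefficients, then convert the expanded
# product back to the base domain.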
for perm in perms:
G = dict(F)
for sign, monom in zip(perm, monoms):
if sign == -1:
G[monom] = -G[monom]
polys.append(dmp_from_dict(G, u, K))
return dmp_convert(dmp_expand(polys, u, K), u, K, K.dom)
def dup_sign_variations(f, K):
"""
Compute the number of sign variations of ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_sign_variations(x**4 - x**2 - x + 1)
2
"""
prev, k = K.zero, 0
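# Count sign changes between consecutive non-zero coefficients, skipping zero
# coefficients (this is the quantity used by Descartes' rule of signs).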
for coeff in f:
if K.is_negative(coeff*prev):
k += 1
if coeff:
prev = coeff
return k
def dup_clear_denoms(f, K0, K1=None, convert=False):
"""
Clear denominators, i.e. transform ``K_0`` to ``K_1``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> f = QQ(1,2)*x + QQ(1,3)
>>> R.dup_clear_denoms(f, convert=False)
(6, 3*x + 2)
>>> R.dup_clear_denoms(f, convert=True)
(6, 3*x + 2)
"""
if K1 is None:
if K0.has_assoc_Ring:
K1 = K0.get_ring()
else:
K1 = K0
common = K1.one
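# common becomes the LCM of all coefficient denominators; multiplying f by it
# clears the denominators.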
for c in f:
common = K1.lcm(common, K0.denom(c))
if not K1.is_one(common):
f = dup_mul_ground(f, common, K0)
if not convert:
return common, f
else:
return common, dup_convert(f, K0, K1)
def _rec_clear_denoms(g, v, K0, K1):
"""Recursive helper for :func:`dmp_clear_denoms`."""
common = K1.one
if not v:
for c in g:
common = K1.lcm(common, K0.denom(c))
else:
w = v - 1
for c in g:
common = K1.lcm(common, _rec_clear_denoms(c, w, K0, K1))
return common
def dmp_clear_denoms(f, u, K0, K1=None, convert=False):
"""
Clear denominators, i.e. transform ``K_0`` to ``K_1``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x,y = ring("x,y", QQ)
>>> f = QQ(1,2)*x + QQ(1,3)*y + 1
>>> R.dmp_clear_denoms(f, convert=False)
(6, 3*x + 2*y + 6)
>>> R.dmp_clear_denoms(f, convert=True)
(6, 3*x + 2*y + 6)
"""
if not u:
return dup_clear_denoms(f, K0, K1, convert=convert)
if K1 is None:
if K0.has_assoc_Ring:
K1 = K0.get_ring()
else:
K1 = K0
common = _rec_clear_denoms(f, u, K0, K1)
if not K1.is_one(common):
f = dmp_mul_ground(f, common, u, K0)
if not convert:
return common, f
else:
return common, dmp_convert(f, u, K0, K1)
def dup_revert(f, n, K):
"""
Compute ``f**(-1)`` mod ``x**n`` using Newton iteration.
This function computes the first ``2**n`` terms of a polynomial that
is the result of inverting a polynomial modulo ``x**n``. This is
useful for efficiently computing the series expansion of ``1/f``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> f = -QQ(1,720)*x**6 + QQ(1,24)*x**4 - QQ(1,2)*x**2 + 1
>>> R.dup_revert(f, 8)
61/720*x**6 + 5/24*x**4 + 1/2*x**2 + 1
"""
g = [K.revert(dup_TC(f, K))]
h = [K.one, K.zero, K.zero]
N = int(_ceil(_log(n, 2)))
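# Newton iteration g <- 2*g - f*g**2 reduced modulo h; the modulus h starts at
# x**2 and its degree doubles each step, so the number of correct terms of the
# inverse series doubles as well.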
for i in range(1, N + 1):
a = dup_mul_ground(g, K(2), K)
b = dup_mul(f, dup_sqr(g, K), K)
g = dup_rem(dup_sub(a, b, K), h, K)
h = dup_lshift(h, dup_degree(h), K)
return g
def dmp_revert(f, g, u, K):
"""
Compute ``f**(-1)`` mod ``x**n`` using Newton iteration.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x,y = ring("x,y", QQ)
"""
if not u:
return dup_revert(f, g, K)
else:
raise MultivariatePolynomialError(f, g)
ee82a3c2b4af445260dcc58b94861294dd1fdc927cc6934639310a66ff6ae080
"""User-friendly public interface to polynomial functions. """
from __future__ import print_function, division
from functools import wraps
from sympy.core import (
S, Basic, Expr, I, Integer, Add, Mul, Dummy, Tuple
)
from sympy.core.basic import preorder_traversal
from sympy.core.compatibility import iterable, ordered
from sympy.core.decorators import _sympifyit
from sympy.core.function import Derivative
from sympy.core.mul import _keep_coeff
from sympy.core.relational import Relational
from sympy.core.symbol import Symbol
from sympy.core.sympify import sympify, _sympify
from sympy.logic.boolalg import BooleanAtom
from sympy.polys import polyoptions as options
from sympy.polys.constructor import construct_domain
from sympy.polys.domains import FF, QQ, ZZ
from sympy.polys.fglmtools import matrix_fglm
from sympy.polys.groebnertools import groebner as _groebner
from sympy.polys.monomials import Monomial
from sympy.polys.orderings import monomial_key
from sympy.polys.polyclasses import DMP
from sympy.polys.polyerrors import (
OperationNotSupported, DomainError,
CoercionFailed, UnificationFailed,
GeneratorsNeeded, PolynomialError,
MultivariatePolynomialError,
ExactQuotientFailed,
PolificationFailed,
ComputationFailed,
GeneratorsError,
)
from sympy.polys.polyutils import (
basic_from_dict,
_sort_gens,
_unify_gens,
_dict_reorder,
_dict_from_expr,
_parallel_dict_from_expr,
)
from sympy.polys.rationaltools import together
from sympy.polys.rootisolation import dup_isolate_real_roots_list
from sympy.utilities import group, sift, public, filldedent
from sympy.utilities.exceptions import SymPyDeprecationWarning
# Required to avoid errors
import sympy.polys
import mpmath
from mpmath.libmp.libhyper import NoConvergence
def _polifyit(func):
@wraps(func)
def wrapper(f, g):
g = _sympify(g)
if isinstance(g, Poly):
return func(f, g)
elif isinstance(g, Expr):
try:
g = f.from_expr(g, *f.gens)
except PolynomialError:
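# g cannot be interpreted as a polynomial in f's generators; fall back to
# Expr arithmetic (this mixed mode is deprecated, see the warning below).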
if g.is_Matrix:
return NotImplemented
expr_method = getattr(f.as_expr(), func.__name__)
result = expr_method(g)
if result is not NotImplemented:
SymPyDeprecationWarning(
feature="Mixing Poly with non-polynomial expressions in binary operations",
issue=18613,
deprecated_since_version="1.6",
useinstead="the as_expr or as_poly method to convert types").warn()
return result
else:
return func(f, g)
else:
return NotImplemented
return wrapper
@public
class Poly(Basic):
"""
Generic class for representing and operating on polynomial expressions.
Poly is a subclass of Basic rather than Expr but instances can be
converted to Expr with the ``as_expr`` method.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
Create a univariate polynomial:
>>> Poly(x*(x**2 + x - 1)**2)
Poly(x**5 + 2*x**4 - x**3 - 2*x**2 + x, x, domain='ZZ')
Create a univariate polynomial with specific domain:
>>> from sympy import sqrt
>>> Poly(x**2 + 2*x + sqrt(3), domain='R')
Poly(1.0*x**2 + 2.0*x + 1.73205080756888, x, domain='RR')
Create a multivariate polynomial:
>>> Poly(y*x**2 + x*y + 1)
Poly(x**2*y + x*y + 1, x, y, domain='ZZ')
Create a univariate polynomial, where y is a constant:
>>> Poly(y*x**2 + x*y + 1,x)
Poly(y*x**2 + y*x + 1, x, domain='ZZ[y]')
You can evaluate the above polynomial as a function of y:
>>> Poly(y*x**2 + x*y + 1,x).eval(2)
6*y + 1
See Also
========
sympy.core.expr.Expr
"""
__slots__ = ('rep',)
is_commutative = True
is_Poly = True
_op_priority = 10.001
def __new__(cls, rep, *gens, **args):
"""Create a new polynomial instance out of something useful. """
opt = options.build_options(gens, args)
if 'order' in opt:
raise NotImplementedError("'order' keyword is not implemented yet")
if iterable(rep, exclude=str):
if isinstance(rep, dict):
return cls._from_dict(rep, opt)
else:
return cls._from_list(list(rep), opt)
else:
rep = sympify(rep)
if rep.is_Poly:
return cls._from_poly(rep, opt)
else:
return cls._from_expr(rep, opt)
@classmethod
def new(cls, rep, *gens):
"""Construct :class:`Poly` instance from raw representation. """
if not isinstance(rep, DMP):
raise PolynomialError(
"invalid polynomial representation: %s" % rep)
elif rep.lev != len(gens) - 1:
raise PolynomialError("invalid arguments: %s, %s" % (rep, gens))
expr = basic_from_dict(rep.to_sympy_dict(), *gens)
obj = Basic.__new__(cls, expr, *gens)
obj.rep = rep
return obj
@property
def expr(self):
return self.args[0]
@property
def gens(self):
return self.args[1:]
@classmethod
def from_dict(cls, rep, *gens, **args):
"""Construct a polynomial from a ``dict``. """
opt = options.build_options(gens, args)
return cls._from_dict(rep, opt)
@classmethod
def from_list(cls, rep, *gens, **args):
"""Construct a polynomial from a ``list``. """
opt = options.build_options(gens, args)
return cls._from_list(rep, opt)
@classmethod
def from_poly(cls, rep, *gens, **args):
"""Construct a polynomial from a polynomial. """
opt = options.build_options(gens, args)
return cls._from_poly(rep, opt)
@classmethod
def from_expr(cls, rep, *gens, **args):
"""Construct a polynomial from an expression. """
opt = options.build_options(gens, args)
return cls._from_expr(rep, opt)
@classmethod
def _from_dict(cls, rep, opt):
"""Construct a polynomial from a ``dict``. """
gens = opt.gens
if not gens:
raise GeneratorsNeeded(
"can't initialize from 'dict' without generators")
level = len(gens) - 1
domain = opt.domain
if domain is None:
domain, rep = construct_domain(rep, opt=opt)
else:
for monom, coeff in rep.items():
rep[monom] = domain.convert(coeff)
return cls.new(DMP.from_dict(rep, level, domain), *gens)
@classmethod
def _from_list(cls, rep, opt):
"""Construct a polynomial from a ``list``. """
gens = opt.gens
if not gens:
raise GeneratorsNeeded(
"can't initialize from 'list' without generators")
elif len(gens) != 1:
raise MultivariatePolynomialError(
"'list' representation not supported")
level = len(gens) - 1
domain = opt.domain
if domain is None:
domain, rep = construct_domain(rep, opt=opt)
else:
rep = list(map(domain.convert, rep))
return cls.new(DMP.from_list(rep, level, domain), *gens)
@classmethod
def _from_poly(cls, rep, opt):
"""Construct a polynomial from a polynomial. """
if cls != rep.__class__:
rep = cls.new(rep.rep, *rep.gens)
gens = opt.gens
field = opt.field
domain = opt.domain
if gens and rep.gens != gens:
if set(rep.gens) != set(gens):
return cls._from_expr(rep.as_expr(), opt)
else:
rep = rep.reorder(*gens)
if 'domain' in opt and domain:
rep = rep.set_domain(domain)
elif field is True:
rep = rep.to_field()
return rep
@classmethod
def _from_expr(cls, rep, opt):
"""Construct a polynomial from an expression. """
rep, opt = _dict_from_expr(rep, opt)
return cls._from_dict(rep, opt)
def __hash__(self):
return super(Poly, self).__hash__()
@property
def free_symbols(self):
"""
Free symbols of a polynomial expression.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(x**2 + 1).free_symbols
{x}
>>> Poly(x**2 + y).free_symbols
{x, y}
>>> Poly(x**2 + y, x).free_symbols
{x, y}
>>> Poly(x**2 + y, x, z).free_symbols
{x, y}
"""
symbols = set()
gens = self.gens
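# A generator contributes its free symbols only if it actually occurs in some
# monomial; symbols living in the coefficient domain are collected separately
# via free_symbols_in_domain.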
for i in range(len(gens)):
for monom in self.monoms():
if monom[i]:
symbols |= gens[i].free_symbols
break
return symbols | self.free_symbols_in_domain
@property
def free_symbols_in_domain(self):
"""
Free symbols of the domain of ``self``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 1).free_symbols_in_domain
set()
>>> Poly(x**2 + y).free_symbols_in_domain
set()
>>> Poly(x**2 + y, x).free_symbols_in_domain
{y}
"""
domain, symbols = self.rep.dom, set()
if domain.is_Composite:
for gen in domain.symbols:
symbols |= gen.free_symbols
elif domain.is_EX:
for coeff in self.coeffs():
symbols |= coeff.free_symbols
return symbols
@property
def gen(self):
"""
Return the principal generator.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).gen
x
"""
return self.gens[0]
@property
def domain(self):
"""Get the ground domain of ``self``. """
return self.get_domain()
@property
def zero(self):
"""Return zero polynomial with ``self``'s properties. """
return self.new(self.rep.zero(self.rep.lev, self.rep.dom), *self.gens)
@property
def one(self):
"""Return one polynomial with ``self``'s properties. """
return self.new(self.rep.one(self.rep.lev, self.rep.dom), *self.gens)
@property
def unit(self):
"""Return unit polynomial with ``self``'s properties. """
return self.new(self.rep.unit(self.rep.lev, self.rep.dom), *self.gens)
def unify(f, g):
"""
Make ``f`` and ``g`` belong to the same domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f, g = Poly(x/2 + 1), Poly(2*x + 1)
>>> f
Poly(1/2*x + 1, x, domain='QQ')
>>> g
Poly(2*x + 1, x, domain='ZZ')
>>> F, G = f.unify(g)
>>> F
Poly(1/2*x + 1, x, domain='QQ')
>>> G
Poly(2*x + 1, x, domain='QQ')
"""
_, per, F, G = f._unify(g)
return per(F), per(G)
def _unify(f, g):
g = sympify(g)
if not g.is_Poly:
try:
return f.rep.dom, f.per, f.rep, f.rep.per(f.rep.dom.from_sympy(g))
except CoercionFailed:
raise UnificationFailed("can't unify %s with %s" % (f, g))
if isinstance(f.rep, DMP) and isinstance(g.rep, DMP):
gens = _unify_gens(f.gens, g.gens)
dom, lev = f.rep.dom.unify(g.rep.dom, gens), len(gens) - 1
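# Bring both representations to the unified generators and ground domain:
# reorder the terms if the generators differ, then convert the coefficients.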
if f.gens != gens:
f_monoms, f_coeffs = _dict_reorder(
f.rep.to_dict(), f.gens, gens)
if f.rep.dom != dom:
f_coeffs = [dom.convert(c, f.rep.dom) for c in f_coeffs]
F = DMP(dict(list(zip(f_monoms, f_coeffs))), dom, lev)
else:
F = f.rep.convert(dom)
if g.gens != gens:
g_monoms, g_coeffs = _dict_reorder(
g.rep.to_dict(), g.gens, gens)
if g.rep.dom != dom:
g_coeffs = [dom.convert(c, g.rep.dom) for c in g_coeffs]
G = DMP(dict(list(zip(g_monoms, g_coeffs))), dom, lev)
else:
G = g.rep.convert(dom)
else:
raise UnificationFailed("can't unify %s with %s" % (f, g))
cls = f.__class__
def per(rep, dom=dom, gens=gens, remove=None):
if remove is not None:
gens = gens[:remove] + gens[remove + 1:]
if not gens:
return dom.to_sympy(rep)
return cls.new(rep, *gens)
return dom, per, F, G
def per(f, rep, gens=None, remove=None):
"""
Create a Poly out of the given representation.
Examples
========
>>> from sympy import Poly, ZZ
>>> from sympy.abc import x, y
>>> from sympy.polys.polyclasses import DMP
>>> a = Poly(x**2 + 1)
>>> a.per(DMP([ZZ(1), ZZ(1)], ZZ), gens=[y])
Poly(y + 1, y, domain='ZZ')
"""
if gens is None:
gens = f.gens
if remove is not None:
gens = gens[:remove] + gens[remove + 1:]
if not gens:
return f.rep.dom.to_sympy(rep)
return f.__class__.new(rep, *gens)
def set_domain(f, domain):
"""Set the ground domain of ``f``. """
opt = options.build_options(f.gens, {'domain': domain})
return f.per(f.rep.convert(opt.domain))
def get_domain(f):
"""Get the ground domain of ``f``. """
return f.rep.dom
def set_modulus(f, modulus):
"""
Set the modulus of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(5*x**2 + 2*x - 1, x).set_modulus(2)
Poly(x**2 + 1, x, modulus=2)
"""
modulus = options.Modulus.preprocess(modulus)
return f.set_domain(FF(modulus))
def get_modulus(f):
"""
Get the modulus of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, modulus=2).get_modulus()
2
"""
domain = f.get_domain()
if domain.is_FiniteField:
return Integer(domain.characteristic())
else:
raise PolynomialError("not a polynomial over a Galois field")
def _eval_subs(f, old, new):
"""Internal implementation of :func:`subs`. """
if old in f.gens:
if new.is_number:
return f.eval(old, new)
else:
try:
return f.replace(old, new)
except PolynomialError:
pass
return f.as_expr().subs(old, new)
def exclude(f):
"""
Remove unnecessary generators from ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import a, b, c, d, x
>>> Poly(a + x, a, b, c, d, x).exclude()
Poly(a + x, a, x, domain='ZZ')
"""
J, new = f.rep.exclude()
gens = []
for j in range(len(f.gens)):
if j not in J:
gens.append(f.gens[j])
return f.per(new, gens=gens)
def replace(f, x, y=None, *_ignore):
# XXX this does not match Basic's signature
"""
Replace ``x`` with ``y`` in generators list.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 1, x).replace(x, y)
Poly(y**2 + 1, y, domain='ZZ')
"""
if y is None:
if f.is_univariate:
x, y = f.gen, x
else:
raise PolynomialError(
"syntax supported only in univariate case")
if x == y or x not in f.gens:
return f
if x in f.gens and y not in f.gens:
dom = f.get_domain()
if not dom.is_Composite or y not in dom.symbols:
gens = list(f.gens)
gens[gens.index(x)] = y
return f.per(f.rep, gens=gens)
raise PolynomialError("can't replace %s with %s in %s" % (x, y, f))
def match(f, *args, **kwargs):
"""Match expression from Poly. See Basic.match()"""
return f.as_expr().match(*args, **kwargs)
def reorder(f, *gens, **args):
"""
Efficiently apply new order of generators.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x*y**2, x, y).reorder(y, x)
Poly(y**2*x + x**2, y, x, domain='ZZ')
"""
opt = options.Options((), args)
if not gens:
gens = _sort_gens(f.gens, opt=opt)
elif set(f.gens) != set(gens):
raise PolynomialError(
"generators list can differ only up to order of elements")
rep = dict(list(zip(*_dict_reorder(f.rep.to_dict(), f.gens, gens))))
return f.per(DMP(rep, f.rep.dom, len(gens) - 1), gens=gens)
def ltrim(f, gen):
"""
Remove dummy generators from ``f`` that are to the left of
specified ``gen`` in the generators as ordered. When ``gen``
is an integer, it refers to the generator located at that
position within the tuple of generators of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(y**2 + y*z**2, x, y, z).ltrim(y)
Poly(y**2 + y*z**2, y, z, domain='ZZ')
>>> Poly(z, x, y, z).ltrim(-1)
Poly(z, z, domain='ZZ')
"""
rep = f.as_dict(native=True)
j = f._gen_to_level(gen)
terms = {}
for monom, coeff in rep.items():
if any(monom[:j]):
# some generator is used in the portion to be trimmed
raise PolynomialError("can't left trim %s" % f)
terms[monom[j:]] = coeff
gens = f.gens[j:]
return f.new(DMP.from_dict(terms, len(gens) - 1, f.rep.dom), *gens)
def has_only_gens(f, *gens):
"""
Return ``True`` if ``Poly(f, *gens)`` retains ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(x*y + 1, x, y, z).has_only_gens(x, y)
True
>>> Poly(x*y + z, x, y, z).has_only_gens(x, y)
False
"""
indices = set()
for gen in gens:
try:
index = f.gens.index(gen)
except ValueError:
raise GeneratorsError(
"%s doesn't have %s as generator" % (f, gen))
else:
indices.add(index)
for monom in f.monoms():
for i, elt in enumerate(monom):
if i not in indices and elt:
return False
return True
def to_ring(f):
"""
Make the ground domain a ring.
Examples
========
>>> from sympy import Poly, QQ
>>> from sympy.abc import x
>>> Poly(x**2 + 1, domain=QQ).to_ring()
Poly(x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'to_ring'):
result = f.rep.to_ring()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_ring')
return f.per(result)
def to_field(f):
"""
Make the ground domain a field.
Examples
========
>>> from sympy import Poly, ZZ
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x, domain=ZZ).to_field()
Poly(x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'to_field'):
result = f.rep.to_field()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_field')
return f.per(result)
def to_exact(f):
"""
Make the ground domain exact.
Examples
========
>>> from sympy import Poly, RR
>>> from sympy.abc import x
>>> Poly(x**2 + 1.0, x, domain=RR).to_exact()
Poly(x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'to_exact'):
result = f.rep.to_exact()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_exact')
return f.per(result)
def retract(f, field=None):
"""
Recalculate the ground domain of a polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(x**2 + 1, x, domain='QQ[y]')
>>> f
Poly(x**2 + 1, x, domain='QQ[y]')
>>> f.retract()
Poly(x**2 + 1, x, domain='ZZ')
>>> f.retract(field=True)
Poly(x**2 + 1, x, domain='QQ')
"""
dom, rep = construct_domain(f.as_dict(zero=True),
field=field, composite=f.domain.is_Composite or None)
return f.from_dict(rep, f.gens, domain=dom)
def slice(f, x, m, n=None):
"""Take a continuous subsequence of terms of ``f``. """
if n is None:
j, m, n = 0, x, m
else:
j = f._gen_to_level(x)
m, n = int(m), int(n)
if hasattr(f.rep, 'slice'):
result = f.rep.slice(m, n, j)
else: # pragma: no cover
raise OperationNotSupported(f, 'slice')
return f.per(result)
def coeffs(f, order=None):
"""
Returns all non-zero coefficients from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x + 3, x).coeffs()
[1, 2, 3]
See Also
========
all_coeffs
coeff_monomial
nth
"""
return [f.rep.dom.to_sympy(c) for c in f.rep.coeffs(order=order)]
def monoms(f, order=None):
"""
Returns all non-zero monomials from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 + x*y + 3*y, x, y).monoms()
[(2, 0), (1, 2), (1, 1), (0, 1)]
See Also
========
all_monoms
"""
return f.rep.monoms(order=order)
def terms(f, order=None):
"""
Returns all non-zero terms from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 + x*y + 3*y, x, y).terms()
[((2, 0), 1), ((1, 2), 2), ((1, 1), 1), ((0, 1), 3)]
See Also
========
all_terms
"""
return [(m, f.rep.dom.to_sympy(c)) for m, c in f.rep.terms(order=order)]
def all_coeffs(f):
"""
Returns all coefficients from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_coeffs()
[1, 0, 2, -1]
"""
return [f.rep.dom.to_sympy(c) for c in f.rep.all_coeffs()]
def all_monoms(f):
"""
Returns all monomials from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_monoms()
[(3,), (2,), (1,), (0,)]
See Also
========
all_terms
"""
return f.rep.all_monoms()
def all_terms(f):
"""
Returns all terms from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_terms()
[((3,), 1), ((2,), 0), ((1,), 2), ((0,), -1)]
"""
return [(m, f.rep.dom.to_sympy(c)) for m, c in f.rep.all_terms()]
def termwise(f, func, *gens, **args):
"""
Apply a function to all terms of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> def func(k, coeff):
... k = k[0]
... return coeff//10**(2-k)
>>> Poly(x**2 + 20*x + 400).termwise(func)
Poly(x**2 + 2*x + 4, x, domain='ZZ')
"""
terms = {}
for monom, coeff in f.terms():
result = func(monom, coeff)
if isinstance(result, tuple):
monom, coeff = result
else:
coeff = result
if coeff:
if monom not in terms:
terms[monom] = coeff
else:
raise PolynomialError(
"%s monomial was generated twice" % monom)
return f.from_dict(terms, *(gens or f.gens), **args)
def length(f):
"""
Returns the number of non-zero terms in ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 2*x - 1).length()
3
"""
return len(f.as_dict())
def as_dict(f, native=False, zero=False):
"""
Switch to a ``dict`` representation.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 - y, x, y).as_dict()
{(0, 1): -1, (1, 2): 2, (2, 0): 1}
"""
if native:
return f.rep.to_dict(zero=zero)
else:
return f.rep.to_sympy_dict(zero=zero)
def as_list(f, native=False):
"""Switch to a ``list`` representation. """
if native:
return f.rep.to_list()
else:
return f.rep.to_sympy_list()
def as_expr(f, *gens):
"""
Convert a Poly instance to an Expr instance.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2 + 2*x*y**2 - y, x, y)
>>> f.as_expr()
x**2 + 2*x*y**2 - y
>>> f.as_expr({x: 5})
10*y**2 - y + 25
>>> f.as_expr(5, 6)
379
"""
if not gens:
return f.expr
if len(gens) == 1 and isinstance(gens[0], dict):
mapping = gens[0]
gens = list(f.gens)
for gen, value in mapping.items():
try:
index = gens.index(gen)
except ValueError:
raise GeneratorsError(
"%s doesn't have %s as generator" % (f, gen))
else:
gens[index] = value
return basic_from_dict(f.rep.to_sympy_dict(), *gens)
def as_poly(self, *gens, **args):
"""Converts ``self`` to a polynomial or returns ``None``.
>>> from sympy import sin
>>> from sympy.abc import x, y
>>> print((x**2 + x*y).as_poly())
Poly(x**2 + x*y, x, y, domain='ZZ')
>>> print((x**2 + x*y).as_poly(x, y))
Poly(x**2 + x*y, x, y, domain='ZZ')
>>> print((x**2 + sin(y)).as_poly(x, y))
None
"""
try:
poly = Poly(self, *gens, **args)
if not poly.is_Poly:
return None
else:
return poly
except PolynomialError:
return None
def lift(f):
"""
Convert algebraic coefficients to rationals.
Examples
========
>>> from sympy import Poly, I
>>> from sympy.abc import x
>>> Poly(x**2 + I*x + 1, x, extension=I).lift()
Poly(x**4 + 3*x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'lift'):
result = f.rep.lift()
else: # pragma: no cover
raise OperationNotSupported(f, 'lift')
return f.per(result)
def deflate(f):
"""
Reduce degree of ``f`` by mapping ``x_i**m`` to ``y_i``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**6*y**2 + x**3 + 1, x, y).deflate()
((3, 2), Poly(x**2*y + x + 1, x, y, domain='ZZ'))
"""
if hasattr(f.rep, 'deflate'):
J, result = f.rep.deflate()
else: # pragma: no cover
raise OperationNotSupported(f, 'deflate')
return J, f.per(result)
def inject(f, front=False):
"""
Inject ground domain generators into ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2*y + x*y**3 + x*y + 1, x)
>>> f.inject()
Poly(x**2*y + x*y**3 + x*y + 1, x, y, domain='ZZ')
>>> f.inject(front=True)
Poly(y**3*x + y*x**2 + y*x + 1, y, x, domain='ZZ')
"""
dom = f.rep.dom
if dom.is_Numerical:
return f
elif not dom.is_Poly:
raise DomainError("can't inject generators over %s" % dom)
if hasattr(f.rep, 'inject'):
result = f.rep.inject(front=front)
else: # pragma: no cover
raise OperationNotSupported(f, 'inject')
if front:
gens = dom.symbols + f.gens
else:
gens = f.gens + dom.symbols
return f.new(result, *gens)
def eject(f, *gens):
"""
Eject selected generators into the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2*y + x*y**3 + x*y + 1, x, y)
>>> f.eject(x)
Poly(x*y**3 + (x**2 + x)*y + 1, y, domain='ZZ[x]')
>>> f.eject(y)
Poly(y*x**2 + (y**3 + y)*x + 1, x, domain='ZZ[y]')
"""
dom = f.rep.dom
if not dom.is_Numerical:
raise DomainError("can't eject generators over %s" % dom)
k = len(gens)
if f.gens[:k] == gens:
_gens, front = f.gens[k:], True
elif f.gens[-k:] == gens:
_gens, front = f.gens[:-k], False
else:
raise NotImplementedError(
"can only eject front or back generators")
dom = dom.inject(*gens)
if hasattr(f.rep, 'eject'):
result = f.rep.eject(dom, front=front)
else: # pragma: no cover
raise OperationNotSupported(f, 'eject')
return f.new(result, *_gens)
def terms_gcd(f):
"""
Remove GCD of terms from the polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**6*y**2 + x**3*y, x, y).terms_gcd()
((3, 1), Poly(x**3*y + 1, x, y, domain='ZZ'))
"""
if hasattr(f.rep, 'terms_gcd'):
J, result = f.rep.terms_gcd()
else: # pragma: no cover
raise OperationNotSupported(f, 'terms_gcd')
return J, f.per(result)
def add_ground(f, coeff):
"""
Add an element of the ground domain to ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 1).add_ground(2)
Poly(x + 3, x, domain='ZZ')
"""
if hasattr(f.rep, 'add_ground'):
result = f.rep.add_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'add_ground')
return f.per(result)
def sub_ground(f, coeff):
"""
Subtract an element of the ground domain from ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 1).sub_ground(2)
Poly(x - 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'sub_ground'):
result = f.rep.sub_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'sub_ground')
return f.per(result)
def mul_ground(f, coeff):
"""
Multiply ``f`` by an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 1).mul_ground(2)
Poly(2*x + 2, x, domain='ZZ')
"""
if hasattr(f.rep, 'mul_ground'):
result = f.rep.mul_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'mul_ground')
return f.per(result)
def quo_ground(f, coeff):
"""
Quotient of ``f`` by an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x + 4).quo_ground(2)
Poly(x + 2, x, domain='ZZ')
>>> Poly(2*x + 3).quo_ground(2)
Poly(x + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'quo_ground'):
result = f.rep.quo_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'quo_ground')
return f.per(result)
def exquo_ground(f, coeff):
"""
Exact quotient of ``f`` by an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x + 4).exquo_ground(2)
Poly(x + 2, x, domain='ZZ')
>>> Poly(2*x + 3).exquo_ground(2)
Traceback (most recent call last):
...
ExactQuotientFailed: 2 does not divide 3 in ZZ
"""
if hasattr(f.rep, 'exquo_ground'):
result = f.rep.exquo_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'exquo_ground')
return f.per(result)
def abs(f):
"""
Make all coefficients in ``f`` positive.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).abs()
Poly(x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'abs'):
result = f.rep.abs()
else: # pragma: no cover
raise OperationNotSupported(f, 'abs')
return f.per(result)
def neg(f):
"""
Negate all coefficients in ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).neg()
Poly(-x**2 + 1, x, domain='ZZ')
>>> -Poly(x**2 - 1, x)
Poly(-x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'neg'):
result = f.rep.neg()
else: # pragma: no cover
raise OperationNotSupported(f, 'neg')
return f.per(result)
def add(f, g):
"""
Add two polynomials ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).add(Poly(x - 2, x))
Poly(x**2 + x - 1, x, domain='ZZ')
>>> Poly(x**2 + 1, x) + Poly(x - 2, x)
Poly(x**2 + x - 1, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.add_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'add'):
result = F.add(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'add')
return per(result)
def sub(f, g):
"""
Subtract two polynomials ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).sub(Poly(x - 2, x))
Poly(x**2 - x + 3, x, domain='ZZ')
>>> Poly(x**2 + 1, x) - Poly(x - 2, x)
Poly(x**2 - x + 3, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.sub_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'sub'):
result = F.sub(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'sub')
return per(result)
def mul(f, g):
"""
Multiply two polynomials ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).mul(Poly(x - 2, x))
Poly(x**3 - 2*x**2 + x - 2, x, domain='ZZ')
>>> Poly(x**2 + 1, x)*Poly(x - 2, x)
Poly(x**3 - 2*x**2 + x - 2, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.mul_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'mul'):
result = F.mul(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'mul')
return per(result)
def sqr(f):
"""
Square a polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x - 2, x).sqr()
Poly(x**2 - 4*x + 4, x, domain='ZZ')
>>> Poly(x - 2, x)**2
Poly(x**2 - 4*x + 4, x, domain='ZZ')
"""
if hasattr(f.rep, 'sqr'):
result = f.rep.sqr()
else: # pragma: no cover
raise OperationNotSupported(f, 'sqr')
return f.per(result)
def pow(f, n):
"""
Raise ``f`` to a non-negative power ``n``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x - 2, x).pow(3)
Poly(x**3 - 6*x**2 + 12*x - 8, x, domain='ZZ')
>>> Poly(x - 2, x)**3
Poly(x**3 - 6*x**2 + 12*x - 8, x, domain='ZZ')
"""
n = int(n)
if hasattr(f.rep, 'pow'):
result = f.rep.pow(n)
else: # pragma: no cover
raise OperationNotSupported(f, 'pow')
return f.per(result)
def pdiv(f, g):
"""
Polynomial pseudo-division of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).pdiv(Poly(2*x - 4, x))
(Poly(2*x + 4, x, domain='ZZ'), Poly(20, x, domain='ZZ'))
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pdiv'):
q, r = F.pdiv(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'pdiv')
return per(q), per(r)
def prem(f, g):
"""
Polynomial pseudo-remainder of ``f`` by ``g``.
Caveat: The function prem(f, g, x) can be safely used to compute
in Z[x] _only_ subresultant polynomial remainder sequences (prs's).
To safely compute Euclidean and Sturmian prs's in Z[x]
employ any one of the corresponding functions found in
the module sympy.polys.subresultants_qq_zz. The functions
in the module with suffix _pg compute prs's in Z[x] employing
rem(f, g, x), whereas the functions with suffix _amv
compute prs's in Z[x] employing rem_z(f, g, x).
The function rem_z(f, g, x) differs from prem(f, g, x) in that
to compute the remainder polynomials in Z[x] it premultiplies
the dividend by the absolute value of the leading coefficient
of the divisor raised to the power degree(f, x) - degree(g, x) + 1.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).prem(Poly(2*x - 4, x))
Poly(20, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'prem'):
result = F.prem(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'prem')
return per(result)
def pquo(f, g):
"""
Polynomial pseudo-quotient of ``f`` by ``g``.
See the Caveat note in the function prem(f, g).
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).pquo(Poly(2*x - 4, x))
Poly(2*x + 4, x, domain='ZZ')
>>> Poly(x**2 - 1, x).pquo(Poly(2*x - 2, x))
Poly(2*x + 2, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pquo'):
result = F.pquo(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'pquo')
return per(result)
def pexquo(f, g):
"""
Polynomial exact pseudo-quotient of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).pexquo(Poly(2*x - 2, x))
Poly(2*x + 2, x, domain='ZZ')
>>> Poly(x**2 + 1, x).pexquo(Poly(2*x - 4, x))
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pexquo'):
try:
result = F.pexquo(G)
except ExactQuotientFailed as exc:
raise exc.new(f.as_expr(), g.as_expr())
else: # pragma: no cover
raise OperationNotSupported(f, 'pexquo')
return per(result)
def div(f, g, auto=True):
"""
Polynomial division with remainder of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).div(Poly(2*x - 4, x))
(Poly(1/2*x + 1, x, domain='QQ'), Poly(5, x, domain='QQ'))
>>> Poly(x**2 + 1, x).div(Poly(2*x - 4, x), auto=False)
(Poly(0, x, domain='ZZ'), Poly(x**2 + 1, x, domain='ZZ'))
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.is_Ring and not dom.is_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'div'):
q, r = F.div(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'div')
if retract:
try:
Q, R = q.to_ring(), r.to_ring()
except CoercionFailed:
pass
else:
q, r = Q, R
return per(q), per(r)
def rem(f, g, auto=True):
"""
Computes the polynomial remainder of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).rem(Poly(2*x - 4, x))
Poly(5, x, domain='ZZ')
>>> Poly(x**2 + 1, x).rem(Poly(2*x - 4, x), auto=False)
Poly(x**2 + 1, x, domain='ZZ')
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.is_Ring and not dom.is_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'rem'):
r = F.rem(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'rem')
if retract:
try:
r = r.to_ring()
except CoercionFailed:
pass
return per(r)
def quo(f, g, auto=True):
"""
Computes polynomial quotient of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).quo(Poly(2*x - 4, x))
Poly(1/2*x + 1, x, domain='QQ')
>>> Poly(x**2 - 1, x).quo(Poly(x - 1, x))
Poly(x + 1, x, domain='ZZ')
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.is_Ring and not dom.is_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'quo'):
q = F.quo(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'quo')
if retract:
try:
q = q.to_ring()
except CoercionFailed:
pass
return per(q)
def exquo(f, g, auto=True):
"""
Computes polynomial exact quotient of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).exquo(Poly(x - 1, x))
Poly(x + 1, x, domain='ZZ')
>>> Poly(x**2 + 1, x).exquo(Poly(2*x - 4, x))
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.is_Ring and not dom.is_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'exquo'):
try:
q = F.exquo(G)
except ExactQuotientFailed as exc:
raise exc.new(f.as_expr(), g.as_expr())
else: # pragma: no cover
raise OperationNotSupported(f, 'exquo')
if retract:
try:
q = q.to_ring()
except CoercionFailed:
pass
return per(q)
def _gen_to_level(f, gen):
"""Returns level associated with the given generator. """
if isinstance(gen, int):
length = len(f.gens)
if -length <= gen < length:
if gen < 0:
return length + gen
else:
return gen
else:
raise PolynomialError("-%s <= gen < %s expected, got %s" %
(length, length, gen))
else:
try:
return f.gens.index(sympify(gen))
except ValueError:
raise PolynomialError(
"a valid generator expected, got %s" % gen)
def degree(f, gen=0):
"""
Returns degree of ``f`` in ``x_j``.
The degree of 0 is negative infinity.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).degree()
2
>>> Poly(x**2 + y*x + y, x, y).degree(y)
1
>>> Poly(0, x).degree()
-oo
"""
j = f._gen_to_level(gen)
if hasattr(f.rep, 'degree'):
return f.rep.degree(j)
else: # pragma: no cover
raise OperationNotSupported(f, 'degree')
def degree_list(f):
"""
Returns a list of degrees of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).degree_list()
(2, 1)
"""
if hasattr(f.rep, 'degree_list'):
return f.rep.degree_list()
else: # pragma: no cover
raise OperationNotSupported(f, 'degree_list')
def total_degree(f):
"""
Returns the total degree of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).total_degree()
2
>>> Poly(x + y**5, x, y).total_degree()
5
"""
if hasattr(f.rep, 'total_degree'):
return f.rep.total_degree()
else: # pragma: no cover
raise OperationNotSupported(f, 'total_degree')
def homogenize(f, s):
"""
Returns the homogeneous polynomial of ``f``.
A homogeneous polynomial is a polynomial all of whose monomials with
non-zero coefficients have the same total degree. If you only
want to check if a polynomial is homogeneous, then use
:func:`Poly.is_homogeneous`. If you want not only to check if a
polynomial is homogeneous but also compute its homogeneous order,
then use :func:`Poly.homogeneous_order`.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> f = Poly(x**5 + 2*x**2*y**2 + 9*x*y**3)
>>> f.homogenize(z)
Poly(x**5 + 2*x**2*y**2*z + 9*x*y**3*z, x, y, z, domain='ZZ')
"""
if not isinstance(s, Symbol):
raise TypeError("``Symbol`` expected, got %s" % type(s))
if s in f.gens:
i = f.gens.index(s)
gens = f.gens
else:
i = len(f.gens)
gens = f.gens + (s,)
if hasattr(f.rep, 'homogenize'):
return f.per(f.rep.homogenize(i), gens=gens)
raise OperationNotSupported(f, 'homogenize')
def homogeneous_order(f):
"""
Returns the homogeneous order of ``f``.
A homogeneous polynomial is a polynomial all of whose monomials with
non-zero coefficients have the same total degree. This degree is
the homogeneous order of ``f``. If you only want to check if a
polynomial is homogeneous, then use :func:`Poly.is_homogeneous`.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**5 + 2*x**3*y**2 + 9*x*y**4)
>>> f.homogeneous_order()
5
"""
if hasattr(f.rep, 'homogeneous_order'):
return f.rep.homogeneous_order()
else: # pragma: no cover
raise OperationNotSupported(f, 'homogeneous_order')
def LC(f, order=None):
"""
Returns the leading coefficient of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(4*x**3 + 2*x**2 + 3*x, x).LC()
4
"""
if order is not None:
return f.coeffs(order)[0]
if hasattr(f.rep, 'LC'):
result = f.rep.LC()
else: # pragma: no cover
raise OperationNotSupported(f, 'LC')
return f.rep.dom.to_sympy(result)
def TC(f):
"""
Returns the trailing coefficient of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x**2 + 3*x, x).TC()
0
"""
if hasattr(f.rep, 'TC'):
result = f.rep.TC()
else: # pragma: no cover
raise OperationNotSupported(f, 'TC')
return f.rep.dom.to_sympy(result)
def EC(f, order=None):
"""
Returns the last non-zero coefficient of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x**2 + 3*x, x).EC()
3
"""
if hasattr(f.rep, 'coeffs'):
return f.coeffs(order)[-1]
else: # pragma: no cover
raise OperationNotSupported(f, 'EC')
def coeff_monomial(f, monom):
"""
Returns the coefficient of ``monom`` in ``f`` if there, else None.
Examples
========
>>> from sympy import Poly, exp
>>> from sympy.abc import x, y
>>> p = Poly(24*x*y*exp(8) + 23*x, x, y)
>>> p.coeff_monomial(x)
23
>>> p.coeff_monomial(y)
0
>>> p.coeff_monomial(x*y)
24*exp(8)
Note that ``Expr.coeff()`` behaves differently, collecting terms
if possible; the Poly must be converted to an Expr to use that
method, however:
>>> p.as_expr().coeff(x)
24*y*exp(8) + 23
>>> p.as_expr().coeff(y)
24*x*exp(8)
>>> p.as_expr().coeff(x*y)
24*exp(8)
See Also
========
nth: more efficient query using exponents of the monomial's generators
"""
return f.nth(*Monomial(monom, f.gens).exponents)
def nth(f, *N):
"""
Returns the ``n``-th coefficient of ``f`` where ``N`` are the
exponents of the generators in the term of interest.
Examples
========
>>> from sympy import Poly, sqrt
>>> from sympy.abc import x, y
>>> Poly(x**3 + 2*x**2 + 3*x, x).nth(2)
2
>>> Poly(x**3 + 2*x*y**2 + y**2, x, y).nth(1, 2)
2
>>> Poly(4*sqrt(x)*y)
Poly(4*y*(sqrt(x)), y, sqrt(x), domain='ZZ')
>>> _.nth(1, 1)
4
See Also
========
coeff_monomial
"""
if hasattr(f.rep, 'nth'):
if len(N) != len(f.gens):
raise ValueError('exponent of each generator must be specified')
result = f.rep.nth(*list(map(int, N)))
else: # pragma: no cover
raise OperationNotSupported(f, 'nth')
return f.rep.dom.to_sympy(result)
def coeff(f, x, n=1, right=False):
# the semantics of coeff_monomial and Expr.coeff are different;
# if someone is working with a Poly, they should be aware of the
# differences and choose the method best suited for the query.
# Alternatively, a pure-polys method could be written here but
# at this time the ``right`` keyword would be ignored because Poly
# doesn't work with non-commutatives.
raise NotImplementedError(
'Either convert to Expr with `as_expr` method '
'to use Expr\'s coeff method or else use the '
'`coeff_monomial` method of Polys.')
def LM(f, order=None):
"""
Returns the leading monomial of ``f``.
The leading monomial signifies the monomial having
the highest power of the principal generator in the
expression ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).LM()
x**2*y**0
"""
return Monomial(f.monoms(order)[0], f.gens)
def EM(f, order=None):
"""
Returns the last non-zero monomial of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).EM()
x**0*y**1
"""
return Monomial(f.monoms(order)[-1], f.gens)
def LT(f, order=None):
"""
Returns the leading term of ``f``.
The leading term signifies the term having
the highest power of the principal generator in the
expression ``f``, along with its coefficient.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).LT()
(x**2*y**0, 4)
"""
monom, coeff = f.terms(order)[0]
return Monomial(monom, f.gens), coeff
def ET(f, order=None):
"""
Returns the last non-zero term of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).ET()
(x**0*y**1, 3)
"""
monom, coeff = f.terms(order)[-1]
return Monomial(monom, f.gens), coeff
def max_norm(f):
"""
Returns maximum norm of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(-x**2 + 2*x - 3, x).max_norm()
3
"""
if hasattr(f.rep, 'max_norm'):
result = f.rep.max_norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'max_norm')
return f.rep.dom.to_sympy(result)
def l1_norm(f):
"""
Returns l1 norm of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(-x**2 + 2*x - 3, x).l1_norm()
6
"""
if hasattr(f.rep, 'l1_norm'):
result = f.rep.l1_norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'l1_norm')
return f.rep.dom.to_sympy(result)
def clear_denoms(self, convert=False):
"""
Clear denominators, but keep the ground domain.
Examples
========
>>> from sympy import Poly, S, QQ
>>> from sympy.abc import x
>>> f = Poly(x/2 + S(1)/3, x, domain=QQ)
>>> f.clear_denoms()
(6, Poly(3*x + 2, x, domain='QQ'))
>>> f.clear_denoms(convert=True)
(6, Poly(3*x + 2, x, domain='ZZ'))
"""
f = self
if not f.rep.dom.is_Field:
return S.One, f
dom = f.get_domain()
if dom.has_assoc_Ring:
dom = f.rep.dom.get_ring()
if hasattr(f.rep, 'clear_denoms'):
coeff, result = f.rep.clear_denoms()
else: # pragma: no cover
raise OperationNotSupported(f, 'clear_denoms')
coeff, f = dom.to_sympy(coeff), f.per(result)
if not convert or not dom.has_assoc_Ring:
return coeff, f
else:
return coeff, f.to_ring()
def rat_clear_denoms(self, g):
"""
Clear denominators in a rational function ``f/g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2/y + 1, x)
>>> g = Poly(x**3 + y, x)
>>> p, q = f.rat_clear_denoms(g)
>>> p
Poly(x**2 + y, x, domain='ZZ[y]')
>>> q
Poly(y*x**3 + y**2, x, domain='ZZ[y]')
"""
f = self
dom, per, f, g = f._unify(g)
f = per(f)
g = per(g)
if not (dom.is_Field and dom.has_assoc_Ring):
return f, g
a, f = f.clear_denoms(convert=True)
b, g = g.clear_denoms(convert=True)
f = f.mul_ground(b)
g = g.mul_ground(a)
return f, g
def integrate(self, *specs, **args):
"""
Computes indefinite integral of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x + 1, x).integrate()
Poly(1/3*x**3 + x**2 + x, x, domain='QQ')
>>> Poly(x*y**2 + x, x, y).integrate((0, 1), (1, 0))
Poly(1/2*x**2*y**2 + 1/2*x**2, x, y, domain='QQ')
"""
f = self
if args.get('auto', True) and f.rep.dom.is_Ring:
f = f.to_field()
if hasattr(f.rep, 'integrate'):
if not specs:
return f.per(f.rep.integrate(m=1))
rep = f.rep
for spec in specs:
if type(spec) is tuple:
gen, m = spec
else:
gen, m = spec, 1
rep = rep.integrate(int(m), f._gen_to_level(gen))
return f.per(rep)
else: # pragma: no cover
raise OperationNotSupported(f, 'integrate')
def diff(f, *specs, **kwargs):
"""
Computes partial derivative of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x + 1, x).diff()
Poly(2*x + 2, x, domain='ZZ')
>>> Poly(x*y**2 + x, x, y).diff((0, 0), (1, 1))
Poly(2*x*y, x, y, domain='ZZ')
"""
if not kwargs.get('evaluate', True):
return Derivative(f, *specs, **kwargs)
if hasattr(f.rep, 'diff'):
if not specs:
return f.per(f.rep.diff(m=1))
rep = f.rep
for spec in specs:
if type(spec) is tuple:
gen, m = spec
else:
gen, m = spec, 1
rep = rep.diff(int(m), f._gen_to_level(gen))
return f.per(rep)
else: # pragma: no cover
raise OperationNotSupported(f, 'diff')
_eval_derivative = diff
def eval(self, x, a=None, auto=True):
"""
Evaluate ``f`` at ``a`` in the given variable.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(x**2 + 2*x + 3, x).eval(2)
11
>>> Poly(2*x*y + 3*x + y + 2, x, y).eval(x, 2)
Poly(5*y + 8, y, domain='ZZ')
>>> f = Poly(2*x*y + 3*x + y + 2*z, x, y, z)
>>> f.eval({x: 2})
Poly(5*y + 2*z + 6, y, z, domain='ZZ')
>>> f.eval({x: 2, y: 5})
Poly(2*z + 31, z, domain='ZZ')
>>> f.eval({x: 2, y: 5, z: 7})
45
>>> f.eval((2, 5))
Poly(2*z + 31, z, domain='ZZ')
>>> f(2, 5)
Poly(2*z + 31, z, domain='ZZ')
"""
f = self
if a is None:
if isinstance(x, dict):
mapping = x
for gen, value in mapping.items():
f = f.eval(gen, value)
return f
elif isinstance(x, (tuple, list)):
values = x
if len(values) > len(f.gens):
raise ValueError("too many values provided")
for gen, value in zip(f.gens, values):
f = f.eval(gen, value)
return f
else:
j, a = 0, x
else:
j = f._gen_to_level(x)
if not hasattr(f.rep, 'eval'): # pragma: no cover
raise OperationNotSupported(f, 'eval')
try:
result = f.rep.eval(a, j)
except CoercionFailed:
if not auto:
raise DomainError("can't evaluate at %s in %s" % (a, f.rep.dom))
else:
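# The value does not fit into f's current ground domain (e.g. a rational
# value for a ZZ polynomial): build a domain for it, unify it with f's
# domain, convert both, and evaluate again.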
a_domain, [a] = construct_domain([a])
new_domain = f.get_domain().unify_with_symbols(a_domain, f.gens)
f = f.set_domain(new_domain)
a = new_domain.convert(a, a_domain)
result = f.rep.eval(a, j)
return f.per(result, remove=j)
def __call__(f, *values):
"""
Evaluate ``f`` at the given values.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> f = Poly(2*x*y + 3*x + y + 2*z, x, y, z)
>>> f(2)
Poly(5*y + 2*z + 6, y, z, domain='ZZ')
>>> f(2, 5)
Poly(2*z + 31, z, domain='ZZ')
>>> f(2, 5, 7)
45
"""
return f.eval(values)
def half_gcdex(f, g, auto=True):
"""
Half extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
>>> g = x**3 + x**2 - 4*x - 4
>>> Poly(f).half_gcdex(Poly(g))
(Poly(-1/5*x + 3/5, x, domain='QQ'), Poly(x + 1, x, domain='QQ'))
"""
dom, per, F, G = f._unify(g)
if auto and dom.is_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'half_gcdex'):
s, h = F.half_gcdex(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'half_gcdex')
return per(s), per(h)
def gcdex(f, g, auto=True):
"""
Extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
>>> g = x**3 + x**2 - 4*x - 4
>>> Poly(f).gcdex(Poly(g))
(Poly(-1/5*x + 3/5, x, domain='QQ'),
Poly(1/5*x**2 - 6/5*x + 2, x, domain='QQ'),
Poly(x + 1, x, domain='QQ'))
"""
dom, per, F, G = f._unify(g)
if auto and dom.is_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'gcdex'):
s, t, h = F.gcdex(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'gcdex')
return per(s), per(t), per(h)
def invert(f, g, auto=True):
"""
Invert ``f`` modulo ``g`` when possible.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).invert(Poly(2*x - 1, x))
Poly(-4/3, x, domain='QQ')
>>> Poly(x**2 - 1, x).invert(Poly(x - 1, x))
Traceback (most recent call last):
...
NotInvertible: zero divisor
"""
dom, per, F, G = f._unify(g)
if auto and dom.is_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'invert'):
result = F.invert(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'invert')
return per(result)
def revert(f, n):
"""
Compute ``f**(-1)`` mod ``x**n``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(1, x).revert(2)
Poly(1, x, domain='ZZ')
>>> Poly(1 + x, x).revert(1)
Poly(1, x, domain='ZZ')
>>> Poly(x**2 - 1, x).revert(1)
Traceback (most recent call last):
...
NotReversible: only unity is reversible in a ring
>>> Poly(1/x, x).revert(1)
Traceback (most recent call last):
...
PolynomialError: 1/x contains an element of the generators set
"""
if hasattr(f.rep, 'revert'):
result = f.rep.revert(int(n))
else: # pragma: no cover
raise OperationNotSupported(f, 'revert')
return f.per(result)
def subresultants(f, g):
"""
Computes the subresultant PRS of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).subresultants(Poly(x**2 - 1, x))
[Poly(x**2 + 1, x, domain='ZZ'),
Poly(x**2 - 1, x, domain='ZZ'),
Poly(-2, x, domain='ZZ')]
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'subresultants'):
result = F.subresultants(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'subresultants')
return list(map(per, result))
def resultant(f, g, includePRS=False):
"""
Computes the resultant of ``f`` and ``g`` via PRS.
If includePRS=True, it includes the subresultant PRS in the result.
Because the PRS is used to calculate the resultant, this is more
efficient than calling :func:`subresultants` separately.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(x**2 + 1, x)
>>> f.resultant(Poly(x**2 - 1, x))
4
>>> f.resultant(Poly(x**2 - 1, x), includePRS=True)
(4, [Poly(x**2 + 1, x, domain='ZZ'), Poly(x**2 - 1, x, domain='ZZ'),
Poly(-2, x, domain='ZZ')])
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'resultant'):
if includePRS:
result, R = F.resultant(G, includePRS=includePRS)
else:
result = F.resultant(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'resultant')
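# The resultant eliminates the main generator, so drop it (remove=0) when
# wrapping the result; if no generators remain this yields a plain expression.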
if includePRS:
return (per(result, remove=0), list(map(per, R)))
return per(result, remove=0)
def discriminant(f):
"""
Computes the discriminant of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 2*x + 3, x).discriminant()
-8
"""
if hasattr(f.rep, 'discriminant'):
result = f.rep.discriminant()
else: # pragma: no cover
raise OperationNotSupported(f, 'discriminant')
return f.per(result, remove=0)
def dispersionset(f, g=None):
r"""Compute the *dispersion set* of two polynomials.
For two polynomials `f(x)` and `g(x)` with `\deg f > 0`
and `\deg g > 0` the dispersion set `\operatorname{J}(f, g)` is defined as:
.. math::
\operatorname{J}(f, g)
& := \{a \in \mathbb{N}_0 | \gcd(f(x), g(x+a)) \neq 1\} \\
& = \{a \in \mathbb{N}_0 | \deg \gcd(f(x), g(x+a)) \geq 1\}
For a single polynomial one defines `\operatorname{J}(f) := \operatorname{J}(f, f)`.
Examples
========
>>> from sympy import poly
>>> from sympy.polys.dispersion import dispersion, dispersionset
>>> from sympy.abc import x
Dispersion set and dispersion of a simple polynomial:
>>> fp = poly((x - 3)*(x + 3), x)
>>> sorted(dispersionset(fp))
[0, 6]
>>> dispersion(fp)
6
Note that the definition of the dispersion is not symmetric:
>>> fp = poly(x**4 - 3*x**2 + 1, x)
>>> gp = fp.shift(-3)
>>> sorted(dispersionset(fp, gp))
[2, 3, 4]
>>> dispersion(fp, gp)
4
>>> sorted(dispersionset(gp, fp))
[]
>>> dispersion(gp, fp)
-oo
Computing the dispersion also works over field extensions:
>>> from sympy import sqrt
>>> fp = poly(x**2 + sqrt(5)*x - 1, x, domain='QQ<sqrt(5)>')
>>> gp = poly(x**2 + (2 + sqrt(5))*x + sqrt(5), x, domain='QQ<sqrt(5)>')
>>> sorted(dispersionset(fp, gp))
[2]
>>> sorted(dispersionset(gp, fp))
[1, 4]
We can even perform the computations for polynomials
having symbolic coefficients:
>>> from sympy.abc import a
>>> fp = poly(4*x**4 + (4*a + 8)*x**3 + (a**2 + 6*a + 4)*x**2 + (a**2 + 2*a)*x, x)
>>> sorted(dispersionset(fp))
[0, 1]
See Also
========
dispersion
References
==========
1. [ManWright94]_
2. [Koepf98]_
3. [Abramov71]_
4. [Man93]_
"""
from sympy.polys.dispersion import dispersionset
return dispersionset(f, g)
def dispersion(f, g=None):
r"""Compute the *dispersion* of polynomials.
For two polynomials `f(x)` and `g(x)` with `\deg f > 0`
and `\deg g > 0` the dispersion `\operatorname{dis}(f, g)` is defined as:
.. math::
\operatorname{dis}(f, g)
& := \max\{ J(f,g) \cup \{0\} \} \\
& = \max\{ \{a \in \mathbb{N} | \gcd(f(x), g(x+a)) \neq 1\} \cup \{0\} \}
and for a single polynomial `\operatorname{dis}(f) := \operatorname{dis}(f, f)`.
Examples
========
>>> from sympy import poly
>>> from sympy.polys.dispersion import dispersion, dispersionset
>>> from sympy.abc import x
Dispersion set and dispersion of a simple polynomial:
>>> fp = poly((x - 3)*(x + 3), x)
>>> sorted(dispersionset(fp))
[0, 6]
>>> dispersion(fp)
6
Note that the definition of the dispersion is not symmetric:
>>> fp = poly(x**4 - 3*x**2 + 1, x)
>>> gp = fp.shift(-3)
>>> sorted(dispersionset(fp, gp))
[2, 3, 4]
>>> dispersion(fp, gp)
4
>>> sorted(dispersionset(gp, fp))
[]
>>> dispersion(gp, fp)
-oo
Computing the dispersion also works over field extensions:
>>> from sympy import sqrt
>>> fp = poly(x**2 + sqrt(5)*x - 1, x, domain='QQ<sqrt(5)>')
>>> gp = poly(x**2 + (2 + sqrt(5))*x + sqrt(5), x, domain='QQ<sqrt(5)>')
>>> sorted(dispersionset(fp, gp))
[2]
>>> sorted(dispersionset(gp, fp))
[1, 4]
We can even perform the computations for polynomials
having symbolic coefficients:
>>> from sympy.abc import a
>>> fp = poly(4*x**4 + (4*a + 8)*x**3 + (a**2 + 6*a + 4)*x**2 + (a**2 + 2*a)*x, x)
>>> sorted(dispersionset(fp))
[0, 1]
See Also
========
dispersionset
References
==========
1. [ManWright94]_
2. [Koepf98]_
3. [Abramov71]_
4. [Man93]_
"""
from sympy.polys.dispersion import dispersion
return dispersion(f, g)
def cofactors(f, g):
"""
Returns the GCD of ``f`` and ``g`` and their cofactors.
Returns polynomials ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, and
``cff = quo(f, h)`` and ``cfg = quo(g, h)`` are the so-called cofactors
of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).cofactors(Poly(x**2 - 3*x + 2, x))
(Poly(x - 1, x, domain='ZZ'),
Poly(x + 1, x, domain='ZZ'),
Poly(x - 2, x, domain='ZZ'))
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'cofactors'):
h, cff, cfg = F.cofactors(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'cofactors')
return per(h), per(cff), per(cfg)
def gcd(f, g):
"""
Returns the polynomial GCD of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).gcd(Poly(x**2 - 3*x + 2, x))
Poly(x - 1, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'gcd'):
result = F.gcd(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'gcd')
return per(result)
def lcm(f, g):
"""
Returns polynomial LCM of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).lcm(Poly(x**2 - 3*x + 2, x))
Poly(x**3 - 2*x**2 - x + 2, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'lcm'):
result = F.lcm(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'lcm')
return per(result)
def trunc(f, p):
"""
Reduce ``f`` modulo a constant ``p``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**3 + 3*x**2 + 5*x + 7, x).trunc(3)
Poly(-x**3 - x + 1, x, domain='ZZ')
"""
p = f.rep.dom.convert(p)
if hasattr(f.rep, 'trunc'):
result = f.rep.trunc(p)
else: # pragma: no cover
raise OperationNotSupported(f, 'trunc')
return f.per(result)
def monic(self, auto=True):
"""
Divides all coefficients by ``LC(f)``.
Examples
========
>>> from sympy import Poly, ZZ
>>> from sympy.abc import x
>>> Poly(3*x**2 + 6*x + 9, x, domain=ZZ).monic()
Poly(x**2 + 2*x + 3, x, domain='QQ')
>>> Poly(3*x**2 + 4*x + 2, x, domain=ZZ).monic()
Poly(x**2 + 4/3*x + 2/3, x, domain='QQ')
"""
f = self
if auto and f.rep.dom.is_Ring:
f = f.to_field()
if hasattr(f.rep, 'monic'):
result = f.rep.monic()
else: # pragma: no cover
raise OperationNotSupported(f, 'monic')
return f.per(result)
def content(f):
"""
Returns the GCD of polynomial coefficients.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(6*x**2 + 8*x + 12, x).content()
2
"""
if hasattr(f.rep, 'content'):
result = f.rep.content()
else: # pragma: no cover
raise OperationNotSupported(f, 'content')
return f.rep.dom.to_sympy(result)
def primitive(f):
"""
Returns the content and a primitive form of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**2 + 8*x + 12, x).primitive()
(2, Poly(x**2 + 4*x + 6, x, domain='ZZ'))
"""
if hasattr(f.rep, 'primitive'):
cont, result = f.rep.primitive()
else: # pragma: no cover
raise OperationNotSupported(f, 'primitive')
return f.rep.dom.to_sympy(cont), f.per(result)
def compose(f, g):
"""
Computes the functional composition of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + x, x).compose(Poly(x - 1, x))
Poly(x**2 - x, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'compose'):
result = F.compose(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'compose')
return per(result)
def decompose(f):
"""
Computes a functional decomposition of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**4 + 2*x**3 - x - 1, x, domain='ZZ').decompose()
[Poly(x**2 - x - 1, x, domain='ZZ'), Poly(x**2 + x, x, domain='ZZ')]
"""
if hasattr(f.rep, 'decompose'):
result = f.rep.decompose()
else: # pragma: no cover
raise OperationNotSupported(f, 'decompose')
return list(map(f.per, result))
def shift(f, a):
"""
Efficiently compute Taylor shift ``f(x + a)``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 2*x + 1, x).shift(2)
Poly(x**2 + 2*x + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'shift'):
result = f.rep.shift(a)
else: # pragma: no cover
raise OperationNotSupported(f, 'shift')
return f.per(result)
def transform(f, p, q):
"""
Efficiently evaluate the functional transformation ``q**n * f(p/q)``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 2*x + 1, x).transform(Poly(x + 1, x), Poly(x - 1, x))
Poly(4, x, domain='ZZ')
"""
P, Q = p.unify(q)
F, P = f.unify(P)
F, Q = F.unify(Q)
if hasattr(F.rep, 'transform'):
result = F.rep.transform(P.rep, Q.rep)
else: # pragma: no cover
raise OperationNotSupported(F, 'transform')
return F.per(result)
def sturm(self, auto=True):
"""
Computes the Sturm sequence of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 - 2*x**2 + x - 3, x).sturm()
[Poly(x**3 - 2*x**2 + x - 3, x, domain='QQ'),
Poly(3*x**2 - 4*x + 1, x, domain='QQ'),
Poly(2/9*x + 25/9, x, domain='QQ'),
Poly(-2079/4, x, domain='QQ')]
"""
f = self
if auto and f.rep.dom.is_Ring:
f = f.to_field()
if hasattr(f.rep, 'sturm'):
result = f.rep.sturm()
else: # pragma: no cover
raise OperationNotSupported(f, 'sturm')
return list(map(f.per, result))
def gff_list(f):
"""
Computes greatest factorial factorization of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**5 + 2*x**4 - x**3 - 2*x**2
>>> Poly(f).gff_list()
[(Poly(x, x, domain='ZZ'), 1), (Poly(x + 2, x, domain='ZZ'), 4)]
"""
if hasattr(f.rep, 'gff_list'):
result = f.rep.gff_list()
else: # pragma: no cover
raise OperationNotSupported(f, 'gff_list')
return [(f.per(g), k) for g, k in result]
def norm(f):
"""
Computes the product, ``Norm(f)``, of the conjugates of
a polynomial ``f`` defined over a number field ``K``.
Examples
========
>>> from sympy import Poly, sqrt
>>> from sympy.abc import x
>>> a, b = sqrt(2), sqrt(3)
A polynomial over a quadratic extension.
Two conjugates x - a and x + a.
>>> f = Poly(x - a, x, extension=a)
>>> f.norm()
Poly(x**2 - 2, x, domain='QQ')
A polynomial over a quartic extension.
Four conjugates x - a, x - a, x + a and x + a.
>>> f = Poly(x - a, x, extension=(a, b))
>>> f.norm()
Poly(x**4 - 4*x**2 + 4, x, domain='QQ')
"""
if hasattr(f.rep, 'norm'):
r = f.rep.norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'norm')
return f.per(r)
def sqf_norm(f):
"""
Computes square-free norm of ``f``.
Returns ``s``, ``g``, ``r``, such that ``g(x) = f(x - s*a)`` and
``r(x) = Norm(g(x))`` is a square-free polynomial over ``K``,
where ``a`` generates the algebraic extension of the ground domain ``K``.
Examples
========
>>> from sympy import Poly, sqrt
>>> from sympy.abc import x
>>> s, f, r = Poly(x**2 + 1, x, extension=[sqrt(3)]).sqf_norm()
>>> s
1
>>> f
Poly(x**2 - 2*sqrt(3)*x + 4, x, domain='QQ<sqrt(3)>')
>>> r
Poly(x**4 - 4*x**2 + 16, x, domain='QQ')
"""
if hasattr(f.rep, 'sqf_norm'):
s, g, r = f.rep.sqf_norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_norm')
return s, f.per(g), f.per(r)
def sqf_part(f):
"""
Computes square-free part of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 - 3*x - 2, x).sqf_part()
Poly(x**2 - x - 2, x, domain='ZZ')
"""
if hasattr(f.rep, 'sqf_part'):
result = f.rep.sqf_part()
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_part')
return f.per(result)
def sqf_list(f, all=False):
"""
Returns a list of square-free factors of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = 2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16
>>> Poly(f).sqf_list()
(2, [(Poly(x + 1, x, domain='ZZ'), 2),
(Poly(x + 2, x, domain='ZZ'), 3)])
>>> Poly(f).sqf_list(all=True)
(2, [(Poly(1, x, domain='ZZ'), 1),
(Poly(x + 1, x, domain='ZZ'), 2),
(Poly(x + 2, x, domain='ZZ'), 3)])
"""
if hasattr(f.rep, 'sqf_list'):
coeff, factors = f.rep.sqf_list(all)
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_list')
return f.rep.dom.to_sympy(coeff), [(f.per(g), k) for g, k in factors]
def sqf_list_include(f, all=False):
"""
Returns a list of square-free factors of ``f``.
Examples
========
>>> from sympy import Poly, expand
>>> from sympy.abc import x
>>> f = expand(2*(x + 1)**3*x**4)
>>> f
2*x**7 + 6*x**6 + 6*x**5 + 2*x**4
>>> Poly(f).sqf_list_include()
[(Poly(2, x, domain='ZZ'), 1),
(Poly(x + 1, x, domain='ZZ'), 3),
(Poly(x, x, domain='ZZ'), 4)]
>>> Poly(f).sqf_list_include(all=True)
[(Poly(2, x, domain='ZZ'), 1),
(Poly(1, x, domain='ZZ'), 2),
(Poly(x + 1, x, domain='ZZ'), 3),
(Poly(x, x, domain='ZZ'), 4)]
"""
if hasattr(f.rep, 'sqf_list_include'):
factors = f.rep.sqf_list_include(all)
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_list_include')
return [(f.per(g), k) for g, k in factors]
def factor_list(f):
"""
Returns a list of irreducible factors of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = 2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y
>>> Poly(f).factor_list()
(2, [(Poly(x + y, x, y, domain='ZZ'), 1),
(Poly(x**2 + 1, x, y, domain='ZZ'), 2)])
"""
if hasattr(f.rep, 'factor_list'):
try:
coeff, factors = f.rep.factor_list()
except DomainError:
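# Factorization is not supported over this domain, so return ``f``
# unfactored with unit content.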
return S.One, [(f, 1)]
else: # pragma: no cover
raise OperationNotSupported(f, 'factor_list')
return f.rep.dom.to_sympy(coeff), [(f.per(g), k) for g, k in factors]
def factor_list_include(f):
"""
Returns a list of irreducible factors of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = 2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y
>>> Poly(f).factor_list_include()
[(Poly(2*x + 2*y, x, y, domain='ZZ'), 1),
(Poly(x**2 + 1, x, y, domain='ZZ'), 2)]
"""
if hasattr(f.rep, 'factor_list_include'):
try:
factors = f.rep.factor_list_include()
except DomainError:
return [(f, 1)]
else: # pragma: no cover
raise OperationNotSupported(f, 'factor_list_include')
return [(f.per(g), k) for g, k in factors]
def intervals(f, all=False, eps=None, inf=None, sup=None, fast=False, sqf=False):
"""
Compute isolating intervals for roots of ``f``.
For real roots the Vincent-Akritas-Strzebonski (VAS) continued fractions method is used.
References
==========
.. [#] Alkiviadis G. Akritas and Adam W. Strzebonski: A Comparative Study of Two Real Root
Isolation Methods. Nonlinear Analysis: Modelling and Control, Vol. 10, No. 4, 297-304, 2005.
.. [#] Alkiviadis G. Akritas, Adam W. Strzebonski and Panagiotis S. Vigklas: Improving the
Performance of the Continued Fractions Method Using new Bounds of Positive Roots. Nonlinear
Analysis: Modelling and Control, Vol. 13, No. 3, 265-279, 2008.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 3, x).intervals()
[((-2, -1), 1), ((1, 2), 1)]
>>> Poly(x**2 - 3, x).intervals(eps=1e-2)
[((-26/15, -19/11), 1), ((19/11, 26/15), 1)]
"""
if eps is not None:
eps = QQ.convert(eps)
if eps <= 0:
raise ValueError("'eps' must be a positive rational")
if inf is not None:
inf = QQ.convert(inf)
if sup is not None:
sup = QQ.convert(sup)
if hasattr(f.rep, 'intervals'):
result = f.rep.intervals(
all=all, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf)
else: # pragma: no cover
raise OperationNotSupported(f, 'intervals')
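# With sqf=True the isolation routine returns bare intervals (or rectangles
# for complex roots); otherwise each one is paired with its root multiplicity
# k. Either way, convert the rational endpoints back to SymPy numbers.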
if sqf:
def _real(interval):
s, t = interval
return (QQ.to_sympy(s), QQ.to_sympy(t))
if not all:
return list(map(_real, result))
def _complex(rectangle):
(u, v), (s, t) = rectangle
return (QQ.to_sympy(u) + I*QQ.to_sympy(v),
QQ.to_sympy(s) + I*QQ.to_sympy(t))
real_part, complex_part = result
return list(map(_real, real_part)), list(map(_complex, complex_part))
else:
def _real(interval):
(s, t), k = interval
return ((QQ.to_sympy(s), QQ.to_sympy(t)), k)
if not all:
return list(map(_real, result))
def _complex(rectangle):
((u, v), (s, t)), k = rectangle
return ((QQ.to_sympy(u) + I*QQ.to_sympy(v),
QQ.to_sympy(s) + I*QQ.to_sympy(t)), k)
real_part, complex_part = result
return list(map(_real, real_part)), list(map(_complex, complex_part))
def refine_root(f, s, t, eps=None, steps=None, fast=False, check_sqf=False):
"""
Refine an isolating interval of a root to the given precision.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 3, x).refine_root(1, 2, eps=1e-2)
(19/11, 26/15)
"""
if check_sqf and not f.is_sqf:
raise PolynomialError("only square-free polynomials supported")
s, t = QQ.convert(s), QQ.convert(t)
if eps is not None:
eps = QQ.convert(eps)
if eps <= 0:
raise ValueError("'eps' must be a positive rational")
if steps is not None:
steps = int(steps)
elif eps is None:
steps = 1
if hasattr(f.rep, 'refine_root'):
S, T = f.rep.refine_root(s, t, eps=eps, steps=steps, fast=fast)
else: # pragma: no cover
raise OperationNotSupported(f, 'refine_root')
return QQ.to_sympy(S), QQ.to_sympy(T)
def count_roots(f, inf=None, sup=None):
"""
Return the number of roots of ``f`` in ``[inf, sup]`` interval.
Examples
========
>>> from sympy import Poly, I
>>> from sympy.abc import x
>>> Poly(x**4 - 4, x).count_roots(-3, 3)
2
>>> Poly(x**4 - 4, x).count_roots(0, 1 + 3*I)
1
"""
inf_real, sup_real = True, True
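# Normalize the bounds: infinities become None (unbounded), real bounds
# become rationals, and complex bounds become (re, im) pairs, which forces
# complex-root counting below.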
if inf is not None:
inf = sympify(inf)
if inf is S.NegativeInfinity:
inf = None
else:
re, im = inf.as_real_imag()
if not im:
inf = QQ.convert(inf)
else:
inf, inf_real = list(map(QQ.convert, (re, im))), False
if sup is not None:
sup = sympify(sup)
if sup is S.Infinity:
sup = None
else:
re, im = sup.as_real_imag()
if not im:
sup = QQ.convert(sup)
else:
sup, sup_real = list(map(QQ.convert, (re, im))), False
if inf_real and sup_real:
if hasattr(f.rep, 'count_real_roots'):
count = f.rep.count_real_roots(inf=inf, sup=sup)
else: # pragma: no cover
raise OperationNotSupported(f, 'count_real_roots')
else:
if inf_real and inf is not None:
inf = (inf, QQ.zero)
if sup_real and sup is not None:
sup = (sup, QQ.zero)
if hasattr(f.rep, 'count_complex_roots'):
count = f.rep.count_complex_roots(inf=inf, sup=sup)
else: # pragma: no cover
raise OperationNotSupported(f, 'count_complex_roots')
return Integer(count)
def root(f, index, radicals=True):
"""
Get an indexed root of a polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(2*x**3 - 7*x**2 + 4*x + 4)
>>> f.root(0)
-1/2
>>> f.root(1)
2
>>> f.root(2)
2
>>> f.root(3)
Traceback (most recent call last):
...
IndexError: root index out of [-3, 2] range, got 3
>>> Poly(x**5 + x + 1).root(0)
CRootOf(x**3 - x**2 + 1, 0)
"""
return sympy.polys.rootoftools.rootof(f, index, radicals=radicals)
def real_roots(f, multiple=True, radicals=True):
"""
Return a list of real roots with multiplicities.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**3 - 7*x**2 + 4*x + 4).real_roots()
[-1/2, 2, 2]
>>> Poly(x**3 + x + 1).real_roots()
[CRootOf(x**3 + x + 1, 0)]
"""
reals = sympy.polys.rootoftools.CRootOf.real_roots(f, radicals=radicals)
if multiple:
return reals
else:
return group(reals, multiple=False)
def all_roots(f, multiple=True, radicals=True):
"""
Return a list of real and complex roots with multiplicities.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**3 - 7*x**2 + 4*x + 4).all_roots()
[-1/2, 2, 2]
>>> Poly(x**3 + x + 1).all_roots()
[CRootOf(x**3 + x + 1, 0),
CRootOf(x**3 + x + 1, 1),
CRootOf(x**3 + x + 1, 2)]
"""
roots = sympy.polys.rootoftools.CRootOf.all_roots(f, radicals=radicals)
if multiple:
return roots
else:
return group(roots, multiple=False)
def nroots(f, n=15, maxsteps=50, cleanup=True):
"""
Compute numerical approximations of roots of ``f``.
Parameters
==========
n ... the number of digits to calculate
maxsteps ... the maximum number of iterations to do
If the accuracy `n` cannot be reached in `maxsteps`, it will raise an
exception. You need to rerun with higher maxsteps.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 3).nroots(n=15)
[-1.73205080756888, 1.73205080756888]
>>> Poly(x**2 - 3).nroots(n=30)
[-1.73205080756887729352744634151, 1.73205080756887729352744634151]
"""
from sympy.functions.elementary.complexes import sign
if f.is_multivariate:
raise MultivariatePolynomialError(
"can't compute numerical roots of %s" % f)
if f.degree() <= 0:
return []
# For integer and rational coefficients, convert them to integers only
# (for accuracy). Otherwise just try to convert the coefficients to
# mpmath.mpc and raise an exception if the conversion fails.
if f.rep.dom is ZZ:
coeffs = [int(coeff) for coeff in f.all_coeffs()]
elif f.rep.dom is QQ:
denoms = [coeff.q for coeff in f.all_coeffs()]
from sympy.core.numbers import ilcm
fac = ilcm(*denoms)
coeffs = [int(coeff*fac) for coeff in f.all_coeffs()]
else:
coeffs = [coeff.evalf(n=n).as_real_imag()
for coeff in f.all_coeffs()]
try:
coeffs = [mpmath.mpc(*coeff) for coeff in coeffs]
except TypeError:
raise DomainError("Numerical domain expected, got %s" % \
f.rep.dom)
dps = mpmath.mp.dps
mpmath.mp.dps = n
try:
# We need to add extra precision to guard against losing accuracy.
# 10 times the degree of the polynomial seems to work well.
roots = mpmath.polyroots(coeffs, maxsteps=maxsteps,
cleanup=cleanup, error=False, extraprec=f.degree()*10)
# Mpmath puts real roots first, then complex ones (as does all_roots)
# so we make sure this convention holds here, too.
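# Sort key: real roots (zero imaginary part) come first, ordered by value;
# complex roots then sort by real part, |imag|, and finally the sign of the
# imaginary part (the conjugate below the real axis first).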
roots = list(map(sympify,
sorted(roots, key=lambda r: (1 if r.imag else 0, r.real, abs(r.imag), sign(r.imag)))))
except NoConvergence:
raise NoConvergence(
'convergence to root failed; try n < %s or maxsteps > %s' % (
n, maxsteps))
finally:
mpmath.mp.dps = dps
return roots
def ground_roots(f):
"""
Compute roots of ``f`` by factorization in the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**6 - 4*x**4 + 4*x**3 - x**2).ground_roots()
{0: 2, 1: 2}
"""
if f.is_multivariate:
raise MultivariatePolynomialError(
"can't compute ground roots of %s" % f)
roots = {}
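# Only linear factors a*x + b contribute roots (-b/a) in the ground domain;
# each root keeps the multiplicity of its factor.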
for factor, k in f.factor_list()[1]:
if factor.is_linear:
a, b = factor.all_coeffs()
roots[-b/a] = k
return roots
def nth_power_roots_poly(f, n):
"""
Construct a polynomial with n-th powers of roots of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(x**4 - x**2 + 1)
>>> f.nth_power_roots_poly(2)
Poly(x**4 - 2*x**3 + 3*x**2 - 2*x + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(3)
Poly(x**4 + 2*x**2 + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(4)
Poly(x**4 + 2*x**3 + 3*x**2 + 2*x + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(12)
Poly(x**4 - 4*x**3 + 6*x**2 - 4*x + 1, x, domain='ZZ')
"""
if f.is_multivariate:
raise MultivariatePolynomialError(
"must be a univariate polynomial")
N = sympify(n)
if N.is_Integer and N >= 1:
n = int(N)
else:
raise ValueError("'n' must be an integer and n >= 1, got %s" % n)
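# Eliminate x between f(x) and x**n - t using the resultant: the result is a
# polynomial in t whose roots are the n-th powers of the roots of f; finally
# rename t back to x.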
x = f.gen
t = Dummy('t')
r = f.resultant(f.__class__.from_expr(x**n - t, x, t))
return r.replace(t, x)
def cancel(f, g, include=False):
"""
Cancel common factors in a rational function ``f/g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**2 - 2, x).cancel(Poly(x**2 - 2*x + 1, x))
(1, Poly(2*x + 2, x, domain='ZZ'), Poly(x - 1, x, domain='ZZ'))
>>> Poly(2*x**2 - 2, x).cancel(Poly(x**2 - 2*x + 1, x), include=True)
(Poly(2*x + 2, x, domain='ZZ'), Poly(x - 1, x, domain='ZZ'))
"""
dom, per, F, G = f._unify(g)
if hasattr(F, 'cancel'):
result = F.cancel(G, include=include)
else: # pragma: no cover
raise OperationNotSupported(f, 'cancel')
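# Without ``include`` the extracted numeric contents cp, cq are reported
# separately as the ratio cp/cq; with ``include`` they stay absorbed in the
# returned polynomials.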
if not include:
if dom.has_assoc_Ring:
dom = dom.get_ring()
cp, cq, p, q = result
cp = dom.to_sympy(cp)
cq = dom.to_sympy(cq)
return cp/cq, per(p), per(q)
else:
return tuple(map(per, result))
@property
def is_zero(f):
"""
Returns ``True`` if ``f`` is a zero polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(0, x).is_zero
True
>>> Poly(1, x).is_zero
False
"""
return f.rep.is_zero
@property
def is_one(f):
"""
Returns ``True`` if ``f`` is a unit polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(0, x).is_one
False
>>> Poly(1, x).is_one
True
"""
return f.rep.is_one
@property
def is_sqf(f):
"""
Returns ``True`` if ``f`` is a square-free polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 2*x + 1, x).is_sqf
False
>>> Poly(x**2 - 1, x).is_sqf
True
"""
return f.rep.is_sqf
@property
def is_monic(f):
"""
Returns ``True`` if the leading coefficient of ``f`` is one.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 2, x).is_monic
True
>>> Poly(2*x + 2, x).is_monic
False
"""
return f.rep.is_monic
@property
def is_primitive(f):
"""
Returns ``True`` if the GCD of the coefficients of ``f`` is one.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**2 + 6*x + 12, x).is_primitive
False
>>> Poly(x**2 + 3*x + 6, x).is_primitive
True
"""
return f.rep.is_primitive
@property
def is_ground(f):
"""
Returns ``True`` if ``f`` is an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x, x).is_ground
False
>>> Poly(2, x).is_ground
True
>>> Poly(y, x).is_ground
True
"""
return f.rep.is_ground
@property
def is_linear(f):
"""
Returns ``True`` if ``f`` is linear in all its variables.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x + y + 2, x, y).is_linear
True
>>> Poly(x*y + 2, x, y).is_linear
False
"""
return f.rep.is_linear
@property
def is_quadratic(f):
"""
Returns ``True`` if ``f`` is quadratic in all its variables.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x*y + 2, x, y).is_quadratic
True
>>> Poly(x*y**2 + 2, x, y).is_quadratic
False
"""
return f.rep.is_quadratic
@property
def is_monomial(f):
"""
Returns ``True`` if ``f`` is zero or has only one term.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(3*x**2, x).is_monomial
True
>>> Poly(3*x**2 + 1, x).is_monomial
False
"""
return f.rep.is_monomial
@property
def is_homogeneous(f):
"""
Returns ``True`` if ``f`` is a homogeneous polynomial.
A homogeneous polynomial is a polynomial in which all monomials with
non-zero coefficients have the same total degree. If you want not
only to check if a polynomial is homogeneous but also compute its
homogeneous order, then use :func:`Poly.homogeneous_order`.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x*y, x, y).is_homogeneous
True
>>> Poly(x**3 + x*y, x, y).is_homogeneous
False
"""
return f.rep.is_homogeneous
@property
def is_irreducible(f):
"""
Returns ``True`` if ``f`` has no factors over its domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + x + 1, x, modulus=2).is_irreducible
True
>>> Poly(x**2 + 1, x, modulus=2).is_irreducible
False
"""
return f.rep.is_irreducible
@property
def is_univariate(f):
"""
Returns ``True`` if ``f`` is a univariate polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x + 1, x).is_univariate
True
>>> Poly(x*y**2 + x*y + 1, x, y).is_univariate
False
>>> Poly(x*y**2 + x*y + 1, x).is_univariate
True
>>> Poly(x**2 + x + 1, x, y).is_univariate
False
"""
return len(f.gens) == 1
@property
def is_multivariate(f):
"""
Returns ``True`` if ``f`` is a multivariate polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x + 1, x).is_multivariate
False
>>> Poly(x*y**2 + x*y + 1, x, y).is_multivariate
True
>>> Poly(x*y**2 + x*y + 1, x).is_multivariate
False
>>> Poly(x**2 + x + 1, x, y).is_multivariate
True
"""
return len(f.gens) != 1
@property
def is_cyclotomic(f):
"""
Returns ``True`` if ``f`` is a cyclotomic polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1
>>> Poly(f).is_cyclotomic
False
>>> g = x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1
>>> Poly(g).is_cyclotomic
True
"""
return f.rep.is_cyclotomic
def __abs__(f):
return f.abs()
def __neg__(f):
return f.neg()
@_polifyit
def __add__(f, g):
return f.add(g)
@_polifyit
def __radd__(f, g):
return g.add(f)
@_polifyit
def __sub__(f, g):
return f.sub(g)
@_polifyit
def __rsub__(f, g):
return g.sub(f)
@_polifyit
def __mul__(f, g):
return f.mul(g)
@_polifyit
def __rmul__(f, g):
return g.mul(f)
@_sympifyit('n', NotImplemented)
def __pow__(f, n):
if n.is_Integer and n >= 0:
return f.pow(n)
else:
return NotImplemented
@_polifyit
def __divmod__(f, g):
return f.div(g)
@_polifyit
def __rdivmod__(f, g):
return g.div(f)
@_polifyit
def __mod__(f, g):
return f.rem(g)
@_polifyit
def __rmod__(f, g):
return g.rem(f)
@_polifyit
def __floordiv__(f, g):
return f.quo(g)
@_polifyit
def __rfloordiv__(f, g):
return g.quo(f)
@_sympifyit('g', NotImplemented)
def __div__(f, g):
return f.as_expr()/g.as_expr()
@_sympifyit('g', NotImplemented)
def __rdiv__(f, g):
return g.as_expr()/f.as_expr()
__truediv__ = __div__
__rtruediv__ = __rdiv__
@_sympifyit('other', NotImplemented)
def __eq__(self, other):
f, g = self, other
if not g.is_Poly:
try:
g = f.__class__(g, f.gens, domain=f.get_domain())
except (PolynomialError, DomainError, CoercionFailed):
return False
if f.gens != g.gens:
return False
if f.rep.dom != g.rep.dom:
return False
return f.rep == g.rep
@_sympifyit('g', NotImplemented)
def __ne__(f, g):
return not f == g
def __nonzero__(f):
return not f.is_zero
__bool__ = __nonzero__
def eq(f, g, strict=False):
if not strict:
return f == g
else:
return f._strict_eq(sympify(g))
def ne(f, g, strict=False):
return not f.eq(g, strict=strict)
def _strict_eq(f, g):
return isinstance(g, f.__class__) and f.gens == g.gens and f.rep.eq(g.rep, strict=True)
@public
class PurePoly(Poly):
"""Class for representing pure polynomials. """
def _hashable_content(self):
"""Allow SymPy to hash Poly instances. """
return (self.rep,)
def __hash__(self):
return super(PurePoly, self).__hash__()
@property
def free_symbols(self):
"""
Free symbols of a polynomial.
Examples
========
>>> from sympy import PurePoly
>>> from sympy.abc import x, y
>>> PurePoly(x**2 + 1).free_symbols
set()
>>> PurePoly(x**2 + y).free_symbols
set()
>>> PurePoly(x**2 + y, x).free_symbols
{y}
"""
return self.free_symbols_in_domain
@_sympifyit('other', NotImplemented)
def __eq__(self, other):
f, g = self, other
if not g.is_Poly:
try:
g = f.__class__(g, f.gens, domain=f.get_domain())
except (PolynomialError, DomainError, CoercionFailed):
return False
if len(f.gens) != len(g.gens):
return False
if f.rep.dom != g.rep.dom:
try:
dom = f.rep.dom.unify(g.rep.dom, f.gens)
except UnificationFailed:
return False
f = f.set_domain(dom)
g = g.set_domain(dom)
return f.rep == g.rep
def _strict_eq(f, g):
return isinstance(g, f.__class__) and f.rep.eq(g.rep, strict=True)
def _unify(f, g):
g = sympify(g)
if not g.is_Poly:
try:
return f.rep.dom, f.per, f.rep, f.rep.per(f.rep.dom.from_sympy(g))
except CoercionFailed:
raise UnificationFailed("can't unify %s with %s" % (f, g))
if len(f.gens) != len(g.gens):
raise UnificationFailed("can't unify %s with %s" % (f, g))
if not (isinstance(f.rep, DMP) and isinstance(g.rep, DMP)):
raise UnificationFailed("can't unify %s with %s" % (f, g))
cls = f.__class__
gens = f.gens
dom = f.rep.dom.unify(g.rep.dom, gens)
F = f.rep.convert(dom)
G = g.rep.convert(dom)
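# ``per`` re-wraps a low-level representation in this polynomial class;
# ``remove`` drops one generator and, if none remain, the result is
# converted back to a plain SymPy object.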
def per(rep, dom=dom, gens=gens, remove=None):
if remove is not None:
gens = gens[:remove] + gens[remove + 1:]
if not gens:
return dom.to_sympy(rep)
return cls.new(rep, *gens)
return dom, per, F, G
@public
def poly_from_expr(expr, *gens, **args):
"""Construct a polynomial from an expression. """
opt = options.build_options(gens, args)
return _poly_from_expr(expr, opt)
def _poly_from_expr(expr, opt):
"""Construct a polynomial from an expression. """
orig, expr = expr, sympify(expr)
if not isinstance(expr, Basic):
raise PolificationFailed(opt, orig, expr)
elif expr.is_Poly:
poly = expr.__class__._from_poly(expr, opt)
opt.gens = poly.gens
opt.domain = poly.domain
if opt.polys is None:
opt.polys = True
return poly, opt
elif opt.expand:
expr = expr.expand()
rep, opt = _dict_from_expr(expr, opt)
if not opt.gens:
raise PolificationFailed(opt, orig, expr)
monoms, coeffs = list(zip(*list(rep.items())))
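# If no coefficient domain was specified, construct the smallest suitable
# one from the coefficients; otherwise coerce the coefficients into the
# requested domain.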
domain = opt.domain
if domain is None:
opt.domain, coeffs = construct_domain(coeffs, opt=opt)
else:
coeffs = list(map(domain.from_sympy, coeffs))
rep = dict(list(zip(monoms, coeffs)))
poly = Poly._from_dict(rep, opt)
if opt.polys is None:
opt.polys = False
return poly, opt
@public
def parallel_poly_from_expr(exprs, *gens, **args):
"""Construct polynomials from expressions. """
opt = options.build_options(gens, args)
return _parallel_poly_from_expr(exprs, opt)
def _parallel_poly_from_expr(exprs, opt):
"""Construct polynomials from expressions. """
from sympy.functions.elementary.piecewise import Piecewise
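# Fast path: two Poly inputs are unified directly instead of being rebuilt
# from expressions.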
if len(exprs) == 2:
f, g = exprs
if isinstance(f, Poly) and isinstance(g, Poly):
f = f.__class__._from_poly(f, opt)
g = g.__class__._from_poly(g, opt)
f, g = f.unify(g)
opt.gens = f.gens
opt.domain = f.domain
if opt.polys is None:
opt.polys = True
return [f, g], opt
origs, exprs = list(exprs), []
_exprs, _polys = [], []
failed = False
for i, expr in enumerate(origs):
expr = sympify(expr)
if isinstance(expr, Basic):
if expr.is_Poly:
_polys.append(i)
else:
_exprs.append(i)
if opt.expand:
expr = expr.expand()
else:
failed = True
exprs.append(expr)
if failed:
raise PolificationFailed(opt, origs, exprs, True)
if _polys:
# XXX: this is a temporary solution
for i in _polys:
exprs[i] = exprs[i].as_expr()
reps, opt = _parallel_dict_from_expr(exprs, opt)
if not opt.gens:
raise PolificationFailed(opt, origs, exprs, True)
for k in opt.gens:
if isinstance(k, Piecewise):
raise PolynomialError("Piecewise generators do not make sense")
coeffs_list, lengths = [], []
all_monoms = []
all_coeffs = []
for rep in reps:
monoms, coeffs = list(zip(*list(rep.items())))
coeffs_list.extend(coeffs)
all_monoms.append(monoms)
lengths.append(len(coeffs))
domain = opt.domain
if domain is None:
opt.domain, coeffs_list = construct_domain(coeffs_list, opt=opt)
else:
coeffs_list = list(map(domain.from_sympy, coeffs_list))
for k in lengths:
all_coeffs.append(coeffs_list[:k])
coeffs_list = coeffs_list[k:]
polys = []
for monoms, coeffs in zip(all_monoms, all_coeffs):
rep = dict(list(zip(monoms, coeffs)))
poly = Poly._from_dict(rep, opt)
polys.append(poly)
if opt.polys is None:
opt.polys = bool(_polys)
return polys, opt
def _update_args(args, key, value):
"""Add a new ``(key, value)`` pair to arguments ``dict``. """
args = dict(args)
if key not in args:
args[key] = value
return args
@public
def degree(f, gen=0):
"""
Return the degree of ``f`` in the given variable.
The degree of 0 is negative infinity.
Examples
========
>>> from sympy import degree
>>> from sympy.abc import x, y
>>> degree(x**2 + y*x + 1, gen=x)
2
>>> degree(x**2 + y*x + 1, gen=y)
1
>>> degree(0, x)
-oo
See also
========
sympy.polys.polytools.Poly.total_degree
degree_list
"""
f = sympify(f, strict=True)
gen_is_Num = sympify(gen, strict=True).is_Number
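# ``gen`` may be given as an index (a number) or as an actual generator
# symbol; a numeric ``f`` has degree 0, except degree(0) which is -oo.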
if f.is_Poly:
p = f
isNum = p.as_expr().is_Number
else:
isNum = f.is_Number
if not isNum:
if gen_is_Num:
p, _ = poly_from_expr(f)
else:
p, _ = poly_from_expr(f, gen)
if isNum:
return S.Zero if f else S.NegativeInfinity
if not gen_is_Num:
if f.is_Poly and gen not in p.gens:
# try recast without explicit gens
p, _ = poly_from_expr(f.as_expr())
if gen not in p.gens:
return S.Zero
elif not f.is_Poly and len(f.free_symbols) > 1:
raise TypeError(filldedent('''
A symbolic generator of interest is required for a multivariate
expression like func = %s, e.g. degree(func, gen = %s) instead of
degree(func, gen = %s).
''' % (f, next(ordered(f.free_symbols)), gen)))
return Integer(p.degree(gen))
@public
def total_degree(f, *gens):
"""
Return the total_degree of ``f`` in the given variables.
Examples
========
>>> from sympy import total_degree, Poly
>>> from sympy.abc import x, y, z
>>> total_degree(1)
0
>>> total_degree(x + x*y)
2
>>> total_degree(x + x*y, x)
1
If the expression is a Poly and no variables are given
then the generators of the Poly will be used:
>>> p = Poly(x + x*y, y)
>>> total_degree(p)
1
To deal with the underlying expression of the Poly, convert
it to an Expr:
>>> total_degree(p.as_expr())
2
This is done automatically if any variables are given:
>>> total_degree(p, x)
1
See also
========
degree
"""
p = sympify(f)
if p.is_Poly:
p = p.as_expr()
if p.is_Number:
rv = 0
else:
if f.is_Poly:
gens = gens or f.gens
rv = Poly(p, gens).total_degree()
return Integer(rv)
@public
def degree_list(f, *gens, **args):
"""
Return a list of degrees of ``f`` in all variables.
Examples
========
>>> from sympy import degree_list
>>> from sympy.abc import x, y
>>> degree_list(x**2 + y*x + 1)
(2, 1)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('degree_list', 1, exc)
degrees = F.degree_list()
return tuple(map(Integer, degrees))
@public
def LC(f, *gens, **args):
"""
Return the leading coefficient of ``f``.
Examples
========
>>> from sympy import LC
>>> from sympy.abc import x, y
>>> LC(4*x**2 + 2*x*y**2 + x*y + 3*y)
4
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('LC', 1, exc)
return F.LC(order=opt.order)
@public
def LM(f, *gens, **args):
"""
Return the leading monomial of ``f``.
Examples
========
>>> from sympy import LM
>>> from sympy.abc import x, y
>>> LM(4*x**2 + 2*x*y**2 + x*y + 3*y)
x**2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('LM', 1, exc)
monom = F.LM(order=opt.order)
return monom.as_expr()
@public
def LT(f, *gens, **args):
"""
Return the leading term of ``f``.
Examples
========
>>> from sympy import LT
>>> from sympy.abc import x, y
>>> LT(4*x**2 + 2*x*y**2 + x*y + 3*y)
4*x**2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('LT', 1, exc)
monom, coeff = F.LT(order=opt.order)
return coeff*monom.as_expr()
@public
def pdiv(f, g, *gens, **args):
"""
Compute polynomial pseudo-division of ``f`` and ``g``.
Examples
========
>>> from sympy import pdiv
>>> from sympy.abc import x
>>> pdiv(x**2 + 1, 2*x - 4)
(2*x + 4, 20)
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('pdiv', 2, exc)
q, r = F.pdiv(G)
if not opt.polys:
return q.as_expr(), r.as_expr()
else:
return q, r
@public
def prem(f, g, *gens, **args):
"""
Compute polynomial pseudo-remainder of ``f`` and ``g``.
Examples
========
>>> from sympy import prem
>>> from sympy.abc import x
>>> prem(x**2 + 1, 2*x - 4)
20
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('prem', 2, exc)
r = F.prem(G)
if not opt.polys:
return r.as_expr()
else:
return r
@public
def pquo(f, g, *gens, **args):
"""
Compute polynomial pseudo-quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import pquo
>>> from sympy.abc import x
>>> pquo(x**2 + 1, 2*x - 4)
2*x + 4
>>> pquo(x**2 - 1, 2*x - 1)
2*x + 1
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('pquo', 2, exc)
try:
q = F.pquo(G)
except ExactQuotientFailed:
raise ExactQuotientFailed(f, g)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def pexquo(f, g, *gens, **args):
"""
Compute polynomial exact pseudo-quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import pexquo
>>> from sympy.abc import x
>>> pexquo(x**2 - 1, 2*x - 2)
2*x + 2
>>> pexquo(x**2 + 1, 2*x - 4)
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('pexquo', 2, exc)
q = F.pexquo(G)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def div(f, g, *gens, **args):
"""
Compute polynomial division of ``f`` and ``g``.
Examples
========
>>> from sympy import div, ZZ, QQ
>>> from sympy.abc import x
>>> div(x**2 + 1, 2*x - 4, domain=ZZ)
(0, x**2 + 1)
>>> div(x**2 + 1, 2*x - 4, domain=QQ)
(x/2 + 1, 5)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('div', 2, exc)
q, r = F.div(G, auto=opt.auto)
if not opt.polys:
return q.as_expr(), r.as_expr()
else:
return q, r
@public
def rem(f, g, *gens, **args):
"""
Compute polynomial remainder of ``f`` and ``g``.
Examples
========
>>> from sympy import rem, ZZ, QQ
>>> from sympy.abc import x
>>> rem(x**2 + 1, 2*x - 4, domain=ZZ)
x**2 + 1
>>> rem(x**2 + 1, 2*x - 4, domain=QQ)
5
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('rem', 2, exc)
r = F.rem(G, auto=opt.auto)
if not opt.polys:
return r.as_expr()
else:
return r
@public
def quo(f, g, *gens, **args):
"""
Compute polynomial quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import quo
>>> from sympy.abc import x
>>> quo(x**2 + 1, 2*x - 4)
x/2 + 1
>>> quo(x**2 - 1, x - 1)
x + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('quo', 2, exc)
q = F.quo(G, auto=opt.auto)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def exquo(f, g, *gens, **args):
"""
Compute polynomial exact quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import exquo
>>> from sympy.abc import x
>>> exquo(x**2 - 1, x - 1)
x + 1
>>> exquo(x**2 + 1, 2*x - 4)
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('exquo', 2, exc)
q = F.exquo(G, auto=opt.auto)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def half_gcdex(f, g, *gens, **args):
"""
Half extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``.
Examples
========
>>> from sympy import half_gcdex
>>> from sympy.abc import x
>>> half_gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4)
(3/5 - x/5, x + 1)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
s, h = domain.half_gcdex(a, b)
except NotImplementedError:
raise ComputationFailed('half_gcdex', 2, exc)
else:
return domain.to_sympy(s), domain.to_sympy(h)
s, h = F.half_gcdex(G, auto=opt.auto)
if not opt.polys:
return s.as_expr(), h.as_expr()
else:
return s, h
@public
def gcdex(f, g, *gens, **args):
"""
Extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.
Examples
========
>>> from sympy import gcdex
>>> from sympy.abc import x
>>> gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4)
(3/5 - x/5, x**2/5 - 6*x/5 + 2, x + 1)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
s, t, h = domain.gcdex(a, b)
except NotImplementedError:
raise ComputationFailed('gcdex', 2, exc)
else:
return domain.to_sympy(s), domain.to_sympy(t), domain.to_sympy(h)
s, t, h = F.gcdex(G, auto=opt.auto)
if not opt.polys:
return s.as_expr(), t.as_expr(), h.as_expr()
else:
return s, t, h
@public
def invert(f, g, *gens, **args):
"""
Invert ``f`` modulo ``g`` when possible.
Examples
========
>>> from sympy import invert, S
>>> from sympy.core.numbers import mod_inverse
>>> from sympy.abc import x
>>> invert(x**2 - 1, 2*x - 1)
-4/3
>>> invert(x**2 - 1, x - 1)
Traceback (most recent call last):
...
NotInvertible: zero divisor
For more efficient inversion of Rationals,
use the :obj:`~.mod_inverse` function:
>>> mod_inverse(3, 5)
2
>>> (S(2)/5).invert(S(7)/3)
5/2
See Also
========
sympy.core.numbers.mod_inverse
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
return domain.to_sympy(domain.invert(a, b))
except NotImplementedError:
raise ComputationFailed('invert', 2, exc)
h = F.invert(G, auto=opt.auto)
if not opt.polys:
return h.as_expr()
else:
return h
@public
def subresultants(f, g, *gens, **args):
"""
Compute subresultant PRS of ``f`` and ``g``.
Examples
========
>>> from sympy import subresultants
>>> from sympy.abc import x
>>> subresultants(x**2 + 1, x**2 - 1)
[x**2 + 1, x**2 - 1, -2]
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('subresultants', 2, exc)
result = F.subresultants(G)
if not opt.polys:
return [r.as_expr() for r in result]
else:
return result
@public
def resultant(f, g, *gens, **args):
"""
Compute resultant of ``f`` and ``g``.
Examples
========
>>> from sympy import resultant
>>> from sympy.abc import x
>>> resultant(x**2 + 1, x**2 - 1)
4
"""
includePRS = args.pop('includePRS', False)
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('resultant', 2, exc)
if includePRS:
result, R = F.resultant(G, includePRS=includePRS)
else:
result = F.resultant(G)
if not opt.polys:
if includePRS:
return result.as_expr(), [r.as_expr() for r in R]
return result.as_expr()
else:
if includePRS:
return result, R
return result
@public
def discriminant(f, *gens, **args):
"""
Compute discriminant of ``f``.
Examples
========
>>> from sympy import discriminant
>>> from sympy.abc import x
>>> discriminant(x**2 + 2*x + 3)
-8
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('discriminant', 1, exc)
result = F.discriminant()
if not opt.polys:
return result.as_expr()
else:
return result
@public
def cofactors(f, g, *gens, **args):
"""
Compute GCD and cofactors of ``f`` and ``g``.
Returns polynomials ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, and
``cff = quo(f, h)`` and ``cfg = quo(g, h)`` are the so-called cofactors
of ``f`` and ``g``.
Examples
========
>>> from sympy import cofactors
>>> from sympy.abc import x
>>> cofactors(x**2 - 1, x**2 - 3*x + 2)
(x - 1, x + 1, x - 2)
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
h, cff, cfg = domain.cofactors(a, b)
except NotImplementedError:
raise ComputationFailed('cofactors', 2, exc)
else:
return domain.to_sympy(h), domain.to_sympy(cff), domain.to_sympy(cfg)
h, cff, cfg = F.cofactors(G)
if not opt.polys:
return h.as_expr(), cff.as_expr(), cfg.as_expr()
else:
return h, cff, cfg
@public
def gcd_list(seq, *gens, **args):
"""
Compute GCD of a list of polynomials.
Examples
========
>>> from sympy import gcd_list
>>> from sympy.abc import x
>>> gcd_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])
x - 1
"""
seq = sympify(seq)
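# If the inputs are plain numbers and no generators or flags were given,
# take the GCD directly in the constructed numerical domain instead of
# building polynomials.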
def try_non_polynomial_gcd(seq):
if not gens and not args:
domain, numbers = construct_domain(seq)
if not numbers:
return domain.zero
elif domain.is_Numerical:
result, numbers = numbers[0], numbers[1:]
for number in numbers:
result = domain.gcd(result, number)
if domain.is_one(result):
break
return domain.to_sympy(result)
return None
result = try_non_polynomial_gcd(seq)
if result is not None:
return result
options.allowed_flags(args, ['polys'])
try:
polys, opt = parallel_poly_from_expr(seq, *gens, **args)
# gcd for domain Q[irrational] (purely algebraic irrational)
if len(seq) > 1 and all(elt.is_algebraic and elt.is_irrational for elt in seq):
a = seq[-1]
lst = [ (a/elt).ratsimp() for elt in seq[:-1] ]
if all(frc.is_rational for frc in lst):
lc = 1
for frc in lst:
lc = lcm(lc, frc.as_numer_denom()[0])
return a/lc
except PolificationFailed as exc:
result = try_non_polynomial_gcd(exc.exprs)
if result is not None:
return result
else:
raise ComputationFailed('gcd_list', len(seq), exc)
if not polys:
if not opt.polys:
return S.Zero
else:
return Poly(0, opt=opt)
result, polys = polys[0], polys[1:]
for poly in polys:
result = result.gcd(poly)
if result.is_one:
break
if not opt.polys:
return result.as_expr()
else:
return result
@public
def gcd(f, g=None, *gens, **args):
"""
Compute GCD of ``f`` and ``g``.
Examples
========
>>> from sympy import gcd
>>> from sympy.abc import x
>>> gcd(x**2 - 1, x**2 - 3*x + 2)
x - 1
"""
if hasattr(f, '__iter__'):
if g is not None:
gens = (g,) + gens
return gcd_list(f, *gens, **args)
elif g is None:
raise TypeError("gcd() takes 2 arguments or a sequence of arguments")
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
# gcd for domain Q[irrational] (purely algebraic irrational)
a, b = map(sympify, (f, g))
if a.is_algebraic and a.is_irrational and b.is_algebraic and b.is_irrational:
frc = (a/b).ratsimp()
if frc.is_rational:
return a/frc.as_numer_denom()[0]
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
return domain.to_sympy(domain.gcd(a, b))
except NotImplementedError:
raise ComputationFailed('gcd', 2, exc)
result = F.gcd(G)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def lcm_list(seq, *gens, **args):
"""
Compute LCM of a list of polynomials.
Examples
========
>>> from sympy import lcm_list
>>> from sympy.abc import x
>>> lcm_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])
x**5 - x**4 - 2*x**3 - x**2 + x + 2
"""
seq = sympify(seq)
def try_non_polynomial_lcm(seq):
if not gens and not args:
domain, numbers = construct_domain(seq)
if not numbers:
return domain.one
elif domain.is_Numerical:
result, numbers = numbers[0], numbers[1:]
for number in numbers:
result = domain.lcm(result, number)
return domain.to_sympy(result)
return None
result = try_non_polynomial_lcm(seq)
if result is not None:
return result
options.allowed_flags(args, ['polys'])
try:
polys, opt = parallel_poly_from_expr(seq, *gens, **args)
# lcm for domain Q[irrational] (purely algebraic irrational)
if len(seq) > 1 and all(elt.is_algebraic and elt.is_irrational for elt in seq):
a = seq[-1]
lst = [ (a/elt).ratsimp() for elt in seq[:-1] ]
if all(frc.is_rational for frc in lst):
lc = 1
for frc in lst:
lc = lcm(lc, frc.as_numer_denom()[1])
return a*lc
except PolificationFailed as exc:
result = try_non_polynomial_lcm(exc.exprs)
if result is not None:
return result
else:
raise ComputationFailed('lcm_list', len(seq), exc)
if not polys:
if not opt.polys:
return S.One
else:
return Poly(1, opt=opt)
result, polys = polys[0], polys[1:]
for poly in polys:
result = result.lcm(poly)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def lcm(f, g=None, *gens, **args):
"""
Compute LCM of ``f`` and ``g``.
Examples
========
>>> from sympy import lcm
>>> from sympy.abc import x
>>> lcm(x**2 - 1, x**2 - 3*x + 2)
x**3 - 2*x**2 - x + 2
"""
if hasattr(f, '__iter__'):
if g is not None:
gens = (g,) + gens
return lcm_list(f, *gens, **args)
elif g is None:
raise TypeError("lcm() takes 2 arguments or a sequence of arguments")
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
# lcm for domain Q[irrational] (purely algebraic irrational)
a, b = map(sympify, (f, g))
if a.is_algebraic and a.is_irrational and b.is_algebraic and b.is_irrational:
frc = (a/b).ratsimp()
if frc.is_rational:
return a*frc.as_numer_denom()[1]
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
return domain.to_sympy(domain.lcm(a, b))
except NotImplementedError:
raise ComputationFailed('lcm', 2, exc)
result = F.lcm(G)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def terms_gcd(f, *gens, **args):
"""
Remove GCD of terms from ``f``.
If the ``deep`` flag is True, then the arguments of ``f`` will have
terms_gcd applied to them.
If a fraction is factored out of ``f`` and ``f`` is an Add, then
an unevaluated Mul will be returned so that automatic simplification
does not redistribute it. The hint ``clear``, when set to False, can be
used to prevent such factoring when not all coefficients are fractions.
Examples
========
>>> from sympy import terms_gcd, cos
>>> from sympy.abc import x, y
>>> terms_gcd(x**6*y**2 + x**3*y, x, y)
x**3*y*(x**3*y + 1)
The default action of polys routines is to expand the expression
given to them. terms_gcd follows this behavior:
>>> terms_gcd((3+3*x)*(x+x*y))
3*x*(x*y + x + y + 1)
If this is not desired then the hint ``expand`` can be set to False.
In this case the expression will be treated as though it were comprised
of one or more terms:
>>> terms_gcd((3+3*x)*(x+x*y), expand=False)
(3*x + 3)*(x*y + x)
In order to traverse factors of a Mul or the arguments of other
functions, the ``deep`` hint can be used:
>>> terms_gcd((3 + 3*x)*(x + x*y), expand=False, deep=True)
3*x*(x + 1)*(y + 1)
>>> terms_gcd(cos(x + x*y), deep=True)
cos(x*(y + 1))
Rationals are factored out by default:
>>> terms_gcd(x + y/2)
(2*x + y)/2
Only the y-term had a coefficient that was a fraction; if one
does not want to factor out the 1/2 in cases like this, the
flag ``clear`` can be set to False:
>>> terms_gcd(x + y/2, clear=False)
x + y/2
>>> terms_gcd(x*y/2 + y**2, clear=False)
y*(x/2 + y)
The ``clear`` flag is ignored if all coefficients are fractions:
>>> terms_gcd(x/3 + y/2, clear=False)
(2*x + 3*y)/6
See Also
========
sympy.core.exprtools.gcd_terms, sympy.core.exprtools.factor_terms
"""
from sympy.core.relational import Equality
orig = sympify(f)
if isinstance(f, Equality):
return Equality(*(terms_gcd(s, *gens, **args) for s in [f.lhs, f.rhs]))
elif isinstance(f, Relational):
raise TypeError("Inequalities can not be used with terms_gcd. Found: %s" %(f,))
if not isinstance(f, Expr) or f.is_Atom:
return orig
if args.get('deep', False):
new = f.func(*[terms_gcd(a, *gens, **args) for a in f.args])
args.pop('deep')
args['expand'] = False
return terms_gcd(new, *gens, **args)
clear = args.pop('clear', True)
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
return exc.expr
J, f = F.terms_gcd()
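# J is the exponent vector of the monomial GCD that was factored out of the
# terms; it is rebuilt as ``term`` below.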
if opt.domain.is_Ring:
if opt.domain.is_Field:
denom, f = f.clear_denoms(convert=True)
coeff, f = f.primitive()
if opt.domain.is_Field:
coeff /= denom
else:
coeff = S.One
term = Mul(*[x**j for x, j in zip(f.gens, J)])
if coeff == 1:
coeff = S.One
if term == 1:
return orig
if clear:
return _keep_coeff(coeff, term*f.as_expr())
# base the clearing on the form of the original expression, not
# the (perhaps) Mul that we have now
coeff, f = _keep_coeff(coeff, f.as_expr(), clear=False).as_coeff_Mul()
return _keep_coeff(coeff, term*f, clear=False)
@public
def trunc(f, p, *gens, **args):
"""
Reduce ``f`` modulo a constant ``p``.
Examples
========
>>> from sympy import trunc
>>> from sympy.abc import x
>>> trunc(2*x**3 + 3*x**2 + 5*x + 7, 3)
-x**3 - x + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('trunc', 1, exc)
result = F.trunc(sympify(p))
if not opt.polys:
return result.as_expr()
else:
return result
@public
def monic(f, *gens, **args):
"""
Divide all coefficients of ``f`` by ``LC(f)``.
Examples
========
>>> from sympy import monic
>>> from sympy.abc import x
>>> monic(3*x**2 + 4*x + 2)
x**2 + 4*x/3 + 2/3
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('monic', 1, exc)
result = F.monic(auto=opt.auto)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def content(f, *gens, **args):
"""
Compute GCD of coefficients of ``f``.
Examples
========
>>> from sympy import content
>>> from sympy.abc import x
>>> content(6*x**2 + 8*x + 12)
2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('content', 1, exc)
return F.content()
@public
def primitive(f, *gens, **args):
"""
Compute content and the primitive form of ``f``.
Examples
========
>>> from sympy.polys.polytools import primitive
>>> from sympy.abc import x
>>> primitive(6*x**2 + 8*x + 12)
(2, 3*x**2 + 4*x + 6)
>>> eq = (2 + 2*x)*x + 2
Expansion is performed by default:
>>> primitive(eq)
(2, x**2 + x + 1)
Set ``expand`` to False to shut this off. Note that the
extraction will not be recursive; use the as_content_primitive method
for recursive, non-destructive Rational extraction.
>>> primitive(eq, expand=False)
(1, x*(2*x + 2) + 2)
>>> eq.as_content_primitive()
(2, x*(x + 1) + 1)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('primitive', 1, exc)
cont, result = F.primitive()
if not opt.polys:
return cont, result.as_expr()
else:
return cont, result
@public
def compose(f, g, *gens, **args):
"""
Compute functional composition ``f(g)``.
Examples
========
>>> from sympy import compose
>>> from sympy.abc import x
>>> compose(x**2 + x, x - 1)
x**2 - x
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('compose', 2, exc)
result = F.compose(G)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def decompose(f, *gens, **args):
"""
Compute functional decomposition of ``f``.
Examples
========
>>> from sympy import decompose
>>> from sympy.abc import x
>>> decompose(x**4 + 2*x**3 - x - 1)
[x**2 - x - 1, x**2 + x]
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('decompose', 1, exc)
result = F.decompose()
if not opt.polys:
return [r.as_expr() for r in result]
else:
return result
@public
def sturm(f, *gens, **args):
"""
Compute Sturm sequence of ``f``.
Examples
========
>>> from sympy import sturm
>>> from sympy.abc import x
>>> sturm(x**3 - 2*x**2 + x - 3)
[x**3 - 2*x**2 + x - 3, 3*x**2 - 4*x + 1, 2*x/9 + 25/9, -2079/4]
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('sturm', 1, exc)
result = F.sturm(auto=opt.auto)
if not opt.polys:
return [r.as_expr() for r in result]
else:
return result
@public
def gff_list(f, *gens, **args):
"""
Compute a list of greatest factorial factors of ``f``.
Note that the input to ff() and rf() should be Poly instances to use the
definitions here.
Examples
========
>>> from sympy import gff_list, ff, Poly
>>> from sympy.abc import x
>>> f = Poly(x**5 + 2*x**4 - x**3 - 2*x**2, x)
>>> gff_list(f)
[(Poly(x, x, domain='ZZ'), 1), (Poly(x + 2, x, domain='ZZ'), 4)]
>>> (ff(Poly(x), 1)*ff(Poly(x + 2), 4)) == f
True
>>> f = Poly(x**12 + 6*x**11 - 11*x**10 - 56*x**9 + 220*x**8 + 208*x**7 - \
1401*x**6 + 1090*x**5 + 2715*x**4 - 6720*x**3 - 1092*x**2 + 5040*x, x)
>>> gff_list(f)
[(Poly(x**3 + 7, x, domain='ZZ'), 2), (Poly(x**2 + 5*x, x, domain='ZZ'), 3)]
>>> ff(Poly(x**3 + 7, x), 2)*ff(Poly(x**2 + 5*x, x), 3) == f
True
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('gff_list', 1, exc)
factors = F.gff_list()
if not opt.polys:
return [(g.as_expr(), k) for g, k in factors]
else:
return factors
@public
def gff(f, *gens, **args):
"""Compute greatest factorial factorization of ``f``. """
raise NotImplementedError('symbolic falling factorial')
@public
def sqf_norm(f, *gens, **args):
"""
Compute square-free norm of ``f``.
Returns ``s``, ``g``, ``r``, such that ``g(x) = f(x - s*a)`` and
``r(x) = Norm(g(x))`` is a square-free polynomial over ``K``,
where ``a`` is the generator of the algebraic extension of the ground domain.
Examples
========
>>> from sympy import sqf_norm, sqrt
>>> from sympy.abc import x
>>> sqf_norm(x**2 + 1, extension=[sqrt(3)])
(1, x**2 - 2*sqrt(3)*x + 4, x**4 - 4*x**2 + 16)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('sqf_norm', 1, exc)
s, g, r = F.sqf_norm()
if not opt.polys:
return Integer(s), g.as_expr(), r.as_expr()
else:
return Integer(s), g, r
@public
def sqf_part(f, *gens, **args):
"""
Compute square-free part of ``f``.
Examples
========
>>> from sympy import sqf_part
>>> from sympy.abc import x
>>> sqf_part(x**3 - 3*x - 2)
x**2 - x - 2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('sqf_part', 1, exc)
result = F.sqf_part()
if not opt.polys:
return result.as_expr()
else:
return result
def _sorted_factors(factors, method):
"""Sort a list of ``(expr, exp)`` pairs. """
if method == 'sqf':
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (exp, len(rep), len(poly.gens), rep)
else:
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (len(rep), len(poly.gens), exp, rep)
return sorted(factors, key=key)
def _factors_product(factors):
"""Multiply a list of ``(expr, exp)`` pairs. """
return Mul(*[f.as_expr()**k for f, k in factors])
def _symbolic_factor_list(expr, opt, method):
"""Helper function for :func:`_symbolic_factor`. """
coeff, factors = S.One, []
args = [i._eval_factor() if hasattr(i, '_eval_factor') else i
for i in Mul.make_args(expr)]
for arg in args:
if arg.is_Number:
coeff *= arg
continue
if arg.is_Mul:
args.extend(arg.args)
continue
if arg.is_Pow:
base, exp = arg.args
if base.is_Number and exp.is_Number:
coeff *= arg
continue
if base.is_Number:
factors.append((base, exp))
continue
else:
base, exp = arg, S.One
try:
poly, _ = _poly_from_expr(base, opt)
except PolificationFailed as exc:
factors.append((exc.expr, exp))
else:
func = getattr(poly, method + '_list')
_coeff, _factors = func()
if _coeff is not S.One:
if exp.is_Integer:
coeff *= _coeff**exp
elif _coeff.is_positive:
factors.append((_coeff, exp))
else:
_factors.append((_coeff, S.One))
if exp is S.One:
factors.extend(_factors)
elif exp.is_integer:
factors.extend([(f, k*exp) for f, k in _factors])
else:
other = []
for f, k in _factors:
if f.as_expr().is_positive:
factors.append((f, k*exp))
else:
other.append((f, k))
factors.append((_factors_product(other), exp))
return coeff, factors
def _symbolic_factor(expr, opt, method):
"""Helper function for :func:`_factor`. """
if isinstance(expr, Expr):
if hasattr(expr,'_eval_factor'):
return expr._eval_factor()
coeff, factors = _symbolic_factor_list(together(expr, fraction=opt['fraction']), opt, method)
return _keep_coeff(coeff, _factors_product(factors))
elif hasattr(expr, 'args'):
return expr.func(*[_symbolic_factor(arg, opt, method) for arg in expr.args])
elif hasattr(expr, '__iter__'):
return expr.__class__([_symbolic_factor(arg, opt, method) for arg in expr])
else:
return expr
def _generic_factor_list(expr, gens, args, method):
"""Helper function for :func:`sqf_list` and :func:`factor_list`. """
options.allowed_flags(args, ['frac', 'polys'])
opt = options.build_options(gens, args)
expr = sympify(expr)
if isinstance(expr, (Expr, Poly)):
if isinstance(expr, Poly):
numer, denom = expr, 1
else:
numer, denom = together(expr).as_numer_denom()
cp, fp = _symbolic_factor_list(numer, opt, method)
cq, fq = _symbolic_factor_list(denom, opt, method)
if fq and not opt.frac:
raise PolynomialError("a polynomial expected, got %s" % expr)
_opt = opt.clone(dict(expand=True))
for factors in (fp, fq):
for i, (f, k) in enumerate(factors):
if not f.is_Poly:
f, _ = _poly_from_expr(f, _opt)
factors[i] = (f, k)
fp = _sorted_factors(fp, method)
fq = _sorted_factors(fq, method)
if not opt.polys:
fp = [(f.as_expr(), k) for f, k in fp]
fq = [(f.as_expr(), k) for f, k in fq]
coeff = cp/cq
if not opt.frac:
return coeff, fp
else:
return coeff, fp, fq
else:
raise PolynomialError("a polynomial expected, got %s" % expr)
def _generic_factor(expr, gens, args, method):
"""Helper function for :func:`sqf` and :func:`factor`. """
fraction = args.pop('fraction', True)
options.allowed_flags(args, [])
opt = options.build_options(gens, args)
opt['fraction'] = fraction
return _symbolic_factor(sympify(expr), opt, method)
def to_rational_coeffs(f):
"""
try to transform a polynomial to have rational coefficients
try to find a transformation ``x = alpha*y``
``f(x) = lc*alpha**n * g(y)`` where ``g`` is a polynomial with
rational coefficients, ``lc`` the leading coefficient.
If this fails, try ``x = y + beta``
``f(x) = g(y)``
Returns ``None`` if ``g`` not found;
``(lc, alpha, None, g)`` in case of rescaling
``(None, None, beta, g)`` in case of translation
Notes
=====
Currently it transforms only polynomials whose coefficients involve square
roots, i.e. radicals of index at most 2 (see ``_has_square_roots`` below).
Examples
========
>>> from sympy import sqrt, Poly, simplify
>>> from sympy.polys.polytools import to_rational_coeffs
>>> from sympy.abc import x
>>> p = Poly(((x**2-1)*(x-2)).subs({x:x*(1 + sqrt(2))}), x, domain='EX')
>>> lc, r, _, g = to_rational_coeffs(p)
>>> lc, r
(7 + 5*sqrt(2), 2 - 2*sqrt(2))
>>> g
Poly(x**3 + x**2 - 1/4*x - 1/4, x, domain='QQ')
>>> r1 = simplify(1/r)
>>> Poly(lc*r**3*(g.as_expr()).subs({x:x*r1}), x, domain='EX') == p
True
"""
from sympy.simplify.simplify import simplify
def _try_rescale(f, f1=None):
"""
try rescaling ``x -> alpha*x`` to convert f to a polynomial
with rational coefficients.
Returns ``alpha, f``; if the rescaling is successful,
``alpha`` is the rescaling factor, and ``f`` is the rescaled
polynomial; else ``alpha`` is ``None``.
"""
from sympy.core.add import Add
if not len(f.gens) == 1 or not (f.gens[0]).is_Atom:
return None, f
n = f.degree()
lc = f.LC()
f1 = f1 or f.monic()  # fall back to the monic form of ``f`` when ``f1`` is not supplied
coeffs = f1.all_coeffs()[1:]
coeffs = [simplify(coeffx) for coeffx in coeffs]
if coeffs[-2]:
rescale1_x = simplify(coeffs[-2]/coeffs[-1])
coeffs1 = []
for i in range(len(coeffs)):
coeffx = simplify(coeffs[i]*rescale1_x**(i + 1))
if not coeffx.is_rational:
break
coeffs1.append(coeffx)
else:
rescale_x = simplify(1/rescale1_x)
x = f.gens[0]
v = [x**n]
for i in range(1, n + 1):
v.append(coeffs1[i - 1]*x**(n - i))
f = Add(*v)
f = Poly(f)
return lc, rescale_x, f
return None
def _try_translate(f, f1=None):
"""
try translating ``x -> x + alpha`` to convert f to a polynomial
with rational coefficients.
Returns ``alpha, f``; if the translating is successful,
``alpha`` is the translating factor, and ``f`` is the shifted
polynomial; else ``alpha`` is ``None``.
"""
from sympy.core.add import Add
if not len(f.gens) == 1 or not (f.gens[0]).is_Atom:
return None, f
n = f.degree()
f1 = f1 or f.monic()  # fall back to the monic form of ``f`` when ``f1`` is not supplied
coeffs = f1.all_coeffs()[1:]
c = simplify(coeffs[0])
if c and not c.is_rational:
func = Add
if c.is_Add:
args = c.args
func = c.func
else:
args = [c]
c1, c2 = sift(args, lambda z: z.is_rational, binary=True)
alpha = -func(*c2)/n
f2 = f1.shift(alpha)
return alpha, f2
return None
def _has_square_roots(p):
"""
Return True if the coefficients of ``p`` contain square roots but no radicals of higher index
"""
from sympy.core.exprtools import Factors
coeffs = p.coeffs()
has_sq = False
for y in coeffs:
for x in Add.make_args(y):
f = Factors(x).factors
r = [wx.q for b, wx in f.items() if
b.is_number and wx.is_Rational and wx.q >= 2]
if not r:
continue
if min(r) == 2:
has_sq = True
if max(r) > 2:
return False
return has_sq
if f.get_domain().is_EX and _has_square_roots(f):
f1 = f.monic()
r = _try_rescale(f, f1)
if r:
return r[0], r[1], None, r[2]
else:
r = _try_translate(f, f1)
if r:
return None, None, r[0], r[1]
return None
def _torational_factor_list(p, x):
"""
helper function to factor polynomial using to_rational_coeffs
Examples
========
>>> from sympy.polys.polytools import _torational_factor_list
>>> from sympy.abc import x
>>> from sympy import sqrt, expand, Mul
>>> p = expand(((x**2-1)*(x-2)).subs({x:x*(1 + sqrt(2))}))
>>> factors = _torational_factor_list(p, x); factors
(-2, [(-x*(1 + sqrt(2))/2 + 1, 1), (-x*(1 + sqrt(2)) - 1, 1), (-x*(1 + sqrt(2)) + 1, 1)])
>>> expand(factors[0]*Mul(*[z[0] for z in factors[1]])) == p
True
>>> p = expand(((x**2-1)*(x-2)).subs({x:x + sqrt(2)}))
>>> factors = _torational_factor_list(p, x); factors
(1, [(x - 2 + sqrt(2), 1), (x - 1 + sqrt(2), 1), (x + 1 + sqrt(2), 1)])
>>> expand(factors[0]*Mul(*[z[0] for z in factors[1]])) == p
True
"""
from sympy.simplify.simplify import simplify
p1 = Poly(p, x, domain='EX')
n = p1.degree()
res = to_rational_coeffs(p1)
if not res:
return None
lc, r, t, g = res
factors = factor_list(g.as_expr())
if lc:
c = simplify(factors[0]*lc*r**n)
r1 = simplify(1/r)
a = []
for z in factors[1:][0]:
a.append((simplify(z[0].subs({x: x*r1})), z[1]))
else:
c = factors[0]
a = []
for z in factors[1:][0]:
a.append((z[0].subs({x: x - t}), z[1]))
return (c, a)
@public
def sqf_list(f, *gens, **args):
"""
Compute a list of square-free factors of ``f``.
Examples
========
>>> from sympy import sqf_list
>>> from sympy.abc import x
>>> sqf_list(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
(2, [(x + 1, 2), (x + 2, 3)])
"""
return _generic_factor_list(f, gens, args, method='sqf')
@public
def sqf(f, *gens, **args):
"""
Compute square-free factorization of ``f``.
Examples
========
>>> from sympy import sqf
>>> from sympy.abc import x
>>> sqf(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
2*(x + 1)**2*(x + 2)**3
"""
return _generic_factor(f, gens, args, method='sqf')
@public
def factor_list(f, *gens, **args):
"""
Compute a list of irreducible factors of ``f``.
Examples
========
>>> from sympy import factor_list
>>> from sympy.abc import x, y
>>> factor_list(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
(2, [(x + y, 1), (x**2 + 1, 2)])
"""
return _generic_factor_list(f, gens, args, method='factor')
@public
def factor(f, *gens, **args):
"""
Compute the factorization of expression, ``f``, into irreducibles. (To
factor an integer into primes, use ``factorint``.)
There are two modes implemented: symbolic and formal. If ``f`` is not an
instance of :class:`Poly` and generators are not specified, then the
former mode is used. Otherwise, the formal mode is used.
In symbolic mode, :func:`factor` will traverse the expression tree and
factor its components without any prior expansion, unless an instance
of :class:`~.Add` is encountered (in this case formal factorization is
used). This way :func:`factor` can handle large or symbolic exponents.
By default, the factorization is computed over the rationals. To factor
over another domain, e.g. an algebraic or finite field, use the appropriate
options: ``extension``, ``modulus`` or ``domain``.
Examples
========
>>> from sympy import factor, sqrt, exp
>>> from sympy.abc import x, y
>>> factor(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
2*(x + y)*(x**2 + 1)**2
>>> factor(x**2 + 1)
x**2 + 1
>>> factor(x**2 + 1, modulus=2)
(x + 1)**2
>>> factor(x**2 + 1, gaussian=True)
(x - I)*(x + I)
>>> factor(x**2 - 2, extension=sqrt(2))
(x - sqrt(2))*(x + sqrt(2))
>>> factor((x**2 - 1)/(x**2 + 4*x + 4))
(x - 1)*(x + 1)/(x + 2)**2
>>> factor((x**2 + 4*x + 4)**10000000*(x**2 + 1))
(x + 2)**20000000*(x**2 + 1)
By default, factor deals with an expression as a whole:
>>> eq = 2**(x**2 + 2*x + 1)
>>> factor(eq)
2**(x**2 + 2*x + 1)
If the ``deep`` flag is True then subexpressions will
be factored:
>>> factor(eq, deep=True)
2**((x + 1)**2)
If the ``fraction`` flag is False then rational expressions
won't be combined. By default it is True.
>>> factor(5*x + 3*exp(2 - 7*x), deep=True)
(5*x*exp(7*x) + 3*exp(2))*exp(-7*x)
>>> factor(5*x + 3*exp(2 - 7*x), deep=True, fraction=False)
5*x + 3*exp(2)*exp(-7*x)
See Also
========
sympy.ntheory.factor_.factorint
"""
f = sympify(f)
if args.pop('deep', False):
from sympy.simplify.simplify import bottom_up
def _try_factor(expr):
"""
Factor, but avoid changing the expression when unable to.
"""
fac = factor(expr, *gens, **args)
if fac.is_Mul or fac.is_Pow:
return fac
return expr
f = bottom_up(f, _try_factor)
# clean up any subexpressions that may have been expanded
# while factoring out a larger expression
partials = {}
muladd = f.atoms(Mul, Add)
for p in muladd:
fac = factor(p, *gens, **args)
if (fac.is_Mul or fac.is_Pow) and fac != p:
partials[p] = fac
return f.xreplace(partials)
try:
return _generic_factor(f, gens, args, method='factor')
except PolynomialError as msg:
if not f.is_commutative:
from sympy.core.exprtools import factor_nc
return factor_nc(f)
else:
raise PolynomialError(msg)
@public
def intervals(F, all=False, eps=None, inf=None, sup=None, strict=False, fast=False, sqf=False):
"""
Compute isolating intervals for roots of ``f``.
Examples
========
>>> from sympy import intervals
>>> from sympy.abc import x
>>> intervals(x**2 - 3)
[((-2, -1), 1), ((1, 2), 1)]
>>> intervals(x**2 - 3, eps=1e-2)
[((-26/15, -19/11), 1), ((19/11, 26/15), 1)]
"""
if not hasattr(F, '__iter__'):
try:
F = Poly(F)
except GeneratorsNeeded:
return []
return F.intervals(all=all, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf)
else:
polys, opt = parallel_poly_from_expr(F, domain='QQ')
if len(opt.gens) > 1:
raise MultivariatePolynomialError
for i, poly in enumerate(polys):
polys[i] = poly.rep.rep
if eps is not None:
eps = opt.domain.convert(eps)
if eps <= 0:
raise ValueError("'eps' must be a positive rational")
if inf is not None:
inf = opt.domain.convert(inf)
if sup is not None:
sup = opt.domain.convert(sup)
intervals = dup_isolate_real_roots_list(polys, opt.domain,
eps=eps, inf=inf, sup=sup, strict=strict, fast=fast)
result = []
for (s, t), indices in intervals:
s, t = opt.domain.to_sympy(s), opt.domain.to_sympy(t)
result.append(((s, t), indices))
return result
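# Hedged usage note (added; not part of the original docstring): when ``F`` is
# a list of polynomials, the second member of each returned pair is a dict
# mapping the index of a polynomial in ``F`` to the multiplicity of the root
# isolated in that interval, rather than a plain integer count. Illustrative
# sketch only; the exact endpoints depend on the isolation algorithm:
#
#   >>> intervals([x + 2, x**2 - 3])   # doctest: +SKIP
#
# where each returned pair has the form ``((s, t), {i: k, ...})`` with ``i``
# indexing into the input list and ``k`` the multiplicity.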
@public
def refine_root(f, s, t, eps=None, steps=None, fast=False, check_sqf=False):
"""
Refine an isolating interval of a root to the given precision.
Examples
========
>>> from sympy import refine_root
>>> from sympy.abc import x
>>> refine_root(x**2 - 3, 1, 2, eps=1e-2)
(19/11, 26/15)
"""
try:
F = Poly(f)
except GeneratorsNeeded:
raise PolynomialError(
"can't refine a root of %s, not a polynomial" % f)
return F.refine_root(s, t, eps=eps, steps=steps, fast=fast, check_sqf=check_sqf)
@public
def count_roots(f, inf=None, sup=None):
"""
Return the number of roots of ``f`` in ``[inf, sup]`` interval.
If one of ``inf`` or ``sup`` is complex, it will return the number of roots
in the complex rectangle with corners at ``inf`` and ``sup``.
Examples
========
>>> from sympy import count_roots, I
>>> from sympy.abc import x
>>> count_roots(x**4 - 4, -3, 3)
2
>>> count_roots(x**4 - 4, 0, 1 + 3*I)
1
"""
try:
F = Poly(f, greedy=False)
except GeneratorsNeeded:
raise PolynomialError("can't count roots of %s, not a polynomial" % f)
return F.count_roots(inf=inf, sup=sup)
@public
def real_roots(f, multiple=True):
"""
Return a list of real roots with multiplicities of ``f``.
Examples
========
>>> from sympy import real_roots
>>> from sympy.abc import x
>>> real_roots(2*x**3 - 7*x**2 + 4*x + 4)
[-1/2, 2, 2]
"""
try:
F = Poly(f, greedy=False)
except GeneratorsNeeded:
raise PolynomialError(
"can't compute real roots of %s, not a polynomial" % f)
return F.real_roots(multiple=multiple)
@public
def nroots(f, n=15, maxsteps=50, cleanup=True):
"""
Compute numerical approximations of roots of ``f``.
Examples
========
>>> from sympy import nroots
>>> from sympy.abc import x
>>> nroots(x**2 - 3, n=15)
[-1.73205080756888, 1.73205080756888]
>>> nroots(x**2 - 3, n=30)
[-1.73205080756887729352744634151, 1.73205080756887729352744634151]
"""
try:
F = Poly(f, greedy=False)
except GeneratorsNeeded:
raise PolynomialError(
"can't compute numerical roots of %s, not a polynomial" % f)
return F.nroots(n=n, maxsteps=maxsteps, cleanup=cleanup)
@public
def ground_roots(f, *gens, **args):
"""
Compute roots of ``f`` by factorization in the ground domain.
Examples
========
>>> from sympy import ground_roots
>>> from sympy.abc import x
>>> ground_roots(x**6 - 4*x**4 + 4*x**3 - x**2)
{0: 2, 1: 2}
"""
options.allowed_flags(args, [])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('ground_roots', 1, exc)
return F.ground_roots()
@public
def nth_power_roots_poly(f, n, *gens, **args):
"""
Construct a polynomial with n-th powers of roots of ``f``.
Examples
========
>>> from sympy import nth_power_roots_poly, factor, roots
>>> from sympy.abc import x
>>> f = x**4 - x**2 + 1
>>> g = factor(nth_power_roots_poly(f, 2))
>>> g
(x**2 - x + 1)**2
>>> R_f = [ (r**2).expand() for r in roots(f) ]
>>> R_g = roots(g).keys()
>>> set(R_f) == set(R_g)
True
"""
options.allowed_flags(args, [])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('nth_power_roots_poly', 1, exc)
result = F.nth_power_roots_poly(n)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def cancel(f, *gens, **args):
"""
Cancel common factors in a rational function ``f``.
Examples
========
>>> from sympy import cancel, sqrt, Symbol, together
>>> from sympy.abc import x
>>> A = Symbol('A', commutative=False)
>>> cancel((2*x**2 - 2)/(x**2 - 2*x + 1))
(2*x + 2)/(x - 1)
>>> cancel((sqrt(3) + sqrt(15)*A)/(sqrt(2) + sqrt(10)*A))
sqrt(6)/2
Note: due to automatic distribution of Rationals, a sum divided by an integer
will appear as a sum. To recover a rational form use `together` on the result:
>>> cancel(x/2 + 1)
x/2 + 1
>>> together(_)
(x + 2)/2
"""
from sympy.core.exprtools import factor_terms
from sympy.functions.elementary.piecewise import Piecewise
options.allowed_flags(args, ['polys'])
f = sympify(f)
if not isinstance(f, (tuple, Tuple)):
if f.is_Number or isinstance(f, Relational) or not isinstance(f, Expr):
return f
f = factor_terms(f, radical=True)
p, q = f.as_numer_denom()
elif len(f) == 2:
p, q = f
elif isinstance(f, Tuple):
return factor_terms(f)
else:
raise ValueError('unexpected argument: %s' % f)
try:
(F, G), opt = parallel_poly_from_expr((p, q), *gens, **args)
except PolificationFailed:
if not isinstance(f, (tuple, Tuple)):
return f.expand()
else:
return S.One, p, q
except PolynomialError as msg:
if f.is_commutative and not f.has(Piecewise):
raise PolynomialError(msg)
# Handling of noncommutative and/or piecewise expressions
if f.is_Add or f.is_Mul:
c, nc = sift(f.args, lambda x:
x.is_commutative is True and not x.has(Piecewise),
binary=True)
nc = [cancel(i) for i in nc]
return f.func(cancel(f.func(*c)), *nc)
else:
reps = []
pot = preorder_traversal(f)
next(pot)
for e in pot:
# XXX: This should really skip anything that's not Expr.
if isinstance(e, (tuple, Tuple, BooleanAtom)):
continue
try:
reps.append((e, cancel(e)))
pot.skip() # this was handled successfully
except NotImplementedError:
pass
return f.xreplace(dict(reps))
c, P, Q = F.cancel(G)
if not isinstance(f, (tuple, Tuple)):
return c*(P.as_expr()/Q.as_expr())
else:
if not opt.polys:
return c, P.as_expr(), Q.as_expr()
else:
return c, P, Q
@public
def reduced(f, G, *gens, **args):
"""
Reduces a polynomial ``f`` modulo a set of polynomials ``G``.
Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``,
computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r``
such that ``f = q_1*g_1 + ... + q_n*g_n + r``, where ``r`` vanishes or ``r``
is a completely reduced polynomial with respect to ``G``.
Examples
========
>>> from sympy import reduced
>>> from sympy.abc import x, y
>>> reduced(2*x**4 + y**2 - x**2 + y**3, [x**3 - x, y**3 - y])
([2*x, 1], x**2 + y**2 + y)
"""
options.allowed_flags(args, ['polys', 'auto'])
try:
polys, opt = parallel_poly_from_expr([f] + list(G), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('reduced', 0, exc)
domain = opt.domain
retract = False
if opt.auto and domain.is_Ring and not domain.is_Field:
opt = opt.clone(dict(domain=domain.get_field()))
retract = True
from sympy.polys.rings import xring
_ring, _ = xring(opt.gens, opt.domain, opt.order)
for i, poly in enumerate(polys):
poly = poly.set_domain(opt.domain).rep.to_dict()
polys[i] = _ring.from_dict(poly)
Q, r = polys[0].div(polys[1:])
Q = [Poly._from_dict(dict(q), opt) for q in Q]
r = Poly._from_dict(dict(r), opt)
if retract:
try:
_Q, _r = [q.to_ring() for q in Q], r.to_ring()
except CoercionFailed:
pass
else:
Q, r = _Q, _r
if not opt.polys:
return [q.as_expr() for q in Q], r.as_expr()
else:
return Q, r
@public
def groebner(F, *gens, **args):
"""
Computes the reduced Groebner basis for a set of polynomials.
Use the ``order`` argument to set the monomial ordering that will be
used to compute the basis. Allowed orders are ``lex``, ``grlex`` and
``grevlex``. If no order is specified, it defaults to ``lex``.
For more information on Groebner bases, see the references and the docstring
of :func:`~.solve_poly_system`.
Examples
========
Example taken from [1].
>>> from sympy import groebner
>>> from sympy.abc import x, y
>>> F = [x*y - 2*y, 2*y**2 - x**2]
>>> groebner(F, x, y, order='lex')
GroebnerBasis([x**2 - 2*y**2, x*y - 2*y, y**3 - 2*y], x, y,
domain='ZZ', order='lex')
>>> groebner(F, x, y, order='grlex')
GroebnerBasis([y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y], x, y,
domain='ZZ', order='grlex')
>>> groebner(F, x, y, order='grevlex')
GroebnerBasis([y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y], x, y,
domain='ZZ', order='grevlex')
By default, an improved implementation of the Buchberger algorithm is
used. Optionally, an implementation of the F5B algorithm can be used. The
algorithm can be set using the ``method`` flag or with the
:func:`sympy.polys.polyconfig.setup` function.
>>> F = [x**2 - x - 1, (2*x - 1) * y - (x**10 - (1 - x)**10)]
>>> groebner(F, x, y, method='buchberger')
GroebnerBasis([x**2 - x - 1, y - 55], x, y, domain='ZZ', order='lex')
>>> groebner(F, x, y, method='f5b')
GroebnerBasis([x**2 - x - 1, y - 55], x, y, domain='ZZ', order='lex')
References
==========
1. [Buchberger01]_
2. [Cox97]_
"""
return GroebnerBasis(F, *gens, **args)
@public
def is_zero_dimensional(F, *gens, **args):
"""
Checks if the ideal generated by a Groebner basis is zero-dimensional.
The algorithm checks if the set of monomials not divisible by the
leading monomial of any element of ``F`` is bounded.
References
==========
David A. Cox, John B. Little, Donal O'Shea. Ideals, Varieties and
Algorithms, 3rd edition, p. 230
"""
return GroebnerBasis(F, *gens, **args).is_zero_dimensional
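# Hedged usage sketch (added; not part of the original source). The variety of
# ``[x**2 - 1, y - 2]`` is the finite set {(1, 2), (-1, 2)}, so the ideal is
# zero-dimensional, while ``[x*y]`` cuts out the two coordinate axes and is not:
#
#   >>> from sympy.polys.polytools import is_zero_dimensional
#   >>> from sympy.abc import x, y
#   >>> is_zero_dimensional([x**2 - 1, y - 2], x, y)
#   True
#   >>> is_zero_dimensional([x*y], x, y)
#   False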
@public
class GroebnerBasis(Basic):
"""Represents a reduced Groebner basis. """
def __new__(cls, F, *gens, **args):
"""Compute a reduced Groebner basis for a system of polynomials. """
options.allowed_flags(args, ['polys', 'method'])
try:
polys, opt = parallel_poly_from_expr(F, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('groebner', len(F), exc)
from sympy.polys.rings import PolyRing
ring = PolyRing(opt.gens, opt.domain, opt.order)
polys = [ring.from_dict(poly.rep.to_dict()) for poly in polys if poly]
G = _groebner(polys, ring, method=opt.method)
G = [Poly._from_dict(g, opt) for g in G]
return cls._new(G, opt)
@classmethod
def _new(cls, basis, options):
obj = Basic.__new__(cls)
obj._basis = tuple(basis)
obj._options = options
return obj
@property
def args(self):
basis = (p.as_expr() for p in self._basis)
return (Tuple(*basis), Tuple(*self._options.gens))
@property
def exprs(self):
return [poly.as_expr() for poly in self._basis]
@property
def polys(self):
return list(self._basis)
@property
def gens(self):
return self._options.gens
@property
def domain(self):
return self._options.domain
@property
def order(self):
return self._options.order
def __len__(self):
return len(self._basis)
def __iter__(self):
if self._options.polys:
return iter(self.polys)
else:
return iter(self.exprs)
def __getitem__(self, item):
if self._options.polys:
basis = self.polys
else:
basis = self.exprs
return basis[item]
def __hash__(self):
return hash((self._basis, tuple(self._options.items())))
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._basis == other._basis and self._options == other._options
elif iterable(other):
return self.polys == list(other) or self.exprs == list(other)
else:
return False
def __ne__(self, other):
return not self == other
@property
def is_zero_dimensional(self):
"""
Checks if the ideal generated by a Groebner basis is zero-dimensional.
The algorithm checks if the set of monomials not divisible by the
leading monomial of any element of ``F`` is bounded.
References
==========
David A. Cox, John B. Little, Donal O'Shea. Ideals, Varieties and
Algorithms, 3rd edition, p. 230
"""
def single_var(monomial):
return sum(map(bool, monomial)) == 1
exponents = Monomial([0]*len(self.gens))
order = self._options.order
for poly in self.polys:
monomial = poly.LM(order=order)
if single_var(monomial):
exponents *= monomial
# If any element of the exponents vector is zero, then there's
# a variable for which there's no degree bound and the ideal
# generated by this Groebner basis isn't zero-dimensional.
return all(exponents)
def fglm(self, order):
"""
Convert a Groebner basis from one ordering to another.
The FGLM algorithm converts reduced Groebner bases of zero-dimensional
ideals from one ordering to another. This method is often used when it
is infeasible to compute a Groebner basis with respect to a particular
ordering directly.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import groebner
>>> F = [x**2 - 3*y - x + 1, y**2 - 2*x + y - 1]
>>> G = groebner(F, x, y, order='grlex')
>>> list(G.fglm('lex'))
[2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7]
>>> list(groebner(F, x, y, order='lex'))
[2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7]
References
==========
.. [1] J.C. Faugere, P. Gianni, D. Lazard, T. Mora (1994). Efficient
Computation of Zero-dimensional Groebner Bases by Change of
Ordering
"""
opt = self._options
src_order = opt.order
dst_order = monomial_key(order)
if src_order == dst_order:
return self
if not self.is_zero_dimensional:
raise NotImplementedError("can't convert Groebner bases of ideals with positive dimension")
polys = list(self._basis)
domain = opt.domain
opt = opt.clone(dict(
domain=domain.get_field(),
order=dst_order,
))
from sympy.polys.rings import xring
_ring, _ = xring(opt.gens, opt.domain, src_order)
for i, poly in enumerate(polys):
poly = poly.set_domain(opt.domain).rep.to_dict()
polys[i] = _ring.from_dict(poly)
G = matrix_fglm(polys, _ring, dst_order)
G = [Poly._from_dict(dict(g), opt) for g in G]
if not domain.is_Field:
G = [g.clear_denoms(convert=True)[1] for g in G]
opt.domain = domain
return self._new(G, opt)
def reduce(self, expr, auto=True):
"""
Reduces a polynomial modulo a Groebner basis.
Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``,
computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r``
such that ``f = q_1*g_1 + ... + q_n*g_n + r``, where ``r`` vanishes or ``r``
is a completely reduced polynomial with respect to ``G``.
Examples
========
>>> from sympy import groebner, expand
>>> from sympy.abc import x, y
>>> f = 2*x**4 - x**2 + y**3 + y**2
>>> G = groebner([x**3 - x, y**3 - y])
>>> G.reduce(f)
([2*x, 1], x**2 + y**2 + y)
>>> Q, r = _
>>> expand(sum(q*g for q, g in zip(Q, G)) + r)
2*x**4 - x**2 + y**3 + y**2
>>> _ == f
True
"""
poly = Poly._from_expr(expr, self._options)
polys = [poly] + list(self._basis)
opt = self._options
domain = opt.domain
retract = False
if auto and domain.is_Ring and not domain.is_Field:
opt = opt.clone(dict(domain=domain.get_field()))
retract = True
from sympy.polys.rings import xring
_ring, _ = xring(opt.gens, opt.domain, opt.order)
for i, poly in enumerate(polys):
poly = poly.set_domain(opt.domain).rep.to_dict()
polys[i] = _ring.from_dict(poly)
Q, r = polys[0].div(polys[1:])
Q = [Poly._from_dict(dict(q), opt) for q in Q]
r = Poly._from_dict(dict(r), opt)
if retract:
try:
_Q, _r = [q.to_ring() for q in Q], r.to_ring()
except CoercionFailed:
pass
else:
Q, r = _Q, _r
if not opt.polys:
return [q.as_expr() for q in Q], r.as_expr()
else:
return Q, r
def contains(self, poly):
"""
Check if ``poly`` belongs to the ideal generated by ``self``.
Examples
========
>>> from sympy import groebner
>>> from sympy.abc import x, y
>>> f = 2*x**3 + y**3 + 3*y
>>> G = groebner([x**2 + y**2 - 1, x*y - 2])
>>> G.contains(f)
True
>>> G.contains(f + 1)
False
"""
return self.reduce(poly)[1] == 0
@public
def poly(expr, *gens, **args):
"""
Efficiently transform an expression into a polynomial.
Examples
========
>>> from sympy import poly
>>> from sympy.abc import x
>>> poly(x*(x**2 + x - 1)**2)
Poly(x**5 + 2*x**4 - x**3 - 2*x**2 + x, x, domain='ZZ')
"""
options.allowed_flags(args, [])
def _poly(expr, opt):
terms, poly_terms = [], []
for term in Add.make_args(expr):
factors, poly_factors = [], []
for factor in Mul.make_args(term):
if factor.is_Add:
poly_factors.append(_poly(factor, opt))
elif factor.is_Pow and factor.base.is_Add and \
factor.exp.is_Integer and factor.exp >= 0:
poly_factors.append(
_poly(factor.base, opt).pow(factor.exp))
else:
factors.append(factor)
if not poly_factors:
terms.append(term)
else:
product = poly_factors[0]
for factor in poly_factors[1:]:
product = product.mul(factor)
if factors:
factor = Mul(*factors)
if factor.is_Number:
product = product.mul(factor)
else:
product = product.mul(Poly._from_expr(factor, opt))
poly_terms.append(product)
if not poly_terms:
result = Poly._from_expr(expr, opt)
else:
result = poly_terms[0]
for term in poly_terms[1:]:
result = result.add(term)
if terms:
term = Add(*terms)
if term.is_Number:
result = result.add(term)
else:
result = result.add(Poly._from_expr(term, opt))
return result.reorder(*opt.get('gens', ()), **args)
expr = sympify(expr)
if expr.is_Poly:
return Poly(expr, *gens, **args)
if 'expand' not in args:
args['expand'] = False
opt = options.build_options(gens, args)
return _poly(expr, opt)
"""Functions for generating interesting polynomials, e.g. for benchmarking. """
from __future__ import print_function, division
from sympy.core import Add, Mul, Symbol, sympify, Dummy, symbols
from sympy.core.containers import Tuple
from sympy.core.singleton import S
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.ntheory import nextprime
from sympy.polys.densearith import (
dmp_add_term, dmp_neg, dmp_mul, dmp_sqr
)
from sympy.polys.densebasic import (
dmp_zero, dmp_one, dmp_ground,
dup_from_raw_dict, dmp_raise, dup_random
)
from sympy.polys.domains import ZZ
from sympy.polys.factortools import dup_zz_cyclotomic_poly
from sympy.polys.polyclasses import DMP
from sympy.polys.polytools import Poly, PurePoly
from sympy.polys.polyutils import _analyze_gens
from sympy.utilities import subsets, public, filldedent
@public
def swinnerton_dyer_poly(n, x=None, polys=False):
"""Generates n-th Swinnerton-Dyer polynomial in `x`.
Parameters
----------
n : int
`n` decides the order of polynomial
x : optional
polys : bool, optional
``polys=True`` returns a Poly object, otherwise
(default) returns an expression.
"""
from .numberfields import minimal_polynomial
if n <= 0:
raise ValueError(
"can't generate Swinnerton-Dyer polynomial of order %s" % n)
if x is not None:
x = sympify(x)  # normalize to a SymPy object while keeping the caller's symbol
else:
x = Dummy('x')
if n > 3:
p = 2
a = [sqrt(2)]
for i in range(2, n + 1):
p = nextprime(p)
a.append(sqrt(p))
return minimal_polynomial(Add(*a), x, polys=polys)
if n == 1:
ex = x**2 - 2
elif n == 2:
ex = x**4 - 10*x**2 + 1
elif n == 3:
ex = x**8 - 40*x**6 + 352*x**4 - 960*x**2 + 576
return PurePoly(ex, x) if polys else ex
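# Illustrative sketch (added; not in the original docstring). For small ``n``
# the result comes from the precomputed table above, e.g.:
#
#   >>> from sympy.polys.specialpolys import swinnerton_dyer_poly
#   >>> from sympy.abc import x
#   >>> swinnerton_dyer_poly(2, x)
#   x**4 - 10*x**2 + 1
#   >>> swinnerton_dyer_poly(2, x, polys=True)
#   PurePoly(x**4 - 10*x**2 + 1, x, domain='ZZ')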
@public
def cyclotomic_poly(n, x=None, polys=False):
"""Generates cyclotomic polynomial of order `n` in `x`.
Parameters
----------
n : int
`n` decides the order of polynomial
x : optional
polys : bool, optional
``polys=True`` returns a Poly object, otherwise
(default) returns an expression.
"""
if n <= 0:
raise ValueError(
"can't generate cyclotomic polynomial of order %s" % n)
poly = DMP(dup_zz_cyclotomic_poly(int(n), ZZ), ZZ)
if x is not None:
poly = Poly.new(poly, x)
else:
poly = PurePoly.new(poly, Dummy('x'))
return poly if polys else poly.as_expr()
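# Illustrative sketch (added; not in the original docstring): the sixth
# cyclotomic polynomial, for instance, comes out as
#
#   >>> from sympy.polys.specialpolys import cyclotomic_poly
#   >>> from sympy.abc import x
#   >>> cyclotomic_poly(6, x)
#   x**2 - x + 1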
@public
def symmetric_poly(n, *gens, **args):
"""Generates symmetric polynomial of order `n`.
Returns a Poly object when ``polys=True``, otherwise
(default) returns an expression.
"""
# TODO: use an explicit keyword argument when Python 2 support is dropped
gens = _analyze_gens(gens)
if n < 0 or n > len(gens) or not gens:
raise ValueError("can't generate symmetric polynomial of order %s for %s" % (n, gens))
elif not n:
poly = S.One
else:
poly = Add(*[Mul(*s) for s in subsets(gens, int(n))])
if not args.get('polys', False):
return poly
else:
return Poly(poly, *gens)
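# Illustrative sketch (added; not in the original docstring): the elementary
# symmetric polynomial of order 2 in three variables is the sum of all
# pairwise products:
#
#   >>> from sympy.polys.specialpolys import symmetric_poly
#   >>> from sympy.abc import x, y, z
#   >>> symmetric_poly(2, x, y, z)
#   x*y + x*z + y*z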
@public
def random_poly(x, n, inf, sup, domain=ZZ, polys=False):
"""Generates a polynomial of degree ``n`` with coefficients in
``[inf, sup]``.
Parameters
----------
x
`x` is the independent variable of the polynomial
n : int
`n` decides the order of polynomial
inf
Lower limit of range in which coefficients lie
sup
Upper limit of range in which coefficients lie
domain : optional
Decides the ring to which the coefficients belong.
Defaults to the integers, ``ZZ``.
polys : bool, optional
``polys=True`` returns a Poly object, otherwise
(default) returns an expression.
"""
poly = Poly(dup_random(n, inf, sup, domain), x, domain=domain)
return poly if polys else poly.as_expr()
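# Illustrative sketch (added; not in the original docstring): the coefficients
# are random, so only structural properties can be asserted, e.g. the degree
# of the result and the coefficient bounds:
#
#   >>> from sympy.polys.specialpolys import random_poly
#   >>> from sympy.abc import x
#   >>> p = random_poly(x, 5, -10, 10, polys=True)
#   >>> p.degree()
#   5
#   >>> all(-10 <= c <= 10 for c in p.all_coeffs())
#   True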
@public
def interpolating_poly(n, x, X='x', Y='y'):
"""Construct Lagrange interpolating polynomial for ``n``
data points. If a sequence of values is given for ``X`` and ``Y``
then the first ``n`` values will be used.
"""
ok = getattr(x, 'free_symbols', None)
if isinstance(X, str):
X = symbols("%s:%s" % (X, n))
elif ok and ok & Tuple(*X).free_symbols:
ok = False
if isinstance(Y, str):
Y = symbols("%s:%s" % (Y, n))
elif ok and ok & Tuple(*Y).free_symbols:
ok = False
if not ok:
raise ValueError(filldedent('''
Expecting symbol for x that does not appear in X or Y.
Use `interpolate(list(zip(X, Y)), x)` instead.'''))
coeffs = []
numert = Mul(*[x - X[i] for i in range(n)])
for i in range(n):
numer = numert/(x - X[i])
denom = Mul(*[(X[i] - X[j]) for j in range(n) if i != j])
coeffs.append(numer/denom)
return Add(*[coeff*y for coeff, y in zip(coeffs, Y)])
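# Illustrative sketch (added; not in the original docstring): the interpolating
# polynomial reproduces the supplied data points exactly, which can be checked
# by substitution:
#
#   >>> from sympy import simplify, symbols
#   >>> from sympy.polys.specialpolys import interpolating_poly
#   >>> x = symbols('x')
#   >>> p = interpolating_poly(3, x, X=(0, 1, 2), Y=(1, 2, 5))
#   >>> [simplify(p.subs(x, xi)) for xi in (0, 1, 2)]
#   [1, 2, 5]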
def fateman_poly_F_1(n):
"""Fateman's GCD benchmark: trivial GCD """
Y = [Symbol('y_' + str(i)) for i in range(n + 1)]
y_0, y_1 = Y[0], Y[1]
u = y_0 + Add(*[y for y in Y[1:]])
v = y_0**2 + Add(*[y**2 for y in Y[1:]])
F = ((u + 1)*(u + 2)).as_poly(*Y)
G = ((v + 1)*(-3*y_1*y_0**2 + y_1**2 - 1)).as_poly(*Y)
H = Poly(1, *Y)
return F, G, H
def dmp_fateman_poly_F_1(n, K):
"""Fateman's GCD benchmark: trivial GCD """
u = [K(1), K(0)]
for i in range(n):
u = [dmp_one(i, K), u]
v = [K(1), K(0), K(0)]
for i in range(0, n):
v = [dmp_one(i, K), dmp_zero(i), v]
m = n - 1
U = dmp_add_term(u, dmp_ground(K(1), m), 0, n, K)
V = dmp_add_term(u, dmp_ground(K(2), m), 0, n, K)
f = [[-K(3), K(0)], [], [K(1), K(0), -K(1)]]
W = dmp_add_term(v, dmp_ground(K(1), m), 0, n, K)
Y = dmp_raise(f, m, 1, K)
F = dmp_mul(U, V, n, K)
G = dmp_mul(W, Y, n, K)
H = dmp_one(n, K)
return F, G, H
def fateman_poly_F_2(n):
"""Fateman's GCD benchmark: linearly dense quartic inputs """
Y = [Symbol('y_' + str(i)) for i in range(n + 1)]
y_0 = Y[0]
u = Add(*[y for y in Y[1:]])
H = Poly((y_0 + u + 1)**2, *Y)
F = Poly((y_0 - u - 2)**2, *Y)
G = Poly((y_0 + u + 2)**2, *Y)
return H*F, H*G, H
def dmp_fateman_poly_F_2(n, K):
"""Fateman's GCD benchmark: linearly dense quartic inputs """
u = [K(1), K(0)]
for i in range(n - 1):
u = [dmp_one(i, K), u]
m = n - 1
v = dmp_add_term(u, dmp_ground(K(2), m - 1), 0, n, K)
f = dmp_sqr([dmp_one(m, K), dmp_neg(v, m, K)], n, K)
g = dmp_sqr([dmp_one(m, K), v], n, K)
v = dmp_add_term(u, dmp_one(m - 1, K), 0, n, K)
h = dmp_sqr([dmp_one(m, K), v], n, K)
return dmp_mul(f, h, n, K), dmp_mul(g, h, n, K), h
def fateman_poly_F_3(n):
"""Fateman's GCD benchmark: sparse inputs (deg f ~ vars f) """
Y = [Symbol('y_' + str(i)) for i in range(n + 1)]
y_0 = Y[0]
u = Add(*[y**(n + 1) for y in Y[1:]])
H = Poly((y_0**(n + 1) + u + 1)**2, *Y)
F = Poly((y_0**(n + 1) - u - 2)**2, *Y)
G = Poly((y_0**(n + 1) + u + 2)**2, *Y)
return H*F, H*G, H
def dmp_fateman_poly_F_3(n, K):
"""Fateman's GCD benchmark: sparse inputs (deg f ~ vars f) """
u = dup_from_raw_dict({n + 1: K.one}, K)
for i in range(0, n - 1):
u = dmp_add_term([u], dmp_one(i, K), n + 1, i + 1, K)
v = dmp_add_term(u, dmp_ground(K(2), n - 2), 0, n, K)
f = dmp_sqr(
dmp_add_term([dmp_neg(v, n - 1, K)], dmp_one(n - 1, K), n + 1, n, K), n, K)
g = dmp_sqr(dmp_add_term([v], dmp_one(n - 1, K), n + 1, n, K), n, K)
v = dmp_add_term(u, dmp_one(n - 2, K), 0, n - 1, K)
h = dmp_sqr(dmp_add_term([v], dmp_one(n - 1, K), n + 1, n, K), n, K)
return dmp_mul(f, h, n, K), dmp_mul(g, h, n, K), h
# A few useful polynomials from Wang's paper ('78).
from sympy.polys.rings import ring
def _f_0():
R, x, y, z = ring("x,y,z", ZZ)
return x**2*y*z**2 + 2*x**2*y*z + 3*x**2*y + 2*x**2 + 3*x + 4*y**2*z**2 + 5*y**2*z + 6*y**2 + y*z**2 + 2*y*z + y + 1
def _f_1():
R, x, y, z = ring("x,y,z", ZZ)
return x**3*y*z + x**2*y**2*z**2 + x**2*y**2 + 20*x**2*y*z + 30*x**2*y + x**2*z**2 + 10*x**2*z + x*y**3*z + 30*x*y**2*z + 20*x*y**2 + x*y*z**3 + 10*x*y*z**2 + x*y*z + 610*x*y + 20*x*z**2 + 230*x*z + 300*x + y**2*z**2 + 10*y**2*z + 30*y*z**2 + 320*y*z + 200*y + 600*z + 6000
def _f_2():
R, x, y, z = ring("x,y,z", ZZ)
return x**5*y**3 + x**5*y**2*z + x**5*y*z**2 + x**5*z**3 + x**3*y**2 + x**3*y*z + 90*x**3*y + 90*x**3*z + x**2*y**2*z - 11*x**2*y**2 + x**2*z**3 - 11*x**2*z**2 + y*z - 11*y + 90*z - 990
def _f_3():
R, x, y, z = ring("x,y,z", ZZ)
return x**5*y**2 + x**4*z**4 + x**4 + x**3*y**3*z + x**3*z + x**2*y**4 + x**2*y**3*z**3 + x**2*y*z**5 + x**2*y*z + x*y**2*z**4 + x*y**2 + x*y*z**7 + x*y*z**3 + x*y*z**2 + y**2*z + y*z**4
def _f_4():
R, x, y, z = ring("x,y,z", ZZ)
return -x**9*y**8*z - x**8*y**5*z**3 - x**7*y**12*z**2 - 5*x**7*y**8 - x**6*y**9*z**4 + x**6*y**7*z**3 + 3*x**6*y**7*z - 5*x**6*y**5*z**2 - x**6*y**4*z**3 + x**5*y**4*z**5 + 3*x**5*y**4*z**3 - x**5*y*z**5 + x**4*y**11*z**4 + 3*x**4*y**11*z**2 - x**4*y**8*z**4 + 5*x**4*y**7*z**2 + 15*x**4*y**7 - 5*x**4*y**4*z**2 + x**3*y**8*z**6 + 3*x**3*y**8*z**4 - x**3*y**5*z**6 + 5*x**3*y**4*z**4 + 15*x**3*y**4*z**2 + x**3*y**3*z**5 + 3*x**3*y**3*z**3 - 5*x**3*y*z**4 + x**2*z**7 + 3*x**2*z**5 + x*y**7*z**6 + 3*x*y**7*z**4 + 5*x*y**3*z**4 + 15*x*y**3*z**2 + y**4*z**8 + 3*y**4*z**6 + 5*z**6 + 15*z**4
def _f_5():
R, x, y, z = ring("x,y,z", ZZ)
return -x**3 - 3*x**2*y + 3*x**2*z - 3*x*y**2 + 6*x*y*z - 3*x*z**2 - y**3 + 3*y**2*z - 3*y*z**2 + z**3
def _f_6():
R, x, y, z, t = ring("x,y,z,t", ZZ)
return 2115*x**4*y + 45*x**3*z**3*t**2 - 45*x**3*t**2 - 423*x*y**4 - 47*x*y**3 + 141*x*y*z**3 + 94*x*y*z*t - 9*y**3*z**3*t**2 + 9*y**3*t**2 - y**2*z**3*t**2 + y**2*t**2 + 3*z**6*t**2 + 2*z**4*t**3 - 3*z**3*t**2 - 2*z*t**3
def _w_1():
R, x, y, z = ring("x,y,z", ZZ)
return 4*x**6*y**4*z**2 + 4*x**6*y**3*z**3 - 4*x**6*y**2*z**4 - 4*x**6*y*z**5 + x**5*y**4*z**3 + 12*x**5*y**3*z - x**5*y**2*z**5 + 12*x**5*y**2*z**2 - 12*x**5*y*z**3 - 12*x**5*z**4 + 8*x**4*y**4 + 6*x**4*y**3*z**2 + 8*x**4*y**3*z - 4*x**4*y**2*z**4 + 4*x**4*y**2*z**3 - 8*x**4*y**2*z**2 - 4*x**4*y*z**5 - 2*x**4*y*z**4 - 8*x**4*y*z**3 + 2*x**3*y**4*z + x**3*y**3*z**3 - x**3*y**2*z**5 - 2*x**3*y**2*z**3 + 9*x**3*y**2*z - 12*x**3*y*z**3 + 12*x**3*y*z**2 - 12*x**3*z**4 + 3*x**3*z**3 + 6*x**2*y**3 - 6*x**2*y**2*z**2 + 8*x**2*y**2*z - 2*x**2*y*z**4 - 8*x**2*y*z**3 + 2*x**2*y*z**2 + 2*x*y**3*z - 2*x*y**2*z**3 - 3*x*y*z + 3*x*z**3 - 2*y**2 + 2*y*z**2
def _w_2():
R, x, y = ring("x,y", ZZ)
return 24*x**8*y**3 + 48*x**8*y**2 + 24*x**7*y**5 - 72*x**7*y**2 + 25*x**6*y**4 + 2*x**6*y**3 + 4*x**6*y + 8*x**6 + x**5*y**6 + x**5*y**3 - 12*x**5 + x**4*y**5 - x**4*y**4 - 2*x**4*y**3 + 292*x**4*y**2 - x**3*y**6 + 3*x**3*y**3 - x**2*y**5 + 12*x**2*y**3 + 48*x**2 - 12*y**3
def f_polys():
return _f_0(), _f_1(), _f_2(), _f_3(), _f_4(), _f_5(), _f_6()
def w_polys():
return _w_1(), _w_2()
"""Implementation of matrix FGLM Groebner basis conversion algorithm. """
from __future__ import print_function, division
from sympy.polys.monomials import monomial_mul, monomial_div
def matrix_fglm(F, ring, O_to):
"""
Converts the reduced Groebner basis ``F`` of a zero-dimensional
ideal, given w.r.t. the monomial ordering of ``ring``, to a reduced
Groebner basis w.r.t. the target ordering ``O_to``.
References
==========
.. [1] J.C. Faugere, P. Gianni, D. Lazard, T. Mora (1994). Efficient
Computation of Zero-dimensional Groebner Bases by Change of
Ordering
"""
domain = ring.domain
ngens = ring.ngens
ring_to = ring.clone(order=O_to)
old_basis = _basis(F, ring)
M = _representing_matrices(old_basis, F, ring)
# V contains the normalforms (wrt O_from) of S
S = [ring.zero_monom]
V = [[domain.one] + [domain.zero] * (len(old_basis) - 1)]
G = []
L = [(i, 0) for i in range(ngens)] # (i, j) corresponds to x_i * S[j]
L.sort(key=lambda k_l: O_to(_incr_k(S[k_l[1]], k_l[0])), reverse=True)
t = L.pop()
P = _identity_matrix(len(old_basis), domain)
while True:
s = len(S)
v = _matrix_mul(M[t[0]], V[t[1]])
_lambda = _matrix_mul(P, v)
if all(_lambda[i] == domain.zero for i in range(s, len(old_basis))):
# there is a linear combination of v by V
lt = ring.term_new(_incr_k(S[t[1]], t[0]), domain.one)
rest = ring.from_dict({S[i]: _lambda[i] for i in range(s)})
g = (lt - rest).set_ring(ring_to)
if g:
G.append(g)
else:
# v is linearly independent from V
P = _update(s, _lambda, P)
S.append(_incr_k(S[t[1]], t[0]))
V.append(v)
L.extend([(i, s) for i in range(ngens)])
L = list(set(L))
L.sort(key=lambda k_l: O_to(_incr_k(S[k_l[1]], k_l[0])), reverse=True)
L = [(k, l) for (k, l) in L if all(monomial_div(_incr_k(S[l], k), g.LM) is None for g in G)]
if not L:
G = [ g.monic() for g in G ]
return sorted(G, key=lambda g: O_to(g.LM), reverse=True)
t = L.pop()
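# Note (added): matrix_fglm is not normally called directly; it is driven by
# GroebnerBasis.fglm in sympy.polys.polytools, e.g. converting a 'grlex' basis
# to 'lex' as in the fglm docstring:
#
#   >>> from sympy import groebner
#   >>> from sympy.abc import x, y
#   >>> G = groebner([x**2 - 3*y - x + 1, y**2 - 2*x + y - 1], x, y, order='grlex')
#   >>> list(G.fglm('lex'))
#   [2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7]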
def _incr_k(m, k):
return tuple(list(m[:k]) + [m[k] + 1] + list(m[k + 1:]))
def _identity_matrix(n, domain):
M = [[domain.zero]*n for _ in range(n)]
for i in range(n):
M[i][i] = domain.one
return M
def _matrix_mul(M, v):
return [sum([row[i] * v[i] for i in range(len(v))]) for row in M]
def _update(s, _lambda, P):
"""
Update ``P`` such that the updated matrix ``P'`` satisfies ``P' v = e_s``.
"""
k = min([j for j in range(s, len(_lambda)) if _lambda[j] != 0])
for r in range(len(_lambda)):
if r != k:
P[r] = [P[r][j] - (P[k][j] * _lambda[r]) / _lambda[k] for j in range(len(P[r]))]
P[k] = [P[k][j] / _lambda[k] for j in range(len(P[k]))]
P[k], P[s] = P[s], P[k]
return P
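# Worked illustration (added, derived by tracing the code above): with s = 1,
# _lambda = [2, 3] and P the 2x2 identity, the pivot is k = 1, the elimination
# step gives P = [[1, -2/3], [0, 1/3]], and the final swap is a no-op since
# k == s; the updated matrix indeed sends v = [2, 3] to e_1:
#
#   >>> from sympy.polys.domains import QQ
#   >>> P = _update(1, [QQ(2), QQ(3)], _identity_matrix(2, QQ))
#   >>> _matrix_mul(P, [QQ(2), QQ(3)]) == [QQ(0), QQ(1)]
#   True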
def _representing_matrices(basis, G, ring):
r"""
Compute the matrices corresponding to the linear maps `m \mapsto
x_i m` for all variables `x_i`.
"""
domain = ring.domain
u = ring.ngens-1
def var(i):
return tuple([0] * i + [1] + [0] * (u - i))
def representing_matrix(m):
M = [[domain.zero] * len(basis) for _ in range(len(basis))]
for i, v in enumerate(basis):
r = ring.term_new(monomial_mul(m, v), domain.one).rem(G)
for monom, coeff in r.terms():
j = basis.index(monom)
M[j][i] = coeff
return M
return [representing_matrix(var(i)) for i in range(u + 1)]
def _basis(G, ring):
r"""
Computes a list of monomials which are not divisible by the leading
monomial of any element of ``G``. These monomials are a basis of
`K[X_1, \ldots, X_n]/(G)`.
"""
order = ring.order
leading_monomials = [g.LM for g in G]
candidates = [ring.zero_monom]
basis = []
while candidates:
t = candidates.pop()
basis.append(t)
new_candidates = [_incr_k(t, k) for k in range(ring.ngens)
if all(monomial_div(_incr_k(t, k), lmg) is None
for lmg in leading_monomials)]
candidates.extend(new_candidates)
candidates.sort(key=lambda m: order(m), reverse=True)
basis = list(set(basis))
return sorted(basis, key=lambda m: order(m))
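# Hedged sketch (added; not part of the original source): with leading
# monomials x**2 and y**2 the residue-class basis consists of the exponent
# tuples of {1, y, x, x*y}:
#
#   >>> from sympy.polys.rings import ring
#   >>> from sympy.polys.domains import QQ
#   >>> R, x, y = ring("x,y", QQ)
#   >>> _basis([x**2, y**2], R)
#   [(0, 0), (0, 1), (1, 0), (1, 1)]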
"""Basic tools for dense recursive polynomials in ``K[x]`` or ``K[X]``. """
from __future__ import print_function, division
from sympy import oo
from sympy.core import igcd
from sympy.polys.monomials import monomial_min, monomial_div
from sympy.polys.orderings import monomial_key
import random
def poly_LC(f, K):
"""
Return leading coefficient of ``f``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import poly_LC
>>> poly_LC([], ZZ)
0
>>> poly_LC([ZZ(1), ZZ(2), ZZ(3)], ZZ)
1
"""
if not f:
return K.zero
else:
return f[0]
def poly_TC(f, K):
"""
Return trailing coefficient of ``f``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import poly_TC
>>> poly_TC([], ZZ)
0
>>> poly_TC([ZZ(1), ZZ(2), ZZ(3)], ZZ)
3
"""
if not f:
return K.zero
else:
return f[-1]
dup_LC = dmp_LC = poly_LC
dup_TC = dmp_TC = poly_TC
def dmp_ground_LC(f, u, K):
"""
Return the ground leading coefficient.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_ground_LC
>>> f = ZZ.map([[[1], [2, 3]]])
>>> dmp_ground_LC(f, 2, ZZ)
1
"""
while u:
f = dmp_LC(f, K)
u -= 1
return dup_LC(f, K)
def dmp_ground_TC(f, u, K):
"""
Return the ground trailing coefficient.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_ground_TC
>>> f = ZZ.map([[[1], [2, 3]]])
>>> dmp_ground_TC(f, 2, ZZ)
3
"""
while u:
f = dmp_TC(f, K)
u -= 1
return dup_TC(f, K)
def dmp_true_LT(f, u, K):
"""
Return the leading term ``c * x_1**n_1 ... x_k**n_k``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_true_LT
>>> f = ZZ.map([[4], [2, 0], [3, 0, 0]])
>>> dmp_true_LT(f, 1, ZZ)
((2, 0), 4)
"""
monom = []
while u:
monom.append(len(f) - 1)
f, u = f[0], u - 1
if not f:
monom.append(0)
else:
monom.append(len(f) - 1)
return tuple(monom), dup_LC(f, K)
def dup_degree(f):
"""
Return the leading degree of ``f`` in ``K[x]``.
Note that the degree of 0 is negative infinity (the SymPy object -oo).
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_degree
>>> f = ZZ.map([1, 2, 0, 3])
>>> dup_degree(f)
3
"""
if not f:
return -oo
return len(f) - 1
def dmp_degree(f, u):
"""
Return the leading degree of ``f`` in ``x_0`` in ``K[X]``.
Note that the degree of 0 is negative infinity (the SymPy object -oo).
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_degree
>>> dmp_degree([[[]]], 2)
-oo
>>> f = ZZ.map([[2], [1, 2, 3]])
>>> dmp_degree(f, 1)
1
"""
if dmp_zero_p(f, u):
return -oo
else:
return len(f) - 1
def _rec_degree_in(g, v, i, j):
"""Recursive helper function for :func:`dmp_degree_in`."""
if i == j:
return dmp_degree(g, v)
v, i = v - 1, i + 1
return max([ _rec_degree_in(c, v, i, j) for c in g ])
def dmp_degree_in(f, j, u):
"""
Return the leading degree of ``f`` in ``x_j`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_degree_in
>>> f = ZZ.map([[2], [1, 2, 3]])
>>> dmp_degree_in(f, 0, 1)
1
>>> dmp_degree_in(f, 1, 1)
2
"""
if not j:
return dmp_degree(f, u)
if j < 0 or j > u:
raise IndexError("0 <= j <= %s expected, got %s" % (u, j))
return _rec_degree_in(f, u, 0, j)
def _rec_degree_list(g, v, i, degs):
"""Recursive helper for :func:`dmp_degree_list`."""
degs[i] = max(degs[i], dmp_degree(g, v))
if v > 0:
v, i = v - 1, i + 1
for c in g:
_rec_degree_list(c, v, i, degs)
def dmp_degree_list(f, u):
"""
Return a list of degrees of ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_degree_list
>>> f = ZZ.map([[1], [1, 2, 3]])
>>> dmp_degree_list(f, 1)
(1, 2)
"""
degs = [-oo]*(u + 1)
_rec_degree_list(f, u, 0, degs)
return tuple(degs)
def dup_strip(f):
"""
Remove leading zeros from ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.densebasic import dup_strip
>>> dup_strip([0, 0, 1, 2, 3, 0])
[1, 2, 3, 0]
"""
if not f or f[0]:
return f
i = 0
for cf in f:
if cf:
break
else:
i += 1
return f[i:]
def dmp_strip(f, u):
"""
Remove leading zeros from ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.densebasic import dmp_strip
>>> dmp_strip([[], [0, 1, 2], [1]], 1)
[[0, 1, 2], [1]]
"""
if not u:
return dup_strip(f)
if dmp_zero_p(f, u):
return f
i, v = 0, u - 1
for c in f:
if not dmp_zero_p(c, v):
break
else:
i += 1
if i == len(f):
return dmp_zero(u)
else:
return f[i:]
def _rec_validate(f, g, i, K):
"""Recursive helper for :func:`dmp_validate`."""
if type(g) is not list:
if K is not None and not K.of_type(g):
raise TypeError("%s in %s in not of type %s" % (g, f, K.dtype))
return set([i - 1])
elif not g:
return set([i])
else:
levels = set([])
for c in g:
levels |= _rec_validate(f, c, i + 1, K)
return levels
def _rec_strip(g, v):
"""Recursive helper for :func:`_rec_strip`."""
if not v:
return dup_strip(g)
w = v - 1
return dmp_strip([ _rec_strip(c, w) for c in g ], v)
def dmp_validate(f, K=None):
"""
Return the number of levels in ``f`` and recursively strip it.
Examples
========
>>> from sympy.polys.densebasic import dmp_validate
>>> dmp_validate([[], [0, 1, 2], [1]])
([[1, 2], [1]], 1)
>>> dmp_validate([[1], 1])
Traceback (most recent call last):
...
ValueError: invalid data structure for a multivariate polynomial
"""
levels = _rec_validate(f, f, 0, K)
u = levels.pop()
if not levels:
return _rec_strip(f, u), u
else:
raise ValueError(
"invalid data structure for a multivariate polynomial")
def dup_reverse(f):
"""
Compute ``x**n * f(1/x)``, i.e.: reverse ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_reverse
>>> f = ZZ.map([1, 2, 3, 0])
>>> dup_reverse(f)
[3, 2, 1]
"""
return dup_strip(list(reversed(f)))
def dup_copy(f):
"""
Create a new copy of a polynomial ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_copy
>>> f = ZZ.map([1, 2, 3, 0])
>>> dup_copy([1, 2, 3, 0])
[1, 2, 3, 0]
"""
return list(f)
def dmp_copy(f, u):
"""
Create a new copy of a polynomial ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_copy
>>> f = ZZ.map([[1], [1, 2]])
>>> dmp_copy(f, 1)
[[1], [1, 2]]
"""
if not u:
return list(f)
v = u - 1
return [ dmp_copy(c, v) for c in f ]
def dup_to_tuple(f):
"""
Convert `f` into a tuple.
This is needed for hashing. This is similar to dup_copy().
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_to_tuple
>>> f = ZZ.map([1, 2, 3, 0])
>>> dup_to_tuple(f)
(1, 2, 3, 0)
"""
return tuple(f)
def dmp_to_tuple(f, u):
"""
Convert `f` into a nested tuple of tuples.
This is needed for hashing. This is similar to dmp_copy().
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_to_tuple
>>> f = ZZ.map([[1], [1, 2]])
>>> dmp_to_tuple(f, 1)
((1,), (1, 2))
"""
if not u:
return tuple(f)
v = u - 1
return tuple(dmp_to_tuple(c, v) for c in f)
def dup_normal(f, K):
"""
Normalize univariate polynomial in the given domain.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_normal
>>> dup_normal([0, 1.5, 2, 3], ZZ)
[1, 2, 3]
"""
return dup_strip([ K.normal(c) for c in f ])
def dmp_normal(f, u, K):
"""
Normalize a multivariate polynomial in the given domain.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_normal
>>> dmp_normal([[], [0, 1.5, 2]], 1, ZZ)
[[1, 2]]
"""
if not u:
return dup_normal(f, K)
v = u - 1
return dmp_strip([ dmp_normal(c, v, K) for c in f ], u)
def dup_convert(f, K0, K1):
"""
Convert the ground domain of ``f`` from ``K0`` to ``K1``.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_convert
>>> R, x = ring("x", ZZ)
>>> dup_convert([R(1), R(2)], R.to_domain(), ZZ)
[1, 2]
>>> dup_convert([ZZ(1), ZZ(2)], ZZ, R.to_domain())
[1, 2]
"""
if K0 is not None and K0 == K1:
return f
else:
return dup_strip([ K1.convert(c, K0) for c in f ])
def dmp_convert(f, u, K0, K1):
"""
Convert the ground domain of ``f`` from ``K0`` to ``K1``.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_convert
>>> R, x = ring("x", ZZ)
>>> dmp_convert([[R(1)], [R(2)]], 1, R.to_domain(), ZZ)
[[1], [2]]
>>> dmp_convert([[ZZ(1)], [ZZ(2)]], 1, ZZ, R.to_domain())
[[1], [2]]
"""
if not u:
return dup_convert(f, K0, K1)
if K0 is not None and K0 == K1:
return f
v = u - 1
return dmp_strip([ dmp_convert(c, v, K0, K1) for c in f ], u)
def dup_from_sympy(f, K):
"""
Convert the ground domain of ``f`` from SymPy to ``K``.
Examples
========
>>> from sympy import S
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_from_sympy
>>> dup_from_sympy([S(1), S(2)], ZZ) == [ZZ(1), ZZ(2)]
True
"""
return dup_strip([ K.from_sympy(c) for c in f ])
def dmp_from_sympy(f, u, K):
"""
Convert the ground domain of ``f`` from SymPy to ``K``.
Examples
========
>>> from sympy import S
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_from_sympy
>>> dmp_from_sympy([[S(1)], [S(2)]], 1, ZZ) == [[ZZ(1)], [ZZ(2)]]
True
"""
if not u:
return dup_from_sympy(f, K)
v = u - 1
return dmp_strip([ dmp_from_sympy(c, v, K) for c in f ], u)
def dup_nth(f, n, K):
"""
Return the ``n``-th coefficient of ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_nth
>>> f = ZZ.map([1, 2, 3])
>>> dup_nth(f, 0, ZZ)
3
>>> dup_nth(f, 4, ZZ)
0
"""
if n < 0:
raise IndexError("'n' must be non-negative, got %i" % n)
elif n >= len(f):
return K.zero
else:
return f[dup_degree(f) - n]
def dmp_nth(f, n, u, K):
"""
    Return the ``n``-th coefficient of ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_nth
>>> f = ZZ.map([[1], [2], [3]])
>>> dmp_nth(f, 0, 1, ZZ)
[3]
>>> dmp_nth(f, 4, 1, ZZ)
[]
"""
if n < 0:
raise IndexError("'n' must be non-negative, got %i" % n)
elif n >= len(f):
return dmp_zero(u - 1)
else:
return f[dmp_degree(f, u) - n]
def dmp_ground_nth(f, N, u, K):
"""
    Return the ground ``n``-th coefficient of ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_ground_nth
>>> f = ZZ.map([[1], [2, 3]])
>>> dmp_ground_nth(f, (0, 1), 1, ZZ)
2
"""
v = u
for n in N:
if n < 0:
raise IndexError("`n` must be non-negative, got %i" % n)
elif n >= len(f):
return K.zero
else:
d = dmp_degree(f, v)
if d == -oo:
d = -1
f, v = f[d - n], v - 1
return f
def dmp_zero_p(f, u):
"""
Return ``True`` if ``f`` is zero in ``K[X]``.
Examples
========
>>> from sympy.polys.densebasic import dmp_zero_p
>>> dmp_zero_p([[[[[]]]]], 4)
True
>>> dmp_zero_p([[[[[1]]]]], 4)
False
"""
while u:
if len(f) != 1:
return False
f = f[0]
u -= 1
return not f
def dmp_zero(u):
"""
Return a multivariate zero.
Examples
========
>>> from sympy.polys.densebasic import dmp_zero
>>> dmp_zero(4)
[[[[[]]]]]
"""
r = []
for i in range(u):
r = [r]
return r
def dmp_one_p(f, u, K):
"""
Return ``True`` if ``f`` is one in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_one_p
>>> dmp_one_p([[[ZZ(1)]]], 2, ZZ)
True
"""
return dmp_ground_p(f, K.one, u)
def dmp_one(u, K):
"""
Return a multivariate one over ``K``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_one
>>> dmp_one(2, ZZ)
[[[1]]]
"""
return dmp_ground(K.one, u)
def dmp_ground_p(f, c, u):
"""
Return True if ``f`` is constant in ``K[X]``.
Examples
========
>>> from sympy.polys.densebasic import dmp_ground_p
>>> dmp_ground_p([[[3]]], 3, 2)
True
>>> dmp_ground_p([[[4]]], None, 2)
True
"""
if c is not None and not c:
return dmp_zero_p(f, u)
while u:
if len(f) != 1:
return False
f = f[0]
u -= 1
if c is None:
return len(f) <= 1
else:
return f == [c]
def dmp_ground(c, u):
"""
Return a multivariate constant.
Examples
========
>>> from sympy.polys.densebasic import dmp_ground
>>> dmp_ground(3, 5)
[[[[[[3]]]]]]
>>> dmp_ground(1, -1)
1
"""
if not c:
return dmp_zero(u)
for i in range(u + 1):
c = [c]
return c
def dmp_zeros(n, u, K):
"""
Return a list of multivariate zeros.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_zeros
>>> dmp_zeros(3, 2, ZZ)
[[[[]]], [[[]]], [[[]]]]
>>> dmp_zeros(3, -1, ZZ)
[0, 0, 0]
"""
if not n:
return []
if u < 0:
return [K.zero]*n
else:
return [ dmp_zero(u) for i in range(n) ]
def dmp_grounds(c, n, u):
"""
Return a list of multivariate constants.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_grounds
>>> dmp_grounds(ZZ(4), 3, 2)
[[[[4]]], [[[4]]], [[[4]]]]
>>> dmp_grounds(ZZ(4), 3, -1)
[4, 4, 4]
"""
if not n:
return []
if u < 0:
return [c]*n
else:
return [ dmp_ground(c, u) for i in range(n) ]
def dmp_negative_p(f, u, K):
"""
Return ``True`` if ``LC(f)`` is negative.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_negative_p
>>> dmp_negative_p([[ZZ(1)], [-ZZ(1)]], 1, ZZ)
False
>>> dmp_negative_p([[-ZZ(1)], [ZZ(1)]], 1, ZZ)
True
"""
return K.is_negative(dmp_ground_LC(f, u, K))
def dmp_positive_p(f, u, K):
"""
Return ``True`` if ``LC(f)`` is positive.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_positive_p
>>> dmp_positive_p([[ZZ(1)], [-ZZ(1)]], 1, ZZ)
True
>>> dmp_positive_p([[-ZZ(1)], [ZZ(1)]], 1, ZZ)
False
"""
return K.is_positive(dmp_ground_LC(f, u, K))
def dup_from_dict(f, K):
"""
Create a ``K[x]`` polynomial from a ``dict``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_from_dict
>>> dup_from_dict({(0,): ZZ(7), (2,): ZZ(5), (4,): ZZ(1)}, ZZ)
[1, 0, 5, 0, 7]
>>> dup_from_dict({}, ZZ)
[]
"""
if not f:
return []
n, h = max(f.keys()), []
if type(n) is int:
for k in range(n, -1, -1):
h.append(f.get(k, K.zero))
else:
(n,) = n
for k in range(n, -1, -1):
h.append(f.get((k,), K.zero))
return dup_strip(h)
def dup_from_raw_dict(f, K):
"""
Create a ``K[x]`` polynomial from a raw ``dict``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_from_raw_dict
>>> dup_from_raw_dict({0: ZZ(7), 2: ZZ(5), 4: ZZ(1)}, ZZ)
[1, 0, 5, 0, 7]
"""
if not f:
return []
n, h = max(f.keys()), []
for k in range(n, -1, -1):
h.append(f.get(k, K.zero))
return dup_strip(h)
def dmp_from_dict(f, u, K):
"""
Create a ``K[X]`` polynomial from a ``dict``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_from_dict
>>> dmp_from_dict({(0, 0): ZZ(3), (0, 1): ZZ(2), (2, 1): ZZ(1)}, 1, ZZ)
[[1, 0], [], [2, 3]]
>>> dmp_from_dict({}, 0, ZZ)
[]
"""
if not u:
return dup_from_dict(f, K)
if not f:
return dmp_zero(u)
coeffs = {}
for monom, coeff in f.items():
head, tail = monom[0], monom[1:]
if head in coeffs:
coeffs[head][tail] = coeff
else:
coeffs[head] = { tail: coeff }
n, v, h = max(coeffs.keys()), u - 1, []
for k in range(n, -1, -1):
coeff = coeffs.get(k)
if coeff is not None:
h.append(dmp_from_dict(coeff, v, K))
else:
h.append(dmp_zero(v))
return dmp_strip(h, u)
def dup_to_dict(f, K=None, zero=False):
"""
Convert ``K[x]`` polynomial to a ``dict``.
Examples
========
>>> from sympy.polys.densebasic import dup_to_dict
>>> dup_to_dict([1, 0, 5, 0, 7])
{(0,): 7, (2,): 5, (4,): 1}
>>> dup_to_dict([])
{}
"""
if not f and zero:
return {(0,): K.zero}
n, result = len(f) - 1, {}
for k in range(0, n + 1):
if f[n - k]:
result[(k,)] = f[n - k]
return result
def dup_to_raw_dict(f, K=None, zero=False):
"""
Convert a ``K[x]`` polynomial to a raw ``dict``.
Examples
========
>>> from sympy.polys.densebasic import dup_to_raw_dict
>>> dup_to_raw_dict([1, 0, 5, 0, 7])
{0: 7, 2: 5, 4: 1}
"""
if not f and zero:
return {0: K.zero}
n, result = len(f) - 1, {}
for k in range(0, n + 1):
if f[n - k]:
result[k] = f[n - k]
return result
def dmp_to_dict(f, u, K=None, zero=False):
"""
    Convert a ``K[X]`` polynomial to a ``dict``.
Examples
========
>>> from sympy.polys.densebasic import dmp_to_dict
>>> dmp_to_dict([[1, 0], [], [2, 3]], 1)
{(0, 0): 3, (0, 1): 2, (2, 1): 1}
>>> dmp_to_dict([], 0)
{}
"""
if not u:
return dup_to_dict(f, K, zero=zero)
if dmp_zero_p(f, u) and zero:
return {(0,)*(u + 1): K.zero}
n, v, result = dmp_degree(f, u), u - 1, {}
if n == -oo:
n = -1
for k in range(0, n + 1):
h = dmp_to_dict(f[n - k], v)
for exp, coeff in h.items():
result[(k,) + exp] = coeff
return result
def dmp_swap(f, i, j, u, K):
"""
Transform ``K[..x_i..x_j..]`` to ``K[..x_j..x_i..]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_swap
>>> f = ZZ.map([[[2], [1, 0]], []])
>>> dmp_swap(f, 0, 1, 2, ZZ)
[[[2], []], [[1, 0], []]]
>>> dmp_swap(f, 1, 2, 2, ZZ)
[[[1], [2, 0]], [[]]]
>>> dmp_swap(f, 0, 2, 2, ZZ)
[[[1, 0]], [[2, 0], []]]
"""
if i < 0 or j < 0 or i > u or j > u:
raise IndexError("0 <= i < j <= %s expected" % u)
elif i == j:
return f
F, H = dmp_to_dict(f, u), {}
for exp, coeff in F.items():
H[exp[:i] + (exp[j],) +
exp[i + 1:j] +
(exp[i],) + exp[j + 1:]] = coeff
return dmp_from_dict(H, u, K)
def dmp_permute(f, P, u, K):
"""
Return a polynomial in ``K[x_{P(1)},..,x_{P(n)}]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_permute
>>> f = ZZ.map([[[2], [1, 0]], []])
>>> dmp_permute(f, [1, 0, 2], 2, ZZ)
[[[2], []], [[1, 0], []]]
>>> dmp_permute(f, [1, 2, 0], 2, ZZ)
[[[1], []], [[2, 0], []]]
"""
F, H = dmp_to_dict(f, u), {}
for exp, coeff in F.items():
new_exp = [0]*len(exp)
for e, p in zip(exp, P):
new_exp[p] = e
H[tuple(new_exp)] = coeff
return dmp_from_dict(H, u, K)
def dmp_nest(f, l, K):
"""
Return a multivariate value nested ``l``-levels.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_nest
>>> dmp_nest([[ZZ(1)]], 2, ZZ)
[[[[1]]]]
"""
if not isinstance(f, list):
return dmp_ground(f, l)
for i in range(l):
f = [f]
return f
def dmp_raise(f, l, u, K):
"""
Return a multivariate polynomial raised ``l``-levels.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_raise
>>> f = ZZ.map([[], [1, 2]])
>>> dmp_raise(f, 2, 1, ZZ)
[[[[]]], [[[1]], [[2]]]]
"""
if not l:
return f
if not u:
if not f:
return dmp_zero(l)
k = l - 1
return [ dmp_ground(c, k) for c in f ]
v = u - 1
return [ dmp_raise(c, l, v, K) for c in f ]
def dup_deflate(f, K):
"""
Map ``x**m`` to ``y`` in a polynomial in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_deflate
>>> f = ZZ.map([1, 0, 0, 1, 0, 0, 1])
>>> dup_deflate(f, ZZ)
(3, [1, 1, 1])
"""
if dup_degree(f) <= 0:
return 1, f
g = 0
for i in range(len(f)):
if not f[-i - 1]:
continue
g = igcd(g, i)
if g == 1:
return 1, f
return g, f[::g]
def dmp_deflate(f, u, K):
"""
Map ``x_i**m_i`` to ``y_i`` in a polynomial in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_deflate
>>> f = ZZ.map([[1, 0, 0, 2], [], [3, 0, 0, 4]])
>>> dmp_deflate(f, 1, ZZ)
((2, 3), [[1, 2], [3, 4]])
"""
if dmp_zero_p(f, u):
return (1,)*(u + 1), f
F = dmp_to_dict(f, u)
B = [0]*(u + 1)
for M in F.keys():
for i, m in enumerate(M):
B[i] = igcd(B[i], m)
for i, b in enumerate(B):
if not b:
B[i] = 1
B = tuple(B)
if all(b == 1 for b in B):
return B, f
H = {}
for A, coeff in F.items():
N = [ a // b for a, b in zip(A, B) ]
H[tuple(N)] = coeff
return B, dmp_from_dict(H, u, K)
def dup_multi_deflate(polys, K):
"""
Map ``x**m`` to ``y`` in a set of polynomials in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_multi_deflate
>>> f = ZZ.map([1, 0, 2, 0, 3])
>>> g = ZZ.map([4, 0, 0])
>>> dup_multi_deflate((f, g), ZZ)
(2, ([1, 2, 3], [4, 0]))
"""
G = 0
for p in polys:
if dup_degree(p) <= 0:
return 1, polys
g = 0
for i in range(len(p)):
if not p[-i - 1]:
continue
g = igcd(g, i)
if g == 1:
return 1, polys
G = igcd(G, g)
return G, tuple([ p[::G] for p in polys ])
def dmp_multi_deflate(polys, u, K):
"""
Map ``x_i**m_i`` to ``y_i`` in a set of polynomials in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_multi_deflate
>>> f = ZZ.map([[1, 0, 0, 2], [], [3, 0, 0, 4]])
>>> g = ZZ.map([[1, 0, 2], [], [3, 0, 4]])
>>> dmp_multi_deflate((f, g), 1, ZZ)
((2, 1), ([[1, 0, 0, 2], [3, 0, 0, 4]], [[1, 0, 2], [3, 0, 4]]))
"""
if not u:
M, H = dup_multi_deflate(polys, K)
return (M,), H
F, B = [], [0]*(u + 1)
for p in polys:
f = dmp_to_dict(p, u)
if not dmp_zero_p(p, u):
for M in f.keys():
for i, m in enumerate(M):
B[i] = igcd(B[i], m)
F.append(f)
for i, b in enumerate(B):
if not b:
B[i] = 1
B = tuple(B)
if all(b == 1 for b in B):
return B, polys
H = []
for f in F:
h = {}
for A, coeff in f.items():
N = [ a // b for a, b in zip(A, B) ]
h[tuple(N)] = coeff
H.append(dmp_from_dict(h, u, K))
return B, tuple(H)
def dup_inflate(f, m, K):
"""
Map ``y`` to ``x**m`` in a polynomial in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_inflate
>>> f = ZZ.map([1, 1, 1])
>>> dup_inflate(f, 3, ZZ)
[1, 0, 0, 1, 0, 0, 1]
"""
if m <= 0:
raise IndexError("'m' must be positive, got %s" % m)
if m == 1 or not f:
return f
result = [f[0]]
for coeff in f[1:]:
result.extend([K.zero]*(m - 1))
result.append(coeff)
return result
def _rec_inflate(g, M, v, i, K):
"""Recursive helper for :func:`dmp_inflate`."""
if not v:
return dup_inflate(g, M[i], K)
if M[i] <= 0:
raise IndexError("all M[i] must be positive, got %s" % M[i])
w, j = v - 1, i + 1
g = [ _rec_inflate(c, M, w, j, K) for c in g ]
result = [g[0]]
for coeff in g[1:]:
for _ in range(1, M[i]):
result.append(dmp_zero(w))
result.append(coeff)
return result
def dmp_inflate(f, M, u, K):
"""
Map ``y_i`` to ``x_i**k_i`` in a polynomial in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_inflate
>>> f = ZZ.map([[1, 2], [3, 4]])
>>> dmp_inflate(f, (2, 3), 1, ZZ)
[[1, 0, 0, 2], [], [3, 0, 0, 4]]
"""
if not u:
return dup_inflate(f, M[0], K)
if all(m == 1 for m in M):
return f
else:
return _rec_inflate(f, M, u, 0, K)
def dmp_exclude(f, u, K):
"""
Exclude useless levels from ``f``.
Return the levels excluded, the new excluded ``f``, and the new ``u``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_exclude
>>> f = ZZ.map([[[1]], [[1], [2]]])
>>> dmp_exclude(f, 2, ZZ)
([2], [[1], [1, 2]], 1)
"""
if not u or dmp_ground_p(f, None, u):
return [], f, u
J, F = [], dmp_to_dict(f, u)
for j in range(0, u + 1):
for monom in F.keys():
if monom[j]:
break
else:
J.append(j)
if not J:
return [], f, u
f = {}
for monom, coeff in F.items():
monom = list(monom)
for j in reversed(J):
del monom[j]
f[tuple(monom)] = coeff
u -= len(J)
return J, dmp_from_dict(f, u, K), u
def dmp_include(f, J, u, K):
"""
Include useless levels in ``f``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_include
>>> f = ZZ.map([[1], [1, 2]])
>>> dmp_include(f, [2], 1, ZZ)
[[[1]], [[1], [2]]]
"""
if not J:
return f
F, f = dmp_to_dict(f, u), {}
for monom, coeff in F.items():
monom = list(monom)
for j in J:
monom.insert(j, 0)
f[tuple(monom)] = coeff
u += len(J)
return dmp_from_dict(f, u, K)
def dmp_inject(f, u, K, front=False):
"""
Convert ``f`` from ``K[X][Y]`` to ``K[X,Y]``.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_inject
>>> R, x,y = ring("x,y", ZZ)
>>> dmp_inject([R(1), x + 2], 0, R.to_domain())
([[[1]], [[1], [2]]], 2)
>>> dmp_inject([R(1), x + 2], 0, R.to_domain(), front=True)
([[[1]], [[1, 2]]], 2)
"""
f, h = dmp_to_dict(f, u), {}
v = K.ngens - 1
for f_monom, g in f.items():
g = g.to_dict()
for g_monom, c in g.items():
if front:
h[g_monom + f_monom] = c
else:
h[f_monom + g_monom] = c
w = u + v + 1
return dmp_from_dict(h, w, K.dom), w
def dmp_eject(f, u, K, front=False):
"""
Convert ``f`` from ``K[X,Y]`` to ``K[X][Y]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_eject
>>> dmp_eject([[[1]], [[1], [2]]], 2, ZZ['x', 'y'])
[1, x + 2]
"""
f, h = dmp_to_dict(f, u), {}
n = K.ngens
v = u - K.ngens + 1
for monom, c in f.items():
if front:
g_monom, f_monom = monom[:n], monom[n:]
else:
g_monom, f_monom = monom[-n:], monom[:-n]
if f_monom in h:
h[f_monom][g_monom] = c
else:
h[f_monom] = {g_monom: c}
for monom, c in h.items():
h[monom] = K(c)
return dmp_from_dict(h, v - 1, K)
def dup_terms_gcd(f, K):
"""
Remove GCD of terms from ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_terms_gcd
>>> f = ZZ.map([1, 0, 1, 0, 0])
>>> dup_terms_gcd(f, ZZ)
(2, [1, 0, 1])
"""
if dup_TC(f, K) or not f:
return 0, f
i = 0
for c in reversed(f):
if not c:
i += 1
else:
break
return i, f[:-i]
def dmp_terms_gcd(f, u, K):
"""
Remove GCD of terms from ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_terms_gcd
>>> f = ZZ.map([[1, 0], [1, 0, 0], [], []])
>>> dmp_terms_gcd(f, 1, ZZ)
((2, 1), [[1], [1, 0]])
"""
if dmp_ground_TC(f, u, K) or dmp_zero_p(f, u):
return (0,)*(u + 1), f
F = dmp_to_dict(f, u)
G = monomial_min(*list(F.keys()))
if all(g == 0 for g in G):
return G, f
f = {}
for monom, coeff in F.items():
f[monomial_div(monom, G)] = coeff
return G, dmp_from_dict(f, u, K)
def _rec_list_terms(g, v, monom):
"""Recursive helper for :func:`dmp_list_terms`."""
d, terms = dmp_degree(g, v), []
if not v:
for i, c in enumerate(g):
if not c:
continue
terms.append((monom + (d - i,), c))
else:
w = v - 1
for i, c in enumerate(g):
terms.extend(_rec_list_terms(c, w, monom + (d - i,)))
return terms
def dmp_list_terms(f, u, K, order=None):
"""
List all non-zero terms from ``f`` in the given order ``order``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_list_terms
>>> f = ZZ.map([[1, 1], [2, 3]])
>>> dmp_list_terms(f, 1, ZZ)
[((1, 1), 1), ((1, 0), 1), ((0, 1), 2), ((0, 0), 3)]
>>> dmp_list_terms(f, 1, ZZ, order='grevlex')
[((1, 1), 1), ((1, 0), 1), ((0, 1), 2), ((0, 0), 3)]
"""
def sort(terms, O):
return sorted(terms, key=lambda term: O(term[0]), reverse=True)
terms = _rec_list_terms(f, u, ())
if not terms:
return [((0,)*(u + 1), K.zero)]
if order is None:
return terms
else:
return sort(terms, monomial_key(order))
def dup_apply_pairs(f, g, h, args, K):
"""
Apply ``h`` to pairs of coefficients of ``f`` and ``g``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_apply_pairs
>>> h = lambda x, y, z: 2*x + y - z
>>> dup_apply_pairs([1, 2, 3], [3, 2, 1], h, (1,), ZZ)
[4, 5, 6]
"""
n, m = len(f), len(g)
if n != m:
if n > m:
g = [K.zero]*(n - m) + g
else:
f = [K.zero]*(m - n) + f
result = []
for a, b in zip(f, g):
result.append(h(a, b, *args))
return dup_strip(result)
def dmp_apply_pairs(f, g, h, args, u, K):
"""
Apply ``h`` to pairs of coefficients of ``f`` and ``g``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_apply_pairs
>>> h = lambda x, y, z: 2*x + y - z
>>> dmp_apply_pairs([[1], [2, 3]], [[3], [2, 1]], h, (1,), 1, ZZ)
[[4], [5, 6]]
"""
if not u:
return dup_apply_pairs(f, g, h, args, K)
n, m, v = len(f), len(g), u - 1
if n != m:
if n > m:
g = dmp_zeros(n - m, v, K) + g
else:
f = dmp_zeros(m - n, v, K) + f
result = []
for a, b in zip(f, g):
result.append(dmp_apply_pairs(a, b, h, args, v, K))
return dmp_strip(result, u)
def dup_slice(f, m, n, K):
"""Take a continuous subsequence of terms of ``f`` in ``K[x]``. """
k = len(f)
if k >= m:
M = k - m
else:
M = 0
if k >= n:
N = k - n
else:
N = 0
f = f[N:M]
if not f:
return []
else:
return f + [K.zero]*m
def dmp_slice(f, m, n, u, K):
"""Take a continuous subsequence of terms of ``f`` in ``K[X]``. """
return dmp_slice_in(f, m, n, 0, u, K)
def dmp_slice_in(f, m, n, j, u, K):
"""Take a continuous subsequence of terms of ``f`` in ``x_j`` in ``K[X]``. """
if j < 0 or j > u:
raise IndexError("-%s <= j < %s expected, got %s" % (u, u, j))
if not u:
return dup_slice(f, m, n, K)
f, g = dmp_to_dict(f, u), {}
for monom, coeff in f.items():
k = monom[j]
if k < m or k >= n:
monom = monom[:j] + (0,) + monom[j + 1:]
if monom in g:
g[monom] += coeff
else:
g[monom] = coeff
return dmp_from_dict(g, u, K)
def dup_random(n, a, b, K):
"""
Return a polynomial of degree ``n`` with coefficients in ``[a, b]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_random
>>> dup_random(3, -10, 10, ZZ) #doctest: +SKIP
[-2, -8, 9, -4]
"""
f = [ K.convert(random.randint(a, b)) for _ in range(0, n + 1) ]
while not f[0]:
f[0] = K.convert(random.randint(a, b))
return f
|
2a6e08e44f8208533900c2d82a82798e9ba83908e6eeb291e8a9010a20c637fd | """Algorithms for computing symbolic roots of polynomials. """
from __future__ import print_function, division
import math
from sympy.core import S, I, pi
from sympy.core.compatibility import ordered, reduce
from sympy.core.exprtools import factor_terms
from sympy.core.function import _mexpand
from sympy.core.logic import fuzzy_not
from sympy.core.mul import expand_2arg, Mul
from sympy.core.numbers import Rational, igcd, comp
from sympy.core.power import Pow
from sympy.core.relational import Eq
from sympy.core.symbol import Dummy, Symbol, symbols
from sympy.core.sympify import sympify
from sympy.functions import exp, sqrt, im, cos, acos, Piecewise
from sympy.functions.elementary.miscellaneous import root
from sympy.ntheory import divisors, isprime, nextprime
from sympy.polys.polyerrors import (PolynomialError, GeneratorsNeeded,
DomainError)
from sympy.polys.polyquinticconst import PolyQuintic
from sympy.polys.polytools import Poly, cancel, factor, gcd_list, discriminant
from sympy.polys.rationaltools import together
from sympy.polys.specialpolys import cyclotomic_poly
from sympy.simplify import simplify, powsimp
from sympy.utilities import public
def roots_linear(f):
"""Returns a list of roots of a linear polynomial."""
r = -f.nth(0)/f.nth(1)
dom = f.get_domain()
if not dom.is_Numerical:
if dom.is_Composite:
r = factor(r)
else:
r = simplify(r)
return [r]
def roots_quadratic(f):
"""Returns a list of roots of a quadratic polynomial. If the domain is ZZ
then the roots will be sorted with negatives coming before positives.
The ordering will be the same for any numerical coefficients as long as
the assumptions tested are correct, otherwise the ordering will not be
sorted (but will be canonical).
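    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> from sympy.polys.polyroots import roots_quadratic
    >>> roots_quadratic(Poly(x**2 - 4, x))
    [-2, 2]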
"""
a, b, c = f.all_coeffs()
dom = f.get_domain()
def _sqrt(d):
# remove squares from square root since both will be represented
# in the results; a similar thing is happening in roots() but
# must be duplicated here because not all quadratics are binomials
co = []
other = []
for di in Mul.make_args(d):
if di.is_Pow and di.exp.is_Integer and di.exp % 2 == 0:
co.append(Pow(di.base, di.exp//2))
else:
other.append(di)
if co:
d = Mul(*other)
co = Mul(*co)
return co*sqrt(d)
return sqrt(d)
def _simplify(expr):
if dom.is_Composite:
return factor(expr)
else:
return simplify(expr)
if c is S.Zero:
r0, r1 = S.Zero, -b/a
if not dom.is_Numerical:
r1 = _simplify(r1)
elif r1.is_negative:
r0, r1 = r1, r0
elif b is S.Zero:
r = -c/a
if not dom.is_Numerical:
r = _simplify(r)
R = _sqrt(r)
r0 = -R
r1 = R
else:
d = b**2 - 4*a*c
A = 2*a
B = -b/A
if not dom.is_Numerical:
d = _simplify(d)
B = _simplify(B)
D = factor_terms(_sqrt(d)/A)
r0 = B - D
r1 = B + D
if a.is_negative:
r0, r1 = r1, r0
elif not dom.is_Numerical:
r0, r1 = [expand_2arg(i) for i in (r0, r1)]
return [r0, r1]
def roots_cubic(f, trig=False):
"""Returns a list of roots of a cubic polynomial.
References
==========
[1] https://en.wikipedia.org/wiki/Cubic_function, General formula for roots,
(accessed November 17, 2014).
"""
if trig:
a, b, c, d = f.all_coeffs()
p = (3*a*c - b**2)/3/a**2
q = (2*b**3 - 9*a*b*c + 27*a**2*d)/(27*a**3)
D = 18*a*b*c*d - 4*b**3*d + b**2*c**2 - 4*a*c**3 - 27*a**2*d**2
if (D > 0) == True:
rv = []
for k in range(3):
rv.append(2*sqrt(-p/3)*cos(acos(q/p*sqrt(-3/p)*Rational(3, 2))/3 - k*pi*Rational(2, 3)))
return [i - b/3/a for i in rv]
_, a, b, c = f.monic().all_coeffs()
if c is S.Zero:
x1, x2 = roots([1, a, b], multiple=True)
return [x1, S.Zero, x2]
p = b - a**2/3
q = c - a*b/3 + 2*a**3/27
pon3 = p/3
aon3 = a/3
u1 = None
if p is S.Zero:
if q is S.Zero:
return [-aon3]*3
if q.is_real:
if q.is_positive:
u1 = -root(q, 3)
elif q.is_negative:
u1 = root(-q, 3)
elif q is S.Zero:
y1, y2 = roots([1, 0, p], multiple=True)
return [tmp - aon3 for tmp in [y1, S.Zero, y2]]
elif q.is_real and q.is_negative:
u1 = -root(-q/2 + sqrt(q**2/4 + pon3**3), 3)
coeff = I*sqrt(3)/2
if u1 is None:
u1 = S.One
u2 = Rational(-1, 2) + coeff
u3 = Rational(-1, 2) - coeff
a, b, c, d = S(1), a, b, c
D0 = b**2 - 3*a*c
D1 = 2*b**3 - 9*a*b*c + 27*a**2*d
C = root((D1 + sqrt(D1**2 - 4*D0**3))/2, 3)
return [-(b + uk*C + D0/C/uk)/3/a for uk in [u1, u2, u3]]
u2 = u1*(Rational(-1, 2) + coeff)
u3 = u1*(Rational(-1, 2) - coeff)
if p is S.Zero:
return [u1 - aon3, u2 - aon3, u3 - aon3]
soln = [
-u1 + pon3/u1 - aon3,
-u2 + pon3/u2 - aon3,
-u3 + pon3/u3 - aon3
]
return soln
def _roots_quartic_euler(p, q, r, a):
"""
Descartes-Euler solution of the quartic equation
Parameters
==========
p, q, r: coefficients of ``x**4 + p*x**2 + q*x + r``
a: shift of the roots
Notes
=====
This is a helper function for ``roots_quartic``.
Look for solutions of the form ::
``x1 = sqrt(R) - sqrt(A + B*sqrt(R))``
``x2 = -sqrt(R) - sqrt(A - B*sqrt(R))``
``x3 = -sqrt(R) + sqrt(A - B*sqrt(R))``
``x4 = sqrt(R) + sqrt(A + B*sqrt(R))``
To satisfy the quartic equation one must have
``p = -2*(R + A); q = -4*B*R; r = (R - A)**2 - B**2*R``
so that ``R`` must satisfy the Descartes-Euler resolvent equation
``64*R**3 + 32*p*R**2 + (4*p**2 - 16*r)*R - q**2 = 0``
If the resolvent does not have a rational solution, return None;
in that case it is likely that the Ferrari method gives a simpler
solution.
Examples
========
>>> from sympy import S
>>> from sympy.polys.polyroots import _roots_quartic_euler
>>> p, q, r = -S(64)/5, -S(512)/125, -S(1024)/3125
>>> _roots_quartic_euler(p, q, r, S(0))[0]
-sqrt(32*sqrt(5)/125 + 16/5) + 4*sqrt(5)/5
"""
# solve the resolvent equation
x = Dummy('x')
eq = 64*x**3 + 32*p*x**2 + (4*p**2 - 16*r)*x - q**2
xsols = list(roots(Poly(eq, x), cubics=False).keys())
xsols = [sol for sol in xsols if sol.is_rational and sol.is_nonzero]
if not xsols:
return None
R = max(xsols)
c1 = sqrt(R)
B = -q*c1/(4*R)
A = -R - p/2
c2 = sqrt(A + B)
c3 = sqrt(A - B)
return [c1 - c2 - a, -c1 - c3 - a, -c1 + c3 - a, c1 + c2 - a]
def roots_quartic(f):
r"""
Returns a list of roots of a quartic polynomial.
There are many references for solving quartic expressions available [1-5].
This reviewer has found that many of them require one to select from among
2 or more possible sets of solutions and that some solutions work when one
is searching for real roots but don't work when searching for complex roots
(though this is not always stated clearly). The following routine has been
tested and found to be correct for 0, 2 or 4 complex roots.
The quasisymmetric case solution [6] looks for quartics that have the form
`x**4 + A*x**3 + B*x**2 + C*x + D = 0` where `(C/A)**2 = D`.
Although no general solution that is always applicable for all
coefficients is known to this reviewer, certain conditions are tested
to determine the simplest 4 expressions that can be returned:
1) `f = c + a*(a**2/8 - b/2) == 0`
2) `g = d - a*(a*(3*a**2/256 - b/16) + c/4) = 0`
3) if `f != 0` and `g != 0` and `p = -d + a*c/4 - b**2/12` then
a) `p == 0`
b) `p != 0`
Examples
========
>>> from sympy import Poly, symbols, I
>>> from sympy.polys.polyroots import roots_quartic
>>> r = roots_quartic(Poly('x**4-6*x**3+17*x**2-26*x+20'))
>>> # 4 complex roots: 1+-I*sqrt(3), 2+-I
>>> sorted(str(tmp.evalf(n=2)) for tmp in r)
['1.0 + 1.7*I', '1.0 - 1.7*I', '2.0 + 1.0*I', '2.0 - 1.0*I']
References
==========
1. http://mathforum.org/dr.math/faq/faq.cubic.equations.html
2. https://en.wikipedia.org/wiki/Quartic_function#Summary_of_Ferrari.27s_method
3. http://planetmath.org/encyclopedia/GaloisTheoreticDerivationOfTheQuarticFormula.html
4. http://staff.bath.ac.uk/masjhd/JHD-CA.pdf
5. http://www.albmath.org/files/Math_5713.pdf
6. http://www.statemaster.com/encyclopedia/Quartic-equation
7. eqworld.ipmnet.ru/en/solutions/ae/ae0108.pdf
"""
_, a, b, c, d = f.monic().all_coeffs()
if not d:
return [S.Zero] + roots([1, a, b, c], multiple=True)
elif (c/a)**2 == d:
x, m = f.gen, c/a
g = Poly(x**2 + a*x + b - 2*m, x)
z1, z2 = roots_quadratic(g)
h1 = Poly(x**2 - z1*x + m, x)
h2 = Poly(x**2 - z2*x + m, x)
r1 = roots_quadratic(h1)
r2 = roots_quadratic(h2)
return r1 + r2
else:
a2 = a**2
e = b - 3*a2/8
f = _mexpand(c + a*(a2/8 - b/2))
g = _mexpand(d - a*(a*(3*a2/256 - b/16) + c/4))
aon4 = a/4
if f is S.Zero:
y1, y2 = [sqrt(tmp) for tmp in
roots([1, e, g], multiple=True)]
return [tmp - aon4 for tmp in [-y1, -y2, y1, y2]]
if g is S.Zero:
y = [S.Zero] + roots([1, 0, e, f], multiple=True)
return [tmp - aon4 for tmp in y]
else:
# Descartes-Euler method, see [7]
sols = _roots_quartic_euler(e, f, g, aon4)
if sols:
return sols
# Ferrari method, see [1, 2]
a2 = a**2
e = b - 3*a2/8
f = c + a*(a2/8 - b/2)
g = d - a*(a*(3*a2/256 - b/16) + c/4)
p = -e**2/12 - g
q = -e**3/108 + e*g/3 - f**2/8
TH = Rational(1, 3)
def _ans(y):
w = sqrt(e + 2*y)
arg1 = 3*e + 2*y
arg2 = 2*f/w
ans = []
for s in [-1, 1]:
root = sqrt(-(arg1 + s*arg2))
for t in [-1, 1]:
ans.append((s*w - t*root)/2 - aon4)
return ans
# p == 0 case
y1 = e*Rational(-5, 6) - q**TH
if p.is_zero:
return _ans(y1)
# if p != 0 then u below is not 0
root = sqrt(q**2/4 + p**3/27)
r = -q/2 + root # or -q/2 - root
u = r**TH # primary root of solve(x**3 - r, x)
y2 = e*Rational(-5, 6) + u - p/u/3
if fuzzy_not(p.is_zero):
return _ans(y2)
# sort it out once they know the values of the coefficients
return [Piecewise((a1, Eq(p, 0)), (a2, True))
for a1, a2 in zip(_ans(y1), _ans(y2))]
def roots_binomial(f):
"""Returns a list of roots of a binomial polynomial. If the domain is ZZ
then the roots will be sorted with negatives coming before positives.
The ordering will be the same for any numerical coefficients as long as
the assumptions tested are correct, otherwise the ordering will not be
sorted (but will be canonical).
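    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> from sympy.polys.polyroots import roots_binomial
    >>> roots_binomial(Poly(x**2 + 4, x))
    [-2*I, 2*I]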
"""
n = f.degree()
a, b = f.nth(n), f.nth(0)
base = -cancel(b/a)
alpha = root(base, n)
if alpha.is_number:
alpha = alpha.expand(complex=True)
# define some parameters that will allow us to order the roots.
# If the domain is ZZ this is guaranteed to return roots sorted
# with reals before non-real roots and non-real sorted according
# to real part and imaginary part, e.g. -1, 1, -1 + I, 2 - I
    neg = base.is_negative
    even = n % 2 == 0
    # big defaults to False, so the pair-reordering step below only runs for an
    # even n with base in (-1, 0)
    big = False
    if neg:
        if even == True and (base + 1).is_positive:
            big = True
# get the indices in the right order so the computed
# roots will be sorted when the domain is ZZ
ks = []
imax = n//2
if even:
ks.append(imax)
imax -= 1
if not neg:
ks.append(0)
for i in range(imax, 0, -1):
if neg:
ks.extend([i, -i])
else:
ks.extend([-i, i])
if neg:
ks.append(0)
if big:
for i in range(0, len(ks), 2):
pair = ks[i: i + 2]
pair = list(reversed(pair))
# compute the roots
roots, d = [], 2*I*pi/n
for k in ks:
zeta = exp(k*d).expand(complex=True)
roots.append((alpha*zeta).expand(power_base=False))
return roots
def _inv_totient_estimate(m):
"""
Find ``(L, U)`` such that ``L <= phi^-1(m) <= U``.
Examples
========
>>> from sympy.polys.polyroots import _inv_totient_estimate
>>> _inv_totient_estimate(192)
(192, 840)
>>> _inv_totient_estimate(400)
(400, 1750)
"""
primes = [ d + 1 for d in divisors(m) if isprime(d + 1) ]
a, b = 1, 1
for p in primes:
a *= p
b *= p - 1
L = m
U = int(math.ceil(m*(float(a)/b)))
P = p = 2
primes = []
while P <= U:
p = nextprime(p)
primes.append(p)
P *= p
P //= p
b = 1
for p in primes[:-1]:
b *= p - 1
U = int(math.ceil(m*(float(P)/b)))
return L, U
def roots_cyclotomic(f, factor=False):
"""Compute roots of cyclotomic polynomials. """
L, U = _inv_totient_estimate(f.degree())
for n in range(L, U + 1):
g = cyclotomic_poly(n, f.gen, polys=True)
if f.expr == g.expr:
break
else: # pragma: no cover
raise RuntimeError("failed to find index of a cyclotomic polynomial")
roots = []
if not factor:
# get the indices in the right order so the computed
# roots will be sorted
h = n//2
ks = [i for i in range(1, n + 1) if igcd(i, n) == 1]
ks.sort(key=lambda x: (x, -1) if x <= h else (abs(x - n), 1))
d = 2*I*pi/n
for k in reversed(ks):
roots.append(exp(k*d).expand(complex=True))
else:
g = Poly(f, extension=root(-1, n))
for h, _ in ordered(g.factor_list()[1]):
roots.append(-h.TC())
return roots
def roots_quintic(f):
"""
Calculate exact roots of a solvable quintic
"""
result = []
coeff_5, coeff_4, p, q, r, s = f.all_coeffs()
# Eqn must be of the form x^5 + px^3 + qx^2 + rx + s
if coeff_4:
return result
if coeff_5 != 1:
l = [p/coeff_5, q/coeff_5, r/coeff_5, s/coeff_5]
if not all(coeff.is_Rational for coeff in l):
return result
f = Poly(f/coeff_5)
quintic = PolyQuintic(f)
# Eqn standardized. Algo for solving starts here
if not f.is_irreducible:
return result
f20 = quintic.f20
# Check if f20 has linear factors over domain Z
if f20.is_irreducible:
return result
# Now, we know that f is solvable
for _factor in f20.factor_list()[1]:
if _factor[0].is_linear:
theta = _factor[0].root(0)
break
d = discriminant(f)
delta = sqrt(d)
# zeta = a fifth root of unity
zeta1, zeta2, zeta3, zeta4 = quintic.zeta
T = quintic.T(theta, d)
tol = S(1e-10)
alpha = T[1] + T[2]*delta
alpha_bar = T[1] - T[2]*delta
beta = T[3] + T[4]*delta
beta_bar = T[3] - T[4]*delta
disc = alpha**2 - 4*beta
disc_bar = alpha_bar**2 - 4*beta_bar
l0 = quintic.l0(theta)
l1 = _quintic_simplify((-alpha + sqrt(disc)) / S(2))
l4 = _quintic_simplify((-alpha - sqrt(disc)) / S(2))
l2 = _quintic_simplify((-alpha_bar + sqrt(disc_bar)) / S(2))
l3 = _quintic_simplify((-alpha_bar - sqrt(disc_bar)) / S(2))
order = quintic.order(theta, d)
test = (order*delta.n()) - ( (l1.n() - l4.n())*(l2.n() - l3.n()) )
# Comparing floats
if not comp(test, 0, tol):
l2, l3 = l3, l2
# Now we have correct order of l's
R1 = l0 + l1*zeta1 + l2*zeta2 + l3*zeta3 + l4*zeta4
R2 = l0 + l3*zeta1 + l1*zeta2 + l4*zeta3 + l2*zeta4
R3 = l0 + l2*zeta1 + l4*zeta2 + l1*zeta3 + l3*zeta4
R4 = l0 + l4*zeta1 + l3*zeta2 + l2*zeta3 + l1*zeta4
Res = [None, [None]*5, [None]*5, [None]*5, [None]*5]
Res_n = [None, [None]*5, [None]*5, [None]*5, [None]*5]
sol = Symbol('sol')
# Simplifying improves performance a lot for exact expressions
R1 = _quintic_simplify(R1)
R2 = _quintic_simplify(R2)
R3 = _quintic_simplify(R3)
R4 = _quintic_simplify(R4)
# Solve imported here. Causing problems if imported as 'solve'
# and hence the changed name
from sympy.solvers.solvers import solve as _solve
a, b = symbols('a b', cls=Dummy)
_sol = _solve( sol**5 - a - I*b, sol)
for i in range(5):
_sol[i] = factor(_sol[i])
R1 = R1.as_real_imag()
R2 = R2.as_real_imag()
R3 = R3.as_real_imag()
R4 = R4.as_real_imag()
for i, currentroot in enumerate(_sol):
Res[1][i] = _quintic_simplify(currentroot.subs({ a: R1[0], b: R1[1] }))
Res[2][i] = _quintic_simplify(currentroot.subs({ a: R2[0], b: R2[1] }))
Res[3][i] = _quintic_simplify(currentroot.subs({ a: R3[0], b: R3[1] }))
Res[4][i] = _quintic_simplify(currentroot.subs({ a: R4[0], b: R4[1] }))
for i in range(1, 5):
for j in range(5):
Res_n[i][j] = Res[i][j].n()
Res[i][j] = _quintic_simplify(Res[i][j])
r1 = Res[1][0]
r1_n = Res_n[1][0]
for i in range(5):
if comp(im(r1_n*Res_n[4][i]), 0, tol):
r4 = Res[4][i]
break
# Now we have various Res values. Each will be a list of five
# values. We have to pick one r value from those five for each Res
u, v = quintic.uv(theta, d)
testplus = (u + v*delta*sqrt(5)).n()
testminus = (u - v*delta*sqrt(5)).n()
# Evaluated numbers suffixed with _n
# We will use evaluated numbers for calculation. Much faster.
r4_n = r4.n()
r2 = r3 = None
for i in range(5):
r2temp_n = Res_n[2][i]
for j in range(5):
# Again storing away the exact number and using
# evaluated numbers in computations
r3temp_n = Res_n[3][j]
if (comp((r1_n*r2temp_n**2 + r4_n*r3temp_n**2 - testplus).n(), 0, tol) and
comp((r3temp_n*r1_n**2 + r2temp_n*r4_n**2 - testminus).n(), 0, tol)):
r2 = Res[2][i]
r3 = Res[3][j]
break
if r2:
break
# Now, we have r's so we can get roots
x1 = (r1 + r2 + r3 + r4)/5
x2 = (r1*zeta4 + r2*zeta3 + r3*zeta2 + r4*zeta1)/5
x3 = (r1*zeta3 + r2*zeta1 + r3*zeta4 + r4*zeta2)/5
x4 = (r1*zeta2 + r2*zeta4 + r3*zeta1 + r4*zeta3)/5
x5 = (r1*zeta1 + r2*zeta2 + r3*zeta3 + r4*zeta4)/5
result = [x1, x2, x3, x4, x5]
# Now check if solutions are distinct
saw = set()
for r in result:
r = r.n(2)
if r in saw:
# Roots were identical. Abort, return []
# and fall back to usual solve
return []
saw.add(r)
return result
def _quintic_simplify(expr):
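    """Simplify intermediate quintic expressions with powsimp, cancel and together."""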
expr = powsimp(expr)
expr = cancel(expr)
return together(expr)
def _integer_basis(poly):
"""Compute coefficient basis for a polynomial over integers.
    Returns the integer ``div`` such that substituting ``x = div*y`` into
    ``p(x)`` yields ``p(x) = m*q(y)``, where the coefficients of ``q`` are
    smaller than those of ``p``.
For example ``x**5 + 512*x + 1024 = 0``
with ``div = 4`` becomes ``y**5 + 2*y + 1 = 0``
Returns the integer ``div`` or ``None`` if there is no possible scaling.
Examples
========
>>> from sympy.polys import Poly
>>> from sympy.abc import x
>>> from sympy.polys.polyroots import _integer_basis
>>> p = Poly(x**5 + 512*x + 1024, x, domain='ZZ')
>>> _integer_basis(p)
4
"""
monoms, coeffs = list(zip(*poly.terms()))
monoms, = list(zip(*monoms))
coeffs = list(map(abs, coeffs))
if coeffs[0] < coeffs[-1]:
coeffs = list(reversed(coeffs))
n = monoms[0]
monoms = [n - i for i in reversed(monoms)]
else:
return None
monoms = monoms[:-1]
coeffs = coeffs[:-1]
divs = reversed(divisors(gcd_list(coeffs))[1:])
try:
div = next(divs)
except StopIteration:
return None
while True:
for monom, coeff in zip(monoms, coeffs):
if coeff % div**monom != 0:
try:
div = next(divs)
except StopIteration:
return None
else:
break
else:
return div
def preprocess_roots(poly):
"""Try to get rid of symbolic coefficients from ``poly``. """
coeff = S.One
poly_func = poly.func
try:
_, poly = poly.clear_denoms(convert=True)
except DomainError:
return coeff, poly
poly = poly.primitive()[1]
poly = poly.retract()
# TODO: This is fragile. Figure out how to make this independent of construct_domain().
if poly.get_domain().is_Poly and all(c.is_term for c in poly.rep.coeffs()):
poly = poly.inject()
strips = list(zip(*poly.monoms()))
gens = list(poly.gens[1:])
base, strips = strips[0], strips[1:]
for gen, strip in zip(list(gens), strips):
reverse = False
if strip[0] < strip[-1]:
strip = reversed(strip)
reverse = True
ratio = None
for a, b in zip(base, strip):
if not a and not b:
continue
elif not a or not b:
break
elif b % a != 0:
break
else:
_ratio = b // a
if ratio is None:
ratio = _ratio
elif ratio != _ratio:
break
else:
if reverse:
ratio = -ratio
poly = poly.eval(gen, 1)
coeff *= gen**(-ratio)
gens.remove(gen)
if gens:
poly = poly.eject(*gens)
if poly.is_univariate and poly.get_domain().is_ZZ:
basis = _integer_basis(poly)
if basis is not None:
n = poly.degree()
def func(k, coeff):
return coeff//basis**(n - k[0])
poly = poly.termwise(func)
coeff *= basis
if not isinstance(poly, poly_func):
poly = poly_func(poly)
return coeff, poly
@public
def roots(f, *gens, **flags):
"""
Computes symbolic roots of a univariate polynomial.
Given a univariate polynomial f with symbolic coefficients (or
a list of the polynomial's coefficients), returns a dictionary
with its roots and their multiplicities.
Only roots expressible via radicals will be returned. To get
a complete set of roots use RootOf class or numerical methods
instead. By default cubic and quartic formulas are used in
the algorithm. To disable them because of unreadable output
set ``cubics=False`` or ``quartics=False`` respectively. If cubic
roots are real but are expressed in terms of complex numbers
(casus irreducibilis [1]) the ``trig`` flag can be set to True to
have the solutions returned in terms of cosine and inverse cosine
functions.
To get roots from a specific domain set the ``filter`` flag with
one of the following specifiers: Z, Q, R, I, C. By default all
roots are returned (this is equivalent to setting ``filter='C'``).
By default a dictionary is returned giving a compact result in
case of multiple roots. However to get a list containing all
those roots set the ``multiple`` flag to True; the list will
have identical roots appearing next to each other in the result.
(For a given Poly, the all_roots method will give the roots in
sorted numerical order.)
Examples
========
>>> from sympy import Poly, roots
>>> from sympy.abc import x, y
>>> roots(x**2 - 1, x)
{-1: 1, 1: 1}
>>> p = Poly(x**2-1, x)
>>> roots(p)
{-1: 1, 1: 1}
>>> p = Poly(x**2-y, x, y)
>>> roots(Poly(p, x))
{-sqrt(y): 1, sqrt(y): 1}
>>> roots(x**2 - y, x)
{-sqrt(y): 1, sqrt(y): 1}
>>> roots([1, 0, -1])
{-1: 1, 1: 1}
References
==========
.. [1] https://en.wikipedia.org/wiki/Cubic_function#Trigonometric_.28and_hyperbolic.29_method
"""
from sympy.polys.polytools import to_rational_coeffs
flags = dict(flags)
auto = flags.pop('auto', True)
cubics = flags.pop('cubics', True)
trig = flags.pop('trig', False)
quartics = flags.pop('quartics', True)
quintics = flags.pop('quintics', False)
multiple = flags.pop('multiple', False)
filter = flags.pop('filter', None)
predicate = flags.pop('predicate', None)
if isinstance(f, list):
if gens:
raise ValueError('redundant generators given')
x = Dummy('x')
poly, i = {}, len(f) - 1
for coeff in f:
poly[i], i = sympify(coeff), i - 1
f = Poly(poly, x, field=True)
else:
try:
f = Poly(f, *gens, **flags)
            if f.length() == 2 and f.degree() != 1:
# check for foo**n factors in the constant
n = f.degree()
npow_bases = []
others = []
expr = f.as_expr()
con = expr.as_independent(*gens)[0]
for p in Mul.make_args(con):
if p.is_Pow and not p.exp % n:
npow_bases.append(p.base**(p.exp/n))
else:
others.append(p)
if npow_bases:
b = Mul(*npow_bases)
B = Dummy()
d = roots(Poly(expr - con + B**n*Mul(*others), *gens,
**flags), *gens, **flags)
rv = {}
for k, v in d.items():
rv[k.subs(B, b)] = v
return rv
except GeneratorsNeeded:
if multiple:
return []
else:
return {}
if f.is_multivariate:
raise PolynomialError('multivariate polynomials are not supported')
def _update_dict(result, currentroot, k):
if currentroot in result:
result[currentroot] += k
else:
result[currentroot] = k
def _try_decompose(f):
"""Find roots using functional decomposition. """
factors, roots = f.decompose(), []
for currentroot in _try_heuristics(factors[0]):
roots.append(currentroot)
for currentfactor in factors[1:]:
previous, roots = list(roots), []
for currentroot in previous:
g = currentfactor - Poly(currentroot, f.gen)
for currentroot in _try_heuristics(g):
roots.append(currentroot)
return roots
def _try_heuristics(f):
"""Find roots using formulas and some tricks. """
if f.is_ground:
return []
if f.is_monomial:
return [S.Zero]*f.degree()
if f.length() == 2:
if f.degree() == 1:
return list(map(cancel, roots_linear(f)))
else:
return roots_binomial(f)
result = []
for i in [-1, 1]:
if not f.eval(i):
f = f.quo(Poly(f.gen - i, f.gen))
result.append(i)
break
n = f.degree()
if n == 1:
result += list(map(cancel, roots_linear(f)))
elif n == 2:
result += list(map(cancel, roots_quadratic(f)))
elif f.is_cyclotomic:
result += roots_cyclotomic(f)
elif n == 3 and cubics:
result += roots_cubic(f, trig=trig)
elif n == 4 and quartics:
result += roots_quartic(f)
elif n == 5 and quintics:
result += roots_quintic(f)
return result
(k,), f = f.terms_gcd()
if not k:
zeros = {}
else:
zeros = {S.Zero: k}
coeff, f = preprocess_roots(f)
if auto and f.get_domain().is_Ring:
f = f.to_field()
rescale_x = None
translate_x = None
result = {}
if not f.is_ground:
dom = f.get_domain()
if not dom.is_Exact and dom.is_Numerical:
for r in f.nroots():
_update_dict(result, r, 1)
elif f.degree() == 1:
result[roots_linear(f)[0]] = 1
elif f.length() == 2:
roots_fun = roots_quadratic if f.degree() == 2 else roots_binomial
for r in roots_fun(f):
_update_dict(result, r, 1)
else:
_, factors = Poly(f.as_expr()).factor_list()
if len(factors) == 1 and f.degree() == 2:
for r in roots_quadratic(f):
_update_dict(result, r, 1)
else:
if len(factors) == 1 and factors[0][1] == 1:
if f.get_domain().is_EX:
res = to_rational_coeffs(f)
if res:
if res[0] is None:
translate_x, f = res[2:]
else:
rescale_x, f = res[1], res[-1]
result = roots(f)
if not result:
for currentroot in _try_decompose(f):
_update_dict(result, currentroot, 1)
else:
for r in _try_heuristics(f):
_update_dict(result, r, 1)
else:
for currentroot in _try_decompose(f):
_update_dict(result, currentroot, 1)
else:
for currentfactor, k in factors:
for r in _try_heuristics(Poly(currentfactor, f.gen, field=True)):
_update_dict(result, r, k)
if coeff is not S.One:
        _result, result = result, {}
for currentroot, k in _result.items():
result[coeff*currentroot] = k
if filter not in [None, 'C']:
handlers = {
'Z': lambda r: r.is_Integer,
'Q': lambda r: r.is_Rational,
'R': lambda r: all(a.is_real for a in r.as_numer_denom()),
'I': lambda r: r.is_imaginary,
}
try:
query = handlers[filter]
except KeyError:
raise ValueError("Invalid filter: %s" % filter)
for zero in dict(result).keys():
if not query(zero):
del result[zero]
if predicate is not None:
for zero in dict(result).keys():
if not predicate(zero):
del result[zero]
if rescale_x:
result1 = {}
for k, v in result.items():
result1[k*rescale_x] = v
result = result1
if translate_x:
result1 = {}
for k, v in result.items():
result1[k + translate_x] = v
result = result1
# adding zero roots after non-trivial roots have been translated
result.update(zeros)
if not multiple:
return result
else:
zeros = []
for zero in ordered(result):
zeros.extend([zero]*result[zero])
return zeros
def root_factors(f, *gens, **args):
"""
Returns all factors of a univariate polynomial.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.polys.polyroots import root_factors
>>> root_factors(x**2 - y, x)
[x - sqrt(y), x + sqrt(y)]
"""
args = dict(args)
filter = args.pop('filter', None)
F = Poly(f, *gens, **args)
if not F.is_Poly:
return [f]
if F.is_multivariate:
raise ValueError('multivariate polynomials are not supported')
x = F.gens[0]
zeros = roots(F, filter=filter)
if not zeros:
factors = [F]
else:
factors, N = [], 0
for r, n in ordered(zeros.items()):
factors, N = factors + [Poly(x - r, x)]*n, N + n
if N < F.degree():
G = reduce(lambda p, q: p*q, factors)
factors.append(F.quo(G))
if not isinstance(f, Poly):
factors = [ f.as_expr() for f in factors ]
return factors
|
57f46b674d15cdff43242633938de19ef635fe683ea27b95c6d8f91483109165 | """Arithmetics for dense recursive polynomials in ``K[x]`` or ``K[X]``. """
from __future__ import print_function, division
from sympy.polys.densebasic import (
dup_slice,
dup_LC, dmp_LC,
dup_degree, dmp_degree,
dup_strip, dmp_strip,
dmp_zero_p, dmp_zero,
dmp_one_p, dmp_one,
dmp_ground, dmp_zeros)
from sympy.polys.polyerrors import (ExactQuotientFailed, PolynomialDivisionFailed)
def dup_add_term(f, c, i, K):
"""
Add ``c*x**i`` to ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_add_term(x**2 - 1, ZZ(2), 4)
2*x**4 + x**2 - 1
"""
if not c:
return f
n = len(f)
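    # in the dense, highest-degree-first representation the coefficient of
    # x**i sits at index n - i - 1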
m = n - i - 1
if i == n - 1:
return dup_strip([f[0] + c] + f[1:])
else:
if i >= n:
return [c] + [K.zero]*(i - n) + f
else:
return f[:m] + [f[m] + c] + f[m + 1:]
def dmp_add_term(f, c, i, u, K):
"""
Add ``c(x_2..x_u)*x_0**i`` to ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_add_term(x*y + 1, 2, 2)
2*x**2 + x*y + 1
"""
if not u:
return dup_add_term(f, c, i, K)
v = u - 1
if dmp_zero_p(c, v):
return f
n = len(f)
m = n - i - 1
if i == n - 1:
return dmp_strip([dmp_add(f[0], c, v, K)] + f[1:], u)
else:
if i >= n:
return [c] + dmp_zeros(i - n, v, K) + f
else:
return f[:m] + [dmp_add(f[m], c, v, K)] + f[m + 1:]
def dup_sub_term(f, c, i, K):
"""
Subtract ``c*x**i`` from ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_sub_term(2*x**4 + x**2 - 1, ZZ(2), 4)
x**2 - 1
"""
if not c:
return f
n = len(f)
m = n - i - 1
if i == n - 1:
return dup_strip([f[0] - c] + f[1:])
else:
if i >= n:
return [-c] + [K.zero]*(i - n) + f
else:
return f[:m] + [f[m] - c] + f[m + 1:]
def dmp_sub_term(f, c, i, u, K):
"""
Subtract ``c(x_2..x_u)*x_0**i`` from ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_sub_term(2*x**2 + x*y + 1, 2, 2)
x*y + 1
"""
if not u:
return dup_add_term(f, -c, i, K)
v = u - 1
if dmp_zero_p(c, v):
return f
n = len(f)
m = n - i - 1
if i == n - 1:
return dmp_strip([dmp_sub(f[0], c, v, K)] + f[1:], u)
else:
if i >= n:
return [dmp_neg(c, v, K)] + dmp_zeros(i - n, v, K) + f
else:
return f[:m] + [dmp_sub(f[m], c, v, K)] + f[m + 1:]
def dup_mul_term(f, c, i, K):
"""
Multiply ``f`` by ``c*x**i`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_mul_term(x**2 - 1, ZZ(3), 2)
3*x**4 - 3*x**2
"""
if not c or not f:
return []
else:
return [ cf * c for cf in f ] + [K.zero]*i
def dmp_mul_term(f, c, i, u, K):
"""
Multiply ``f`` by ``c(x_2..x_u)*x_0**i`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_mul_term(x**2*y + x, 3*y, 2)
3*x**4*y**2 + 3*x**3*y
"""
if not u:
return dup_mul_term(f, c, i, K)
v = u - 1
if dmp_zero_p(f, u):
return f
if dmp_zero_p(c, v):
return dmp_zero(u)
else:
return [ dmp_mul(cf, c, v, K) for cf in f ] + dmp_zeros(i, v, K)
def dup_add_ground(f, c, K):
"""
Add an element of the ground domain to ``f``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_add_ground(x**3 + 2*x**2 + 3*x + 4, ZZ(4))
x**3 + 2*x**2 + 3*x + 8
"""
return dup_add_term(f, c, 0, K)
def dmp_add_ground(f, c, u, K):
"""
Add an element of the ground domain to ``f``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_add_ground(x**3 + 2*x**2 + 3*x + 4, ZZ(4))
x**3 + 2*x**2 + 3*x + 8
"""
return dmp_add_term(f, dmp_ground(c, u - 1), 0, u, K)
def dup_sub_ground(f, c, K):
"""
Subtract an element of the ground domain from ``f``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_sub_ground(x**3 + 2*x**2 + 3*x + 4, ZZ(4))
x**3 + 2*x**2 + 3*x
"""
return dup_sub_term(f, c, 0, K)
def dmp_sub_ground(f, c, u, K):
"""
Subtract an element of the ground domain from ``f``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_sub_ground(x**3 + 2*x**2 + 3*x + 4, ZZ(4))
x**3 + 2*x**2 + 3*x
"""
return dmp_sub_term(f, dmp_ground(c, u - 1), 0, u, K)
def dup_mul_ground(f, c, K):
"""
Multiply ``f`` by a constant value in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_mul_ground(x**2 + 2*x - 1, ZZ(3))
3*x**2 + 6*x - 3
"""
if not c or not f:
return []
else:
return [ cf * c for cf in f ]
def dmp_mul_ground(f, c, u, K):
"""
Multiply ``f`` by a constant value in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_mul_ground(2*x + 2*y, ZZ(3))
6*x + 6*y
"""
if not u:
return dup_mul_ground(f, c, K)
v = u - 1
return [ dmp_mul_ground(cf, c, v, K) for cf in f ]
def dup_quo_ground(f, c, K):
"""
Quotient by a constant in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x = ring("x", ZZ)
>>> R.dup_quo_ground(3*x**2 + 2, ZZ(2))
x**2 + 1
>>> R, x = ring("x", QQ)
>>> R.dup_quo_ground(3*x**2 + 2, QQ(2))
3/2*x**2 + 1
"""
if not c:
raise ZeroDivisionError('polynomial division')
if not f:
return f
if K.is_Field:
return [ K.quo(cf, c) for cf in f ]
else:
return [ cf // c for cf in f ]
def dmp_quo_ground(f, c, u, K):
"""
Quotient by a constant in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_quo_ground(2*x**2*y + 3*x, ZZ(2))
x**2*y + x
>>> R, x,y = ring("x,y", QQ)
>>> R.dmp_quo_ground(2*x**2*y + 3*x, QQ(2))
x**2*y + 3/2*x
"""
if not u:
return dup_quo_ground(f, c, K)
v = u - 1
return [ dmp_quo_ground(cf, c, v, K) for cf in f ]
def dup_exquo_ground(f, c, K):
"""
Exact quotient by a constant in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> R.dup_exquo_ground(x**2 + 2, QQ(2))
1/2*x**2 + 1
"""
if not c:
raise ZeroDivisionError('polynomial division')
if not f:
return f
return [ K.exquo(cf, c) for cf in f ]
def dmp_exquo_ground(f, c, u, K):
"""
Exact quotient by a constant in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x,y = ring("x,y", QQ)
>>> R.dmp_exquo_ground(x**2*y + 2*x, QQ(2))
1/2*x**2*y + x
"""
if not u:
return dup_exquo_ground(f, c, K)
v = u - 1
return [ dmp_exquo_ground(cf, c, v, K) for cf in f ]
def dup_lshift(f, n, K):
"""
Efficiently multiply ``f`` by ``x**n`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_lshift(x**2 + 1, 2)
x**4 + x**2
"""
if not f:
return f
else:
return f + [K.zero]*n
def dup_rshift(f, n, K):
"""
Efficiently divide ``f`` by ``x**n`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_rshift(x**4 + x**2, 2)
x**2 + 1
>>> R.dup_rshift(x**4 + x**2 + 2, 2)
x**2 + 1
"""
return f[:-n]
def dup_abs(f, K):
"""
Make all coefficients positive in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_abs(x**2 - 1)
x**2 + 1
"""
return [ K.abs(coeff) for coeff in f ]
def dmp_abs(f, u, K):
"""
Make all coefficients positive in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_abs(x**2*y - x)
x**2*y + x
"""
if not u:
return dup_abs(f, K)
v = u - 1
return [ dmp_abs(cf, v, K) for cf in f ]
def dup_neg(f, K):
"""
Negate a polynomial in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_neg(x**2 - 1)
-x**2 + 1
"""
return [ -coeff for coeff in f ]
def dmp_neg(f, u, K):
"""
Negate a polynomial in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_neg(x**2*y - x)
-x**2*y + x
"""
if not u:
return dup_neg(f, K)
v = u - 1
return [ dmp_neg(cf, v, K) for cf in f ]
def dup_add(f, g, K):
"""
Add dense polynomials in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_add(x**2 - 1, x - 2)
x**2 + x - 3
"""
if not f:
return g
if not g:
return f
df = dup_degree(f)
dg = dup_degree(g)
if df == dg:
return dup_strip([ a + b for a, b in zip(f, g) ])
else:
k = abs(df - dg)
if df > dg:
h, f = f[:k], f[k:]
else:
h, g = g[:k], g[k:]
return h + [ a + b for a, b in zip(f, g) ]
def dmp_add(f, g, u, K):
"""
Add dense polynomials in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_add(x**2 + y, x**2*y + x)
x**2*y + x**2 + x + y
"""
if not u:
return dup_add(f, g, K)
df = dmp_degree(f, u)
if df < 0:
return g
dg = dmp_degree(g, u)
if dg < 0:
return f
v = u - 1
if df == dg:
return dmp_strip([ dmp_add(a, b, v, K) for a, b in zip(f, g) ], u)
else:
k = abs(df - dg)
if df > dg:
h, f = f[:k], f[k:]
else:
h, g = g[:k], g[k:]
return h + [ dmp_add(a, b, v, K) for a, b in zip(f, g) ]
def dup_sub(f, g, K):
"""
Subtract dense polynomials in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_sub(x**2 - 1, x - 2)
x**2 - x + 1
"""
if not f:
return dup_neg(g, K)
if not g:
return f
df = dup_degree(f)
dg = dup_degree(g)
if df == dg:
return dup_strip([ a - b for a, b in zip(f, g) ])
else:
k = abs(df - dg)
if df > dg:
h, f = f[:k], f[k:]
else:
h, g = dup_neg(g[:k], K), g[k:]
return h + [ a - b for a, b in zip(f, g) ]
def dmp_sub(f, g, u, K):
"""
Subtract dense polynomials in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_sub(x**2 + y, x**2*y + x)
-x**2*y + x**2 - x + y
"""
if not u:
return dup_sub(f, g, K)
df = dmp_degree(f, u)
if df < 0:
return dmp_neg(g, u, K)
dg = dmp_degree(g, u)
if dg < 0:
return f
v = u - 1
if df == dg:
return dmp_strip([ dmp_sub(a, b, v, K) for a, b in zip(f, g) ], u)
else:
k = abs(df - dg)
if df > dg:
h, f = f[:k], f[k:]
else:
h, g = dmp_neg(g[:k], u, K), g[k:]
return h + [ dmp_sub(a, b, v, K) for a, b in zip(f, g) ]
def dup_add_mul(f, g, h, K):
"""
Returns ``f + g*h`` where ``f, g, h`` are in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_add_mul(x**2 - 1, x - 2, x + 2)
2*x**2 - 5
"""
return dup_add(f, dup_mul(g, h, K), K)
def dmp_add_mul(f, g, h, u, K):
"""
Returns ``f + g*h`` where ``f, g, h`` are in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_add_mul(x**2 + y, x, x + 2)
2*x**2 + 2*x + y
"""
return dmp_add(f, dmp_mul(g, h, u, K), u, K)
def dup_sub_mul(f, g, h, K):
"""
Returns ``f - g*h`` where ``f, g, h`` are in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_sub_mul(x**2 - 1, x - 2, x + 2)
3
"""
return dup_sub(f, dup_mul(g, h, K), K)
def dmp_sub_mul(f, g, h, u, K):
"""
Returns ``f - g*h`` where ``f, g, h`` are in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_sub_mul(x**2 + y, x, x + 2)
-2*x + y
"""
return dmp_sub(f, dmp_mul(g, h, u, K), u, K)
def dup_mul(f, g, K):
"""
Multiply dense polynomials in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_mul(x - 2, x + 2)
x**2 - 4
"""
if f == g:
return dup_sqr(f, K)
if not (f and g):
return []
df = dup_degree(f)
dg = dup_degree(g)
n = max(df, dg) + 1
if n < 100:
h = []
for i in range(0, df + dg + 1):
coeff = K.zero
for j in range(max(0, i - dg), min(df, i) + 1):
coeff += f[j]*g[i - j]
h.append(coeff)
return dup_strip(h)
else:
# Use Karatsuba's algorithm (divide and conquer), see e.g.:
# Joris van der Hoeven, Relax But Don't Be Too Lazy,
# J. Symbolic Computation, 11 (2002), section 3.1.1.
n2 = n//2
fl, gl = dup_slice(f, 0, n2, K), dup_slice(g, 0, n2, K)
fh = dup_rshift(dup_slice(f, n2, n, K), n2, K)
gh = dup_rshift(dup_slice(g, n2, n, K), n2, K)
lo, hi = dup_mul(fl, gl, K), dup_mul(fh, gh, K)
mid = dup_mul(dup_add(fl, fh, K), dup_add(gl, gh, K), K)
mid = dup_sub(mid, dup_add(lo, hi, K), K)
return dup_add(dup_add(lo, dup_lshift(mid, n2, K), K),
dup_lshift(hi, 2*n2, K), K)
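# Sketch of the Karatsuba recombination used above: writing
# f = fl + fh*x**n2 and g = gl + gh*x**n2, the product is
#     f*g = lo + mid*x**n2 + hi*x**(2*n2)
# with lo = fl*gl, hi = fh*gh and mid = (fl + fh)*(gl + gh) - lo - hi,
# so only three recursive half-size multiplications are needed instead of
# four; the classical O(n**2) loop is kept for small degrees where the
# recursion overhead would dominate.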
def dmp_mul(f, g, u, K):
"""
Multiply dense polynomials in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_mul(x*y + 1, x)
x**2*y + x
"""
if not u:
return dup_mul(f, g, K)
if f == g:
return dmp_sqr(f, u, K)
df = dmp_degree(f, u)
if df < 0:
return f
dg = dmp_degree(g, u)
if dg < 0:
return g
h, v = [], u - 1
for i in range(0, df + dg + 1):
coeff = dmp_zero(v)
for j in range(max(0, i - dg), min(df, i) + 1):
coeff = dmp_add(coeff, dmp_mul(f[j], g[i - j], v, K), v, K)
h.append(coeff)
return dmp_strip(h, u)
def dup_sqr(f, K):
"""
Square dense polynomials in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_sqr(x**2 + 1)
x**4 + 2*x**2 + 1
"""
df, h = len(f) - 1, []
for i in range(0, 2*df + 1):
c = K.zero
jmin = max(0, i - df)
jmax = min(i, df)
n = jmax - jmin + 1
jmax = jmin + n // 2 - 1
for j in range(jmin, jmax + 1):
c += f[j]*f[i - j]
c += c
if n & 1:
elem = f[jmax + 1]
c += elem**2
h.append(c)
return dup_strip(h)
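# The squaring loop above exploits symmetry: f[j]*f[i - j] and f[i - j]*f[j]
# contribute equally to the same coefficient, so only about half of the
# cross products are computed and then doubled via ``c += c``; when the
# number of contributing products is odd, the leftover middle self-product
# is added once as ``elem**2``.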
def dmp_sqr(f, u, K):
"""
Square dense polynomials in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_sqr(x**2 + x*y + y**2)
x**4 + 2*x**3*y + 3*x**2*y**2 + 2*x*y**3 + y**4
"""
if not u:
return dup_sqr(f, K)
df = dmp_degree(f, u)
if df < 0:
return f
h, v = [], u - 1
for i in range(0, 2*df + 1):
c = dmp_zero(v)
jmin = max(0, i - df)
jmax = min(i, df)
n = jmax - jmin + 1
jmax = jmin + n // 2 - 1
for j in range(jmin, jmax + 1):
c = dmp_add(c, dmp_mul(f[j], f[i - j], v, K), v, K)
c = dmp_mul_ground(c, K(2), v, K)
if n & 1:
elem = dmp_sqr(f[jmax + 1], v, K)
c = dmp_add(c, elem, v, K)
h.append(c)
return dmp_strip(h, u)
def dup_pow(f, n, K):
"""
Raise ``f`` to the ``n``-th power in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_pow(x - 2, 3)
x**3 - 6*x**2 + 12*x - 8
"""
if not n:
return [K.one]
if n < 0:
raise ValueError("can't raise polynomial to a negative power")
if n == 1 or not f or f == [K.one]:
return f
g = [K.one]
while True:
n, m = n//2, n
if m % 2:
g = dup_mul(g, f, K)
if not n:
break
f = dup_sqr(f, K)
return g
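# dup_pow above is binary (square-and-multiply) exponentiation: the result
# ``g`` is multiplied by the current power of ``f`` whenever the lowest bit
# of ``n`` is set, and ``f`` is squared as ``n`` is halved.  For example,
# with n = 3 the loop computes g = f, f = f**2, and finally g = f**3,
# using O(log n) multiplications and squarings overall.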
def dmp_pow(f, n, u, K):
"""
Raise ``f`` to the ``n``-th power in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_pow(x*y + 1, 3)
x**3*y**3 + 3*x**2*y**2 + 3*x*y + 1
"""
if not u:
return dup_pow(f, n, K)
if not n:
return dmp_one(u, K)
if n < 0:
raise ValueError("can't raise polynomial to a negative power")
if n == 1 or dmp_zero_p(f, u) or dmp_one_p(f, u, K):
return f
g = dmp_one(u, K)
while True:
n, m = n//2, n
if m & 1:
g = dmp_mul(g, f, u, K)
if not n:
break
f = dmp_sqr(f, u, K)
return g
def dup_pdiv(f, g, K):
"""
Polynomial pseudo-division in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_pdiv(x**2 + 1, 2*x - 4)
(2*x + 4, 20)
"""
df = dup_degree(f)
dg = dup_degree(g)
q, r, dr = [], f, df
if not g:
raise ZeroDivisionError("polynomial division")
elif df < dg:
return q, r
N = df - dg + 1
lc_g = dup_LC(g, K)
while True:
lc_r = dup_LC(r, K)
j, N = dr - dg, N - 1
Q = dup_mul_ground(q, lc_g, K)
q = dup_add_term(Q, lc_r, j, K)
R = dup_mul_ground(r, lc_g, K)
G = dup_mul_term(g, lc_r, j, K)
r = dup_sub(R, G, K)
_dr, dr = dr, dup_degree(r)
if dr < dg:
break
elif not (dr < _dr):
raise PolynomialDivisionFailed(f, g, K)
c = lc_g**N
q = dup_mul_ground(q, c, K)
r = dup_mul_ground(r, c, K)
return q, r
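# Pseudo-division above satisfies the standard identity
#     lc(g)**(df - dg + 1) * f = q*g + r,   with deg(r) < deg(g),
# which avoids fractions when K is only a ring (e.g. ZZ).  The final
# multiplication by ``c = lc_g**N`` compensates for iterations that never
# ran because the remainder degree dropped by more than one at a time.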
def dup_prem(f, g, K):
"""
Polynomial pseudo-remainder in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_prem(x**2 + 1, 2*x - 4)
20
"""
df = dup_degree(f)
dg = dup_degree(g)
r, dr = f, df
if not g:
raise ZeroDivisionError("polynomial division")
elif df < dg:
return r
N = df - dg + 1
lc_g = dup_LC(g, K)
while True:
lc_r = dup_LC(r, K)
j, N = dr - dg, N - 1
R = dup_mul_ground(r, lc_g, K)
G = dup_mul_term(g, lc_r, j, K)
r = dup_sub(R, G, K)
_dr, dr = dr, dup_degree(r)
if dr < dg:
break
elif not (dr < _dr):
raise PolynomialDivisionFailed(f, g, K)
return dup_mul_ground(r, lc_g**N, K)
def dup_pquo(f, g, K):
"""
Polynomial pseudo-quotient in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_pquo(x**2 - 1, 2*x - 2)
2*x + 2
>>> R.dup_pquo(x**2 + 1, 2*x - 4)
2*x + 4
"""
return dup_pdiv(f, g, K)[0]
def dup_pexquo(f, g, K):
"""
Polynomial exact pseudo-quotient in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_pexquo(x**2 - 1, 2*x - 2)
2*x + 2
>>> R.dup_pexquo(x**2 + 1, 2*x - 4)
Traceback (most recent call last):
...
ExactQuotientFailed: [2, -4] does not divide [1, 0, 1]
"""
q, r = dup_pdiv(f, g, K)
if not r:
return q
else:
raise ExactQuotientFailed(f, g)
def dmp_pdiv(f, g, u, K):
"""
Polynomial pseudo-division in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_pdiv(x**2 + x*y, 2*x + 2)
(2*x + 2*y - 2, -4*y + 4)
"""
if not u:
return dup_pdiv(f, g, K)
df = dmp_degree(f, u)
dg = dmp_degree(g, u)
if dg < 0:
raise ZeroDivisionError("polynomial division")
q, r, dr = dmp_zero(u), f, df
if df < dg:
return q, r
N = df - dg + 1
lc_g = dmp_LC(g, K)
while True:
lc_r = dmp_LC(r, K)
j, N = dr - dg, N - 1
Q = dmp_mul_term(q, lc_g, 0, u, K)
q = dmp_add_term(Q, lc_r, j, u, K)
R = dmp_mul_term(r, lc_g, 0, u, K)
G = dmp_mul_term(g, lc_r, j, u, K)
r = dmp_sub(R, G, u, K)
_dr, dr = dr, dmp_degree(r, u)
if dr < dg:
break
elif not (dr < _dr):
raise PolynomialDivisionFailed(f, g, K)
c = dmp_pow(lc_g, N, u - 1, K)
q = dmp_mul_term(q, c, 0, u, K)
r = dmp_mul_term(r, c, 0, u, K)
return q, r
def dmp_prem(f, g, u, K):
"""
Polynomial pseudo-remainder in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_prem(x**2 + x*y, 2*x + 2)
-4*y + 4
"""
if not u:
return dup_prem(f, g, K)
df = dmp_degree(f, u)
dg = dmp_degree(g, u)
if dg < 0:
raise ZeroDivisionError("polynomial division")
r, dr = f, df
if df < dg:
return r
N = df - dg + 1
lc_g = dmp_LC(g, K)
while True:
lc_r = dmp_LC(r, K)
j, N = dr - dg, N - 1
R = dmp_mul_term(r, lc_g, 0, u, K)
G = dmp_mul_term(g, lc_r, j, u, K)
r = dmp_sub(R, G, u, K)
_dr, dr = dr, dmp_degree(r, u)
if dr < dg:
break
elif not (dr < _dr):
raise PolynomialDivisionFailed(f, g, K)
c = dmp_pow(lc_g, N, u - 1, K)
return dmp_mul_term(r, c, 0, u, K)
def dmp_pquo(f, g, u, K):
"""
Polynomial pseudo-quotient in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = x**2 + x*y
>>> g = 2*x + 2*y
>>> h = 2*x + 2
>>> R.dmp_pquo(f, g)
2*x
>>> R.dmp_pquo(f, h)
2*x + 2*y - 2
"""
return dmp_pdiv(f, g, u, K)[0]
def dmp_pexquo(f, g, u, K):
"""
Polynomial exact pseudo-quotient in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = x**2 + x*y
>>> g = 2*x + 2*y
>>> h = 2*x + 2
>>> R.dmp_pexquo(f, g)
2*x
>>> R.dmp_pexquo(f, h)
Traceback (most recent call last):
...
ExactQuotientFailed: [[2], [2]] does not divide [[1], [1, 0], []]
"""
q, r = dmp_pdiv(f, g, u, K)
if dmp_zero_p(r, u):
return q
else:
raise ExactQuotientFailed(f, g)
def dup_rr_div(f, g, K):
"""
Univariate division with remainder over a ring.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_rr_div(x**2 + 1, 2*x - 4)
(0, x**2 + 1)
"""
df = dup_degree(f)
dg = dup_degree(g)
q, r, dr = [], f, df
if not g:
raise ZeroDivisionError("polynomial division")
elif df < dg:
return q, r
lc_g = dup_LC(g, K)
while True:
lc_r = dup_LC(r, K)
if lc_r % lc_g:
break
c = K.exquo(lc_r, lc_g)
j = dr - dg
q = dup_add_term(q, c, j, K)
h = dup_mul_term(g, c, j, K)
r = dup_sub(r, h, K)
_dr, dr = dr, dup_degree(r)
if dr < dg:
break
elif not (dr < _dr):
raise PolynomialDivisionFailed(f, g, K)
return q, r
def dmp_rr_div(f, g, u, K):
"""
Multivariate division with remainder over a ring.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_rr_div(x**2 + x*y, 2*x + 2)
(0, x**2 + x*y)
"""
if not u:
return dup_rr_div(f, g, K)
df = dmp_degree(f, u)
dg = dmp_degree(g, u)
if dg < 0:
raise ZeroDivisionError("polynomial division")
q, r, dr = dmp_zero(u), f, df
if df < dg:
return q, r
lc_g, v = dmp_LC(g, K), u - 1
while True:
lc_r = dmp_LC(r, K)
c, R = dmp_rr_div(lc_r, lc_g, v, K)
if not dmp_zero_p(R, v):
break
j = dr - dg
q = dmp_add_term(q, c, j, u, K)
h = dmp_mul_term(g, c, j, u, K)
r = dmp_sub(r, h, u, K)
_dr, dr = dr, dmp_degree(r, u)
if dr < dg:
break
elif not (dr < _dr):
raise PolynomialDivisionFailed(f, g, K)
return q, r
def dup_ff_div(f, g, K):
"""
Polynomial division with remainder over a field.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> R.dup_ff_div(x**2 + 1, 2*x - 4)
(1/2*x + 1, 5)
"""
df = dup_degree(f)
dg = dup_degree(g)
q, r, dr = [], f, df
if not g:
raise ZeroDivisionError("polynomial division")
elif df < dg:
return q, r
lc_g = dup_LC(g, K)
while True:
lc_r = dup_LC(r, K)
c = K.exquo(lc_r, lc_g)
j = dr - dg
q = dup_add_term(q, c, j, K)
h = dup_mul_term(g, c, j, K)
r = dup_sub(r, h, K)
_dr, dr = dr, dup_degree(r)
if dr < dg:
break
elif dr == _dr and not K.is_Exact:
# remove leading term created by rounding error
r = dup_strip(r[1:])
dr = dup_degree(r)
if dr < dg:
break
elif not (dr < _dr):
raise PolynomialDivisionFailed(f, g, K)
return q, r
def dmp_ff_div(f, g, u, K):
"""
Polynomial division with remainder over a field.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x,y = ring("x,y", QQ)
>>> R.dmp_ff_div(x**2 + x*y, 2*x + 2)
(1/2*x + 1/2*y - 1/2, -y + 1)
"""
if not u:
return dup_ff_div(f, g, K)
df = dmp_degree(f, u)
dg = dmp_degree(g, u)
if dg < 0:
raise ZeroDivisionError("polynomial division")
q, r, dr = dmp_zero(u), f, df
if df < dg:
return q, r
lc_g, v = dmp_LC(g, K), u - 1
while True:
lc_r = dmp_LC(r, K)
c, R = dmp_ff_div(lc_r, lc_g, v, K)
if not dmp_zero_p(R, v):
break
j = dr - dg
q = dmp_add_term(q, c, j, u, K)
h = dmp_mul_term(g, c, j, u, K)
r = dmp_sub(r, h, u, K)
_dr, dr = dr, dmp_degree(r, u)
if dr < dg:
break
elif not (dr < _dr):
raise PolynomialDivisionFailed(f, g, K)
return q, r
def dup_div(f, g, K):
"""
Polynomial division with remainder in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x = ring("x", ZZ)
>>> R.dup_div(x**2 + 1, 2*x - 4)
(0, x**2 + 1)
>>> R, x = ring("x", QQ)
>>> R.dup_div(x**2 + 1, 2*x - 4)
(1/2*x + 1, 5)
"""
if K.is_Field:
return dup_ff_div(f, g, K)
else:
return dup_rr_div(f, g, K)
def dup_rem(f, g, K):
"""
Returns polynomial remainder in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x = ring("x", ZZ)
>>> R.dup_rem(x**2 + 1, 2*x - 4)
x**2 + 1
>>> R, x = ring("x", QQ)
>>> R.dup_rem(x**2 + 1, 2*x - 4)
5
"""
return dup_div(f, g, K)[1]
def dup_quo(f, g, K):
"""
Returns polynomial quotient in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x = ring("x", ZZ)
>>> R.dup_quo(x**2 + 1, 2*x - 4)
0
>>> R, x = ring("x", QQ)
>>> R.dup_quo(x**2 + 1, 2*x - 4)
1/2*x + 1
"""
return dup_div(f, g, K)[0]
def dup_exquo(f, g, K):
"""
Returns exact polynomial quotient in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_exquo(x**2 - 1, x - 1)
x + 1
>>> R.dup_exquo(x**2 + 1, 2*x - 4)
Traceback (most recent call last):
...
ExactQuotientFailed: [2, -4] does not divide [1, 0, 1]
"""
q, r = dup_div(f, g, K)
if not r:
return q
else:
raise ExactQuotientFailed(f, g)
def dmp_div(f, g, u, K):
"""
Polynomial division with remainder in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_div(x**2 + x*y, 2*x + 2)
(0, x**2 + x*y)
>>> R, x,y = ring("x,y", QQ)
>>> R.dmp_div(x**2 + x*y, 2*x + 2)
(1/2*x + 1/2*y - 1/2, -y + 1)
"""
if K.is_Field:
return dmp_ff_div(f, g, u, K)
else:
return dmp_rr_div(f, g, u, K)
def dmp_rem(f, g, u, K):
"""
Returns polynomial remainder in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_rem(x**2 + x*y, 2*x + 2)
x**2 + x*y
>>> R, x,y = ring("x,y", QQ)
>>> R.dmp_rem(x**2 + x*y, 2*x + 2)
-y + 1
"""
return dmp_div(f, g, u, K)[1]
def dmp_quo(f, g, u, K):
"""
Returns polynomial quotient in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_quo(x**2 + x*y, 2*x + 2)
0
>>> R, x,y = ring("x,y", QQ)
>>> R.dmp_quo(x**2 + x*y, 2*x + 2)
1/2*x + 1/2*y - 1/2
"""
return dmp_div(f, g, u, K)[0]
def dmp_exquo(f, g, u, K):
"""
Returns exact polynomial quotient in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = x**2 + x*y
>>> g = x + y
>>> h = 2*x + 2
>>> R.dmp_exquo(f, g)
x
>>> R.dmp_exquo(f, h)
Traceback (most recent call last):
...
ExactQuotientFailed: [[2], [2]] does not divide [[1], [1, 0], []]
"""
q, r = dmp_div(f, g, u, K)
if dmp_zero_p(r, u):
return q
else:
raise ExactQuotientFailed(f, g)
def dup_max_norm(f, K):
"""
Returns maximum norm of a polynomial in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_max_norm(-x**2 + 2*x - 3)
3
"""
if not f:
return K.zero
else:
return max(dup_abs(f, K))
def dmp_max_norm(f, u, K):
"""
Returns maximum norm of a polynomial in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_max_norm(2*x*y - x - 3)
3
"""
if not u:
return dup_max_norm(f, K)
v = u - 1
return max([ dmp_max_norm(c, v, K) for c in f ])
def dup_l1_norm(f, K):
"""
Returns l1 norm of a polynomial in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_l1_norm(2*x**3 - 3*x**2 + 1)
6
"""
if not f:
return K.zero
else:
return sum(dup_abs(f, K))
def dmp_l1_norm(f, u, K):
"""
Returns l1 norm of a polynomial in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_l1_norm(2*x*y - x - 3)
6
"""
if not u:
return dup_l1_norm(f, K)
v = u - 1
return sum([ dmp_l1_norm(c, v, K) for c in f ])
def dup_expand(polys, K):
"""
Multiply together several polynomials in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_expand([x**2 - 1, x, 2])
2*x**3 - 2*x
"""
if not polys:
return [K.one]
f = polys[0]
for g in polys[1:]:
f = dup_mul(f, g, K)
return f
def dmp_expand(polys, u, K):
"""
Multiply together several polynomials in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_expand([x**2 + y**2, x + 1])
x**3 + x**2 + x*y**2 + y**2
"""
if not polys:
return dmp_one(u, K)
f = polys[0]
for g in polys[1:]:
f = dmp_mul(f, g, u, K)
return f
|
ba48dc1290b99adbcc095143d1a25fe785d0abf05ebd4322667a8d378b701a49 | """Dense univariate polynomials with coefficients in Galois fields. """
from __future__ import print_function, division
from random import uniform
from math import ceil as _ceil, sqrt as _sqrt
from sympy.core.compatibility import SYMPY_INTS
from sympy.core.mul import prod
from sympy.ntheory import factorint
from sympy.polys.polyconfig import query
from sympy.polys.polyerrors import ExactQuotientFailed
from sympy.polys.polyutils import _sort_factors
def gf_crt(U, M, K=None):
"""
Chinese Remainder Theorem.
Given a set of integer residues ``u_0,...,u_n`` and a set of
co-prime integer moduli ``m_0,...,m_n``, returns an integer
``u``, such that ``u = u_i mod m_i`` for ``i = 0, ..., n``.
Examples
========
Consider a set of residues ``U = [49, 76, 65]``
and a set of moduli ``M = [99, 97, 95]``. Then we have::
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_crt
>>> from sympy.ntheory.modular import solve_congruence
>>> gf_crt([49, 76, 65], [99, 97, 95], ZZ)
639985
This is the correct result because::
>>> [639985 % m for m in [99, 97, 95]]
[49, 76, 65]
Note: this is a low-level routine with no error checking.
See Also
========
sympy.ntheory.modular.crt : a higher level crt routine
sympy.ntheory.modular.solve_congruence
"""
p = prod(M, start=K.one)
v = K.zero
for u, m in zip(U, M):
e = p // m
s, _, _ = K.gcdex(e, m)
v += e*(u*s % m)
return v % p
def gf_crt1(M, K):
"""
First part of the Chinese Remainder Theorem.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_crt1
>>> gf_crt1([99, 97, 95], ZZ)
(912285, [9215, 9405, 9603], [62, 24, 12])
"""
E, S = [], []
p = prod(M, start=K.one)
for m in M:
E.append(p // m)
S.append(K.gcdex(E[-1], m)[0] % m)
return p, E, S
def gf_crt2(U, M, p, E, S, K):
"""
Second part of the Chinese Remainder Theorem.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_crt2
>>> U = [49, 76, 65]
>>> M = [99, 97, 95]
>>> p = 912285
>>> E = [9215, 9405, 9603]
>>> S = [62, 24, 12]
>>> gf_crt2(U, M, p, E, S, ZZ)
639985
"""
v = K.zero
for u, m, e, s in zip(U, M, E, S):
v += e*(u*s % m)
return v % p
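# gf_crt1/gf_crt2 split the Chinese Remainder computation so that the part
# depending only on the moduli (the product ``p``, the cofactors ``E`` and
# the inverses ``S``) can be precomputed once and reused for many residue
# vectors.  Composing them reproduces gf_crt, e.g. (values taken from the
# docstrings above):
#
#     p, E, S = gf_crt1([99, 97, 95], ZZ)
#     gf_crt2([49, 76, 65], [99, 97, 95], p, E, S, ZZ)  # -> 639985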
def gf_int(a, p):
"""
Coerce ``a mod p`` to an integer in the range ``[-p/2, p/2]``.
Examples
========
>>> from sympy.polys.galoistools import gf_int
>>> gf_int(2, 7)
2
>>> gf_int(5, 7)
-2
"""
if a <= p // 2:
return a
else:
return a - p
def gf_degree(f):
"""
Return the leading degree of ``f``.
Examples
========
>>> from sympy.polys.galoistools import gf_degree
>>> gf_degree([1, 1, 2, 0])
3
>>> gf_degree([])
-1
"""
return len(f) - 1
def gf_LC(f, K):
"""
Return the leading coefficient of ``f``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_LC
>>> gf_LC([3, 0, 1], ZZ)
3
"""
if not f:
return K.zero
else:
return f[0]
def gf_TC(f, K):
"""
Return the trailing coefficient of ``f``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_TC
>>> gf_TC([3, 0, 1], ZZ)
1
"""
if not f:
return K.zero
else:
return f[-1]
def gf_strip(f):
"""
Remove leading zeros from ``f``.
Examples
========
>>> from sympy.polys.galoistools import gf_strip
>>> gf_strip([0, 0, 0, 3, 0, 1])
[3, 0, 1]
"""
if not f or f[0]:
return f
k = 0
for coeff in f:
if coeff:
break
else:
k += 1
return f[k:]
def gf_trunc(f, p):
"""
Reduce all coefficients modulo ``p``.
Examples
========
>>> from sympy.polys.galoistools import gf_trunc
>>> gf_trunc([7, -2, 3], 5)
[2, 3, 3]
"""
return gf_strip([ a % p for a in f ])
def gf_normal(f, p, K):
"""
Normalize all coefficients in ``K``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_normal
>>> gf_normal([5, 10, 21, -3], 5, ZZ)
[1, 2]
"""
return gf_trunc(list(map(K, f)), p)
def gf_from_dict(f, p, K):
"""
Create a ``GF(p)[x]`` polynomial from a dict.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_from_dict
>>> gf_from_dict({10: ZZ(4), 4: ZZ(33), 0: ZZ(-1)}, 5, ZZ)
[4, 0, 0, 0, 0, 0, 3, 0, 0, 0, 4]
"""
n, h = max(f.keys()), []
if isinstance(n, SYMPY_INTS):
for k in range(n, -1, -1):
h.append(f.get(k, K.zero) % p)
else:
(n,) = n
for k in range(n, -1, -1):
h.append(f.get((k,), K.zero) % p)
return gf_trunc(h, p)
def gf_to_dict(f, p, symmetric=True):
"""
Convert a ``GF(p)[x]`` polynomial to a dict.
Examples
========
>>> from sympy.polys.galoistools import gf_to_dict
>>> gf_to_dict([4, 0, 0, 0, 0, 0, 3, 0, 0, 0, 4], 5)
{0: -1, 4: -2, 10: -1}
>>> gf_to_dict([4, 0, 0, 0, 0, 0, 3, 0, 0, 0, 4], 5, symmetric=False)
{0: 4, 4: 3, 10: 4}
"""
n, result = gf_degree(f), {}
for k in range(0, n + 1):
if symmetric:
a = gf_int(f[n - k], p)
else:
a = f[n - k]
if a:
result[k] = a
return result
def gf_from_int_poly(f, p):
"""
Create a ``GF(p)[x]`` polynomial from ``Z[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_from_int_poly
>>> gf_from_int_poly([7, -2, 3], 5)
[2, 3, 3]
"""
return gf_trunc(f, p)
def gf_to_int_poly(f, p, symmetric=True):
"""
Convert a ``GF(p)[x]`` polynomial to ``Z[x]``.
Examples
========
>>> from sympy.polys.galoistools import gf_to_int_poly
>>> gf_to_int_poly([2, 3, 3], 5)
[2, -2, -2]
>>> gf_to_int_poly([2, 3, 3], 5, symmetric=False)
[2, 3, 3]
"""
if symmetric:
return [ gf_int(c, p) for c in f ]
else:
return f
def gf_neg(f, p, K):
"""
Negate a polynomial in ``GF(p)[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_neg
>>> gf_neg([3, 2, 1, 0], 5, ZZ)
[2, 3, 4, 0]
"""
return [ -coeff % p for coeff in f ]
def gf_add_ground(f, a, p, K):
"""
Compute ``f + a`` where ``f`` in ``GF(p)[x]`` and ``a`` in ``GF(p)``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_add_ground
>>> gf_add_ground([3, 2, 4], 2, 5, ZZ)
[3, 2, 1]
"""
if not f:
a = a % p
else:
a = (f[-1] + a) % p
if len(f) > 1:
return f[:-1] + [a]
if not a:
return []
else:
return [a]
def gf_sub_ground(f, a, p, K):
"""
Compute ``f - a`` where ``f`` in ``GF(p)[x]`` and ``a`` in ``GF(p)``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_sub_ground
>>> gf_sub_ground([3, 2, 4], 2, 5, ZZ)
[3, 2, 2]
"""
if not f:
a = -a % p
else:
a = (f[-1] - a) % p
if len(f) > 1:
return f[:-1] + [a]
if not a:
return []
else:
return [a]
def gf_mul_ground(f, a, p, K):
"""
Compute ``f * a`` where ``f`` in ``GF(p)[x]`` and ``a`` in ``GF(p)``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_mul_ground
>>> gf_mul_ground([3, 2, 4], 2, 5, ZZ)
[1, 4, 3]
"""
if not a:
return []
else:
return [ (a*b) % p for b in f ]
def gf_quo_ground(f, a, p, K):
"""
Compute ``f/a`` where ``f`` in ``GF(p)[x]`` and ``a`` in ``GF(p)``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_quo_ground
>>> gf_quo_ground(ZZ.map([3, 2, 4]), ZZ(2), 5, ZZ)
[4, 1, 2]
"""
return gf_mul_ground(f, K.invert(a, p), p, K)
def gf_add(f, g, p, K):
"""
Add polynomials in ``GF(p)[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_add
>>> gf_add([3, 2, 4], [2, 2, 2], 5, ZZ)
[4, 1]
"""
if not f:
return g
if not g:
return f
df = gf_degree(f)
dg = gf_degree(g)
if df == dg:
return gf_strip([ (a + b) % p for a, b in zip(f, g) ])
else:
k = abs(df - dg)
if df > dg:
h, f = f[:k], f[k:]
else:
h, g = g[:k], g[k:]
return h + [ (a + b) % p for a, b in zip(f, g) ]
def gf_sub(f, g, p, K):
"""
Subtract polynomials in ``GF(p)[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_sub
>>> gf_sub([3, 2, 4], [2, 2, 2], 5, ZZ)
[1, 0, 2]
"""
if not g:
return f
if not f:
return gf_neg(g, p, K)
df = gf_degree(f)
dg = gf_degree(g)
if df == dg:
return gf_strip([ (a - b) % p for a, b in zip(f, g) ])
else:
k = abs(df - dg)
if df > dg:
h, f = f[:k], f[k:]
else:
h, g = gf_neg(g[:k], p, K), g[k:]
return h + [ (a - b) % p for a, b in zip(f, g) ]
def gf_mul(f, g, p, K):
"""
Multiply polynomials in ``GF(p)[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_mul
>>> gf_mul([3, 2, 4], [2, 2, 2], 5, ZZ)
[1, 0, 3, 2, 3]
"""
df = gf_degree(f)
dg = gf_degree(g)
dh = df + dg
h = [0]*(dh + 1)
for i in range(0, dh + 1):
coeff = K.zero
for j in range(max(0, i - dg), min(i, df) + 1):
coeff += f[j]*g[i - j]
h[i] = coeff % p
return gf_strip(h)
def gf_sqr(f, p, K):
"""
Square polynomials in ``GF(p)[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_sqr
>>> gf_sqr([3, 2, 4], 5, ZZ)
[4, 2, 3, 1, 1]
"""
df = gf_degree(f)
dh = 2*df
h = [0]*(dh + 1)
for i in range(0, dh + 1):
coeff = K.zero
jmin = max(0, i - df)
jmax = min(i, df)
n = jmax - jmin + 1
jmax = jmin + n // 2 - 1
for j in range(jmin, jmax + 1):
coeff += f[j]*f[i - j]
coeff += coeff
if n & 1:
elem = f[jmax + 1]
coeff += elem**2
h[i] = coeff % p
return gf_strip(h)
def gf_add_mul(f, g, h, p, K):
"""
Returns ``f + g*h`` where ``f``, ``g``, ``h`` in ``GF(p)[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_add_mul
>>> gf_add_mul([3, 2, 4], [2, 2, 2], [1, 4], 5, ZZ)
[2, 3, 2, 2]
"""
return gf_add(f, gf_mul(g, h, p, K), p, K)
def gf_sub_mul(f, g, h, p, K):
"""
Compute ``f - g*h`` where ``f``, ``g``, ``h`` in ``GF(p)[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_sub_mul
>>> gf_sub_mul([3, 2, 4], [2, 2, 2], [1, 4], 5, ZZ)
[3, 3, 2, 1]
"""
return gf_sub(f, gf_mul(g, h, p, K), p, K)
def gf_expand(F, p, K):
"""
Expand results of :func:`~.factor` in ``GF(p)[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_expand
>>> gf_expand([([3, 2, 4], 1), ([2, 2], 2), ([3, 1], 3)], 5, ZZ)
[4, 3, 0, 3, 0, 1, 4, 1]
"""
if type(F) is tuple:
lc, F = F
else:
lc = K.one
g = [lc]
for f, k in F:
f = gf_pow(f, k, p, K)
g = gf_mul(g, f, p, K)
return g
def gf_div(f, g, p, K):
"""
Division with remainder in ``GF(p)[x]``.
Given univariate polynomials ``f`` and ``g`` with coefficients in a
finite field with ``p`` elements, returns polynomials ``q`` and ``r``
(quotient and remainder) such that ``f = q*g + r``.
Consider polynomials ``x**3 + x + 1`` and ``x**2 + x`` in GF(2)::
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_div, gf_add_mul
>>> gf_div(ZZ.map([1, 0, 1, 1]), ZZ.map([1, 1, 0]), 2, ZZ)
([1, 1], [1])
As a result we obtained quotient ``x + 1`` and remainder ``1``, thus::
>>> gf_add_mul(ZZ.map([1]), ZZ.map([1, 1]), ZZ.map([1, 1, 0]), 2, ZZ)
[1, 0, 1, 1]
References
==========
.. [1] [Monagan93]_
.. [2] [Gathen99]_
"""
df = gf_degree(f)
dg = gf_degree(g)
if not g:
raise ZeroDivisionError("polynomial division")
elif df < dg:
return [], f
inv = K.invert(g[0], p)
h, dq, dr = list(f), df - dg, dg - 1
for i in range(0, df + 1):
coeff = h[i]
for j in range(max(0, dg - i), min(df - i, dr) + 1):
coeff -= h[i + j - dg] * g[dg - j]
if i <= dq:
coeff *= inv
h[i] = coeff % p
return h[:dq + 1], gf_strip(h[dq + 1:])
def gf_rem(f, g, p, K):
"""
Compute polynomial remainder in ``GF(p)[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_rem
>>> gf_rem(ZZ.map([1, 0, 1, 1]), ZZ.map([1, 1, 0]), 2, ZZ)
[1]
"""
return gf_div(f, g, p, K)[1]
def gf_quo(f, g, p, K):
"""
Compute exact quotient in ``GF(p)[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_quo
>>> gf_quo(ZZ.map([1, 0, 1, 1]), ZZ.map([1, 1, 0]), 2, ZZ)
[1, 1]
>>> gf_quo(ZZ.map([1, 0, 3, 2, 3]), ZZ.map([2, 2, 2]), 5, ZZ)
[3, 2, 4]
"""
df = gf_degree(f)
dg = gf_degree(g)
if not g:
raise ZeroDivisionError("polynomial division")
elif df < dg:
return []
inv = K.invert(g[0], p)
h, dq, dr = f[:], df - dg, dg - 1
for i in range(0, dq + 1):
coeff = h[i]
for j in range(max(0, dg - i), min(df - i, dr) + 1):
coeff -= h[i + j - dg] * g[dg - j]
h[i] = (coeff * inv) % p
return h[:dq + 1]
def gf_exquo(f, g, p, K):
"""
Compute polynomial quotient in ``GF(p)[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_exquo
>>> gf_exquo(ZZ.map([1, 0, 3, 2, 3]), ZZ.map([2, 2, 2]), 5, ZZ)
[3, 2, 4]
>>> gf_exquo(ZZ.map([1, 0, 1, 1]), ZZ.map([1, 1, 0]), 2, ZZ)
Traceback (most recent call last):
...
ExactQuotientFailed: [1, 1, 0] does not divide [1, 0, 1, 1]
"""
q, r = gf_div(f, g, p, K)
if not r:
return q
else:
raise ExactQuotientFailed(f, g)
def gf_lshift(f, n, K):
"""
Efficiently multiply ``f`` by ``x**n``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_lshift
>>> gf_lshift([3, 2, 4], 4, ZZ)
[3, 2, 4, 0, 0, 0, 0]
"""
if not f:
return f
else:
return f + [K.zero]*n
def gf_rshift(f, n, K):
"""
Efficiently divide ``f`` by ``x**n``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_rshift
>>> gf_rshift([1, 2, 3, 4, 0], 3, ZZ)
([1, 2], [3, 4, 0])
"""
if not n:
return f, []
else:
return f[:-n], f[-n:]
def gf_pow(f, n, p, K):
"""
Compute ``f**n`` in ``GF(p)[x]`` using repeated squaring.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_pow
>>> gf_pow([3, 2, 4], 3, 5, ZZ)
[2, 4, 4, 2, 2, 1, 4]
"""
if not n:
return [K.one]
elif n == 1:
return f
elif n == 2:
return gf_sqr(f, p, K)
h = [K.one]
while True:
if n & 1:
h = gf_mul(h, f, p, K)
n -= 1
n >>= 1
if not n:
break
f = gf_sqr(f, p, K)
return h
def gf_frobenius_monomial_base(g, p, K):
"""
Return the list of ``x**(i*p) mod g`` in ``Z_p`` for ``i = 0, ..., n - 1``,
where ``n = gf_degree(g)``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_frobenius_monomial_base
>>> g = ZZ.map([1, 0, 2, 1])
>>> gf_frobenius_monomial_base(g, 5, ZZ)
[[1], [4, 4, 2], [1, 2]]
"""
n = gf_degree(g)
if n == 0:
return []
b = [0]*n
b[0] = [1]
if p < n:
for i in range(1, n):
mon = gf_lshift(b[i - 1], p, K)
b[i] = gf_rem(mon, g, p, K)
elif n > 1:
b[1] = gf_pow_mod([K.one, K.zero], p, g, p, K)
for i in range(2, n):
b[i] = gf_mul(b[i - 1], b[1], p, K)
b[i] = gf_rem(b[i], g, p, K)
return b
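# The Frobenius map a -> a**p is linear over GF(p), so once the monomial
# base above (the residues x**(i*p) mod g) is known, f**p mod g can be
# assembled as the GF(p)-linear combination sum_i f_i * (x**(i*p) mod g);
# that is exactly what gf_frobenius_map below does, replacing a full
# gf_pow_mod call by deg(f) scalar multiplications and additions.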
def gf_frobenius_map(f, g, b, p, K):
"""
Compute ``gf_pow_mod(f, p, g, p, K)`` using the Frobenius map.
Parameters
==========
f, g : polynomials in ``GF(p)[x]``
b : frobenius monomial base
p : prime number
K : domain
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_frobenius_monomial_base, gf_frobenius_map
>>> f = ZZ.map([2, 1 , 0, 1])
>>> g = ZZ.map([1, 0, 2, 1])
>>> p = 5
>>> b = gf_frobenius_monomial_base(g, p, ZZ)
>>> r = gf_frobenius_map(f, g, b, p, ZZ)
>>> gf_frobenius_map(f, g, b, p, ZZ)
[4, 0, 3]
"""
m = gf_degree(g)
if gf_degree(f) >= m:
f = gf_rem(f, g, p, K)
if not f:
return []
n = gf_degree(f)
sf = [f[-1]]
for i in range(1, n + 1):
v = gf_mul_ground(b[i], f[n - i], p, K)
sf = gf_add(sf, v, p, K)
return sf
def _gf_pow_pnm1d2(f, n, g, b, p, K):
"""
Utility function for ``gf_edf_zassenhaus``.
Compute ``f**((p**n - 1) // 2)`` in ``GF(p)[x]/(g)`` using the identity
``f**((p**n - 1) // 2) = (f * f**p * ... * f**(p**(n - 1)))**((p - 1) // 2)``.
"""
f = gf_rem(f, g, p, K)
h = f
r = f
for i in range(1, n):
h = gf_frobenius_map(h, g, b, p, K)
r = gf_mul(r, h, p, K)
r = gf_rem(r, g, p, K)
res = gf_pow_mod(r, (p - 1)//2, g, p, K)
return res
def gf_pow_mod(f, n, g, p, K):
"""
Compute ``f**n`` in ``GF(p)[x]/(g)`` using repeated squaring.
Given polynomials ``f`` and ``g`` in ``GF(p)[x]`` and a non-negative
integer ``n``, efficiently computes ``f**n (mod g)`` i.e. the remainder
of ``f**n`` from division by ``g``, using the repeated squaring algorithm.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_pow_mod
>>> gf_pow_mod(ZZ.map([3, 2, 4]), 3, ZZ.map([1, 1]), 5, ZZ)
[]
References
==========
.. [1] [Gathen99]_
"""
if not n:
return [K.one]
elif n == 1:
return gf_rem(f, g, p, K)
elif n == 2:
return gf_rem(gf_sqr(f, p, K), g, p, K)
h = [K.one]
while True:
if n & 1:
h = gf_mul(h, f, p, K)
h = gf_rem(h, g, p, K)
n -= 1
n >>= 1
if not n:
break
f = gf_sqr(f, p, K)
f = gf_rem(f, g, p, K)
return h
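# gf_pow_mod above is the same square-and-multiply scheme as gf_pow, but
# both the accumulator ``h`` and the base ``f`` are reduced modulo ``g``
# after every step, so intermediate degrees never exceed deg(g) and the
# cost grows with deg(g) and log(n) rather than with n itself.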
def gf_gcd(f, g, p, K):
"""
Euclidean Algorithm in ``GF(p)[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_gcd
>>> gf_gcd(ZZ.map([3, 2, 4]), ZZ.map([2, 2, 3]), 5, ZZ)
[1, 3]
"""
while g:
f, g = g, gf_rem(f, g, p, K)
return gf_monic(f, p, K)[1]
def gf_lcm(f, g, p, K):
"""
Compute polynomial LCM in ``GF(p)[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_lcm
>>> gf_lcm(ZZ.map([3, 2, 4]), ZZ.map([2, 2, 3]), 5, ZZ)
[1, 2, 0, 4]
"""
if not f or not g:
return []
h = gf_quo(gf_mul(f, g, p, K),
gf_gcd(f, g, p, K), p, K)
return gf_monic(h, p, K)[1]
def gf_cofactors(f, g, p, K):
"""
Compute polynomial GCD and cofactors in ``GF(p)[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_cofactors
>>> gf_cofactors(ZZ.map([3, 2, 4]), ZZ.map([2, 2, 3]), 5, ZZ)
([1, 3], [3, 3], [2, 1])
"""
if not f and not g:
return ([], [], [])
h = gf_gcd(f, g, p, K)
return (h, gf_quo(f, h, p, K),
gf_quo(g, h, p, K))
def gf_gcdex(f, g, p, K):
"""
Extended Euclidean Algorithm in ``GF(p)[x]``.
Given polynomials ``f`` and ``g`` in ``GF(p)[x]``, computes polynomials
``s``, ``t`` and ``h``, such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.
The typical application of EEA is solving polynomial diophantine equations.
Consider polynomials ``f = (x + 7) (x + 1)``, ``g = (x + 7) (x**2 + 1)``
in ``GF(11)[x]``. Application of Extended Euclidean Algorithm gives::
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_gcdex, gf_mul, gf_add
>>> s, t, g = gf_gcdex(ZZ.map([1, 8, 7]), ZZ.map([1, 7, 1, 7]), 11, ZZ)
>>> s, t, g
([5, 6], [6], [1, 7])
As a result we obtained polynomials ``s = 5*x + 6`` and ``t = 6``, and
additionally ``gcd(f, g) = x + 7``. This is correct because::
>>> S = gf_mul(s, ZZ.map([1, 8, 7]), 11, ZZ)
>>> T = gf_mul(t, ZZ.map([1, 7, 1, 7]), 11, ZZ)
>>> gf_add(S, T, 11, ZZ) == [1, 7]
True
References
==========
.. [1] [Gathen99]_
"""
if not (f or g):
return [K.one], [], []
p0, r0 = gf_monic(f, p, K)
p1, r1 = gf_monic(g, p, K)
if not f:
return [], [K.invert(p1, p)], r1
if not g:
return [K.invert(p0, p)], [], r0
s0, s1 = [K.invert(p0, p)], []
t0, t1 = [], [K.invert(p1, p)]
while True:
Q, R = gf_div(r0, r1, p, K)
if not R:
break
(lc, r1), r0 = gf_monic(R, p, K), r1
inv = K.invert(lc, p)
s = gf_sub_mul(s0, s1, Q, p, K)
t = gf_sub_mul(t0, t1, Q, p, K)
s1, s0 = gf_mul_ground(s, inv, p, K), s1
t1, t0 = gf_mul_ground(t, inv, p, K), t1
return s1, t1, r1
def gf_monic(f, p, K):
"""
Compute LC and a monic polynomial in ``GF(p)[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_monic
>>> gf_monic(ZZ.map([3, 2, 4]), 5, ZZ)
(3, [1, 4, 3])
"""
if not f:
return K.zero, []
else:
lc = f[0]
if K.is_one(lc):
return lc, list(f)
else:
return lc, gf_quo_ground(f, lc, p, K)
def gf_diff(f, p, K):
"""
Differentiate polynomial in ``GF(p)[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_diff
>>> gf_diff([3, 2, 4], 5, ZZ)
[1, 2]
"""
df = gf_degree(f)
h, n = [K.zero]*df, df
for coeff in f[:-1]:
coeff *= K(n)
coeff %= p
if coeff:
h[df - n] = coeff
n -= 1
return gf_strip(h)
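# Differentiation in GF(p)[x] multiplies each coefficient by its exponent
# modulo p, so the derivative of x**k contributes k*f_k; in particular a
# polynomial in x**p (e.g. x**11 + 1 over GF(11)) has zero derivative,
# which is the case gf_sqf_list handles specially further below.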
def gf_eval(f, a, p, K):
"""
Evaluate ``f(a)`` in ``GF(p)`` using Horner scheme.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_eval
>>> gf_eval([3, 2, 4], 2, 5, ZZ)
0
"""
result = K.zero
for c in f:
result *= a
result += c
result %= p
return result
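# Horner evaluation as used above, worked for the doctest values
# f = 3*x**2 + 2*x + 4 at a = 2 over GF(5):
#     0*2 + 3 = 3;  3*2 + 2 = 8 = 3 (mod 5);  3*2 + 4 = 10 = 0 (mod 5),
# matching the documented result 0.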
def gf_multi_eval(f, A, p, K):
"""
Evaluate ``f(a)`` for ``a`` in ``[a_1, ..., a_n]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_multi_eval
>>> gf_multi_eval([3, 2, 4], [0, 1, 2, 3, 4], 5, ZZ)
[4, 4, 0, 2, 0]
"""
return [ gf_eval(f, a, p, K) for a in A ]
def gf_compose(f, g, p, K):
"""
Compute polynomial composition ``f(g)`` in ``GF(p)[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_compose
>>> gf_compose([3, 2, 4], [2, 2, 2], 5, ZZ)
[2, 4, 0, 3, 0]
"""
if len(g) <= 1:
return gf_strip([gf_eval(f, gf_LC(g, K), p, K)])
if not f:
return []
h = [f[0]]
for c in f[1:]:
h = gf_mul(h, g, p, K)
h = gf_add_ground(h, c, p, K)
return h
def gf_compose_mod(g, h, f, p, K):
"""
Compute polynomial composition ``g(h)`` in ``GF(p)[x]/(f)``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_compose_mod
>>> gf_compose_mod(ZZ.map([3, 2, 4]), ZZ.map([2, 2, 2]), ZZ.map([4, 3]), 5, ZZ)
[4]
"""
if not g:
return []
comp = [g[0]]
for a in g[1:]:
comp = gf_mul(comp, h, p, K)
comp = gf_add_ground(comp, a, p, K)
comp = gf_rem(comp, f, p, K)
return comp
def gf_trace_map(a, b, c, n, f, p, K):
"""
Compute polynomial trace map in ``GF(p)[x]/(f)``.
Given a polynomial ``f`` in ``GF(p)[x]``, polynomials ``a``, ``b``,
``c`` in the quotient ring ``GF(p)[x]/(f)`` such that ``b = c**t
(mod f)`` for some positive power ``t`` of ``p``, and a positive
integer ``n``, returns a mapping::
a -> a**t**n, a + a**t + a**t**2 + ... + a**t**n (mod f)
In factorization context, ``b = x**p mod f`` and ``c = x mod f``.
This way we can efficiently compute trace polynomials in the equal
degree factorization routine, much faster than with other methods,
like the iterated Frobenius algorithm, for large degrees.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_trace_map
>>> gf_trace_map([1, 2], [4, 4], [1, 1], 4, [3, 2, 4], 5, ZZ)
([1, 3], [1, 3])
References
==========
.. [1] [Gathen92]_
"""
u = gf_compose_mod(a, b, f, p, K)
v = b
if n & 1:
U = gf_add(a, u, p, K)
V = b
else:
U = a
V = c
n >>= 1
while n:
u = gf_add(u, gf_compose_mod(u, v, f, p, K), p, K)
v = gf_compose_mod(v, v, f, p, K)
if n & 1:
U = gf_add(U, gf_compose_mod(u, V, f, p, K), p, K)
V = gf_compose_mod(v, V, f, p, K)
n >>= 1
return gf_compose_mod(a, V, f, p, K), U
def _gf_trace_map(f, n, g, b, p, K):
"""
utility for ``gf_edf_shoup``
"""
f = gf_rem(f, g, p, K)
h = f
r = f
for i in range(1, n):
h = gf_frobenius_map(h, g, b, p, K)
r = gf_add(r, h, p, K)
r = gf_rem(r, g, p, K)
return r
def gf_random(n, p, K):
"""
Generate a random polynomial in ``GF(p)[x]`` of degree ``n``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_random
>>> gf_random(10, 5, ZZ) #doctest: +SKIP
[1, 2, 3, 2, 1, 1, 1, 2, 0, 4, 2]
"""
return [K.one] + [ K(int(uniform(0, p))) for i in range(0, n) ]
def gf_irreducible(n, p, K):
"""
Generate random irreducible polynomial of degree ``n`` in ``GF(p)[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_irreducible
>>> gf_irreducible(10, 5, ZZ) #doctest: +SKIP
[1, 4, 2, 2, 3, 2, 4, 1, 4, 0, 4]
"""
while True:
f = gf_random(n, p, K)
if gf_irreducible_p(f, p, K):
return f
def gf_irred_p_ben_or(f, p, K):
"""
Ben-Or's polynomial irreducibility test over finite fields.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_irred_p_ben_or
>>> gf_irred_p_ben_or(ZZ.map([1, 4, 2, 2, 3, 2, 4, 1, 4, 0, 4]), 5, ZZ)
True
>>> gf_irred_p_ben_or(ZZ.map([3, 2, 4]), 5, ZZ)
False
"""
n = gf_degree(f)
if n <= 1:
return True
_, f = gf_monic(f, p, K)
if n < 5:
H = h = gf_pow_mod([K.one, K.zero], p, f, p, K)
for i in range(0, n//2):
g = gf_sub(h, [K.one, K.zero], p, K)
if gf_gcd(f, g, p, K) == [K.one]:
h = gf_compose_mod(h, H, f, p, K)
else:
return False
else:
b = gf_frobenius_monomial_base(f, p, K)
H = h = gf_frobenius_map([K.one, K.zero], f, b, p, K)
for i in range(0, n//2):
g = gf_sub(h, [K.one, K.zero], p, K)
if gf_gcd(f, g, p, K) == [K.one]:
h = gf_frobenius_map(h, f, b, p, K)
else:
return False
return True
def gf_irred_p_rabin(f, p, K):
"""
Rabin's polynomial irreducibility test over finite fields.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_irred_p_rabin
>>> gf_irred_p_rabin(ZZ.map([1, 4, 2, 2, 3, 2, 4, 1, 4, 0, 4]), 5, ZZ)
True
>>> gf_irred_p_rabin(ZZ.map([3, 2, 4]), 5, ZZ)
False
"""
n = gf_degree(f)
if n <= 1:
return True
_, f = gf_monic(f, p, K)
x = [K.one, K.zero]
indices = { n//d for d in factorint(n) }
b = gf_frobenius_monomial_base(f, p, K)
h = b[1]
for i in range(1, n):
if i in indices:
g = gf_sub(h, x, p, K)
if gf_gcd(f, g, p, K) != [K.one]:
return False
h = gf_frobenius_map(h, f, b, p, K)
return h == x
_irred_methods = {
'ben-or': gf_irred_p_ben_or,
'rabin': gf_irred_p_rabin,
}
def gf_irreducible_p(f, p, K):
"""
Test irreducibility of a polynomial ``f`` in ``GF(p)[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_irreducible_p
>>> gf_irreducible_p(ZZ.map([1, 4, 2, 2, 3, 2, 4, 1, 4, 0, 4]), 5, ZZ)
True
>>> gf_irreducible_p(ZZ.map([3, 2, 4]), 5, ZZ)
False
"""
method = query('GF_IRRED_METHOD')
if method is not None:
irred = _irred_methods[method](f, p, K)
else:
irred = gf_irred_p_rabin(f, p, K)
return irred
def gf_sqf_p(f, p, K):
"""
Return ``True`` if ``f`` is square-free in ``GF(p)[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_sqf_p
>>> gf_sqf_p(ZZ.map([3, 2, 4]), 5, ZZ)
True
>>> gf_sqf_p(ZZ.map([2, 4, 4, 2, 2, 1, 4]), 5, ZZ)
False
"""
_, f = gf_monic(f, p, K)
if not f:
return True
else:
return gf_gcd(f, gf_diff(f, p, K), p, K) == [K.one]
def gf_sqf_part(f, p, K):
"""
Return square-free part of a ``GF(p)[x]`` polynomial.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_sqf_part
>>> gf_sqf_part(ZZ.map([1, 1, 3, 0, 1, 0, 2, 2, 1]), 5, ZZ)
[1, 4, 3]
"""
_, sqf = gf_sqf_list(f, p, K)
g = [K.one]
for f, _ in sqf:
g = gf_mul(g, f, p, K)
return g
def gf_sqf_list(f, p, K, all=False):
"""
Return the square-free decomposition of a ``GF(p)[x]`` polynomial.
Given a polynomial ``f`` in ``GF(p)[x]``, returns the leading coefficient
of ``f`` and a square-free decomposition ``f_1**e_1 f_2**e_2 ... f_k**e_k``
such that all ``f_i`` are monic polynomials and ``(f_i, f_j)`` for ``i != j``
are co-prime and ``e_1 ... e_k`` are given in increasing order. All trivial
terms (i.e. ``f_i = 1``) aren't included in the output.
Consider polynomial ``f = x**11 + 1`` over ``GF(11)[x]``::
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import (
... gf_from_dict, gf_diff, gf_sqf_list, gf_pow,
... )
... # doctest: +NORMALIZE_WHITESPACE
>>> f = gf_from_dict({11: ZZ(1), 0: ZZ(1)}, 11, ZZ)
Note that ``f'(x) = 0``::
>>> gf_diff(f, 11, ZZ)
[]
This phenomenon doesn't happen in characteristic zero. However we can
still compute the square-free decomposition of ``f`` using ``gf_sqf_list()``::
>>> gf_sqf_list(f, 11, ZZ)
(1, [([1, 1], 11)])
We obtained factorization ``f = (x + 1)**11``. This is correct because::
>>> gf_pow([1, 1], 11, 11, ZZ) == f
True
References
==========
.. [1] [Geddes92]_
"""
n, sqf, factors, r = 1, False, [], int(p)
lc, f = gf_monic(f, p, K)
if gf_degree(f) < 1:
return lc, []
while True:
F = gf_diff(f, p, K)
if F != []:
g = gf_gcd(f, F, p, K)
h = gf_quo(f, g, p, K)
i = 1
while h != [K.one]:
G = gf_gcd(g, h, p, K)
H = gf_quo(h, G, p, K)
if gf_degree(H) > 0:
factors.append((H, i*n))
g, h, i = gf_quo(g, G, p, K), G, i + 1
if g == [K.one]:
sqf = True
else:
f = g
if not sqf:
d = gf_degree(f) // r
for i in range(0, d + 1):
f[i] = f[i*r]
f, n = f[:d + 1], n*r
else:
break
if all:
raise ValueError("'all=True' is not supported yet")
return lc, factors
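# When the derivative vanishes in the loop above (the ``not sqf`` branch),
# f is a polynomial in x**p, i.e. f = sum a_i * x**(i*p).  Since a**p == a
# in GF(p), a p-th root is obtained simply by re-indexing the coefficients,
# f[i] = f[i*r]; the multiplicity counter ``n`` is multiplied by r = p to
# account for the extracted p-th power.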
def gf_Qmatrix(f, p, K):
"""
Calculate Berlekamp's ``Q`` matrix.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_Qmatrix
>>> gf_Qmatrix([3, 2, 4], 5, ZZ)
[[1, 0],
[3, 4]]
>>> gf_Qmatrix([1, 0, 0, 0, 1], 5, ZZ)
[[1, 0, 0, 0],
[0, 4, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 4]]
"""
n, r = gf_degree(f), int(p)
q = [K.one] + [K.zero]*(n - 1)
Q = [list(q)] + [[]]*(n - 1)
for i in range(1, (n - 1)*r + 1):
qq, c = [(-q[-1]*f[-1]) % p], q[-1]
for j in range(1, n):
qq.append((q[j - 1] - c*f[-j - 1]) % p)
if not (i % r):
Q[i//r] = list(qq)
q = qq
return Q
def gf_Qbasis(Q, p, K):
"""
Compute a basis of the kernel of ``Q``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_Qmatrix, gf_Qbasis
>>> gf_Qbasis(gf_Qmatrix([1, 0, 0, 0, 1], 5, ZZ), 5, ZZ)
[[1, 0, 0, 0], [0, 0, 1, 0]]
>>> gf_Qbasis(gf_Qmatrix([3, 2, 4], 5, ZZ), 5, ZZ)
[[1, 0]]
"""
Q, n = [ list(q) for q in Q ], len(Q)
for k in range(0, n):
Q[k][k] = (Q[k][k] - K.one) % p
for k in range(0, n):
for i in range(k, n):
if Q[k][i]:
break
else:
continue
inv = K.invert(Q[k][i], p)
for j in range(0, n):
Q[j][i] = (Q[j][i]*inv) % p
for j in range(0, n):
t = Q[j][k]
Q[j][k] = Q[j][i]
Q[j][i] = t
for i in range(0, n):
if i != k:
q = Q[k][i]
for j in range(0, n):
Q[j][i] = (Q[j][i] - Q[j][k]*q) % p
for i in range(0, n):
for j in range(0, n):
if i == j:
Q[i][j] = (K.one - Q[i][j]) % p
else:
Q[i][j] = (-Q[i][j]) % p
basis = []
for q in Q:
if any(q):
basis.append(q)
return basis
def gf_berlekamp(f, p, K):
"""
Factor a square-free ``f`` in ``GF(p)[x]`` for small ``p``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_berlekamp
>>> gf_berlekamp([1, 0, 0, 0, 1], 5, ZZ)
[[1, 0, 2], [1, 0, 3]]
"""
Q = gf_Qmatrix(f, p, K)
V = gf_Qbasis(Q, p, K)
for i, v in enumerate(V):
V[i] = gf_strip(list(reversed(v)))
factors = [f]
for k in range(1, len(V)):
for f in list(factors):
s = K.zero
while s < p:
g = gf_sub_ground(V[k], s, p, K)
h = gf_gcd(f, g, p, K)
if h != [K.one] and h != f:
factors.remove(f)
f = gf_quo(f, h, p, K)
factors.extend([f, h])
if len(factors) == len(V):
return _sort_factors(factors, multiple=False)
s += K.one
return _sort_factors(factors, multiple=False)
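# Berlekamp's method above works because the kernel basis V of Q - I
# consists of polynomials v with v**p == v (mod f); for every such v,
# f splits as the product of gcd(f, v - s) over s in GF(p), and the
# dimension of the kernel equals the number of irreducible factors of f,
# which is why the loop stops as soon as len(factors) == len(V).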
def gf_ddf_zassenhaus(f, p, K):
"""
Cantor-Zassenhaus: Deterministic Distinct Degree Factorization
Given a monic square-free polynomial ``f`` in ``GF(p)[x]``, computes
partial distinct degree factorization ``f_1 ... f_d`` of ``f`` where
``deg(f_i) != deg(f_j)`` for ``i != j``. The result is returned as a
list of pairs ``(f_i, e_i)`` where ``deg(f_i) > 0`` and ``e_i > 0``
is an argument to the equal degree factorization routine.
Consider the polynomial ``x**15 - 1`` in ``GF(11)[x]``::
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_from_dict
>>> f = gf_from_dict({15: ZZ(1), 0: ZZ(-1)}, 11, ZZ)
Distinct degree factorization gives::
>>> from sympy.polys.galoistools import gf_ddf_zassenhaus
>>> gf_ddf_zassenhaus(f, 11, ZZ)
[([1, 0, 0, 0, 0, 10], 1), ([1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1], 2)]
which means ``x**15 - 1 = (x**5 - 1) (x**10 + x**5 + 1)``. To obtain
factorization into irreducibles, use equal degree factorization
procedure (EDF) with each of the factors.
References
==========
.. [1] [Gathen99]_
.. [2] [Geddes92]_
"""
i, g, factors = 1, [K.one, K.zero], []
b = gf_frobenius_monomial_base(f, p, K)
while 2*i <= gf_degree(f):
g = gf_frobenius_map(g, f, b, p, K)
h = gf_gcd(f, gf_sub(g, [K.one, K.zero], p, K), p, K)
if h != [K.one]:
factors.append((h, i))
f = gf_quo(f, h, p, K)
g = gf_rem(g, f, p, K)
b = gf_frobenius_monomial_base(f, p, K)
i += 1
if f != [K.one]:
return factors + [(f, gf_degree(f))]
else:
return factors
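# The distinct degree loop above relies on the fact that x**(p**i) - x is
# the product of all monic irreducible polynomials over GF(p) whose degree
# divides i; once the factors of smaller degree have been divided out,
# gcd(f, x**(p**i) - x) therefore collects exactly the product of the
# irreducible factors of f of degree i.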
def gf_edf_zassenhaus(f, n, p, K):
"""
Cantor-Zassenhaus: Probabilistic Equal Degree Factorization
Given a monic square-free polynomial ``f`` in ``GF(p)[x]`` and
an integer ``n``, such that ``n`` divides ``deg(f)``, returns all
irreducible factors ``f_1,...,f_d`` of ``f``, each of degree ``n``.
EDF procedure gives complete factorization over Galois fields.
Consider the square-free polynomial ``f = x**3 + x**2 + x + 1`` in
``GF(5)[x]``. Let's compute its irreducible factors of degree one::
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_edf_zassenhaus
>>> gf_edf_zassenhaus([1,1,1,1], 1, 5, ZZ)
[[1, 1], [1, 2], [1, 3]]
References
==========
.. [1] [Gathen99]_
.. [2] [Geddes92]_
"""
factors = [f]
if gf_degree(f) <= n:
return factors
N = gf_degree(f) // n
if p != 2:
b = gf_frobenius_monomial_base(f, p, K)
while len(factors) < N:
r = gf_random(2*n - 1, p, K)
if p == 2:
h = r
for i in range(0, 2**(n*N - 1)):
r = gf_pow_mod(r, 2, f, p, K)
h = gf_add(h, r, p, K)
g = gf_gcd(f, h, p, K)
else:
h = _gf_pow_pnm1d2(r, n, f, b, p, K)
g = gf_gcd(f, gf_sub_ground(h, K.one, p, K), p, K)
if g != [K.one] and g != f:
factors = gf_edf_zassenhaus(g, n, p, K) \
+ gf_edf_zassenhaus(gf_quo(f, g, p, K), n, p, K)
return _sort_factors(factors, multiple=False)
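# Equal degree splitting above: for odd p a random polynomial r is raised
# to the power (p**n - 1)//2 modulo f, and gcd(f, h - 1) then isolates a
# proper factor with probability roughly 1/2, because each irreducible
# factor of degree n independently maps r to +1 or -1 under this power map;
# for p == 2 a sum of repeated squarings of r is used instead.
# Unsuccessful attempts simply draw a new random polynomial.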
def gf_ddf_shoup(f, p, K):
"""
Kaltofen-Shoup: Deterministic Distinct Degree Factorization
Given a monic square-free polynomial ``f`` in ``GF(p)[x]``, computes
partial distinct degree factorization ``f_1,...,f_d`` of ``f`` where
``deg(f_i) != deg(f_j)`` for ``i != j``. The result is returned as a
list of pairs ``(f_i, e_i)`` where ``deg(f_i) > 0`` and ``e_i > 0``
is an argument to the equal degree factorization routine.
This algorithm is an improved version of Zassenhaus algorithm for
large ``deg(f)`` and modulus ``p`` (especially for ``deg(f) ~ lg(p)``).
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_ddf_shoup, gf_from_dict
>>> f = gf_from_dict({6: ZZ(1), 5: ZZ(-1), 4: ZZ(1), 3: ZZ(1), 1: ZZ(-1)}, 3, ZZ)
>>> gf_ddf_shoup(f, 3, ZZ)
[([1, 1, 0], 1), ([1, 1, 0, 1, 2], 2)]
References
==========
.. [1] [Kaltofen98]_
.. [2] [Shoup95]_
.. [3] [Gathen92]_
"""
n = gf_degree(f)
k = int(_ceil(_sqrt(n//2)))
b = gf_frobenius_monomial_base(f, p, K)
h = gf_frobenius_map([K.one, K.zero], f, b, p, K)
# U[i] = x**(p**i)
U = [[K.one, K.zero], h] + [K.zero]*(k - 1)
for i in range(2, k + 1):
U[i] = gf_frobenius_map(U[i-1], f, b, p, K)
h, U = U[k], U[:k]
# V[i] = x**(p**(k*(i+1)))
V = [h] + [K.zero]*(k - 1)
for i in range(1, k):
V[i] = gf_compose_mod(V[i - 1], h, f, p, K)
factors = []
for i, v in enumerate(V):
h, j = [K.one], k - 1
for u in U:
g = gf_sub(v, u, p, K)
h = gf_mul(h, g, p, K)
h = gf_rem(h, f, p, K)
g = gf_gcd(f, h, p, K)
f = gf_quo(f, g, p, K)
for u in reversed(U):
h = gf_sub(v, u, p, K)
F = gf_gcd(g, h, p, K)
if F != [K.one]:
factors.append((F, k*(i + 1) - j))
g, j = gf_quo(g, F, p, K), j - 1
if f != [K.one]:
factors.append((f, gf_degree(f)))
return factors
def gf_edf_shoup(f, n, p, K):
"""
Gathen-Shoup: Probabilistic Equal Degree Factorization
Given a monic square-free polynomial ``f`` in ``GF(p)[x]`` and integer
``n`` such that ``n`` divides ``deg(f)``, returns all irreducible factors
``f_1,...,f_d`` of ``f``, each of degree ``n``. This is a complete
factorization over Galois fields.
This algorithm is an improved version of Zassenhaus algorithm for
large ``deg(f)`` and modulus ``p`` (especially for ``deg(f) ~ lg(p)``).
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_edf_shoup
>>> gf_edf_shoup(ZZ.map([1, 2837, 2277]), 1, 2917, ZZ)
[[1, 852], [1, 1985]]
References
==========
.. [1] [Shoup91]_
.. [2] [Gathen92]_
"""
N, q = gf_degree(f), int(p)
if not N:
return []
if N <= n:
return [f]
factors, x = [f], [K.one, K.zero]
r = gf_random(N - 1, p, K)
if p == 2:
h = gf_pow_mod(x, q, f, p, K)
H = gf_trace_map(r, h, x, n - 1, f, p, K)[1]
h1 = gf_gcd(f, H, p, K)
h2 = gf_quo(f, h1, p, K)
factors = gf_edf_shoup(h1, n, p, K) \
+ gf_edf_shoup(h2, n, p, K)
else:
b = gf_frobenius_monomial_base(f, p, K)
H = _gf_trace_map(r, n, f, b, p, K)
h = gf_pow_mod(H, (q - 1)//2, f, p, K)
h1 = gf_gcd(f, h, p, K)
h2 = gf_gcd(f, gf_sub_ground(h, K.one, p, K), p, K)
h3 = gf_quo(f, gf_mul(h1, h2, p, K), p, K)
factors = gf_edf_shoup(h1, n, p, K) \
+ gf_edf_shoup(h2, n, p, K) \
+ gf_edf_shoup(h3, n, p, K)
return _sort_factors(factors, multiple=False)
def gf_zassenhaus(f, p, K):
"""
Factor a square-free ``f`` in ``GF(p)[x]`` for medium ``p``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_zassenhaus
>>> gf_zassenhaus(ZZ.map([1, 4, 3]), 5, ZZ)
[[1, 1], [1, 3]]
"""
factors = []
for factor, n in gf_ddf_zassenhaus(f, p, K):
factors += gf_edf_zassenhaus(factor, n, p, K)
return _sort_factors(factors, multiple=False)
def gf_shoup(f, p, K):
"""
Factor a square-free ``f`` in ``GF(p)[x]`` for large ``p``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_shoup
>>> gf_shoup(ZZ.map([1, 4, 3]), 5, ZZ)
[[1, 1], [1, 3]]
"""
factors = []
for factor, n in gf_ddf_shoup(f, p, K):
factors += gf_edf_shoup(factor, n, p, K)
return _sort_factors(factors, multiple=False)
_factor_methods = {
'berlekamp': gf_berlekamp, # ``p`` : small
'zassenhaus': gf_zassenhaus, # ``p`` : medium
'shoup': gf_shoup, # ``p`` : large
}
def gf_factor_sqf(f, p, K, method=None):
"""
Factor a square-free polynomial ``f`` in ``GF(p)[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_factor_sqf
>>> gf_factor_sqf(ZZ.map([3, 2, 4]), 5, ZZ)
(3, [[1, 1], [1, 3]])
"""
lc, f = gf_monic(f, p, K)
if gf_degree(f) < 1:
return lc, []
method = method or query('GF_FACTOR_METHOD')
if method is not None:
factors = _factor_methods[method](f, p, K)
else:
factors = gf_zassenhaus(f, p, K)
return lc, factors
def gf_factor(f, p, K):
"""
Factor (non square-free) polynomials in ``GF(p)[x]``.
Given a possibly non square-free polynomial ``f`` in ``GF(p)[x]``,
returns its complete factorization into irreducibles::
f_1(x)**e_1 f_2(x)**e_2 ... f_d(x)**e_d
where each ``f_i`` is a monic polynomial and ``gcd(f_i, f_j) == 1``,
for ``i != j``. The result is given as a tuple consisting of the
leading coefficient of ``f`` and a list of factors of ``f`` with
their multiplicities.
The algorithm proceeds by first computing square-free decomposition
of ``f`` and then iteratively factoring each of square-free factors.
Consider a non square-free polynomial ``f = (7*x + 1) (x + 2)**2`` in
``GF(11)[x]``. We obtain its factorization into irreducibles as follows::
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_factor
>>> gf_factor(ZZ.map([5, 2, 7, 2]), 11, ZZ)
(5, [([1, 2], 1), ([1, 8], 2)])
We arrived at the factorization ``f = 5 (x + 2) (x + 8)**2``. We didn't
recover the exact form of the input polynomial because we requested to
get monic factors of ``f`` and its leading coefficient separately.
Square-free factors of ``f`` can be factored into irreducibles over
``GF(p)`` using three very different methods:
Berlekamp
efficient for very small values of ``p`` (usually ``p < 25``)
Cantor-Zassenhaus
efficient on average input and with "typical" ``p``
Shoup-Kaltofen-Gathen
efficient with very large inputs and modulus
If you want to use a specific factorization method, instead of the default
one, set ``GF_FACTOR_METHOD`` with one of ``berlekamp``, ``zassenhaus`` or
``shoup`` values.
References
==========
.. [1] [Gathen99]_
"""
lc, f = gf_monic(f, p, K)
if gf_degree(f) < 1:
return lc, []
factors = []
for g, n in gf_sqf_list(f, p, K)[1]:
for h in gf_factor_sqf(g, p, K)[1]:
factors.append((h, n))
return lc, _sort_factors(factors)
def gf_value(f, a):
"""
Value of polynomial ``f`` at ``a``, computed over the integers (no reduction modulo a prime).
Examples
========
>>> from sympy.polys.galoistools import gf_value
>>> gf_value([1, 7, 2, 4], 11)
2204
"""
result = 0
for c in f:
result *= a
result += c
return result
def linear_congruence(a, b, m):
"""
Returns the values of x satisfying a*x congruent to b mod(m).
Here m is a positive integer and a, b are natural numbers.
This function returns only those values of x which are distinct mod(m).
Examples
========
>>> from sympy.polys.galoistools import linear_congruence
>>> linear_congruence(3, 12, 15)
[4, 9, 14]
There are 3 solutions distinct mod(15) since gcd(a, m) = gcd(3, 15) = 3.
References
==========
.. [1] https://en.wikipedia.org/wiki/Linear_congruence_theorem
"""
from sympy.polys.polytools import gcdex
if a % m == 0:
if b % m == 0:
return list(range(m))
else:
return []
r, _, g = gcdex(a, m)
if b % g != 0:
return []
return [(r * b // g + t * m // g) % m for t in range(g)]
def _raise_mod_power(x, s, p, f):
"""
Used in gf_csolve to generate solutions of f(x) cong 0 mod(p**(s + 1))
from the solutions of f(x) cong 0 mod(p**s).
Examples
========
>>> from sympy.polys.galoistools import _raise_mod_power
>>> from sympy.polys.galoistools import csolve_prime
These are the solutions of f(x) = x**2 + x + 7 cong 0 mod(3):
>>> f = [1, 1, 7]
>>> csolve_prime(f, 3)
[1]
>>> [ i for i in range(3) if not (i**2 + i + 7) % 3]
[1]
The solutions of f(x) cong 0 mod(9) are constructed from the
values returned from _raise_mod_power:
>>> x, s, p = 1, 1, 3
>>> V = _raise_mod_power(x, s, p, f)
>>> [x + v * p**s for v in V]
[1, 4, 7]
And these are confirmed with the following:
>>> [ i for i in range(3**2) if not (i**2 + i + 7) % 3**2]
[1, 4, 7]
"""
from sympy.polys.domains import ZZ
f_f = gf_diff(f, p, ZZ)
alpha = gf_value(f_f, x)
beta = - gf_value(f, x) // p**s
return linear_congruence(alpha, beta, p)
def csolve_prime(f, p, e=1):
"""
Solutions of f(x) congruent 0 mod(p**e).
Examples
========
>>> from sympy.polys.galoistools import csolve_prime
>>> csolve_prime([1, 1, 7], 3, 1)
[1]
>>> csolve_prime([1, 1, 7], 3, 2)
[1, 4, 7]
    The solutions [1, 4, 7] (mod 3**2) are generated by ``_raise_mod_power()``
    from the solution [1] (mod 3).
"""
from sympy.polys.domains import ZZ
X1 = [i for i in range(p) if gf_eval(f, i, p, ZZ) == 0]
if e == 1:
return X1
X = []
S = list(zip(X1, [1]*len(X1)))
while S:
x, s = S.pop()
if s == e:
X.append(x)
else:
s1 = s + 1
ps = p**s
S.extend([(x + v*ps, s1) for v in _raise_mod_power(x, s, p, f)])
return sorted(X)
def gf_csolve(f, n):
"""
To solve f(x) congruent 0 mod(n).
    n is factored into its canonical prime power factors, and f(x) cong 0
    mod(p**e) is solved for each factor. Applying the Chinese Remainder
    Theorem to the results returns the final answers.
Examples
========
Solve [1, 1, 7] congruent 0 mod(189):
>>> from sympy.polys.galoistools import gf_csolve
>>> gf_csolve([1, 1, 7], 189)
[13, 49, 76, 112, 139, 175]
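    For illustration, 189 = 3**3 * 7, so the same solutions can be obtained by
    solving modulo each prime power separately and recombining the results
    with the Chinese Remainder Theorem (the values below are hand-checked)::
    >>> from sympy.polys.galoistools import csolve_prime
    >>> csolve_prime([1, 1, 7], 3, 3)
    [4, 13, 22]
    >>> csolve_prime([1, 1, 7], 7)
    [0, 6]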
References
==========
.. [1] 'An introduction to the Theory of Numbers' 5th Edition by Ivan Niven,
Zuckerman and Montgomery.
"""
from sympy.polys.domains import ZZ
P = factorint(n)
X = [csolve_prime(f, p, e) for p, e in P.items()]
pools = list(map(tuple, X))
perms = [[]]
for pool in pools:
perms = [x + [y] for x in perms for y in pool]
dist_factors = [pow(p, e) for p, e in P.items()]
return sorted([gf_crt(per, dist_factors, ZZ) for per in perms])
|
816293327def1086d72f39c031e1cd50a42d9b0b7f6dc9cba30ba194ffe3e533 | """
This module contains functions for the computation
of Euclidean, (generalized) Sturmian, (modified) subresultant
polynomial remainder sequences (prs's) of two polynomials;
included are also three functions for the computation of the
resultant of two polynomials.
Except for the function res_z(), which computes the resultant
of two polynomials, the pseudo-remainder function prem()
of sympy is _not_ used by any of the functions in the module.
Instead of prem() we use the function
rem_z().
Included is also the function quo_z().
An explanation of why we avoid prem() can be found in the
references stated in the docstring of rem_z().
1. Theoretical background:
==========================
Consider the polynomials f, g in Z[x] of degrees deg(f) = n and
deg(g) = m with n >= m.
Definition 1:
=============
The sign sequence of a polynomial remainder sequence (prs) is the
sequence of signs of the leading coefficients of its polynomials.
Sign sequences can be computed with the function:
sign_seq(poly_seq, x)
Definition 2:
=============
A polynomial remainder sequence (prs) is called complete if the
degree difference between any two consecutive polynomials is 1;
otherwise, it is called incomplete.
It is understood that f, g belong to the sequences mentioned in
the two definitions above.
1A. Euclidean and subresultant prs's:
=====================================
The subresultant prs of f, g is a sequence of polynomials in Z[x]
analogous to the Euclidean prs, the sequence obtained by applying
on f, g Euclid's algorithm for polynomial greatest common divisors
(gcd) in Q[x].
The subresultant prs differs from the Euclidean prs in that the
coefficients of each polynomial in the former sequence are determinants
--- also referred to as subresultants --- of appropriately selected
sub-matrices of sylvester1(f, g, x), Sylvester's matrix of 1840 of
dimensions (n + m) * (n + m).
Recall that the determinant of sylvester1(f, g, x) itself is
called the resultant of f, g and serves as a criterion of whether
the two polynomials have common roots or not.
In sympy the resultant is computed with the function
resultant(f, g, x). This function does _not_ evaluate the
determinant of sylvester(f, g, x, 1); instead, it returns
the last member of the subresultant prs of f, g, multiplied
(if needed) by an appropriate power of -1; see the caveat below.
In this module we use three functions to compute the
resultant of f, g:
a) res(f, g, x) computes the resultant by evaluating
the determinant of sylvester(f, g, x, 1);
b) res_q(f, g, x) computes the resultant recursively, by
performing polynomial divisions in Q[x] with the function rem();
c) res_z(f, g, x) computes the resultant recursively, by
performing polynomial divisions in Z[x] with the function prem().
Caveat: If Df = degree(f, x) and Dg = degree(g, x), then:
resultant(f, g, x) = (-1)**(Df*Dg) * resultant(g, f, x).
For complete prs's the sign sequence of the Euclidean prs of f, g
is identical to the sign sequence of the subresultant prs of f, g
and the coefficients of one sequence are easily computed from the
coefficients of the other.
For incomplete prs's the polynomials in the subresultant prs, generally
differ in sign from those of the Euclidean prs, and --- unlike the
case of complete prs's --- it is not at all obvious how to compute
the coefficients of one sequence from the coefficients of the other.
1B. Sturmian and modified subresultant prs's:
=============================================
For the same polynomials f, g in Z[x] mentioned above, their ``modified''
subresultant prs is a sequence of polynomials similar to the Sturmian
prs, the sequence obtained by applying in Q[x] Sturm's algorithm on f, g.
The two sequences differ in that the coefficients of each polynomial
in the modified subresultant prs are the determinants --- also referred
to as modified subresultants --- of appropriately selected sub-matrices
of sylvester2(f, g, x), Sylvester's matrix of 1853 of dimensions 2n x 2n.
The determinant of sylvester2 itself is called the modified resultant
of f, g and it also can serve as a criterion of whether the two
polynomials have common roots or not.
For complete prs's the sign sequence of the Sturmian prs of f, g is
identical to the sign sequence of the modified subresultant prs of
f, g and the coefficients of one sequence are easily computed from
the coefficients of the other.
For incomplete prs's the polynomials in the modified subresultant prs,
generally differ in sign from those of the Sturmian prs, and --- unlike
the case of complete prs's --- it is not at all obvious how to compute
the coefficients of one sequence from the coefficients of the other.
As Sylvester pointed out, the coefficients of the polynomial remainders
obtained as (modified) subresultants are the smallest possible without
introducing rationals and without computing (integer) greatest common
divisors.
1C. On terminology:
===================
Whence the terminology? Well generalized Sturmian prs's are
``modifications'' of Euclidean prs's; the hint came from the title
of the Pell-Gordon paper of 1917.
In the literature one also encounters the name ``non signed'' and
``signed'' prs for Euclidean and Sturmian prs respectively.
Likewise ``non signed'' and ``signed'' subresultant prs for
subresultant and modified subresultant prs respectively.
2. Functions in the module:
===========================
No function utilizes sympy's function prem().
2A. Matrices:
=============
The functions sylvester(f, g, x, method=1) and
sylvester(f, g, x, method=2) compute either Sylvester matrix.
They can be used to compute (modified) subresultant prs's by
direct determinant evaluation.
The function bezout(f, g, x, method='prs') provides a matrix of
smaller dimensions than either Sylvester matrix. It is the function
of choice for computing (modified) subresultant prs's by direct
determinant evaluation.
sylvester(f, g, x, method=1)
sylvester(f, g, x, method=2)
bezout(f, g, x, method='prs')
The following identity holds:
bezout(f, g, x, method='prs') =
backward_eye(deg(f))*bezout(f, g, x, method='bz')*backward_eye(deg(f))
2B. Subresultant and modified subresultant prs's by
===================================================
determinant evaluations:
=======================
We use the Sylvester matrices of 1840 and 1853 to
compute, respectively, subresultant and modified
subresultant polynomial remainder sequences. However,
for large matrices this approach takes a lot of time.
Instead of utilizing the Sylvester matrices, we can
employ the Bezout matrix which is of smaller dimensions.
subresultants_sylv(f, g, x)
modified_subresultants_sylv(f, g, x)
subresultants_bezout(f, g, x)
modified_subresultants_bezout(f, g, x)
2C. Subresultant prs's by ONE determinant evaluation:
=====================================================
All three functions in this section evaluate one determinant
per remainder polynomial; this is the determinant of an
appropriately selected sub-matrix of sylvester1(f, g, x),
Sylvester's matrix of 1840.
To compute the remainder polynomials the function
subresultants_rem(f, g, x) employs rem(f, g, x).
By contrast, the other two functions implement Van Vleck's ideas
of 1900 and compute the remainder polynomials by triangularizing
sylvester2(f, g, x), Sylvester's matrix of 1853.
subresultants_rem(f, g, x)
subresultants_vv(f, g, x)
subresultants_vv_2(f, g, x).
2E. Euclidean, Sturmian prs's in Q[x]:
======================================
euclid_q(f, g, x)
sturm_q(f, g, x)
2F. Euclidean, Sturmian and (modified) subresultant prs's P-G:
==============================================================
All functions in this section are based on the Pell-Gordon (P-G)
theorem of 1917.
Computations are done in Q[x], employing the function rem(f, g, x)
for the computation of the remainder polynomials.
euclid_pg(f, g, x)
sturm_pg(f, g, x)
subresultants_pg(f, g, x)
modified_subresultants_pg(f, g, x)
2G. Euclidean, Sturmian and (modified) subresultant prs's A-M-V:
================================================================
All functions in this section are based on the Akritas-Malaschonok-
Vigklas (A-M-V) theorem of 2015.
Computations are done in Z[x], employing the function rem_z(f, g, x)
for the computation of the remainder polynomials.
euclid_amv(f, g, x)
sturm_amv(f, g, x)
subresultants_amv(f, g, x)
modified_subresultants_amv(f, g, x)
2Ga. Exception:
===============
subresultants_amv_q(f, g, x)
This function employs rem(f, g, x) for the computation of
the remainder polynomials, despite the fact that it implements
the A-M-V Theorem.
It is included in our module in order to show that theorems P-G
and A-M-V can be implemented utilizing either the function
rem(f, g, x) or the function rem_z(f, g, x).
For clearly historical reasons --- since the Collins-Brown-Traub
coefficients-reduction factor beta_i was not available in 1917 ---
we have implemented the Pell-Gordon theorem with the function
rem(f, g, x) and the A-M-V Theorem with the function rem_z(f, g, x).
2H. Resultants:
===============
res(f, g, x)
res_q(f, g, x)
res_z(f, g, x)
"""
from __future__ import print_function, division
from sympy import (Abs, degree, expand, eye, floor, LC, Matrix, nan, Poly, pprint)
from sympy import (QQ, pquo, quo, prem, rem, S, sign, simplify, summation, var, zeros)
from sympy.polys.polyerrors import PolynomialError
def sylvester(f, g, x, method = 1):
'''
The input polynomials f, g are in Z[x] or in Q[x]. Let m = degree(f, x),
n = degree(g, x) and mx = max( m , n ).
a. If method = 1 (default), computes sylvester1, Sylvester's matrix of 1840
of dimension (m + n) x (m + n). The determinants of properly chosen
submatrices of this matrix (a.k.a. subresultants) can be
used to compute the coefficients of the Euclidean PRS of f, g.
b. If method = 2, computes sylvester2, Sylvester's matrix of 1853
of dimension (2*mx) x (2*mx). The determinants of properly chosen
submatrices of this matrix (a.k.a. ``modified'' subresultants) can be
used to compute the coefficients of the Sturmian PRS of f, g.
Applications of these Matrices can be found in the references below.
Especially, for applications of sylvester2, see the first reference!!
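    Examples
    ========
    A hand-worked sketch (it assumes ``x = var('x')`` and that this module is
    importable as ``sympy.polys.subresultants_qq_zz``); the expected matrices
    are shown in the comments::
        from sympy import var
        from sympy.polys.subresultants_qq_zz import sylvester
        x = var('x')
        f, g = x**2 + 1, x + 2
        sylvester(f, g, x, 1)   # Matrix([[1, 0, 1], [1, 2, 0], [0, 1, 2]])
        sylvester(f, g, x, 2)   # Matrix([[1, 0, 1, 0], [0, 1, 2, 0], [0, 1, 0, 1], [0, 0, 1, 2]])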
References
==========
1. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``On a Theorem
by Van Vleck Regarding Sturm Sequences. Serdica Journal of Computing,
Vol. 7, No 4, 101-134, 2013.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Sturm Sequences
and Modified Subresultant Polynomial Remainder Sequences.''
Serdica Journal of Computing, Vol. 8, No 1, 29-46, 2014.
'''
# obtain degrees of polys
m, n = degree( Poly(f, x), x), degree( Poly(g, x), x)
# Special cases:
# A:: case m = n < 0 (i.e. both polys are 0)
if m == n and n < 0:
return Matrix([])
# B:: case m = n = 0 (i.e. both polys are constants)
if m == n and n == 0:
return Matrix([])
# C:: m == 0 and n < 0 or m < 0 and n == 0
# (i.e. one poly is constant and the other is 0)
if m == 0 and n < 0:
return Matrix([])
elif m < 0 and n == 0:
return Matrix([])
# D:: m >= 1 and n < 0 or m < 0 and n >=1
# (i.e. one poly is of degree >=1 and the other is 0)
if m >= 1 and n < 0:
return Matrix([0])
elif m < 0 and n >= 1:
return Matrix([0])
fp = Poly(f, x).all_coeffs()
gp = Poly(g, x).all_coeffs()
# Sylvester's matrix of 1840 (default; a.k.a. sylvester1)
if method <= 1:
M = zeros(m + n)
k = 0
for i in range(n):
j = k
for coeff in fp:
M[i, j] = coeff
j = j + 1
k = k + 1
k = 0
for i in range(n, m + n):
j = k
for coeff in gp:
M[i, j] = coeff
j = j + 1
k = k + 1
return M
# Sylvester's matrix of 1853 (a.k.a sylvester2)
if method >= 2:
if len(fp) < len(gp):
h = []
for i in range(len(gp) - len(fp)):
h.append(0)
fp[ : 0] = h
else:
h = []
for i in range(len(fp) - len(gp)):
h.append(0)
gp[ : 0] = h
mx = max(m, n)
dim = 2*mx
M = zeros( dim )
k = 0
for i in range( mx ):
j = k
for coeff in fp:
M[2*i, j] = coeff
j = j + 1
j = k
for coeff in gp:
M[2*i + 1, j] = coeff
j = j + 1
k = k + 1
return M
def process_matrix_output(poly_seq, x):
"""
poly_seq is a polynomial remainder sequence computed either by
(modified_)subresultants_bezout or by (modified_)subresultants_sylv.
This function removes from poly_seq all zero polynomials as well
as all those whose degree is equal to the degree of a preceding
polynomial in poly_seq, as we scan it from left to right.
"""
L = poly_seq[:] # get a copy of the input sequence
d = degree(L[1], x)
i = 2
while i < len(L):
d_i = degree(L[i], x)
if d_i < 0: # zero poly
L.remove(L[i])
i = i - 1
if d == d_i: # poly degree equals degree of previous poly
L.remove(L[i])
i = i - 1
if d_i >= 0:
d = d_i
i = i + 1
return L
def subresultants_sylv(f, g, x):
"""
The input polynomials f, g are in Z[x] or in Q[x]. It is assumed
that deg(f) >= deg(g).
Computes the subresultant polynomial remainder sequence (prs)
of f, g by evaluating determinants of appropriately selected
submatrices of sylvester(f, g, x, 1). The dimensions of the
latter are (deg(f) + deg(g)) x (deg(f) + deg(g)).
Each coefficient is computed by evaluating the determinant of the
corresponding submatrix of sylvester(f, g, x, 1).
If the subresultant prs is complete, then the output coincides
with the Euclidean sequence of the polynomials f, g.
References:
===========
1. G.M.Diaz-Toca,L.Gonzalez-Vega: Various New Expressions for Subresultants
and Their Applications. Appl. Algebra in Engin., Communic. and Comp.,
Vol. 15, 233-266, 2004.
"""
# make sure neither f nor g is 0
if f == 0 or g == 0:
return [f, g]
n = degF = degree(f, x)
m = degG = degree(g, x)
# make sure proper degrees
if n == 0 and m == 0:
return [f, g]
if n < m:
n, m, degF, degG, f, g = m, n, degG, degF, g, f
if n > 0 and m == 0:
return [f, g]
SR_L = [f, g] # subresultant list
# form matrix sylvester(f, g, x, 1)
S = sylvester(f, g, x, 1)
# pick appropriate submatrices of S
# and form subresultant polys
j = m - 1
while j > 0:
Sp = S[:, :] # copy of S
# delete last j rows of coeffs of g
for ind in range(m + n - j, m + n):
Sp.row_del(m + n - j)
# delete last j rows of coeffs of f
for ind in range(m - j, m):
Sp.row_del(m - j)
# evaluate determinants and form coefficients list
coeff_L, k, l = [], Sp.rows, 0
while l <= j:
coeff_L.append(Sp[ : , 0 : k].det())
Sp.col_swap(k - 1, k + l)
l += 1
        # form poly and append to SR_L
SR_L.append(Poly(coeff_L, x).as_expr())
j -= 1
# j = 0
SR_L.append(S.det())
return process_matrix_output(SR_L, x)
def modified_subresultants_sylv(f, g, x):
"""
The input polynomials f, g are in Z[x] or in Q[x]. It is assumed
that deg(f) >= deg(g).
Computes the modified subresultant polynomial remainder sequence (prs)
of f, g by evaluating determinants of appropriately selected
submatrices of sylvester(f, g, x, 2). The dimensions of the
latter are (2*deg(f)) x (2*deg(f)).
Each coefficient is computed by evaluating the determinant of the
corresponding submatrix of sylvester(f, g, x, 2).
If the modified subresultant prs is complete, then the output coincides
with the Sturmian sequence of the polynomials f, g.
References:
===========
1. A. G. Akritas,G.I. Malaschonok and P.S. Vigklas:
Sturm Sequences and Modified Subresultant Polynomial Remainder
Sequences. Serdica Journal of Computing, Vol. 8, No 1, 29--46, 2014.
"""
# make sure neither f nor g is 0
if f == 0 or g == 0:
return [f, g]
n = degF = degree(f, x)
m = degG = degree(g, x)
# make sure proper degrees
if n == 0 and m == 0:
return [f, g]
if n < m:
n, m, degF, degG, f, g = m, n, degG, degF, g, f
if n > 0 and m == 0:
return [f, g]
SR_L = [f, g] # modified subresultant list
# form matrix sylvester(f, g, x, 2)
S = sylvester(f, g, x, 2)
# pick appropriate submatrices of S
# and form modified subresultant polys
j = m - 1
while j > 0:
# delete last 2*j rows of pairs of coeffs of f, g
Sp = S[0:2*n - 2*j, :] # copy of first 2*n - 2*j rows of S
# evaluate determinants and form coefficients list
coeff_L, k, l = [], Sp.rows, 0
while l <= j:
coeff_L.append(Sp[ : , 0 : k].det())
Sp.col_swap(k - 1, k + l)
l += 1
        # form poly and append to SR_L
SR_L.append(Poly(coeff_L, x).as_expr())
j -= 1
# j = 0
SR_L.append(S.det())
return process_matrix_output(SR_L, x)
def res(f, g, x):
"""
The input polynomials f, g are in Z[x] or in Q[x].
The output is the resultant of f, g computed by evaluating
the determinant of the matrix sylvester(f, g, x, 1).
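    Examples
    ========
    A small hand-worked sketch (assumes ``x = var('x')``); for f = x**2 + 1
    and g = x + 2 the determinant of sylvester(f, g, x, 1) is f(-2) = 5::
        from sympy import var
        from sympy.polys.subresultants_qq_zz import res
        x = var('x')
        res(x**2 + 1, x + 2, x)   # expected: 5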
References:
===========
1. J. S. Cohen: Computer Algebra and Symbolic Computation
- Mathematical Methods. A. K. Peters, 2003.
"""
if f == 0 or g == 0:
raise PolynomialError("The resultant of %s and %s is not defined" % (f, g))
else:
return sylvester(f, g, x, 1).det()
def res_q(f, g, x):
"""
The input polynomials f, g are in Z[x] or in Q[x].
The output is the resultant of f, g computed recursively
by polynomial divisions in Q[x], using the function rem.
See Cohen's book p. 281.
References:
===========
1. J. S. Cohen: Computer Algebra and Symbolic Computation
- Mathematical Methods. A. K. Peters, 2003.
"""
m = degree(f, x)
n = degree(g, x)
if m < n:
return (-1)**(m*n) * res_q(g, f, x)
elif n == 0: # g is a constant
return g**m
else:
r = rem(f, g, x)
if r == 0:
return 0
else:
s = degree(r, x)
l = LC(g, x)
return (-1)**(m*n) * l**(m-s)*res_q(g, r, x)
def res_z(f, g, x):
"""
The input polynomials f, g are in Z[x] or in Q[x].
The output is the resultant of f, g computed recursively
by polynomial divisions in Z[x], using the function prem().
See Cohen's book p. 283.
References:
===========
1. J. S. Cohen: Computer Algebra and Symbolic Computation
- Mathematical Methods. A. K. Peters, 2003.
"""
m = degree(f, x)
n = degree(g, x)
if m < n:
return (-1)**(m*n) * res_z(g, f, x)
elif n == 0: # g is a constant
return g**m
else:
r = prem(f, g, x)
if r == 0:
return 0
else:
delta = m - n + 1
w = (-1)**(m*n) * res_z(g, r, x)
s = degree(r, x)
l = LC(g, x)
k = delta * n - m + s
return quo(w, l**k, x)
def sign_seq(poly_seq, x):
"""
Given a sequence of polynomials poly_seq, it returns
the sequence of signs of the leading coefficients of
the polynomials in poly_seq.
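    Example (a hand-worked sketch; assumes ``x = var('x')``)::
        from sympy import var
        from sympy.polys.subresultants_qq_zz import sign_seq
        x = var('x')
        sign_seq([x**3 - 4, -x + 1, 7], x)   # expected: [1, -1, 1]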
"""
return [sign(LC(poly_seq[i], x)) for i in range(len(poly_seq))]
def bezout(p, q, x, method='bz'):
"""
The input polynomials p, q are in Z[x] or in Q[x]. Let
mx = max( degree(p, x) , degree(q, x) ).
The default option bezout(p, q, x, method='bz') returns Bezout's
symmetric matrix of p and q, of dimensions (mx) x (mx). The
determinant of this matrix is equal to the determinant of sylvester2,
Sylvester's matrix of 1853, whose dimensions are (2*mx) x (2*mx);
however the subresultants of these two matrices may differ.
The other option, bezout(p, q, x, 'prs'), is of interest to us
in this module because it returns a matrix equivalent to sylvester2.
In this case all subresultants of the two matrices are identical.
Both the subresultant polynomial remainder sequence (prs) and
the modified subresultant prs of p and q can be computed by
evaluating determinants of appropriately selected submatrices of
bezout(p, q, x, 'prs') --- one determinant per coefficient of the
remainder polynomials.
The matrices bezout(p, q, x, 'bz') and bezout(p, q, x, 'prs')
are related by the formula
bezout(p, q, x, 'prs') =
backward_eye(deg(p)) * bezout(p, q, x, 'bz') * backward_eye(deg(p)),
    where backward_eye() returns the backward identity matrix.
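    Examples
    ========
    A hand-worked sketch of the identity above (assumes ``x = var('x')``)::
        from sympy import var
        from sympy.polys.subresultants_qq_zz import bezout, backward_eye
        x = var('x')
        p, q = x**2 + 1, x + 2
        bezout(p, q, x, 'bz')    # Matrix([[-1, 2], [2, 1]])
        bezout(p, q, x, 'prs')   # Matrix([[1, 2], [2, -1]])
        backward_eye(2) * bezout(p, q, x, 'bz') * backward_eye(2)   # same as the 'prs' matrix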
References
==========
1. G.M.Diaz-Toca,L.Gonzalez-Vega: Various New Expressions for Subresultants
and Their Applications. Appl. Algebra in Engin., Communic. and Comp.,
Vol. 15, 233-266, 2004.
"""
# obtain degrees of polys
m, n = degree( Poly(p, x), x), degree( Poly(q, x), x)
# Special cases:
# A:: case m = n < 0 (i.e. both polys are 0)
if m == n and n < 0:
return Matrix([])
# B:: case m = n = 0 (i.e. both polys are constants)
if m == n and n == 0:
return Matrix([])
# C:: m == 0 and n < 0 or m < 0 and n == 0
# (i.e. one poly is constant and the other is 0)
if m == 0 and n < 0:
return Matrix([])
elif m < 0 and n == 0:
return Matrix([])
# D:: m >= 1 and n < 0 or m < 0 and n >=1
# (i.e. one poly is of degree >=1 and the other is 0)
if m >= 1 and n < 0:
return Matrix([0])
elif m < 0 and n >= 1:
return Matrix([0])
y = var('y')
# expr is 0 when x = y
expr = p * q.subs({x:y}) - p.subs({x:y}) * q
# hence expr is exactly divisible by x - y
poly = Poly( quo(expr, x-y), x, y)
# form Bezout matrix and store them in B as indicated to get
# the LC coefficient of each poly either in the first position
# of each row (method='prs') or in the last (method='bz').
mx = max(m, n)
B = zeros(mx)
for i in range(mx):
for j in range(mx):
if method == 'prs':
B[mx - 1 - i, mx - 1 - j] = poly.nth(i, j)
else:
B[i, j] = poly.nth(i, j)
return B
def backward_eye(n):
'''
Returns the backward identity matrix of dimensions n x n.
Needed to "turn" the Bezout matrices
so that the leading coefficients are first.
See docstring of the function bezout(p, q, x, method='bz').
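    Example (a minimal sketch)::
        from sympy.polys.subresultants_qq_zz import backward_eye
        backward_eye(3)   # expected: Matrix([[0, 0, 1], [0, 1, 0], [1, 0, 0]])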
'''
M = eye(n) # identity matrix of order n
for i in range(int(M.rows / 2)):
M.row_swap(0 + i, M.rows - 1 - i)
return M
def subresultants_bezout(p, q, x):
"""
The input polynomials p, q are in Z[x] or in Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the subresultant polynomial remainder sequence
of p, q by evaluating determinants of appropriately selected
submatrices of bezout(p, q, x, 'prs'). The dimensions of the
latter are deg(p) x deg(p).
Each coefficient is computed by evaluating the determinant of the
corresponding submatrix of bezout(p, q, x, 'prs').
    bezout(p, q, x, 'prs') is used instead of sylvester(p, q, x, 1),
Sylvester's matrix of 1840, because the dimensions of the latter
are (deg(p) + deg(q)) x (deg(p) + deg(q)).
If the subresultant prs is complete, then the output coincides
with the Euclidean sequence of the polynomials p, q.
References
==========
1. G.M.Diaz-Toca,L.Gonzalez-Vega: Various New Expressions for Subresultants
and Their Applications. Appl. Algebra in Engin., Communic. and Comp.,
Vol. 15, 233-266, 2004.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
f, g = p, q
n = degF = degree(f, x)
m = degG = degree(g, x)
# make sure proper degrees
if n == 0 and m == 0:
return [f, g]
if n < m:
n, m, degF, degG, f, g = m, n, degG, degF, g, f
if n > 0 and m == 0:
return [f, g]
SR_L = [f, g] # subresultant list
F = LC(f, x)**(degF - degG)
# form the bezout matrix
B = bezout(f, g, x, 'prs')
# pick appropriate submatrices of B
# and form subresultant polys
if degF > degG:
j = 2
if degF == degG:
j = 1
while j <= degF:
M = B[0:j, :]
k, coeff_L = j - 1, []
while k <= degF - 1:
coeff_L.append(M[: ,0 : j].det())
if k < degF - 1:
M.col_swap(j - 1, k + 1)
k = k + 1
# apply Theorem 2.1 in the paper by Toca & Vega 2004
# to get correct signs
SR_L.append(int((-1)**(j*(j-1)/2)) * (Poly(coeff_L, x) / F).as_expr())
j = j + 1
return process_matrix_output(SR_L, x)
def modified_subresultants_bezout(p, q, x):
"""
The input polynomials p, q are in Z[x] or in Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the modified subresultant polynomial remainder sequence
of p, q by evaluating determinants of appropriately selected
submatrices of bezout(p, q, x, 'prs'). The dimensions of the
latter are deg(p) x deg(p).
Each coefficient is computed by evaluating the determinant of the
corresponding submatrix of bezout(p, q, x, 'prs').
bezout(p, q, x, 'prs') is used instead of sylvester(p, q, x, 2),
Sylvester's matrix of 1853, because the dimensions of the latter
are 2*deg(p) x 2*deg(p).
If the modified subresultant prs is complete, and LC( p ) > 0, the output
coincides with the (generalized) Sturm's sequence of the polynomials p, q.
References
==========
1. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Sturm Sequences
and Modified Subresultant Polynomial Remainder Sequences.''
Serdica Journal of Computing, Vol. 8, No 1, 29-46, 2014.
2. G.M.Diaz-Toca,L.Gonzalez-Vega: Various New Expressions for Subresultants
and Their Applications. Appl. Algebra in Engin., Communic. and Comp.,
Vol. 15, 233-266, 2004.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
f, g = p, q
n = degF = degree(f, x)
m = degG = degree(g, x)
# make sure proper degrees
if n == 0 and m == 0:
return [f, g]
if n < m:
n, m, degF, degG, f, g = m, n, degG, degF, g, f
if n > 0 and m == 0:
return [f, g]
SR_L = [f, g] # subresultant list
# form the bezout matrix
B = bezout(f, g, x, 'prs')
# pick appropriate submatrices of B
# and form subresultant polys
if degF > degG:
j = 2
if degF == degG:
j = 1
while j <= degF:
M = B[0:j, :]
k, coeff_L = j - 1, []
while k <= degF - 1:
coeff_L.append(M[: ,0 : j].det())
if k < degF - 1:
M.col_swap(j - 1, k + 1)
k = k + 1
## Theorem 2.1 in the paper by Toca & Vega 2004 is _not needed_
## in this case since
## the bezout matrix is equivalent to sylvester2
SR_L.append(( Poly(coeff_L, x)).as_expr())
j = j + 1
return process_matrix_output(SR_L, x)
def sturm_pg(p, q, x, method=0):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the (generalized) Sturm sequence of p and q in Z[x] or Q[x].
If q = diff(p, x, 1) it is the usual Sturm sequence.
A. If method == 0, default, the remainder coefficients of the sequence
are (in absolute value) ``modified'' subresultants, which for non-monic
polynomials are greater than the coefficients of the corresponding
subresultants by the factor Abs(LC(p)**( deg(p)- deg(q))).
B. If method == 1, the remainder coefficients of the sequence are (in
absolute value) subresultants, which for non-monic polynomials are
smaller than the coefficients of the corresponding ``modified''
subresultants by the factor Abs(LC(p)**( deg(p)- deg(q))).
If the Sturm sequence is complete, method=0 and LC( p ) > 0, the coefficients
of the polynomials in the sequence are ``modified'' subresultants.
That is, they are determinants of appropriately selected submatrices of
sylvester2, Sylvester's matrix of 1853. In this case the Sturm sequence
coincides with the ``modified'' subresultant prs, of the polynomials
p, q.
If the Sturm sequence is incomplete and method=0 then the signs of the
coefficients of the polynomials in the sequence may differ from the signs
of the coefficients of the corresponding polynomials in the ``modified''
subresultant prs; however, the absolute values are the same.
To compute the coefficients, no determinant evaluation takes place. Instead,
polynomial divisions in Q[x] are performed, using the function rem(p, q, x);
the coefficients of the remainders computed this way become (``modified'')
subresultants with the help of the Pell-Gordon Theorem of 1917.
See also the function euclid_pg(p, q, x).
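    Examples
    ========
    A hand-worked sketch (assumes ``x = var('x')``); with q = diff(p, x) this
    is the usual Sturm sequence, and the values shown were obtained by
    following the recurrences described above::
        from sympy import var, diff
        from sympy.polys.subresultants_qq_zz import sturm_pg
        x = var('x')
        p = x**3 - 7*x + 7
        sturm_pg(p, diff(p, x), x)
        # expected: [x**3 - 7*x + 7, 3*x**2 - 7, 42*x - 63, 49]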
References
==========
1. Pell A. J., R. L. Gordon. The Modified Remainders Obtained in Finding
       the Highest Common Factor of Two Polynomials. Annals of Mathematics,
Second Series, 18 (1917), No. 4, 188-193.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Sturm Sequences
and Modified Subresultant Polynomial Remainder Sequences.''
Serdica Journal of Computing, Vol. 8, No 1, 29-46, 2014.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
d0 = degree(p, x)
d1 = degree(q, x)
if d0 == 0 and d1 == 0:
return [p, q]
if d1 > d0:
d0, d1 = d1, d0
p, q = q, p
if d0 > 0 and d1 == 0:
return [p,q]
# make sure LC(p) > 0
flag = 0
if LC(p,x) < 0:
flag = 1
p = -p
q = -q
# initialize
lcf = LC(p, x)**(d0 - d1) # lcf * subr = modified subr
a0, a1 = p, q # the input polys
sturm_seq = [a0, a1] # the output list
del0 = d0 - d1 # degree difference
rho1 = LC(a1, x) # leading coeff of a1
exp_deg = d1 - 1 # expected degree of a2
a2 = - rem(a0, a1, domain=QQ) # first remainder
rho2 = LC(a2,x) # leading coeff of a2
d2 = degree(a2, x) # actual degree of a2
deg_diff_new = exp_deg - d2 # expected - actual degree
del1 = d1 - d2 # degree difference
# mul_fac is the factor by which a2 is multiplied to
# get integer coefficients
mul_fac_old = rho1**(del0 + del1 - deg_diff_new)
# append accordingly
if method == 0:
sturm_seq.append( simplify(lcf * a2 * Abs(mul_fac_old)))
else:
sturm_seq.append( simplify( a2 * Abs(mul_fac_old)))
# main loop
deg_diff_old = deg_diff_new
while d2 > 0:
a0, a1, d0, d1 = a1, a2, d1, d2 # update polys and degrees
del0 = del1 # update degree difference
exp_deg = d1 - 1 # new expected degree
a2 = - rem(a0, a1, domain=QQ) # new remainder
rho3 = LC(a2, x) # leading coeff of a2
d2 = degree(a2, x) # actual degree of a2
deg_diff_new = exp_deg - d2 # expected - actual degree
del1 = d1 - d2 # degree difference
# take into consideration the power
# rho1**deg_diff_old that was "left out"
expo_old = deg_diff_old # rho1 raised to this power
expo_new = del0 + del1 - deg_diff_new # rho2 raised to this power
# update variables and append
mul_fac_new = rho2**(expo_new) * rho1**(expo_old) * mul_fac_old
deg_diff_old, mul_fac_old = deg_diff_new, mul_fac_new
rho1, rho2 = rho2, rho3
if method == 0:
sturm_seq.append( simplify(lcf * a2 * Abs(mul_fac_old)))
else:
sturm_seq.append( simplify( a2 * Abs(mul_fac_old)))
if flag: # change the sign of the sequence
sturm_seq = [-i for i in sturm_seq]
# gcd is of degree > 0 ?
m = len(sturm_seq)
if sturm_seq[m - 1] == nan or sturm_seq[m - 1] == 0:
sturm_seq.pop(m - 1)
return sturm_seq
def sturm_q(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the (generalized) Sturm sequence of p and q in Q[x].
Polynomial divisions in Q[x] are performed, using the function rem(p, q, x).
The coefficients of the polynomials in the Sturm sequence can be uniquely
determined from the corresponding coefficients of the polynomials found
either in:
(a) the ``modified'' subresultant prs, (references 1, 2)
or in
(b) the subresultant prs (reference 3).
References
==========
1. Pell A. J., R. L. Gordon. The Modified Remainders Obtained in Finding
       the Highest Common Factor of Two Polynomials. Annals of Mathematics,
Second Series, 18 (1917), No. 4, 188-193.
    2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Sturm Sequences
and Modified Subresultant Polynomial Remainder Sequences.''
Serdica Journal of Computing, Vol. 8, No 1, 29-46, 2014.
3. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``A Basic Result
on the Theory of Subresultants.'' Serdica Journal of Computing 10 (2016), No.1, 31-48.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
d0 = degree(p, x)
d1 = degree(q, x)
if d0 == 0 and d1 == 0:
return [p, q]
if d1 > d0:
d0, d1 = d1, d0
p, q = q, p
if d0 > 0 and d1 == 0:
return [p,q]
# make sure LC(p) > 0
flag = 0
if LC(p,x) < 0:
flag = 1
p = -p
q = -q
# initialize
a0, a1 = p, q # the input polys
sturm_seq = [a0, a1] # the output list
a2 = -rem(a0, a1, domain=QQ) # first remainder
d2 = degree(a2, x) # degree of a2
sturm_seq.append( a2 )
# main loop
while d2 > 0:
a0, a1, d0, d1 = a1, a2, d1, d2 # update polys and degrees
a2 = -rem(a0, a1, domain=QQ) # new remainder
d2 = degree(a2, x) # actual degree of a2
sturm_seq.append( a2 )
if flag: # change the sign of the sequence
sturm_seq = [-i for i in sturm_seq]
# gcd is of degree > 0 ?
m = len(sturm_seq)
if sturm_seq[m - 1] == nan or sturm_seq[m - 1] == 0:
sturm_seq.pop(m - 1)
return sturm_seq
def sturm_amv(p, q, x, method=0):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the (generalized) Sturm sequence of p and q in Z[x] or Q[x].
If q = diff(p, x, 1) it is the usual Sturm sequence.
A. If method == 0, default, the remainder coefficients of the
sequence are (in absolute value) ``modified'' subresultants, which
for non-monic polynomials are greater than the coefficients of the
corresponding subresultants by the factor Abs(LC(p)**( deg(p)- deg(q))).
B. If method == 1, the remainder coefficients of the sequence are (in
absolute value) subresultants, which for non-monic polynomials are
smaller than the coefficients of the corresponding ``modified''
subresultants by the factor Abs( LC(p)**( deg(p)- deg(q)) ).
If the Sturm sequence is complete, method=0 and LC( p ) > 0, then the
coefficients of the polynomials in the sequence are ``modified'' subresultants.
That is, they are determinants of appropriately selected submatrices of
sylvester2, Sylvester's matrix of 1853. In this case the Sturm sequence
coincides with the ``modified'' subresultant prs, of the polynomials
p, q.
If the Sturm sequence is incomplete and method=0 then the signs of the
coefficients of the polynomials in the sequence may differ from the signs
of the coefficients of the corresponding polynomials in the ``modified''
subresultant prs; however, the absolute values are the same.
To compute the coefficients, no determinant evaluation takes place.
Instead, we first compute the euclidean sequence of p and q using
euclid_amv(p, q, x) and then: (a) change the signs of the remainders in the
Euclidean sequence according to the pattern "-, -, +, +, -, -, +, +,..."
(see Lemma 1 in the 1st reference or Theorem 3 in the 2nd reference)
and (b) if method=0, assuming deg(p) > deg(q), we multiply the remainder
coefficients of the Euclidean sequence times the factor
Abs( LC(p)**( deg(p)- deg(q)) ) to make them modified subresultants.
See also the function sturm_pg(p, q, x).
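    Example (a hand-worked sketch; assumes ``x = var('x')``); the sequence is
    complete, so the output agrees with sturm_pg(p, q, x)::
        from sympy import var, diff
        from sympy.polys.subresultants_qq_zz import sturm_amv
        x = var('x')
        p = x**3 - 7*x + 7
        sturm_amv(p, diff(p, x), x)
        # expected: [x**3 - 7*x + 7, 3*x**2 - 7, 42*x - 63, 49]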
References
==========
1. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``A Basic Result
on the Theory of Subresultants.'' Serdica Journal of Computing 10 (2016), No.1, 31-48.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``On the Remainders
Obtained in Finding the Greatest Common Divisor of Two Polynomials.'' Serdica
Journal of Computing 9(2) (2015), 123-138.
3. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Subresultant Polynomial
Remainder Sequences Obtained by Polynomial Divisions in Q[x] or in Z[x].''
Serdica Journal of Computing 10 (2016), No.3-4, 197-217.
"""
# compute the euclidean sequence
prs = euclid_amv(p, q, x)
# defensive
if prs == [] or len(prs) == 2:
return prs
# the coefficients in prs are subresultants and hence are smaller
# than the corresponding subresultants by the factor
# Abs( LC(prs[0])**( deg(prs[0]) - deg(prs[1])) ); Theorem 2, 2nd reference.
lcf = Abs( LC(prs[0])**( degree(prs[0], x) - degree(prs[1], x) ) )
# the signs of the first two polys in the sequence stay the same
sturm_seq = [prs[0], prs[1]]
# change the signs according to "-, -, +, +, -, -, +, +,..."
# and multiply times lcf if needed
flag = 0
m = len(prs)
i = 2
while i <= m-1:
if flag == 0:
sturm_seq.append( - prs[i] )
i = i + 1
if i == m:
break
sturm_seq.append( - prs[i] )
i = i + 1
flag = 1
elif flag == 1:
sturm_seq.append( prs[i] )
i = i + 1
if i == m:
break
sturm_seq.append( prs[i] )
i = i + 1
flag = 0
# subresultants or modified subresultants?
if method == 0 and lcf > 1:
aux_seq = [sturm_seq[0], sturm_seq[1]]
for i in range(2, m):
aux_seq.append(simplify(sturm_seq[i] * lcf ))
sturm_seq = aux_seq
return sturm_seq
def euclid_pg(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the Euclidean sequence of p and q in Z[x] or Q[x].
If the Euclidean sequence is complete the coefficients of the polynomials
in the sequence are subresultants. That is, they are determinants of
appropriately selected submatrices of sylvester1, Sylvester's matrix of 1840.
In this case the Euclidean sequence coincides with the subresultant prs
of the polynomials p, q.
If the Euclidean sequence is incomplete the signs of the coefficients of the
polynomials in the sequence may differ from the signs of the coefficients of
the corresponding polynomials in the subresultant prs; however, the absolute
values are the same.
To compute the Euclidean sequence, no determinant evaluation takes place.
We first compute the (generalized) Sturm sequence of p and q using
sturm_pg(p, q, x, 1), in which case the coefficients are (in absolute value)
equal to subresultants. Then we change the signs of the remainders in the
Sturm sequence according to the pattern "-, -, +, +, -, -, +, +,..." ;
see Lemma 1 in the 1st reference or Theorem 3 in the 2nd reference as well as
the function sturm_pg(p, q, x).
References
==========
1. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``A Basic Result
on the Theory of Subresultants.'' Serdica Journal of Computing 10 (2016), No.1, 31-48.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``On the Remainders
Obtained in Finding the Greatest Common Divisor of Two Polynomials.'' Serdica
Journal of Computing 9(2) (2015), 123-138.
3. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Subresultant Polynomial
Remainder Sequences Obtained by Polynomial Divisions in Q[x] or in Z[x].''
Serdica Journal of Computing 10 (2016), No.3-4, 197-217.
"""
# compute the sturmian sequence using the Pell-Gordon (or AMV) theorem
# with the coefficients in the prs being (in absolute value) subresultants
prs = sturm_pg(p, q, x, 1) ## any other method would do
# defensive
if prs == [] or len(prs) == 2:
return prs
# the signs of the first two polys in the sequence stay the same
euclid_seq = [prs[0], prs[1]]
# change the signs according to "-, -, +, +, -, -, +, +,..."
flag = 0
m = len(prs)
i = 2
while i <= m-1:
if flag == 0:
euclid_seq.append(- prs[i] )
i = i + 1
if i == m:
break
euclid_seq.append(- prs[i] )
i = i + 1
flag = 1
elif flag == 1:
euclid_seq.append(prs[i] )
i = i + 1
if i == m:
break
euclid_seq.append(prs[i] )
i = i + 1
flag = 0
return euclid_seq
def euclid_q(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the Euclidean sequence of p and q in Q[x].
Polynomial divisions in Q[x] are performed, using the function rem(p, q, x).
The coefficients of the polynomials in the Euclidean sequence can be uniquely
determined from the corresponding coefficients of the polynomials found
either in:
(a) the ``modified'' subresultant polynomial remainder sequence,
(references 1, 2)
or in
(b) the subresultant polynomial remainder sequence (references 3).
References
==========
1. Pell A. J., R. L. Gordon. The Modified Remainders Obtained in Finding
       the Highest Common Factor of Two Polynomials. Annals of Mathematics,
Second Series, 18 (1917), No. 4, 188-193.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Sturm Sequences
and Modified Subresultant Polynomial Remainder Sequences.''
Serdica Journal of Computing, Vol. 8, No 1, 29-46, 2014.
3. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``A Basic Result
on the Theory of Subresultants.'' Serdica Journal of Computing 10 (2016), No.1, 31-48.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
d0 = degree(p, x)
d1 = degree(q, x)
if d0 == 0 and d1 == 0:
return [p, q]
if d1 > d0:
d0, d1 = d1, d0
p, q = q, p
if d0 > 0 and d1 == 0:
return [p,q]
# make sure LC(p) > 0
flag = 0
if LC(p,x) < 0:
flag = 1
p = -p
q = -q
# initialize
a0, a1 = p, q # the input polys
euclid_seq = [a0, a1] # the output list
a2 = rem(a0, a1, domain=QQ) # first remainder
d2 = degree(a2, x) # degree of a2
euclid_seq.append( a2 )
# main loop
while d2 > 0:
a0, a1, d0, d1 = a1, a2, d1, d2 # update polys and degrees
a2 = rem(a0, a1, domain=QQ) # new remainder
d2 = degree(a2, x) # actual degree of a2
euclid_seq.append( a2 )
if flag: # change the sign of the sequence
euclid_seq = [-i for i in euclid_seq]
# gcd is of degree > 0 ?
m = len(euclid_seq)
if euclid_seq[m - 1] == nan or euclid_seq[m - 1] == 0:
euclid_seq.pop(m - 1)
return euclid_seq
def euclid_amv(f, g, x):
"""
f, g are polynomials in Z[x] or Q[x]. It is assumed
that degree(f, x) >= degree(g, x).
    Computes the Euclidean sequence of f and g in Z[x] or Q[x].
If the Euclidean sequence is complete the coefficients of the polynomials
in the sequence are subresultants. That is, they are determinants of
appropriately selected submatrices of sylvester1, Sylvester's matrix of 1840.
    In this case the Euclidean sequence coincides with the subresultant prs
    of the polynomials f, g.
If the Euclidean sequence is incomplete the signs of the coefficients of the
polynomials in the sequence may differ from the signs of the coefficients of
the corresponding polynomials in the subresultant prs; however, the absolute
values are the same.
To compute the coefficients, no determinant evaluation takes place.
Instead, polynomial divisions in Z[x] or Q[x] are performed, using
the function rem_z(f, g, x); the coefficients of the remainders
computed this way become subresultants with the help of the
Collins-Brown-Traub formula for coefficient reduction.
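    Examples
    ========
    A hand-worked sketch (assumes ``x = var('x')``); the last member, -49,
    equals the resultant of the two input polynomials::
        from sympy import var, diff
        from sympy.polys.subresultants_qq_zz import euclid_amv
        x = var('x')
        f = x**3 - 7*x + 7
        euclid_amv(f, diff(f, x), x)
        # expected: [x**3 - 7*x + 7, 3*x**2 - 7, -42*x + 63, -49]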
References
==========
1. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``A Basic Result
on the Theory of Subresultants.'' Serdica Journal of Computing 10 (2016), No.1, 31-48.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Subresultant Polynomial
remainder Sequences Obtained by Polynomial Divisions in Q[x] or in Z[x].''
Serdica Journal of Computing 10 (2016), No.3-4, 197-217.
"""
# make sure neither f nor g is 0
if f == 0 or g == 0:
return [f, g]
# make sure proper degrees
d0 = degree(f, x)
d1 = degree(g, x)
if d0 == 0 and d1 == 0:
return [f, g]
if d1 > d0:
d0, d1 = d1, d0
f, g = g, f
if d0 > 0 and d1 == 0:
return [f, g]
# initialize
a0 = f
a1 = g
euclid_seq = [a0, a1]
deg_dif_p1, c = degree(a0, x) - degree(a1, x) + 1, -1
# compute the first polynomial of the prs
i = 1
a2 = rem_z(a0, a1, x) / Abs( (-1)**deg_dif_p1 ) # first remainder
euclid_seq.append( a2 )
d2 = degree(a2, x) # actual degree of a2
# main loop
while d2 >= 1:
a0, a1, d0, d1 = a1, a2, d1, d2 # update polys and degrees
i += 1
sigma0 = -LC(a0)
c = (sigma0**(deg_dif_p1 - 1)) / (c**(deg_dif_p1 - 2))
deg_dif_p1 = degree(a0, x) - d2 + 1
a2 = rem_z(a0, a1, x) / Abs( ((c**(deg_dif_p1 - 1)) * sigma0) )
euclid_seq.append( a2 )
d2 = degree(a2, x) # actual degree of a2
# gcd is of degree > 0 ?
m = len(euclid_seq)
if euclid_seq[m - 1] == nan or euclid_seq[m - 1] == 0:
euclid_seq.pop(m - 1)
return euclid_seq
def modified_subresultants_pg(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the ``modified'' subresultant prs of p and q in Z[x] or Q[x];
the coefficients of the polynomials in the sequence are
``modified'' subresultants. That is, they are determinants of appropriately
selected submatrices of sylvester2, Sylvester's matrix of 1853.
To compute the coefficients, no determinant evaluation takes place. Instead,
polynomial divisions in Q[x] are performed, using the function rem(p, q, x);
the coefficients of the remainders computed this way become ``modified''
subresultants with the help of the Pell-Gordon Theorem of 1917.
If the ``modified'' subresultant prs is complete, and LC( p ) > 0, it coincides
with the (generalized) Sturm sequence of the polynomials p, q.
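    Examples
    ========
    A hand-worked sketch (assumes ``x = var('x')``); the prs below is complete
    and LC(p) > 0, so it also equals the (generalized) Sturm sequence::
        from sympy import var, diff
        from sympy.polys.subresultants_qq_zz import modified_subresultants_pg
        x = var('x')
        p = x**3 - 7*x + 7
        modified_subresultants_pg(p, diff(p, x), x)
        # expected: [x**3 - 7*x + 7, 3*x**2 - 7, 42*x - 63, 49]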
References
==========
1. Pell A. J., R. L. Gordon. The Modified Remainders Obtained in Finding
       the Highest Common Factor of Two Polynomials. Annals of Mathematics,
Second Series, 18 (1917), No. 4, 188-193.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Sturm Sequences
and Modified Subresultant Polynomial Remainder Sequences.''
Serdica Journal of Computing, Vol. 8, No 1, 29-46, 2014.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
d0 = degree(p,x)
d1 = degree(q,x)
if d0 == 0 and d1 == 0:
return [p, q]
if d1 > d0:
d0, d1 = d1, d0
p, q = q, p
if d0 > 0 and d1 == 0:
return [p,q]
# initialize
k = var('k') # index in summation formula
u_list = [] # of elements (-1)**u_i
subres_l = [p, q] # mod. subr. prs output list
a0, a1 = p, q # the input polys
del0 = d0 - d1 # degree difference
degdif = del0 # save it
rho_1 = LC(a0) # lead. coeff (a0)
# Initialize Pell-Gordon variables
rho_list_minus_1 = sign( LC(a0, x)) # sign of LC(a0)
rho1 = LC(a1, x) # leading coeff of a1
rho_list = [ sign(rho1)] # of signs
p_list = [del0] # of degree differences
u = summation(k, (k, 1, p_list[0])) # value of u
u_list.append(u) # of u values
v = sum(p_list) # v value
# first remainder
exp_deg = d1 - 1 # expected degree of a2
a2 = - rem(a0, a1, domain=QQ) # first remainder
rho2 = LC(a2, x) # leading coeff of a2
d2 = degree(a2, x) # actual degree of a2
deg_diff_new = exp_deg - d2 # expected - actual degree
del1 = d1 - d2 # degree difference
# mul_fac is the factor by which a2 is multiplied to
# get integer coefficients
mul_fac_old = rho1**(del0 + del1 - deg_diff_new)
# update Pell-Gordon variables
p_list.append(1 + deg_diff_new) # deg_diff_new is 0 for complete seq
# apply Pell-Gordon formula (7) in second reference
num = 1 # numerator of fraction
for k in range(len(u_list)):
num *= (-1)**u_list[k]
num = num * (-1)**v
# denominator depends on complete / incomplete seq
if deg_diff_new == 0: # complete seq
den = 1
for k in range(len(rho_list)):
den *= rho_list[k]**(p_list[k] + p_list[k + 1])
den = den * rho_list_minus_1
else: # incomplete seq
den = 1
for k in range(len(rho_list)-1):
den *= rho_list[k]**(p_list[k] + p_list[k + 1])
den = den * rho_list_minus_1
expo = (p_list[len(rho_list) - 1] + p_list[len(rho_list)] - deg_diff_new)
den = den * rho_list[len(rho_list) - 1]**expo
# the sign of the determinant depends on sg(num / den)
if sign(num / den) > 0:
subres_l.append( simplify(rho_1**degdif*a2* Abs(mul_fac_old) ) )
else:
subres_l.append(- simplify(rho_1**degdif*a2* Abs(mul_fac_old) ) )
# update Pell-Gordon variables
k = var('k')
rho_list.append( sign(rho2))
u = summation(k, (k, 1, p_list[len(p_list) - 1]))
u_list.append(u)
v = sum(p_list)
deg_diff_old=deg_diff_new
# main loop
while d2 > 0:
a0, a1, d0, d1 = a1, a2, d1, d2 # update polys and degrees
del0 = del1 # update degree difference
exp_deg = d1 - 1 # new expected degree
a2 = - rem(a0, a1, domain=QQ) # new remainder
rho3 = LC(a2, x) # leading coeff of a2
d2 = degree(a2, x) # actual degree of a2
deg_diff_new = exp_deg - d2 # expected - actual degree
del1 = d1 - d2 # degree difference
# take into consideration the power
# rho1**deg_diff_old that was "left out"
expo_old = deg_diff_old # rho1 raised to this power
expo_new = del0 + del1 - deg_diff_new # rho2 raised to this power
mul_fac_new = rho2**(expo_new) * rho1**(expo_old) * mul_fac_old
# update variables
deg_diff_old, mul_fac_old = deg_diff_new, mul_fac_new
rho1, rho2 = rho2, rho3
# update Pell-Gordon variables
p_list.append(1 + deg_diff_new) # deg_diff_new is 0 for complete seq
# apply Pell-Gordon formula (7) in second reference
num = 1 # numerator
for k in range(len(u_list)):
num *= (-1)**u_list[k]
num = num * (-1)**v
# denominator depends on complete / incomplete seq
if deg_diff_new == 0: # complete seq
den = 1
for k in range(len(rho_list)):
den *= rho_list[k]**(p_list[k] + p_list[k + 1])
den = den * rho_list_minus_1
else: # incomplete seq
den = 1
for k in range(len(rho_list)-1):
den *= rho_list[k]**(p_list[k] + p_list[k + 1])
den = den * rho_list_minus_1
expo = (p_list[len(rho_list) - 1] + p_list[len(rho_list)] - deg_diff_new)
den = den * rho_list[len(rho_list) - 1]**expo
# the sign of the determinant depends on sg(num / den)
if sign(num / den) > 0:
subres_l.append( simplify(rho_1**degdif*a2* Abs(mul_fac_old) ) )
else:
subres_l.append(- simplify(rho_1**degdif*a2* Abs(mul_fac_old) ) )
# update Pell-Gordon variables
k = var('k')
rho_list.append( sign(rho2))
u = summation(k, (k, 1, p_list[len(p_list) - 1]))
u_list.append(u)
v = sum(p_list)
# gcd is of degree > 0 ?
m = len(subres_l)
if subres_l[m - 1] == nan or subres_l[m - 1] == 0:
subres_l.pop(m - 1)
# LC( p ) < 0
m = len(subres_l) # list may be shorter now due to deg(gcd ) > 0
if LC( p ) < 0:
aux_seq = [subres_l[0], subres_l[1]]
for i in range(2, m):
aux_seq.append(simplify(subres_l[i] * (-1) ))
subres_l = aux_seq
return subres_l
def subresultants_pg(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the subresultant prs of p and q in Z[x] or Q[x], from
the modified subresultant prs of p and q.
The coefficients of the polynomials in these two sequences differ only
in sign and the factor LC(p)**( deg(p)- deg(q)) as stated in
Theorem 2 of the reference.
The coefficients of the polynomials in the output sequence are
subresultants. That is, they are determinants of appropriately
selected submatrices of sylvester1, Sylvester's matrix of 1840.
If the subresultant prs is complete, then it coincides with the
Euclidean sequence of the polynomials p, q.
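    Examples
    ========
    A hand-worked sketch (assumes ``x = var('x')``); the prs below is complete,
    so it coincides with the Euclidean sequence of p and q::
        from sympy import var, diff
        from sympy.polys.subresultants_qq_zz import subresultants_pg
        x = var('x')
        p = x**3 - 7*x + 7
        subresultants_pg(p, diff(p, x), x)
        # expected: [x**3 - 7*x + 7, 3*x**2 - 7, -42*x + 63, -49]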
References
==========
1. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: "On the Remainders
Obtained in Finding the Greatest Common Divisor of Two Polynomials."
Serdica Journal of Computing 9(2) (2015), 123-138.
"""
# compute the modified subresultant prs
lst = modified_subresultants_pg(p,q,x) ## any other method would do
# defensive
if lst == [] or len(lst) == 2:
return lst
# the coefficients in lst are modified subresultants and, hence, are
# greater than those of the corresponding subresultants by the factor
# LC(lst[0])**( deg(lst[0]) - deg(lst[1])); see Theorem 2 in reference.
lcf = LC(lst[0])**( degree(lst[0], x) - degree(lst[1], x) )
# Initialize the subresultant prs list
subr_seq = [lst[0], lst[1]]
# compute the degree sequences m_i and j_i of Theorem 2 in reference.
deg_seq = [degree(Poly(poly, x), x) for poly in lst]
deg = deg_seq[0]
deg_seq_s = deg_seq[1:-1]
m_seq = [m-1 for m in deg_seq_s]
j_seq = [deg - m for m in m_seq]
# compute the AMV factors of Theorem 2 in reference.
fact = [(-1)**( j*(j-1)/S(2) ) for j in j_seq]
# shortened list without the first two polys
lst_s = lst[2:]
# poly lst_s[k] is multiplied times fact[k], divided by lcf
# and appended to the subresultant prs list
m = len(fact)
for k in range(m):
if sign(fact[k]) == -1:
subr_seq.append(-lst_s[k] / lcf)
else:
subr_seq.append(lst_s[k] / lcf)
return subr_seq
def subresultants_amv_q(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the subresultant prs of p and q in Q[x];
the coefficients of the polynomials in the sequence are
subresultants. That is, they are determinants of appropriately
selected submatrices of sylvester1, Sylvester's matrix of 1840.
To compute the coefficients, no determinant evaluation takes place.
Instead, polynomial divisions in Q[x] are performed, using the
function rem(p, q, x); the coefficients of the remainders
computed this way become subresultants with the help of the
Akritas-Malaschonok-Vigklas Theorem of 2015.
If the subresultant prs is complete, then it coincides with the
Euclidean sequence of the polynomials p, q.
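    Examples
    ========
    A hand-worked sketch (assumes ``x = var('x')``); the result matches the
    output of subresultants_pg(p, q, x) for the same input::
        from sympy import var, diff
        from sympy.polys.subresultants_qq_zz import subresultants_amv_q
        x = var('x')
        p = x**3 - 7*x + 7
        subresultants_amv_q(p, diff(p, x), x)
        # expected: [x**3 - 7*x + 7, 3*x**2 - 7, -42*x + 63, -49]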
References
==========
1. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``A Basic Result
on the Theory of Subresultants.'' Serdica Journal of Computing 10 (2016), No.1, 31-48.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Subresultant Polynomial
remainder Sequences Obtained by Polynomial Divisions in Q[x] or in Z[x].''
Serdica Journal of Computing 10 (2016), No.3-4, 197-217.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
d0 = degree(p, x)
d1 = degree(q, x)
if d0 == 0 and d1 == 0:
return [p, q]
if d1 > d0:
d0, d1 = d1, d0
p, q = q, p
if d0 > 0 and d1 == 0:
return [p, q]
# initialize
i, s = 0, 0 # counters for remainders & odd elements
p_odd_index_sum = 0 # contains the sum of p_1, p_3, etc
subres_l = [p, q] # subresultant prs output list
a0, a1 = p, q # the input polys
sigma1 = LC(a1, x) # leading coeff of a1
p0 = d0 - d1 # degree difference
if p0 % 2 == 1:
s += 1
phi = floor( (s + 1) / 2 )
mul_fac = 1
d2 = d1
# main loop
while d2 > 0:
i += 1
a2 = rem(a0, a1, domain= QQ) # new remainder
if i == 1:
sigma2 = LC(a2, x)
else:
sigma3 = LC(a2, x)
sigma1, sigma2 = sigma2, sigma3
d2 = degree(a2, x)
p1 = d1 - d2
psi = i + phi + p_odd_index_sum
# new mul_fac
mul_fac = sigma1**(p0 + 1) * mul_fac
## compute the sign of the first fraction in formula (9) of the paper
# numerator
num = (-1)**psi
# denominator
den = sign(mul_fac)
# the sign of the determinant depends on sign( num / den ) != 0
if sign(num / den) > 0:
subres_l.append( simplify(expand(a2* Abs(mul_fac))))
else:
subres_l.append(- simplify(expand(a2* Abs(mul_fac))))
## bring into mul_fac the missing power of sigma if there was a degree gap
if p1 - 1 > 0:
mul_fac = mul_fac * sigma1**(p1 - 1)
# update AMV variables
a0, a1, d0, d1 = a1, a2, d1, d2
p0 = p1
if p0 % 2 ==1:
s += 1
phi = floor( (s + 1) / 2 )
if i%2 == 1:
p_odd_index_sum += p0 # p_i has odd index
# gcd is of degree > 0 ?
m = len(subres_l)
if subres_l[m - 1] == nan or subres_l[m - 1] == 0:
subres_l.pop(m - 1)
return subres_l
def compute_sign(base, expo):
'''
base != 0 and expo >= 0 are integers;
returns the sign of base**expo without
evaluating the power itself!
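    Example (a minimal sketch)::
        from sympy.polys.subresultants_qq_zz import compute_sign
        compute_sign(-2, 3)   # expected: -1, since (-2)**3 < 0
        compute_sign(-2, 4)   # expected:  1, since (-2)**4 > 0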
'''
sb = sign(base)
if sb == 1:
return 1
pe = expo % 2
if pe == 0:
return -sb
else:
return sb
def rem_z(p, q, x):
'''
Intended mainly for p, q polynomials in Z[x] so that,
on dividing p by q, the remainder will also be in Z[x]. (However,
it also works fine for polynomials in Q[x].) It is assumed
that degree(p, x) >= degree(q, x).
It premultiplies p by the _absolute_ value of the leading coefficient
of q, raised to the power deg(p) - deg(q) + 1 and then performs
polynomial division in Q[x], using the function rem(p, q, x).
By contrast the function prem(p, q, x) does _not_ use the absolute
value of the leading coefficient of q.
This results not only in ``messing up the signs'' of the Euclidean and
Sturmian prs's as mentioned in the second reference,
but also in violation of the main results of the first and third
references --- Theorem 4 and Theorem 1 respectively. Theorems 4 and 1
establish a one-to-one correspondence between the Euclidean and the
Sturmian prs of p, q, on one hand, and the subresultant prs of p, q,
on the other.
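    Examples
    ========
    A hand-worked sketch (assumes ``x = var('x')``): premultiplying p by
    Abs(LC(q, x))**(deg(p) - deg(q) + 1) keeps the remainder in Z[x]::
        from sympy import var, rem
        from sympy.polys.subresultants_qq_zz import rem_z
        x = var('x')
        p, q = x**3 - 2*x + 5, 3*x - 1
        rem_z(p, q, x)   # expected: 118
        rem(p, q, x)     # expected: 118/27, i.e. not in Z[x]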
References
==========
1. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``On the Remainders
Obtained in Finding the Greatest Common Divisor of Two Polynomials.''
Serdica Journal of Computing, 9(2) (2015), 123-138.
2. http://planetMath.org/sturmstheorem
3. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``A Basic Result on
the Theory of Subresultants.'' Serdica Journal of Computing 10 (2016), No.1, 31-48.
'''
if (p.as_poly().is_univariate and q.as_poly().is_univariate and
p.as_poly().gens == q.as_poly().gens):
delta = (degree(p, x) - degree(q, x) + 1)
return rem(Abs(LC(q, x))**delta * p, q, x)
else:
return prem(p, q, x)
def quo_z(p, q, x):
"""
Intended mainly for p, q polynomials in Z[x] so that,
on dividing p by q, the quotient will also be in Z[x]. (However,
it also works fine for polynomials in Q[x].) It is assumed
that degree(p, x) >= degree(q, x).
It premultiplies p by the _absolute_ value of the leading coefficient
of q, raised to the power deg(p) - deg(q) + 1 and then performs
polynomial division in Q[x], using the function quo(p, q, x).
By contrast the function pquo(p, q, x) does _not_ use the absolute
value of the leading coefficient of q.
See also function rem_z(p, q, x) for additional comments and references.
"""
if (p.as_poly().is_univariate and q.as_poly().is_univariate and
p.as_poly().gens == q.as_poly().gens):
delta = (degree(p, x) - degree(q, x) + 1)
return quo(Abs(LC(q, x))**delta * p, q, x)
else:
return pquo(p, q, x)
def subresultants_amv(f, g, x):
"""
    f, g are polynomials in Z[x] or Q[x]. It is assumed
    that degree(f, x) >= degree(g, x).
    Computes the subresultant prs of f and g in Z[x] or Q[x];
the coefficients of the polynomials in the sequence are
subresultants. That is, they are determinants of appropriately
selected submatrices of sylvester1, Sylvester's matrix of 1840.
To compute the coefficients, no determinant evaluation takes place.
Instead, polynomial divisions in Z[x] or Q[x] are performed, using
the function rem_z(p, q, x); the coefficients of the remainders
computed this way become subresultants with the help of the
Akritas-Malaschonok-Vigklas Theorem of 2015 and the Collins-Brown-
Traub formula for coefficient reduction.
    If the subresultant prs is complete, then it coincides with the
    Euclidean sequence of the polynomials f, g.
References
==========
1. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``A Basic Result
on the Theory of Subresultants.'' Serdica Journal of Computing 10 (2016), No.1, 31-48.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Subresultant Polynomial
remainder Sequences Obtained by Polynomial Divisions in Q[x] or in Z[x].''
Serdica Journal of Computing 10 (2016), No.3-4, 197-217.
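    Examples
    ========
    A small illustrative pair (hand-checked; the prs is complete, so it
    coincides with the Euclidean sequence of f, g):
    >>> from sympy.polys.subresultants_qq_zz import subresultants_amv
    >>> from sympy.abc import x
    >>> subresultants_amv(x**3 - 2*x, x**2 - 1, x)
    [x**3 - 2*x, x**2 - 1, -x, -1]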
"""
# make sure neither f nor g is 0
if f == 0 or g == 0:
return [f, g]
# make sure proper degrees
d0 = degree(f, x)
d1 = degree(g, x)
if d0 == 0 and d1 == 0:
return [f, g]
if d1 > d0:
d0, d1 = d1, d0
f, g = g, f
if d0 > 0 and d1 == 0:
return [f, g]
# initialize
a0 = f
a1 = g
subres_l = [a0, a1]
deg_dif_p1, c = degree(a0, x) - degree(a1, x) + 1, -1
# initialize AMV variables
sigma1 = LC(a1, x) # leading coeff of a1
i, s = 0, 0 # counters for remainders & odd elements
p_odd_index_sum = 0 # contains the sum of p_1, p_3, etc
p0 = deg_dif_p1 - 1
if p0 % 2 == 1:
s += 1
phi = floor( (s + 1) / 2 )
# compute the first polynomial of the prs
i += 1
a2 = rem_z(a0, a1, x) / Abs( (-1)**deg_dif_p1 ) # first remainder
sigma2 = LC(a2, x) # leading coeff of a2
d2 = degree(a2, x) # actual degree of a2
p1 = d1 - d2 # degree difference
    # sgn_den is the factor, i.e. the denominator of the 1st fraction of (9),
# by which a2 is multiplied to get integer coefficients
sgn_den = compute_sign( sigma1, p0 + 1 )
## compute sign of the 1st fraction in formula (9) of the paper
# numerator
psi = i + phi + p_odd_index_sum
num = (-1)**psi
# denominator
den = sgn_den
# the sign of the determinant depends on sign(num / den) != 0
if sign(num / den) > 0:
subres_l.append( a2 )
else:
subres_l.append( -a2 )
# update AMV variable
if p1 % 2 == 1:
s += 1
# bring in the missing power of sigma if there was gap
if p1 - 1 > 0:
sgn_den = sgn_den * compute_sign( sigma1, p1 - 1 )
# main loop
while d2 >= 1:
phi = floor( (s + 1) / 2 )
if i%2 == 1:
p_odd_index_sum += p1 # p_i has odd index
a0, a1, d0, d1 = a1, a2, d1, d2 # update polys and degrees
p0 = p1 # update degree difference
i += 1
sigma0 = -LC(a0)
c = (sigma0**(deg_dif_p1 - 1)) / (c**(deg_dif_p1 - 2))
deg_dif_p1 = degree(a0, x) - d2 + 1
a2 = rem_z(a0, a1, x) / Abs( ((c**(deg_dif_p1 - 1)) * sigma0) )
sigma3 = LC(a2, x) # leading coeff of a2
d2 = degree(a2, x) # actual degree of a2
p1 = d1 - d2 # degree difference
psi = i + phi + p_odd_index_sum
# update variables
sigma1, sigma2 = sigma2, sigma3
# new sgn_den
sgn_den = compute_sign( sigma1, p0 + 1 ) * sgn_den
# compute the sign of the first fraction in formula (9) of the paper
# numerator
num = (-1)**psi
# denominator
den = sgn_den
# the sign of the determinant depends on sign( num / den ) != 0
if sign(num / den) > 0:
subres_l.append( a2 )
else:
subres_l.append( -a2 )
# update AMV variable
        if p1 % 2 == 1:
s += 1
# bring in the missing power of sigma if there was gap
if p1 - 1 > 0:
sgn_den = sgn_den * compute_sign( sigma1, p1 - 1 )
# gcd is of degree > 0 ?
m = len(subres_l)
if subres_l[m - 1] == nan or subres_l[m - 1] == 0:
subres_l.pop(m - 1)
return subres_l
def modified_subresultants_amv(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the modified subresultant prs of p and q in Z[x] or Q[x],
from the subresultant prs of p and q.
The coefficients of the polynomials in the two sequences differ only
in sign and the factor LC(p)**( deg(p)- deg(q)) as stated in
Theorem 2 of the reference.
The coefficients of the polynomials in the output sequence are
modified subresultants. That is, they are determinants of appropriately
selected submatrices of sylvester2, Sylvester's matrix of 1853.
If the modified subresultant prs is complete, and LC( p ) > 0, it coincides
with the (generalized) Sturm's sequence of the polynomials p, q.
References
==========
1. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: "On the Remainders
Obtained in Finding the Greatest Common Divisor of Two Polynomials."
    Serdica Journal of Computing, 9(2) (2015), 123-138.
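    Examples
    ========
    A small illustrative pair (hand-checked; LC(p) > 0 and the prs is
    complete, so the output is the generalized Sturm sequence of p, q):
    >>> from sympy.polys.subresultants_qq_zz import modified_subresultants_amv
    >>> from sympy.abc import x
    >>> modified_subresultants_amv(x**3 - 2*x, x**2 - 1, x)
    [x**3 - 2*x, x**2 - 1, x, 1]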
"""
# compute the subresultant prs
lst = subresultants_amv(p,q,x) ## any other method would do
# defensive
if lst == [] or len(lst) == 2:
return lst
# the coefficients in lst are subresultants and, hence, smaller than those
# of the corresponding modified subresultants by the factor
# LC(lst[0])**( deg(lst[0]) - deg(lst[1])); see Theorem 2.
lcf = LC(lst[0])**( degree(lst[0], x) - degree(lst[1], x) )
# Initialize the modified subresultant prs list
subr_seq = [lst[0], lst[1]]
# compute the degree sequences m_i and j_i of Theorem 2
deg_seq = [degree(Poly(poly, x), x) for poly in lst]
deg = deg_seq[0]
deg_seq_s = deg_seq[1:-1]
m_seq = [m-1 for m in deg_seq_s]
j_seq = [deg - m for m in m_seq]
# compute the AMV factors of Theorem 2
fact = [(-1)**( j*(j-1)/S(2) ) for j in j_seq]
# shortened list without the first two polys
lst_s = lst[2:]
# poly lst_s[k] is multiplied times fact[k] and times lcf
# and appended to the subresultant prs list
m = len(fact)
for k in range(m):
if sign(fact[k]) == -1:
subr_seq.append( simplify(-lst_s[k] * lcf) )
else:
subr_seq.append( simplify(lst_s[k] * lcf) )
return subr_seq
def correct_sign(deg_f, deg_g, s1, rdel, cdel):
"""
Used in various subresultant prs algorithms.
Evaluates the determinant, (a.k.a. subresultant) of a properly selected
submatrix of s1, Sylvester's matrix of 1840, to get the correct sign
and value of the leading coefficient of a given polynomial remainder.
deg_f, deg_g are the degrees of the original polynomials p, q for which the
matrix s1 = sylvester(p, q, x, 1) was constructed.
rdel denotes the expected degree of the remainder; it is the number of
rows to be deleted from each group of rows in s1 as described in the
reference below.
cdel denotes the expected degree minus the actual degree of the remainder;
it is the number of columns to be deleted --- starting with the last column
forming the square matrix --- from the matrix resulting after the row deletions.
References
==========
Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Sturm Sequences
and Modified Subresultant Polynomial Remainder Sequences.''
Serdica Journal of Computing, Vol. 8, No 1, 29-46, 2014.
"""
M = s1[:, :] # copy of matrix s1
# eliminate rdel rows from the first deg_g rows
for i in range(M.rows - deg_f - 1, M.rows - deg_f - rdel - 1, -1):
M.row_del(i)
# eliminate rdel rows from the last deg_f rows
for i in range(M.rows - 1, M.rows - rdel - 1, -1):
M.row_del(i)
# eliminate cdel columns
for i in range(cdel):
M.col_del(M.rows - 1)
# define submatrix
Md = M[:, 0: M.rows]
return Md.det()
def subresultants_rem(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the subresultant prs of p and q in Z[x] or Q[x];
the coefficients of the polynomials in the sequence are
subresultants. That is, they are determinants of appropriately
selected submatrices of sylvester1, Sylvester's matrix of 1840.
To compute the coefficients polynomial divisions in Q[x] are
performed, using the function rem(p, q, x). The coefficients
of the remainders computed this way become subresultants by evaluating
one subresultant per remainder --- that of the leading coefficient.
This way we obtain the correct sign and value of the leading coefficient
of the remainder and we easily ``force'' the rest of the coefficients
to become subresultants.
If the subresultant prs is complete, then it coincides with the
Euclidean sequence of the polynomials p, q.
References
==========
1. Akritas, A. G.:``Three New Methods for Computing Subresultant
Polynomial Remainder Sequences (PRS's).'' Serdica Journal of Computing 9(1) (2015), 1-26.
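    Examples
    ========
    A small illustrative pair (hand-checked against the determinant
    definition of subresultants):
    >>> from sympy.polys.subresultants_qq_zz import subresultants_rem
    >>> from sympy.abc import x
    >>> subresultants_rem(x**3 - 2*x, x**2 - 1, x)
    [x**3 - 2*x, x**2 - 1, -x, -1]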
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
f, g = p, q
n = deg_f = degree(f, x)
m = deg_g = degree(g, x)
if n == 0 and m == 0:
return [f, g]
if n < m:
n, m, deg_f, deg_g, f, g = m, n, deg_g, deg_f, g, f
if n > 0 and m == 0:
return [f, g]
# initialize
s1 = sylvester(f, g, x, 1)
sr_list = [f, g] # subresultant list
# main loop
while deg_g > 0:
r = rem(p, q, x)
d = degree(r, x)
if d < 0:
return sr_list
# make coefficients subresultants evaluating ONE determinant
exp_deg = deg_g - 1 # expected degree
sign_value = correct_sign(n, m, s1, exp_deg, exp_deg - d)
r = simplify((r / LC(r, x)) * sign_value)
# append poly with subresultant coeffs
sr_list.append(r)
# update degrees and polys
deg_f, deg_g = deg_g, d
p, q = q, r
# gcd is of degree > 0 ?
m = len(sr_list)
if sr_list[m - 1] == nan or sr_list[m - 1] == 0:
sr_list.pop(m - 1)
return sr_list
def pivot(M, i, j):
'''
M is a matrix, and M[i, j] specifies the pivot element.
All elements below M[i, j], in the j-th column, will
be zeroed, if they are not already 0, according to
Dodgson-Bareiss' integer preserving transformations.
References
==========
1. Akritas, A. G.: ``A new method for computing polynomial greatest
common divisors and polynomial remainder sequences.''
    Numerische Mathematik 52, 119-127, 1988.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``On a Theorem
by Van Vleck Regarding Sturm Sequences.''
Serdica Journal of Computing, 7, No 4, 101-134, 2013.
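    Examples
    ========
    A 2 x 2 sketch of one elimination step (module import path assumed):
    >>> from sympy.polys.subresultants_qq_zz import pivot
    >>> from sympy import Matrix
    >>> pivot(Matrix([[1, 2], [3, 4]]), 0, 0).tolist()
    [[1, 2], [0, -2]]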
'''
ma = M[:, :] # copy of matrix M
rs = ma.rows # No. of rows
cs = ma.cols # No. of cols
for r in range(i+1, rs):
if ma[r, j] != 0:
for c in range(j + 1, cs):
ma[r, c] = ma[i, j] * ma[r, c] - ma[i, c] * ma[r, j]
ma[r, j] = 0
return ma
def rotate_r(L, k):
'''
Rotates right by k. L is a row of a matrix or a list.
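    Examples
    ========
    For instance:
    >>> from sympy.polys.subresultants_qq_zz import rotate_r
    >>> rotate_r([1, 2, 3, 4], 1)
    [4, 1, 2, 3]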
'''
ll = list(L)
if ll == []:
return []
for i in range(k):
el = ll.pop(len(ll) - 1)
ll.insert(0, el)
return ll if type(L) is list else Matrix([ll])
def rotate_l(L, k):
'''
Rotates left by k. L is a row of a matrix or a list.
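    Examples
    ========
    For instance:
    >>> from sympy.polys.subresultants_qq_zz import rotate_l
    >>> rotate_l([1, 2, 3, 4], 1)
    [2, 3, 4, 1]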
'''
ll = list(L)
if ll == []:
return []
for i in range(k):
el = ll.pop(0)
        ll.append(el)
return ll if type(L) is list else Matrix([ll])
def row2poly(row, deg, x):
'''
Converts the row of a matrix to a poly of degree deg and variable x.
Some entries at the beginning and/or at the end of the row may be zero.
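    Examples
    ========
    The leading zero below is skipped and the next deg + 1 entries become
    the coefficients (module import path assumed):
    >>> from sympy.polys.subresultants_qq_zz import row2poly
    >>> from sympy.abc import x
    >>> row2poly([0, 1, 2, 3], 2, x)
    Poly(x**2 + 2*x + 3, x, domain='ZZ')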
'''
k = 0
poly = []
leng = len(row)
# find the beginning of the poly ; i.e. the first
# non-zero element of the row
while row[k] == 0:
k = k + 1
# append the next deg + 1 elements to poly
for j in range( deg + 1):
        if k + j < leng:
poly.append(row[k + j])
return Poly(poly, x)
def create_ma(deg_f, deg_g, row1, row2, col_num):
'''
Creates a ``small'' matrix M to be triangularized.
    deg_f, deg_g are the degrees of the dividend and of the
    divisor polynomials respectively, deg_f >= deg_g.
    The coefficients of the dividend poly are the elements
in row2 and those of the divisor poly are the elements
in row1.
col_num defines the number of columns of the matrix M.
'''
if deg_g - deg_f >= 1:
print('Reverse degrees')
return
m = zeros(deg_f - deg_g + 2, col_num)
for i in range(deg_f - deg_g + 1):
m[i, :] = rotate_r(row1, i)
m[deg_f - deg_g + 1, :] = row2
return m
def find_degree(M, deg_f):
'''
Finds the degree of the poly corresponding (after triangularization)
to the _last_ row of the ``small'' matrix M, created by create_ma().
    deg_f is the degree of the dividend poly.
If _last_ row is all 0's returns None.
'''
j = deg_f
for i in range(0, M.cols):
if M[M.rows - 1, i] == 0:
j = j - 1
else:
return j if j >= 0 else 0
def final_touches(s2, r, deg_g):
"""
s2 is sylvester2, r is the row pointer in s2,
deg_g is the degree of the poly last inserted in s2.
After a gcd of degree > 0 has been found with Van Vleck's
method, and was inserted into s2, if its last term is not
in the last column of s2, then it is inserted as many
times as needed, rotated right by one each time, until
the condition is met.
"""
R = s2.row(r-1)
# find the first non zero term
for i in range(s2.cols):
if R[0,i] == 0:
continue
else:
break
# missing rows until last term is in last column
mr = s2.cols - (i + deg_g + 1)
# insert them by replacing the existing entries in the row
i = 0
    while mr != 0 and r + i < s2.rows:
s2[r + i, : ] = rotate_r(R, i + 1)
i += 1
mr -= 1
return s2
def subresultants_vv(p, q, x, method = 0):
"""
p, q are polynomials in Z[x] (intended) or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the subresultant prs of p, q by triangularizing,
in Z[x] or in Q[x], all the smaller matrices encountered in the
process of triangularizing sylvester2, Sylvester's matrix of 1853;
see references 1 and 2 for Van Vleck's method. With each remainder,
sylvester2 gets updated and is prepared to be printed if requested.
If sylvester2 has small dimensions and you want to see the final,
triangularized matrix use this version with method=1; otherwise,
use either this version with method=0 (default) or the faster version,
subresultants_vv_2(p, q, x), where sylvester2 is used implicitly.
Sylvester's matrix sylvester1 is also used to compute one
subresultant per remainder; namely, that of the leading
coefficient, in order to obtain the correct sign and to
force the remainder coefficients to become subresultants.
If the subresultant prs is complete, then it coincides with the
Euclidean sequence of the polynomials p, q.
If the final, triangularized matrix s2 is printed, then:
(a) if deg(p) - deg(q) > 1 or deg( gcd(p, q) ) > 0, several
of the last rows in s2 will remain unprocessed;
(b) if deg(p) - deg(q) == 0, p will not appear in the final matrix.
References
==========
1. Akritas, A. G.: ``A new method for computing polynomial greatest
common divisors and polynomial remainder sequences.''
    Numerische Mathematik 52, 119-127, 1988.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``On a Theorem
by Van Vleck Regarding Sturm Sequences.''
Serdica Journal of Computing, 7, No 4, 101-134, 2013.
3. Akritas, A. G.:``Three New Methods for Computing Subresultant
Polynomial Remainder Sequences (PRS's).'' Serdica Journal of Computing 9(1) (2015), 1-26.
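    Examples
    ========
    A degree check on a small complete prs (hand-checked; the returned
    polynomials may be Expr or Poly instances, so only degrees are shown):
    >>> from sympy.polys.subresultants_qq_zz import subresultants_vv
    >>> from sympy import degree
    >>> from sympy.abc import x
    >>> [degree(t, x) for t in subresultants_vv(x**3 - 2*x, x**2 - 1, x)]
    [3, 2, 1, 0]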
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
f, g = p, q
n = deg_f = degree(f, x)
m = deg_g = degree(g, x)
if n == 0 and m == 0:
return [f, g]
if n < m:
n, m, deg_f, deg_g, f, g = m, n, deg_g, deg_f, g, f
if n > 0 and m == 0:
return [f, g]
# initialize
s1 = sylvester(f, g, x, 1)
s2 = sylvester(f, g, x, 2)
sr_list = [f, g]
col_num = 2 * n # columns in s2
# make two rows (row0, row1) of poly coefficients
row0 = Poly(f, x, domain = QQ).all_coeffs()
leng0 = len(row0)
for i in range(col_num - leng0):
row0.append(0)
row0 = Matrix([row0])
row1 = Poly(g,x, domain = QQ).all_coeffs()
leng1 = len(row1)
for i in range(col_num - leng1):
row1.append(0)
row1 = Matrix([row1])
# row pointer for deg_f - deg_g == 1; may be reset below
r = 2
# modify first rows of s2 matrix depending on poly degrees
if deg_f - deg_g > 1:
r = 1
# replacing the existing entries in the rows of s2,
# insert row0 (deg_f - deg_g - 1) times, rotated each time
for i in range(deg_f - deg_g - 1):
s2[r + i, : ] = rotate_r(row0, i + 1)
r = r + deg_f - deg_g - 1
# insert row1 (deg_f - deg_g) times, rotated each time
for i in range(deg_f - deg_g):
s2[r + i, : ] = rotate_r(row1, r + i)
r = r + deg_f - deg_g
if deg_f - deg_g == 0:
r = 0
# main loop
while deg_g > 0:
# create a small matrix M, and triangularize it;
M = create_ma(deg_f, deg_g, row1, row0, col_num)
# will need only the first and last rows of M
for i in range(deg_f - deg_g + 1):
M1 = pivot(M, i, i)
M = M1[:, :]
# treat last row of M as poly; find its degree
d = find_degree(M, deg_f)
if d is None:
break
exp_deg = deg_g - 1
# evaluate one determinant & make coefficients subresultants
sign_value = correct_sign(n, m, s1, exp_deg, exp_deg - d)
poly = row2poly(M[M.rows - 1, :], d, x)
temp2 = LC(poly, x)
poly = simplify((poly / temp2) * sign_value)
# update s2 by inserting first row of M as needed
row0 = M[0, :]
for i in range(deg_g - d):
s2[r + i, :] = rotate_r(row0, r + i)
r = r + deg_g - d
# update s2 by inserting last row of M as needed
row1 = rotate_l(M[M.rows - 1, :], deg_f - d)
row1 = (row1 / temp2) * sign_value
for i in range(deg_g - d):
s2[r + i, :] = rotate_r(row1, r + i)
r = r + deg_g - d
# update degrees
deg_f, deg_g = deg_g, d
# append poly with subresultant coeffs
sr_list.append(poly)
# final touches to print the s2 matrix
if method != 0 and s2.rows > 2:
s2 = final_touches(s2, r, deg_g)
pprint(s2)
elif method != 0 and s2.rows == 2:
s2[1, :] = rotate_r(s2.row(1), 1)
pprint(s2)
return sr_list
def subresultants_vv_2(p, q, x):
"""
p, q are polynomials in Z[x] (intended) or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the subresultant prs of p, q by triangularizing,
in Z[x] or in Q[x], all the smaller matrices encountered in the
process of triangularizing sylvester2, Sylvester's matrix of 1853;
see references 1 and 2 for Van Vleck's method.
If the sylvester2 matrix has big dimensions use this version,
where sylvester2 is used implicitly. If you want to see the final,
triangularized matrix sylvester2, then use the first version,
subresultants_vv(p, q, x, 1).
sylvester1, Sylvester's matrix of 1840, is also used to compute
one subresultant per remainder; namely, that of the leading
coefficient, in order to obtain the correct sign and to
``force'' the remainder coefficients to become subresultants.
If the subresultant prs is complete, then it coincides with the
Euclidean sequence of the polynomials p, q.
References
==========
1. Akritas, A. G.: ``A new method for computing polynomial greatest
common divisors and polynomial remainder sequences.''
    Numerische Mathematik 52, 119-127, 1988.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``On a Theorem
by Van Vleck Regarding Sturm Sequences.''
Serdica Journal of Computing, 7, No 4, 101-134, 2013.
3. Akritas, A. G.:``Three New Methods for Computing Subresultant
Polynomial Remainder Sequences (PRS's).'' Serdica Journal of Computing 9(1) (2015), 1-26.
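    Examples
    ========
    The same degree check as for subresultants_vv (hand-checked):
    >>> from sympy.polys.subresultants_qq_zz import subresultants_vv_2
    >>> from sympy import degree
    >>> from sympy.abc import x
    >>> [degree(t, x) for t in subresultants_vv_2(x**3 - 2*x, x**2 - 1, x)]
    [3, 2, 1, 0]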
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
f, g = p, q
n = deg_f = degree(f, x)
m = deg_g = degree(g, x)
if n == 0 and m == 0:
return [f, g]
if n < m:
n, m, deg_f, deg_g, f, g = m, n, deg_g, deg_f, g, f
if n > 0 and m == 0:
return [f, g]
# initialize
s1 = sylvester(f, g, x, 1)
sr_list = [f, g] # subresultant list
col_num = 2 * n # columns in sylvester2
# make two rows (row0, row1) of poly coefficients
row0 = Poly(f, x, domain = QQ).all_coeffs()
leng0 = len(row0)
for i in range(col_num - leng0):
row0.append(0)
row0 = Matrix([row0])
row1 = Poly(g,x, domain = QQ).all_coeffs()
leng1 = len(row1)
for i in range(col_num - leng1):
row1.append(0)
row1 = Matrix([row1])
# main loop
while deg_g > 0:
# create a small matrix M, and triangularize it
M = create_ma(deg_f, deg_g, row1, row0, col_num)
for i in range(deg_f - deg_g + 1):
M1 = pivot(M, i, i)
M = M1[:, :]
# treat last row of M as poly; find its degree
d = find_degree(M, deg_f)
if d is None:
return sr_list
exp_deg = deg_g - 1
# evaluate one determinant & make coefficients subresultants
sign_value = correct_sign(n, m, s1, exp_deg, exp_deg - d)
poly = row2poly(M[M.rows - 1, :], d, x)
poly = simplify((poly / LC(poly, x)) * sign_value)
# append poly with subresultant coeffs
sr_list.append(poly)
# update degrees and rows
deg_f, deg_g = deg_g, d
row0 = row1
row1 = Poly(poly, x, domain = QQ).all_coeffs()
leng1 = len(row1)
for i in range(col_num - leng1):
row1.append(0)
row1 = Matrix([row1])
return sr_list
|
8529e951df967725efc1a46e317e49cec68624b0197850d56c5305ed6cb091c4 | """Sparse polynomial rings. """
from __future__ import print_function, division
from typing import Any, Dict
from operator import add, mul, lt, le, gt, ge
from types import GeneratorType
from sympy.core.compatibility import is_sequence, reduce
from sympy.core.expr import Expr
from sympy.core.numbers import igcd, oo
from sympy.core.symbol import Symbol, symbols as _symbols
from sympy.core.sympify import CantSympify, sympify
from sympy.ntheory.multinomial import multinomial_coefficients
from sympy.polys.compatibility import IPolys
from sympy.polys.constructor import construct_domain
from sympy.polys.densebasic import dmp_to_dict, dmp_from_dict
from sympy.polys.domains.domainelement import DomainElement
from sympy.polys.domains.polynomialring import PolynomialRing
from sympy.polys.heuristicgcd import heugcd
from sympy.polys.monomials import MonomialOps
from sympy.polys.orderings import lex
from sympy.polys.polyerrors import (
CoercionFailed, GeneratorsError,
ExactQuotientFailed, MultivariatePolynomialError)
from sympy.polys.polyoptions import (Domain as DomainOpt,
Order as OrderOpt, build_options)
from sympy.polys.polyutils import (expr_from_dict, _dict_reorder,
_parallel_dict_from_expr)
from sympy.printing.defaults import DefaultPrinting
from sympy.utilities import public
from sympy.utilities.magic import pollute
@public
def ring(symbols, domain, order=lex):
"""Construct a polynomial ring returning ``(ring, x_1, ..., x_n)``.
Parameters
==========
symbols : str
Symbol/Expr or sequence of str, Symbol/Expr (non-empty)
domain : :class:`~.Domain` or coercible
order : :class:`~.MonomialOrder` or coercible, optional, defaults to ``lex``
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.orderings import lex
>>> R, x, y, z = ring("x,y,z", ZZ, lex)
>>> R
Polynomial ring in x, y, z over ZZ with lex order
>>> x + y + z
x + y + z
>>> type(_)
<class 'sympy.polys.rings.PolyElement'>
"""
_ring = PolyRing(symbols, domain, order)
return (_ring,) + _ring.gens
@public
def xring(symbols, domain, order=lex):
"""Construct a polynomial ring returning ``(ring, (x_1, ..., x_n))``.
Parameters
==========
symbols : str
Symbol/Expr or sequence of str, Symbol/Expr (non-empty)
domain : :class:`~.Domain` or coercible
order : :class:`~.MonomialOrder` or coercible, optional, defaults to ``lex``
Examples
========
>>> from sympy.polys.rings import xring
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.orderings import lex
>>> R, (x, y, z) = xring("x,y,z", ZZ, lex)
>>> R
Polynomial ring in x, y, z over ZZ with lex order
>>> x + y + z
x + y + z
>>> type(_)
<class 'sympy.polys.rings.PolyElement'>
"""
_ring = PolyRing(symbols, domain, order)
return (_ring, _ring.gens)
@public
def vring(symbols, domain, order=lex):
"""Construct a polynomial ring and inject ``x_1, ..., x_n`` into the global namespace.
Parameters
==========
symbols : str
Symbol/Expr or sequence of str, Symbol/Expr (non-empty)
domain : :class:`~.Domain` or coercible
order : :class:`~.MonomialOrder` or coercible, optional, defaults to ``lex``
Examples
========
>>> from sympy.polys.rings import vring
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.orderings import lex
>>> vring("x,y,z", ZZ, lex)
Polynomial ring in x, y, z over ZZ with lex order
>>> x + y + z
x + y + z
>>> type(_)
<class 'sympy.polys.rings.PolyElement'>
"""
_ring = PolyRing(symbols, domain, order)
pollute([ sym.name for sym in _ring.symbols ], _ring.gens)
return _ring
@public
def sring(exprs, *symbols, **options):
"""Construct a ring deriving generators and domain from options and input expressions.
Parameters
==========
exprs : :class:`~.Expr` or sequence of :class:`~.Expr` (sympifiable)
symbols : sequence of :class:`~.Symbol`/:class:`~.Expr`
options : keyword arguments understood by :class:`~.Options`
Examples
========
>>> from sympy.core import symbols
>>> from sympy.polys.rings import sring
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.orderings import lex
>>> x, y, z = symbols("x,y,z")
>>> R, f = sring(x + 2*y + 3*z)
>>> R
Polynomial ring in x, y, z over ZZ with lex order
>>> f
x + 2*y + 3*z
>>> type(_)
<class 'sympy.polys.rings.PolyElement'>
"""
single = False
if not is_sequence(exprs):
exprs, single = [exprs], True
exprs = list(map(sympify, exprs))
opt = build_options(symbols, options)
# TODO: rewrite this so that it doesn't use expand() (see poly()).
reps, opt = _parallel_dict_from_expr(exprs, opt)
if opt.domain is None:
# NOTE: this is inefficient because construct_domain() automatically
# performs conversion to the target domain. It shouldn't do this.
coeffs = sum([ list(rep.values()) for rep in reps ], [])
opt.domain, _ = construct_domain(coeffs, opt=opt)
_ring = PolyRing(opt.gens, opt.domain, opt.order)
polys = list(map(_ring.from_dict, reps))
if single:
return (_ring, polys[0])
else:
return (_ring, polys)
def _parse_symbols(symbols):
if isinstance(symbols, str):
return _symbols(symbols, seq=True) if symbols else ()
elif isinstance(symbols, Expr):
return (symbols,)
elif is_sequence(symbols):
if all(isinstance(s, str) for s in symbols):
return _symbols(symbols)
elif all(isinstance(s, Expr) for s in symbols):
return symbols
raise GeneratorsError("expected a string, Symbol or expression or a non-empty sequence of strings, Symbols or expressions")
_ring_cache = {} # type: Dict[Any, Any]
class PolyRing(DefaultPrinting, IPolys):
"""Multivariate distributed polynomial ring. """
def __new__(cls, symbols, domain, order=lex):
symbols = tuple(_parse_symbols(symbols))
ngens = len(symbols)
domain = DomainOpt.preprocess(domain)
order = OrderOpt.preprocess(order)
_hash_tuple = (cls.__name__, symbols, ngens, domain, order)
obj = _ring_cache.get(_hash_tuple)
if obj is None:
if domain.is_Composite and set(symbols) & set(domain.symbols):
raise GeneratorsError("polynomial ring and it's ground domain share generators")
obj = object.__new__(cls)
obj._hash_tuple = _hash_tuple
obj._hash = hash(_hash_tuple)
obj.dtype = type("PolyElement", (PolyElement,), {"ring": obj})
obj.symbols = symbols
obj.ngens = ngens
obj.domain = domain
obj.order = order
obj.zero_monom = (0,)*ngens
obj.gens = obj._gens()
obj._gens_set = set(obj.gens)
obj._one = [(obj.zero_monom, domain.one)]
if ngens:
# These expect monomials in at least one variable
codegen = MonomialOps(ngens)
obj.monomial_mul = codegen.mul()
obj.monomial_pow = codegen.pow()
obj.monomial_mulpow = codegen.mulpow()
obj.monomial_ldiv = codegen.ldiv()
obj.monomial_div = codegen.div()
obj.monomial_lcm = codegen.lcm()
obj.monomial_gcd = codegen.gcd()
else:
monunit = lambda a, b: ()
obj.monomial_mul = monunit
obj.monomial_pow = monunit
obj.monomial_mulpow = lambda a, b, c: ()
obj.monomial_ldiv = monunit
obj.monomial_div = monunit
obj.monomial_lcm = monunit
obj.monomial_gcd = monunit
if order is lex:
obj.leading_expv = lambda f: max(f)
else:
obj.leading_expv = lambda f: max(f, key=order)
for symbol, generator in zip(obj.symbols, obj.gens):
if isinstance(symbol, Symbol):
name = symbol.name
if not hasattr(obj, name):
setattr(obj, name, generator)
_ring_cache[_hash_tuple] = obj
return obj
def _gens(self):
"""Return a list of polynomial generators. """
one = self.domain.one
_gens = []
for i in range(self.ngens):
expv = self.monomial_basis(i)
poly = self.zero
poly[expv] = one
_gens.append(poly)
return tuple(_gens)
def __getnewargs__(self):
return (self.symbols, self.domain, self.order)
def __getstate__(self):
state = self.__dict__.copy()
del state["leading_expv"]
        # iterate over a copy of the keys since entries are deleted below
        for key in list(state.keys()):
            if key.startswith("monomial_"):
                del state[key]
return state
def __hash__(self):
return self._hash
def __eq__(self, other):
return isinstance(other, PolyRing) and \
(self.symbols, self.domain, self.ngens, self.order) == \
(other.symbols, other.domain, other.ngens, other.order)
def __ne__(self, other):
return not self == other
def clone(self, symbols=None, domain=None, order=None):
return self.__class__(symbols or self.symbols, domain or self.domain, order or self.order)
def monomial_basis(self, i):
"""Return the ith-basis element. """
basis = [0]*self.ngens
basis[i] = 1
return tuple(basis)
@property
def zero(self):
return self.dtype()
@property
def one(self):
return self.dtype(self._one)
def domain_new(self, element, orig_domain=None):
return self.domain.convert(element, orig_domain)
def ground_new(self, coeff):
return self.term_new(self.zero_monom, coeff)
def term_new(self, monom, coeff):
coeff = self.domain_new(coeff)
poly = self.zero
if coeff:
poly[monom] = coeff
return poly
def ring_new(self, element):
if isinstance(element, PolyElement):
if self == element.ring:
return element
elif isinstance(self.domain, PolynomialRing) and self.domain.ring == element.ring:
return self.ground_new(element)
else:
raise NotImplementedError("conversion")
elif isinstance(element, str):
raise NotImplementedError("parsing")
elif isinstance(element, dict):
return self.from_dict(element)
elif isinstance(element, list):
try:
return self.from_terms(element)
except ValueError:
return self.from_list(element)
elif isinstance(element, Expr):
return self.from_expr(element)
else:
return self.ground_new(element)
__call__ = ring_new
def from_dict(self, element):
domain_new = self.domain_new
poly = self.zero
for monom, coeff in element.items():
coeff = domain_new(coeff)
if coeff:
poly[monom] = coeff
return poly
def from_terms(self, element):
return self.from_dict(dict(element))
def from_list(self, element):
return self.from_dict(dmp_to_dict(element, self.ngens-1, self.domain))
def _rebuild_expr(self, expr, mapping):
domain = self.domain
def _rebuild(expr):
generator = mapping.get(expr)
if generator is not None:
return generator
elif expr.is_Add:
return reduce(add, list(map(_rebuild, expr.args)))
elif expr.is_Mul:
return reduce(mul, list(map(_rebuild, expr.args)))
elif expr.is_Pow and expr.exp.is_Integer and expr.exp >= 0:
return _rebuild(expr.base)**int(expr.exp)
else:
return domain.convert(expr)
return _rebuild(sympify(expr))
def from_expr(self, expr):
mapping = dict(list(zip(self.symbols, self.gens)))
try:
poly = self._rebuild_expr(expr, mapping)
except CoercionFailed:
raise ValueError("expected an expression convertible to a polynomial in %s, got %s" % (self, expr))
else:
return self.ring_new(poly)
def index(self, gen):
"""Compute index of ``gen`` in ``self.gens``. """
if gen is None:
if self.ngens:
i = 0
else:
i = -1 # indicate impossible choice
elif isinstance(gen, int):
i = gen
if 0 <= i and i < self.ngens:
pass
elif -self.ngens <= i and i <= -1:
i = -i - 1
else:
raise ValueError("invalid generator index: %s" % gen)
elif isinstance(gen, self.dtype):
try:
i = self.gens.index(gen)
except ValueError:
raise ValueError("invalid generator: %s" % gen)
elif isinstance(gen, str):
try:
i = self.symbols.index(gen)
except ValueError:
raise ValueError("invalid generator: %s" % gen)
else:
raise ValueError("expected a polynomial generator, an integer, a string or None, got %s" % gen)
return i
def drop(self, *gens):
"""Remove specified generators from this ring. """
indices = set(map(self.index, gens))
symbols = [ s for i, s in enumerate(self.symbols) if i not in indices ]
if not symbols:
return self.domain
else:
return self.clone(symbols=symbols)
def __getitem__(self, key):
symbols = self.symbols[key]
if not symbols:
return self.domain
else:
return self.clone(symbols=symbols)
def to_ground(self):
# TODO: should AlgebraicField be a Composite domain?
if self.domain.is_Composite or hasattr(self.domain, 'domain'):
return self.clone(domain=self.domain.domain)
else:
raise ValueError("%s is not a composite domain" % self.domain)
def to_domain(self):
return PolynomialRing(self)
def to_field(self):
from sympy.polys.fields import FracField
return FracField(self.symbols, self.domain, self.order)
@property
def is_univariate(self):
return len(self.gens) == 1
@property
def is_multivariate(self):
return len(self.gens) > 1
def add(self, *objs):
"""
Add a sequence of polynomials or containers of polynomials.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> R, x = ring("x", ZZ)
>>> R.add([ x**2 + 2*i + 3 for i in range(4) ])
4*x**2 + 24
>>> _.factor_list()
(4, [(x**2 + 6, 1)])
"""
p = self.zero
for obj in objs:
if is_sequence(obj, include=GeneratorType):
p += self.add(*obj)
else:
p += obj
return p
def mul(self, *objs):
"""
Multiply a sequence of polynomials or containers of polynomials.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> R, x = ring("x", ZZ)
>>> R.mul([ x**2 + 2*i + 3 for i in range(4) ])
x**8 + 24*x**6 + 206*x**4 + 744*x**2 + 945
>>> _.factor_list()
(1, [(x**2 + 3, 1), (x**2 + 5, 1), (x**2 + 7, 1), (x**2 + 9, 1)])
"""
p = self.one
for obj in objs:
if is_sequence(obj, include=GeneratorType):
p *= self.mul(*obj)
else:
p *= obj
return p
def drop_to_ground(self, *gens):
r"""
Remove specified generators from the ring and inject them into
its domain.
"""
indices = set(map(self.index, gens))
symbols = [s for i, s in enumerate(self.symbols) if i not in indices]
gens = [gen for i, gen in enumerate(self.gens) if i not in indices]
if not symbols:
return self
else:
return self.clone(symbols=symbols, domain=self.drop(*gens))
def compose(self, other):
"""Add the generators of ``other`` to ``self``"""
if self != other:
syms = set(self.symbols).union(set(other.symbols))
return self.clone(symbols=list(syms))
else:
return self
def add_gens(self, symbols):
"""Add the elements of ``symbols`` as generators to ``self``"""
syms = set(self.symbols).union(set(symbols))
return self.clone(symbols=list(syms))
class PolyElement(DomainElement, DefaultPrinting, CantSympify, dict):
"""Element of multivariate distributed polynomial ring. """
def new(self, init):
return self.__class__(init)
def parent(self):
return self.ring.to_domain()
def __getnewargs__(self):
return (self.ring, list(self.iterterms()))
_hash = None
def __hash__(self):
# XXX: This computes a hash of a dictionary, but currently we don't
# protect dictionary from being changed so any use site modifications
# will make hashing go wrong. Use this feature with caution until we
# figure out how to make a safe API without compromising speed of this
# low-level class.
_hash = self._hash
if _hash is None:
self._hash = _hash = hash((self.ring, frozenset(self.items())))
return _hash
def copy(self):
"""Return a copy of polynomial self.
Polynomials are mutable; if one is interested in preserving
a polynomial, and one plans to use inplace operations, one
can copy the polynomial. This method makes a shallow copy.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.rings import ring
>>> R, x, y = ring('x, y', ZZ)
>>> p = (x + y)**2
>>> p1 = p.copy()
>>> p2 = p
>>> p[R.zero_monom] = 3
>>> p
x**2 + 2*x*y + y**2 + 3
>>> p1
x**2 + 2*x*y + y**2
>>> p2
x**2 + 2*x*y + y**2 + 3
"""
return self.new(self)
def set_ring(self, new_ring):
if self.ring == new_ring:
return self
elif self.ring.symbols != new_ring.symbols:
terms = list(zip(*_dict_reorder(self, self.ring.symbols, new_ring.symbols)))
return new_ring.from_terms(terms)
else:
return new_ring.from_dict(self)
def as_expr(self, *symbols):
if symbols and len(symbols) != self.ring.ngens:
raise ValueError("not enough symbols, expected %s got %s" % (self.ring.ngens, len(symbols)))
        elif not symbols:
            symbols = self.ring.symbols
return expr_from_dict(self.as_expr_dict(), *symbols)
def as_expr_dict(self):
to_sympy = self.ring.domain.to_sympy
return {monom: to_sympy(coeff) for monom, coeff in self.iterterms()}
def clear_denoms(self):
domain = self.ring.domain
if not domain.is_Field or not domain.has_assoc_Ring:
return domain.one, self
ground_ring = domain.get_ring()
common = ground_ring.one
lcm = ground_ring.lcm
denom = domain.denom
for coeff in self.values():
common = lcm(common, denom(coeff))
poly = self.new([ (k, v*common) for k, v in self.items() ])
return common, poly
def strip_zero(self):
"""Eliminate monomials with zero coefficient. """
for k, v in list(self.items()):
if not v:
del self[k]
def __eq__(p1, p2):
"""Equality test for polynomials.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.rings import ring
>>> _, x, y = ring('x, y', ZZ)
>>> p1 = (x + y)**2 + (x - y)**2
>>> p1 == 4*x*y
False
>>> p1 == 2*(x**2 + y**2)
True
"""
if not p2:
return not p1
elif isinstance(p2, PolyElement) and p2.ring == p1.ring:
return dict.__eq__(p1, p2)
elif len(p1) > 1:
return False
else:
return p1.get(p1.ring.zero_monom) == p2
def __ne__(p1, p2):
return not p1 == p2
def almosteq(p1, p2, tolerance=None):
"""Approximate equality test for polynomials. """
ring = p1.ring
if isinstance(p2, ring.dtype):
if set(p1.keys()) != set(p2.keys()):
return False
almosteq = ring.domain.almosteq
for k in p1.keys():
if not almosteq(p1[k], p2[k], tolerance):
return False
return True
elif len(p1) > 1:
return False
else:
try:
p2 = ring.domain.convert(p2)
except CoercionFailed:
return False
else:
return ring.domain.almosteq(p1.const(), p2, tolerance)
def sort_key(self):
return (len(self), self.terms())
def _cmp(p1, p2, op):
if isinstance(p2, p1.ring.dtype):
return op(p1.sort_key(), p2.sort_key())
else:
return NotImplemented
def __lt__(p1, p2):
return p1._cmp(p2, lt)
def __le__(p1, p2):
return p1._cmp(p2, le)
def __gt__(p1, p2):
return p1._cmp(p2, gt)
def __ge__(p1, p2):
return p1._cmp(p2, ge)
def _drop(self, gen):
ring = self.ring
i = ring.index(gen)
if ring.ngens == 1:
return i, ring.domain
else:
symbols = list(ring.symbols)
del symbols[i]
return i, ring.clone(symbols=symbols)
def drop(self, gen):
i, ring = self._drop(gen)
if self.ring.ngens == 1:
if self.is_ground:
return self.coeff(1)
else:
raise ValueError("can't drop %s" % gen)
else:
poly = ring.zero
for k, v in self.items():
if k[i] == 0:
K = list(k)
del K[i]
poly[tuple(K)] = v
else:
raise ValueError("can't drop %s" % gen)
return poly
def _drop_to_ground(self, gen):
ring = self.ring
i = ring.index(gen)
symbols = list(ring.symbols)
del symbols[i]
return i, ring.clone(symbols=symbols, domain=ring[i])
def drop_to_ground(self, gen):
if self.ring.ngens == 1:
raise ValueError("can't drop only generator to ground")
i, ring = self._drop_to_ground(gen)
poly = ring.zero
gen = ring.domain.gens[0]
for monom, coeff in self.iterterms():
mon = monom[:i] + monom[i+1:]
            if mon not in poly:
poly[mon] = (gen**monom[i]).mul_ground(coeff)
else:
poly[mon] += (gen**monom[i]).mul_ground(coeff)
return poly
def to_dense(self):
return dmp_from_dict(self, self.ring.ngens-1, self.ring.domain)
def to_dict(self):
return dict(self)
def str(self, printer, precedence, exp_pattern, mul_symbol):
if not self:
return printer._print(self.ring.domain.zero)
prec_mul = precedence["Mul"]
prec_atom = precedence["Atom"]
ring = self.ring
symbols = ring.symbols
ngens = ring.ngens
zm = ring.zero_monom
sexpvs = []
for expv, coeff in self.terms():
positive = ring.domain.is_positive(coeff)
sign = " + " if positive else " - "
sexpvs.append(sign)
if expv == zm:
scoeff = printer._print(coeff)
if scoeff.startswith("-"):
scoeff = scoeff[1:]
else:
if not positive:
coeff = -coeff
if coeff != 1:
scoeff = printer.parenthesize(coeff, prec_mul, strict=True)
else:
scoeff = ''
sexpv = []
for i in range(ngens):
exp = expv[i]
if not exp:
continue
symbol = printer.parenthesize(symbols[i], prec_atom, strict=True)
if exp != 1:
if exp != int(exp) or exp < 0:
sexp = printer.parenthesize(exp, prec_atom, strict=False)
else:
sexp = exp
sexpv.append(exp_pattern % (symbol, sexp))
else:
sexpv.append('%s' % symbol)
if scoeff:
sexpv = [scoeff] + sexpv
sexpvs.append(mul_symbol.join(sexpv))
if sexpvs[0] in [" + ", " - "]:
head = sexpvs.pop(0)
if head == " - ":
sexpvs.insert(0, "-")
return "".join(sexpvs)
@property
def is_generator(self):
return self in self.ring._gens_set
@property
def is_ground(self):
return not self or (len(self) == 1 and self.ring.zero_monom in self)
@property
def is_monomial(self):
return not self or (len(self) == 1 and self.LC == 1)
@property
def is_term(self):
return len(self) <= 1
@property
def is_negative(self):
return self.ring.domain.is_negative(self.LC)
@property
def is_positive(self):
return self.ring.domain.is_positive(self.LC)
@property
def is_nonnegative(self):
return self.ring.domain.is_nonnegative(self.LC)
@property
def is_nonpositive(self):
return self.ring.domain.is_nonpositive(self.LC)
@property
def is_zero(f):
return not f
@property
def is_one(f):
return f == f.ring.one
@property
def is_monic(f):
return f.ring.domain.is_one(f.LC)
@property
def is_primitive(f):
return f.ring.domain.is_one(f.content())
@property
def is_linear(f):
return all(sum(monom) <= 1 for monom in f.itermonoms())
@property
def is_quadratic(f):
return all(sum(monom) <= 2 for monom in f.itermonoms())
@property
def is_squarefree(f):
if not f.ring.ngens:
return True
return f.ring.dmp_sqf_p(f)
@property
def is_irreducible(f):
if not f.ring.ngens:
return True
return f.ring.dmp_irreducible_p(f)
@property
def is_cyclotomic(f):
if f.ring.is_univariate:
return f.ring.dup_cyclotomic_p(f)
else:
raise MultivariatePolynomialError("cyclotomic polynomial")
def __neg__(self):
return self.new([ (monom, -coeff) for monom, coeff in self.iterterms() ])
def __pos__(self):
return self
def __add__(p1, p2):
"""Add two polynomials.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.rings import ring
>>> _, x, y = ring('x, y', ZZ)
>>> (x + y)**2 + (x - y)**2
2*x**2 + 2*y**2
"""
if not p2:
return p1.copy()
ring = p1.ring
if isinstance(p2, ring.dtype):
p = p1.copy()
get = p.get
zero = ring.domain.zero
for k, v in p2.items():
v = get(k, zero) + v
if v:
p[k] = v
else:
del p[k]
return p
elif isinstance(p2, PolyElement):
if isinstance(ring.domain, PolynomialRing) and ring.domain.ring == p2.ring:
pass
elif isinstance(p2.ring.domain, PolynomialRing) and p2.ring.domain.ring == ring:
return p2.__radd__(p1)
else:
return NotImplemented
try:
cp2 = ring.domain_new(p2)
except CoercionFailed:
return NotImplemented
else:
p = p1.copy()
if not cp2:
return p
zm = ring.zero_monom
if zm not in p1.keys():
p[zm] = cp2
else:
                    if cp2 == -p[zm]:
del p[zm]
else:
p[zm] += cp2
return p
def __radd__(p1, n):
p = p1.copy()
if not n:
return p
ring = p1.ring
try:
n = ring.domain_new(n)
except CoercionFailed:
return NotImplemented
else:
zm = ring.zero_monom
if zm not in p1.keys():
p[zm] = n
else:
if n == -p[zm]:
del p[zm]
else:
p[zm] += n
return p
def __sub__(p1, p2):
"""Subtract polynomial p2 from p1.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.rings import ring
>>> _, x, y = ring('x, y', ZZ)
>>> p1 = x + y**2
>>> p2 = x*y + y**2
>>> p1 - p2
-x*y + x
"""
if not p2:
return p1.copy()
ring = p1.ring
if isinstance(p2, ring.dtype):
p = p1.copy()
get = p.get
zero = ring.domain.zero
for k, v in p2.items():
v = get(k, zero) - v
if v:
p[k] = v
else:
del p[k]
return p
elif isinstance(p2, PolyElement):
if isinstance(ring.domain, PolynomialRing) and ring.domain.ring == p2.ring:
pass
elif isinstance(p2.ring.domain, PolynomialRing) and p2.ring.domain.ring == ring:
return p2.__rsub__(p1)
else:
return NotImplemented
try:
p2 = ring.domain_new(p2)
except CoercionFailed:
return NotImplemented
else:
p = p1.copy()
zm = ring.zero_monom
if zm not in p1.keys():
p[zm] = -p2
else:
if p2 == p[zm]:
del p[zm]
else:
p[zm] -= p2
return p
def __rsub__(p1, n):
"""n - p1 with n convertible to the coefficient domain.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.rings import ring
>>> _, x, y = ring('x, y', ZZ)
>>> p = x + y
>>> 4 - p
-x - y + 4
"""
ring = p1.ring
try:
n = ring.domain_new(n)
except CoercionFailed:
return NotImplemented
else:
p = ring.zero
for expv in p1:
p[expv] = -p1[expv]
p += n
return p
def __mul__(p1, p2):
"""Multiply two polynomials.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> _, x, y = ring('x, y', QQ)
>>> p1 = x + y
>>> p2 = x - y
>>> p1*p2
x**2 - y**2
"""
ring = p1.ring
p = ring.zero
if not p1 or not p2:
return p
elif isinstance(p2, ring.dtype):
get = p.get
zero = ring.domain.zero
monomial_mul = ring.monomial_mul
p2it = list(p2.items())
for exp1, v1 in p1.items():
for exp2, v2 in p2it:
exp = monomial_mul(exp1, exp2)
p[exp] = get(exp, zero) + v1*v2
p.strip_zero()
return p
elif isinstance(p2, PolyElement):
if isinstance(ring.domain, PolynomialRing) and ring.domain.ring == p2.ring:
pass
elif isinstance(p2.ring.domain, PolynomialRing) and p2.ring.domain.ring == ring:
return p2.__rmul__(p1)
else:
return NotImplemented
try:
p2 = ring.domain_new(p2)
except CoercionFailed:
return NotImplemented
else:
for exp1, v1 in p1.items():
v = v1*p2
if v:
p[exp1] = v
return p
def __rmul__(p1, p2):
"""p2 * p1 with p2 in the coefficient domain of p1.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.rings import ring
>>> _, x, y = ring('x, y', ZZ)
>>> p = x + y
>>> 4 * p
4*x + 4*y
"""
p = p1.ring.zero
if not p2:
return p
try:
p2 = p.ring.domain_new(p2)
except CoercionFailed:
return NotImplemented
else:
for exp1, v1 in p1.items():
v = p2*v1
if v:
p[exp1] = v
return p
def __pow__(self, n):
"""raise polynomial to power `n`
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.rings import ring
>>> _, x, y = ring('x, y', ZZ)
>>> p = x + y**2
>>> p**3
x**3 + 3*x**2*y**2 + 3*x*y**4 + y**6
"""
ring = self.ring
if not n:
if self:
return ring.one
else:
raise ValueError("0**0")
elif len(self) == 1:
monom, coeff = list(self.items())[0]
p = ring.zero
if coeff == 1:
p[ring.monomial_pow(monom, n)] = coeff
else:
p[ring.monomial_pow(monom, n)] = coeff**n
return p
# For ring series, we need negative and rational exponent support only
# with monomials.
n = int(n)
if n < 0:
raise ValueError("Negative exponent")
elif n == 1:
return self.copy()
elif n == 2:
return self.square()
elif n == 3:
return self*self.square()
elif len(self) <= 5: # TODO: use an actual density measure
return self._pow_multinomial(n)
else:
return self._pow_generic(n)
def _pow_generic(self, n):
p = self.ring.one
c = self
while True:
if n & 1:
p = p*c
n -= 1
if not n:
break
c = c.square()
n = n // 2
return p
def _pow_multinomial(self, n):
multinomials = list(multinomial_coefficients(len(self), n).items())
monomial_mulpow = self.ring.monomial_mulpow
zero_monom = self.ring.zero_monom
terms = list(self.iterterms())
zero = self.ring.domain.zero
poly = self.ring.zero
for multinomial, multinomial_coeff in multinomials:
product_monom = zero_monom
product_coeff = multinomial_coeff
for exp, (monom, coeff) in zip(multinomial, terms):
if exp:
product_monom = monomial_mulpow(product_monom, monom, exp)
product_coeff *= coeff**exp
monom = tuple(product_monom)
coeff = product_coeff
coeff = poly.get(monom, zero) + coeff
if coeff:
poly[monom] = coeff
else:
del poly[monom]
return poly
def square(self):
"""square of a polynomial
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> _, x, y = ring('x, y', ZZ)
>>> p = x + y**2
>>> p.square()
x**2 + 2*x*y**2 + y**4
"""
ring = self.ring
p = ring.zero
get = p.get
keys = list(self.keys())
zero = ring.domain.zero
monomial_mul = ring.monomial_mul
for i in range(len(keys)):
k1 = keys[i]
pk = self[k1]
for j in range(i):
k2 = keys[j]
exp = monomial_mul(k1, k2)
p[exp] = get(exp, zero) + pk*self[k2]
p = p.imul_num(2)
get = p.get
for k, v in self.items():
k2 = monomial_mul(k, k)
p[k2] = get(k2, zero) + v**2
p.strip_zero()
return p
def __divmod__(p1, p2):
ring = p1.ring
if not p2:
raise ZeroDivisionError("polynomial division")
elif isinstance(p2, ring.dtype):
return p1.div(p2)
elif isinstance(p2, PolyElement):
if isinstance(ring.domain, PolynomialRing) and ring.domain.ring == p2.ring:
pass
elif isinstance(p2.ring.domain, PolynomialRing) and p2.ring.domain.ring == ring:
return p2.__rdivmod__(p1)
else:
return NotImplemented
try:
p2 = ring.domain_new(p2)
except CoercionFailed:
return NotImplemented
else:
return (p1.quo_ground(p2), p1.rem_ground(p2))
def __rdivmod__(p1, p2):
return NotImplemented
def __mod__(p1, p2):
ring = p1.ring
if not p2:
raise ZeroDivisionError("polynomial division")
elif isinstance(p2, ring.dtype):
return p1.rem(p2)
elif isinstance(p2, PolyElement):
if isinstance(ring.domain, PolynomialRing) and ring.domain.ring == p2.ring:
pass
elif isinstance(p2.ring.domain, PolynomialRing) and p2.ring.domain.ring == ring:
return p2.__rmod__(p1)
else:
return NotImplemented
try:
p2 = ring.domain_new(p2)
except CoercionFailed:
return NotImplemented
else:
return p1.rem_ground(p2)
def __rmod__(p1, p2):
return NotImplemented
def __truediv__(p1, p2):
ring = p1.ring
if not p2:
raise ZeroDivisionError("polynomial division")
elif isinstance(p2, ring.dtype):
if p2.is_monomial:
return p1*(p2**(-1))
else:
return p1.quo(p2)
elif isinstance(p2, PolyElement):
if isinstance(ring.domain, PolynomialRing) and ring.domain.ring == p2.ring:
pass
elif isinstance(p2.ring.domain, PolynomialRing) and p2.ring.domain.ring == ring:
return p2.__rtruediv__(p1)
else:
return NotImplemented
try:
p2 = ring.domain_new(p2)
except CoercionFailed:
return NotImplemented
else:
return p1.quo_ground(p2)
def __rtruediv__(p1, p2):
return NotImplemented
__floordiv__ = __div__ = __truediv__
__rfloordiv__ = __rdiv__ = __rtruediv__
# TODO: use // (__floordiv__) for exquo()?
def _term_div(self):
zm = self.ring.zero_monom
domain = self.ring.domain
domain_quo = domain.quo
monomial_div = self.ring.monomial_div
if domain.is_Field:
def term_div(a_lm_a_lc, b_lm_b_lc):
a_lm, a_lc = a_lm_a_lc
b_lm, b_lc = b_lm_b_lc
if b_lm == zm: # apparently this is a very common case
monom = a_lm
else:
monom = monomial_div(a_lm, b_lm)
if monom is not None:
return monom, domain_quo(a_lc, b_lc)
else:
return None
else:
def term_div(a_lm_a_lc, b_lm_b_lc):
a_lm, a_lc = a_lm_a_lc
b_lm, b_lc = b_lm_b_lc
if b_lm == zm: # apparently this is a very common case
monom = a_lm
else:
monom = monomial_div(a_lm, b_lm)
if not (monom is None or a_lc % b_lc):
return monom, domain_quo(a_lc, b_lc)
else:
return None
return term_div
def div(self, fv):
"""Division algorithm, see [CLO] p64.
fv array of polynomials
return qv, r such that
self = sum(fv[i]*qv[i]) + r
All polynomials are required not to be Laurent polynomials.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> _, x, y = ring('x, y', ZZ)
>>> f = x**3
>>> f0 = x - y**2
>>> f1 = x - y
>>> qv, r = f.div((f0, f1))
>>> qv[0]
x**2 + x*y**2 + y**4
>>> qv[1]
0
>>> r
y**6
"""
ring = self.ring
ret_single = False
if isinstance(fv, PolyElement):
ret_single = True
fv = [fv]
if any(not f for f in fv):
raise ZeroDivisionError("polynomial division")
if not self:
if ret_single:
return ring.zero, ring.zero
else:
return [], ring.zero
for f in fv:
if f.ring != ring:
raise ValueError('self and f must have the same ring')
s = len(fv)
qv = [ring.zero for i in range(s)]
p = self.copy()
r = ring.zero
term_div = self._term_div()
expvs = [fx.leading_expv() for fx in fv]
while p:
i = 0
divoccurred = 0
while i < s and divoccurred == 0:
expv = p.leading_expv()
term = term_div((expv, p[expv]), (expvs[i], fv[i][expvs[i]]))
if term is not None:
expv1, c = term
qv[i] = qv[i]._iadd_monom((expv1, c))
p = p._iadd_poly_monom(fv[i], (expv1, -c))
divoccurred = 1
else:
i += 1
if not divoccurred:
expv = p.leading_expv()
r = r._iadd_monom((expv, p[expv]))
del p[expv]
if expv == ring.zero_monom:
r += p
if ret_single:
if not qv:
return ring.zero, r
else:
return qv[0], r
else:
return qv, r
def rem(self, G):
f = self
if isinstance(G, PolyElement):
G = [G]
if any(not g for g in G):
raise ZeroDivisionError("polynomial division")
ring = f.ring
domain = ring.domain
zero = domain.zero
monomial_mul = ring.monomial_mul
r = ring.zero
term_div = f._term_div()
ltf = f.LT
f = f.copy()
get = f.get
while f:
for g in G:
tq = term_div(ltf, g.LT)
if tq is not None:
m, c = tq
for mg, cg in g.iterterms():
m1 = monomial_mul(mg, m)
c1 = get(m1, zero) - c*cg
if not c1:
del f[m1]
else:
f[m1] = c1
ltm = f.leading_expv()
if ltm is not None:
ltf = ltm, f[ltm]
break
else:
ltm, ltc = ltf
if ltm in r:
r[ltm] += ltc
else:
r[ltm] = ltc
del f[ltm]
ltm = f.leading_expv()
if ltm is not None:
ltf = ltm, f[ltm]
return r
def quo(f, G):
return f.div(G)[0]
def exquo(f, G):
q, r = f.div(G)
if not r:
return q
else:
raise ExactQuotientFailed(f, G)
def _iadd_monom(self, mc):
"""add to self the monomial coeff*x0**i0*x1**i1*...
unless self is a generator -- then just return the sum of the two.
mc is a tuple, (monom, coeff), where monomial is (i0, i1, ...)
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> _, x, y = ring('x, y', ZZ)
>>> p = x**4 + 2*y
>>> m = (1, 2)
>>> p1 = p._iadd_monom((m, 5))
>>> p1
x**4 + 5*x*y**2 + 2*y
>>> p1 is p
True
>>> p = x
>>> p1 = p._iadd_monom((m, 5))
>>> p1
5*x*y**2 + x
>>> p1 is p
False
"""
if self in self.ring._gens_set:
cpself = self.copy()
else:
cpself = self
expv, coeff = mc
c = cpself.get(expv)
if c is None:
cpself[expv] = coeff
else:
c += coeff
if c:
cpself[expv] = c
else:
del cpself[expv]
return cpself
def _iadd_poly_monom(self, p2, mc):
"""add to self the product of (p)*(coeff*x0**i0*x1**i1*...)
unless self is a generator -- then just return the sum of the two.
mc is a tuple, (monom, coeff), where monomial is (i0, i1, ...)
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> _, x, y, z = ring('x, y, z', ZZ)
>>> p1 = x**4 + 2*y
>>> p2 = y + z
>>> m = (1, 2, 3)
>>> p1 = p1._iadd_poly_monom(p2, (m, 3))
>>> p1
x**4 + 3*x*y**3*z**3 + 3*x*y**2*z**4 + 2*y
"""
p1 = self
if p1 in p1.ring._gens_set:
p1 = p1.copy()
(m, c) = mc
get = p1.get
zero = p1.ring.domain.zero
monomial_mul = p1.ring.monomial_mul
for k, v in p2.items():
ka = monomial_mul(k, m)
coeff = get(ka, zero) + v*c
if coeff:
p1[ka] = coeff
else:
del p1[ka]
return p1
def degree(f, x=None):
"""
The leading degree in ``x`` or the main variable.
Note that the degree of 0 is negative infinity (the SymPy object -oo).
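        Examples
        ========
        A small check in two variables:
        >>> from sympy.polys.rings import ring
        >>> from sympy.polys.domains import ZZ
        >>> _, x, y = ring("x,y", ZZ)
        >>> f = x**2*y + x*y**3
        >>> f.degree(x)
        2
        >>> f.degree(y)
        3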
"""
i = f.ring.index(x)
if not f:
return -oo
elif i < 0:
return 0
else:
return max([ monom[i] for monom in f.itermonoms() ])
def degrees(f):
"""
A tuple containing leading degrees in all variables.
Note that the degree of 0 is negative infinity (the SymPy object -oo)
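        Examples
        ========
        A small check in two variables:
        >>> from sympy.polys.rings import ring
        >>> from sympy.polys.domains import ZZ
        >>> _, x, y = ring("x,y", ZZ)
        >>> (x**2*y + x*y**3).degrees()
        (2, 3)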
"""
if not f:
return (-oo,)*f.ring.ngens
else:
return tuple(map(max, list(zip(*f.itermonoms()))))
def tail_degree(f, x=None):
"""
The tail degree in ``x`` or the main variable.
Note that the degree of 0 is negative infinity (the SymPy object -oo)
"""
i = f.ring.index(x)
if not f:
return -oo
elif i < 0:
return 0
else:
return min([ monom[i] for monom in f.itermonoms() ])
def tail_degrees(f):
"""
A tuple containing tail degrees in all variables.
Note that the degree of 0 is negative infinity (the SymPy object -oo)
"""
if not f:
return (-oo,)*f.ring.ngens
else:
return tuple(map(min, list(zip(*f.itermonoms()))))
def leading_expv(self):
"""Leading monomial tuple according to the monomial ordering.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> _, x, y, z = ring('x, y, z', ZZ)
>>> p = x**4 + x**3*y + x**2*z**2 + z**7
>>> p.leading_expv()
(4, 0, 0)
"""
if self:
return self.ring.leading_expv(self)
else:
return None
def _get_coeff(self, expv):
return self.get(expv, self.ring.domain.zero)
def coeff(self, element):
"""
Returns the coefficient that stands next to the given monomial.
Parameters
==========
element : PolyElement (with ``is_monomial = True``) or 1
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> _, x, y, z = ring("x,y,z", ZZ)
>>> f = 3*x**2*y - x*y*z + 7*z**3 + 23
>>> f.coeff(x**2*y)
3
>>> f.coeff(x*y)
0
>>> f.coeff(1)
23
"""
if element == 1:
return self._get_coeff(self.ring.zero_monom)
elif isinstance(element, self.ring.dtype):
terms = list(element.iterterms())
if len(terms) == 1:
monom, coeff = terms[0]
if coeff == self.ring.domain.one:
return self._get_coeff(monom)
raise ValueError("expected a monomial, got %s" % element)
def const(self):
"""Returns the constant coeffcient. """
return self._get_coeff(self.ring.zero_monom)
@property
def LC(self):
return self._get_coeff(self.leading_expv())
@property
def LM(self):
expv = self.leading_expv()
if expv is None:
return self.ring.zero_monom
else:
return expv
def leading_monom(self):
"""
Leading monomial as a polynomial element.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> _, x, y = ring('x, y', ZZ)
>>> (3*x*y + y**2).leading_monom()
x*y
"""
p = self.ring.zero
expv = self.leading_expv()
if expv:
p[expv] = self.ring.domain.one
return p
@property
def LT(self):
expv = self.leading_expv()
if expv is None:
return (self.ring.zero_monom, self.ring.domain.zero)
else:
return (expv, self._get_coeff(expv))
def leading_term(self):
"""Leading term as a polynomial element.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> _, x, y = ring('x, y', ZZ)
>>> (3*x*y + y**2).leading_term()
3*x*y
"""
p = self.ring.zero
expv = self.leading_expv()
if expv is not None:
p[expv] = self[expv]
return p
def _sorted(self, seq, order):
if order is None:
order = self.ring.order
else:
order = OrderOpt.preprocess(order)
if order is lex:
return sorted(seq, key=lambda monom: monom[0], reverse=True)
else:
return sorted(seq, key=lambda monom: order(monom[0]), reverse=True)
def coeffs(self, order=None):
"""Ordered list of polynomial coefficients.
Parameters
==========
order : :class:`~.MonomialOrder` or coercible, optional
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.orderings import lex, grlex
>>> _, x, y = ring("x, y", ZZ, lex)
>>> f = x*y**7 + 2*x**2*y**3
>>> f.coeffs()
[2, 1]
>>> f.coeffs(grlex)
[1, 2]
"""
return [ coeff for _, coeff in self.terms(order) ]
def monoms(self, order=None):
"""Ordered list of polynomial monomials.
Parameters
==========
order : :class:`~.MonomialOrder` or coercible, optional
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.orderings import lex, grlex
>>> _, x, y = ring("x, y", ZZ, lex)
>>> f = x*y**7 + 2*x**2*y**3
>>> f.monoms()
[(2, 3), (1, 7)]
>>> f.monoms(grlex)
[(1, 7), (2, 3)]
"""
return [ monom for monom, _ in self.terms(order) ]
def terms(self, order=None):
"""Ordered list of polynomial terms.
Parameters
==========
order : :class:`~.MonomialOrder` or coercible, optional
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.orderings import lex, grlex
>>> _, x, y = ring("x, y", ZZ, lex)
>>> f = x*y**7 + 2*x**2*y**3
>>> f.terms()
[((2, 3), 2), ((1, 7), 1)]
>>> f.terms(grlex)
[((1, 7), 1), ((2, 3), 2)]
"""
return self._sorted(list(self.items()), order)
def itercoeffs(self):
"""Iterator over coefficients of a polynomial. """
return iter(self.values())
def itermonoms(self):
"""Iterator over monomials of a polynomial. """
return iter(self.keys())
def iterterms(self):
"""Iterator over terms of a polynomial. """
return iter(self.items())
def listcoeffs(self):
"""Unordered list of polynomial coefficients. """
return list(self.values())
def listmonoms(self):
"""Unordered list of polynomial monomials. """
return list(self.keys())
def listterms(self):
"""Unordered list of polynomial terms. """
return list(self.items())
def imul_num(p, c):
"""multiply inplace the polynomial p by an element in the
coefficient ring, provided p is not one of the generators;
else multiply not inplace
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> _, x, y = ring('x, y', ZZ)
>>> p = x + y**2
>>> p1 = p.imul_num(3)
>>> p1
3*x + 3*y**2
>>> p1 is p
True
>>> p = x
>>> p1 = p.imul_num(3)
>>> p1
3*x
>>> p1 is p
False
"""
if p in p.ring._gens_set:
return p*c
if not c:
p.clear()
return
for exp in p:
p[exp] *= c
return p
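# An illustrative sketch of ``content`` and ``primitive`` below (over ZZ):
#   >>> _, x, y = ring("x, y", ZZ)
#   >>> (6*x + 9*y).content()
#   3
#   >>> (6*x + 9*y).primitive()
#   (3, 2*x + 3*y)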
def content(f):
"""Returns GCD of polynomial's coefficients. """
domain = f.ring.domain
cont = domain.zero
gcd = domain.gcd
for coeff in f.itercoeffs():
cont = gcd(cont, coeff)
return cont
def primitive(f):
"""Returns content and a primitive polynomial. """
cont = f.content()
return cont, f.quo_ground(cont)
def monic(f):
"""Divides all coefficients by the leading coefficient. """
if not f:
return f
else:
return f.quo_ground(f.LC)
def mul_ground(f, x):
if not x:
return f.ring.zero
terms = [ (monom, coeff*x) for monom, coeff in f.iterterms() ]
return f.new(terms)
def mul_monom(f, monom):
monomial_mul = f.ring.monomial_mul
terms = [ (monomial_mul(f_monom, monom), f_coeff) for f_monom, f_coeff in f.items() ]
return f.new(terms)
def mul_term(f, term):
monom, coeff = term
if not f or not coeff:
return f.ring.zero
elif monom == f.ring.zero_monom:
return f.mul_ground(coeff)
monomial_mul = f.ring.monomial_mul
terms = [ (monomial_mul(f_monom, monom), f_coeff*coeff) for f_monom, f_coeff in f.items() ]
return f.new(terms)
def quo_ground(f, x):
domain = f.ring.domain
if not x:
raise ZeroDivisionError('polynomial division')
if not f or x == domain.one:
return f
if domain.is_Field:
quo = domain.quo
terms = [ (monom, quo(coeff, x)) for monom, coeff in f.iterterms() ]
else:
terms = [ (monom, coeff // x) for monom, coeff in f.iterterms() if not (coeff % x) ]
return f.new(terms)
def quo_term(f, term):
monom, coeff = term
if not coeff:
raise ZeroDivisionError("polynomial division")
elif not f:
return f.ring.zero
elif monom == f.ring.zero_monom:
return f.quo_ground(coeff)
term_div = f._term_div()
terms = [ term_div(t, term) for t in f.iterterms() ]
return f.new([ t for t in terms if t is not None ])
def trunc_ground(f, p):
if f.ring.domain.is_ZZ:
terms = []
for monom, coeff in f.iterterms():
coeff = coeff % p
if coeff > p // 2:
coeff = coeff - p
terms.append((monom, coeff))
else:
terms = [ (monom, coeff % p) for monom, coeff in f.iterterms() ]
poly = f.new(terms)
poly.strip_zero()
return poly
rem_ground = trunc_ground
def extract_ground(self, g):
f = self
fc = f.content()
gc = g.content()
gcd = f.ring.domain.gcd(fc, gc)
f = f.quo_ground(gcd)
g = g.quo_ground(gcd)
return gcd, f, g
def _norm(f, norm_func):
if not f:
return f.ring.domain.zero
else:
ground_abs = f.ring.domain.abs
return norm_func([ ground_abs(coeff) for coeff in f.itercoeffs() ])
def max_norm(f):
return f._norm(max)
def l1_norm(f):
return f._norm(sum)
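# An illustrative sketch of ``deflate``/``inflate`` below: ``deflate`` divides all
# exponents by their per-variable GCD, ``inflate`` multiplies them back:
#   >>> _, x, y = ring("x, y", ZZ)
#   >>> J, (h,) = (x**4 + y**2).deflate()
#   >>> J, h
#   ((4, 2), x + y)
#   >>> h.inflate(J)
#   x**4 + y**2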
def deflate(f, *G):
ring = f.ring
polys = [f] + list(G)
J = [0]*ring.ngens
for p in polys:
for monom in p.itermonoms():
for i, m in enumerate(monom):
J[i] = igcd(J[i], m)
for i, b in enumerate(J):
if not b:
J[i] = 1
J = tuple(J)
if all(b == 1 for b in J):
return J, polys
H = []
for p in polys:
h = ring.zero
for I, coeff in p.iterterms():
N = [ i // j for i, j in zip(I, J) ]
h[tuple(N)] = coeff
H.append(h)
return J, H
def inflate(f, J):
poly = f.ring.zero
for I, coeff in f.iterterms():
N = [ i*j for i, j in zip(I, J) ]
poly[tuple(N)] = coeff
return poly
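# An illustrative sketch of ``lcm`` below (over ZZ the content is handled separately):
#   >>> _, x = ring("x", ZZ)
#   >>> (x**2 - 1).lcm(x**2 - 2*x + 1)
#   x**3 - x**2 - x + 1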
def lcm(self, g):
f = self
domain = f.ring.domain
if not domain.is_Field:
fc, f = f.primitive()
gc, g = g.primitive()
c = domain.lcm(fc, gc)
h = (f*g).quo(f.gcd(g))
if not domain.is_Field:
return h.mul_ground(c)
else:
return h.monic()
def gcd(f, g):
return f.cofactors(g)[0]
def cofactors(f, g):
if not f and not g:
zero = f.ring.zero
return zero, zero, zero
elif not f:
h, cff, cfg = f._gcd_zero(g)
return h, cff, cfg
elif not g:
h, cfg, cff = g._gcd_zero(f)
return h, cff, cfg
elif len(f) == 1:
h, cff, cfg = f._gcd_monom(g)
return h, cff, cfg
elif len(g) == 1:
h, cfg, cff = g._gcd_monom(f)
return h, cff, cfg
J, (f, g) = f.deflate(g)
h, cff, cfg = f._gcd(g)
return (h.inflate(J), cff.inflate(J), cfg.inflate(J))
def _gcd_zero(f, g):
one, zero = f.ring.one, f.ring.zero
if g.is_nonnegative:
return g, zero, one
else:
return -g, zero, -one
def _gcd_monom(f, g):
ring = f.ring
ground_gcd = ring.domain.gcd
ground_quo = ring.domain.quo
monomial_gcd = ring.monomial_gcd
monomial_ldiv = ring.monomial_ldiv
mf, cf = list(f.iterterms())[0]
_mgcd, _cgcd = mf, cf
for mg, cg in g.iterterms():
_mgcd = monomial_gcd(_mgcd, mg)
_cgcd = ground_gcd(_cgcd, cg)
h = f.new([(_mgcd, _cgcd)])
cff = f.new([(monomial_ldiv(mf, _mgcd), ground_quo(cf, _cgcd))])
cfg = f.new([(monomial_ldiv(mg, _mgcd), ground_quo(cg, _cgcd)) for mg, cg in g.iterterms()])
return h, cff, cfg
def _gcd(f, g):
ring = f.ring
if ring.domain.is_QQ:
return f._gcd_QQ(g)
elif ring.domain.is_ZZ:
return f._gcd_ZZ(g)
else: # TODO: don't use dense representation (port PRS algorithms)
return ring.dmp_inner_gcd(f, g)
def _gcd_ZZ(f, g):
return heugcd(f, g)
def _gcd_QQ(self, g):
f = self
ring = f.ring
new_ring = ring.clone(domain=ring.domain.get_ring())
cf, f = f.clear_denoms()
cg, g = g.clear_denoms()
f = f.set_ring(new_ring)
g = g.set_ring(new_ring)
h, cff, cfg = f._gcd_ZZ(g)
h = h.set_ring(ring)
c, h = h.LC, h.monic()
cff = cff.set_ring(ring).mul_ground(ring.domain.quo(c, cf))
cfg = cfg.set_ring(ring).mul_ground(ring.domain.quo(c, cg))
return h, cff, cfg
def cancel(self, g):
"""
Cancel common factors in a rational function ``f/g``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> (2*x**2 - 2).cancel(x**2 - 2*x + 1)
(2*x + 2, x - 1)
"""
f = self
ring = f.ring
if not f:
return f, ring.one
domain = ring.domain
if not (domain.is_Field and domain.has_assoc_Ring):
_, p, q = f.cofactors(g)
if q.is_negative:
p, q = -p, -q
else:
new_ring = ring.clone(domain=domain.get_ring())
cq, f = f.clear_denoms()
cp, g = g.clear_denoms()
f = f.set_ring(new_ring)
g = g.set_ring(new_ring)
_, p, q = f.cofactors(g)
_, cp, cq = new_ring.domain.cofactors(cp, cq)
p = p.set_ring(ring)
q = q.set_ring(ring)
p_neg = p.is_negative
q_neg = q.is_negative
if p_neg and q_neg:
p, q = -p, -q
elif p_neg:
cp, p = -cp, -p
elif q_neg:
cp, q = -cp, -q
p = p.mul_ground(cp)
q = q.mul_ground(cq)
return p, q
def diff(f, x):
"""Computes partial derivative in ``x``.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> _, x, y = ring("x,y", ZZ)
>>> p = x + x**2*y**3
>>> p.diff(x)
2*x*y**3 + 1
"""
ring = f.ring
i = ring.index(x)
m = ring.monomial_basis(i)
g = ring.zero
for expv, coeff in f.iterterms():
if expv[i]:
e = ring.monomial_ldiv(expv, m)
g[e] = ring.domain_new(coeff*expv[i])
return g
def __call__(f, *values):
if 0 < len(values) <= f.ring.ngens:
return f.evaluate(list(zip(f.ring.gens, values)))
else:
raise ValueError("expected at least 1 and at most %s values, got %s" % (f.ring.ngens, len(values)))
def evaluate(self, x, a=None):
f = self
if isinstance(x, list) and a is None:
(X, a), x = x[0], x[1:]
f = f.evaluate(X, a)
if not x:
return f
else:
x = [ (Y.drop(X), a) for (Y, a) in x ]
return f.evaluate(x)
ring = f.ring
i = ring.index(x)
a = ring.domain.convert(a)
if ring.ngens == 1:
result = ring.domain.zero
for (n,), coeff in f.iterterms():
result += coeff*a**n
return result
else:
poly = ring.drop(x).zero
for monom, coeff in f.iterterms():
n, monom = monom[i], monom[:i] + monom[i+1:]
coeff = coeff*a**n
if monom in poly:
coeff = coeff + poly[monom]
if coeff:
poly[monom] = coeff
else:
del poly[monom]
else:
if coeff:
poly[monom] = coeff
return poly
def subs(self, x, a=None):
f = self
if isinstance(x, list) and a is None:
for X, a in x:
f = f.subs(X, a)
return f
ring = f.ring
i = ring.index(x)
a = ring.domain.convert(a)
if ring.ngens == 1:
result = ring.domain.zero
for (n,), coeff in f.iterterms():
result += coeff*a**n
return ring.ground_new(result)
else:
poly = ring.zero
for monom, coeff in f.iterterms():
n, monom = monom[i], monom[:i] + (0,) + monom[i+1:]
coeff = coeff*a**n
if monom in poly:
coeff = coeff + poly[monom]
if coeff:
poly[monom] = coeff
else:
del poly[monom]
else:
if coeff:
poly[monom] = coeff
return poly
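# An illustrative sketch of ``compose`` below, which substitutes a generator by an
# arbitrary ring element:
#   >>> R, x, y = ring("x, y", ZZ)
#   >>> (x**2 + y).compose(x, y + 1)
#   y**2 + 3*y + 1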
def compose(f, x, a=None):
ring = f.ring
poly = ring.zero
gens_map = dict(list(zip(ring.gens, list(range(ring.ngens)))))
if a is not None:
replacements = [(x, a)]
else:
if isinstance(x, list):
replacements = list(x)
elif isinstance(x, dict):
replacements = sorted(list(x.items()), key=lambda k: gens_map[k[0]])
else:
raise ValueError("expected a generator, value pair a sequence of such pairs")
for k, (x, g) in enumerate(replacements):
replacements[k] = (gens_map[x], ring.ring_new(g))
for monom, coeff in f.iterterms():
monom = list(monom)
subpoly = ring.one
for i, g in replacements:
n, monom[i] = monom[i], 0
if n:
subpoly *= g**n
subpoly = subpoly.mul_term((tuple(monom), coeff))
poly += subpoly
return poly
# TODO: following methods should point to polynomial
# representation independent algorithm implementations.
def pdiv(f, g):
return f.ring.dmp_pdiv(f, g)
def prem(f, g):
return f.ring.dmp_prem(f, g)
def pquo(f, g):
return f.ring.dmp_quo(f, g)
def pexquo(f, g):
return f.ring.dmp_exquo(f, g)
def half_gcdex(f, g):
return f.ring.dmp_half_gcdex(f, g)
def gcdex(f, g):
return f.ring.dmp_gcdex(f, g)
def subresultants(f, g):
return f.ring.dmp_subresultants(f, g)
def resultant(f, g):
return f.ring.dmp_resultant(f, g)
def discriminant(f):
return f.ring.dmp_discriminant(f)
def decompose(f):
if f.ring.is_univariate:
return f.ring.dup_decompose(f)
else:
raise MultivariatePolynomialError("polynomial decomposition")
def shift(f, a):
if f.ring.is_univariate:
return f.ring.dup_shift(f, a)
else:
raise MultivariatePolynomialError("polynomial shift")
def sturm(f):
if f.ring.is_univariate:
return f.ring.dup_sturm(f)
else:
raise MultivariatePolynomialError("sturm sequence")
def gff_list(f):
return f.ring.dmp_gff_list(f)
def sqf_norm(f):
return f.ring.dmp_sqf_norm(f)
def sqf_part(f):
return f.ring.dmp_sqf_part(f)
def sqf_list(f, all=False):
return f.ring.dmp_sqf_list(f, all=all)
def factor_list(f):
return f.ring.dmp_factor_list(f)
|
e6cd48fe20fe2ae35a2b8435537eeeffbc7aeeef72e67e3e7d0f1d803dba477f | """Options manager for :class:`~.Poly` and public API functions. """
from __future__ import print_function, division
__all__ = ["Options"]
from typing import Dict, List, Optional, Type
from sympy.core import S, Basic, sympify
from sympy.polys.polyerrors import GeneratorsError, OptionError, FlagError
from sympy.utilities import numbered_symbols, topological_sort, public
from sympy.utilities.iterables import has_dups
import sympy.polys
import re
class Option(object):
"""Base class for all kinds of options. """
option = None # type: Optional[str]
is_Flag = False
requires = [] # type: List[str]
excludes = [] # type: List[str]
after = [] # type: List[str]
before = [] # type: List[str]
@classmethod
def default(cls):
return None
@classmethod
def preprocess(cls, option):
return None
@classmethod
def postprocess(cls, options):
pass
class Flag(Option):
"""Base class for all kinds of flags. """
is_Flag = True
class BooleanOption(Option):
"""An option that must have a boolean value or equivalent assigned. """
@classmethod
def preprocess(cls, value):
if value in [True, False]:
return bool(value)
else:
raise OptionError("'%s' must have a boolean value assigned, got %s" % (cls.option, value))
class OptionType(type):
"""Base type for all options that does registers options. """
def __init__(cls, *args, **kwargs):
@property
def getter(self):
try:
return self[cls.option]
except KeyError:
return cls.default()
setattr(Options, cls.option, getter)
Options.__options__[cls.option] = cls
@public
class Options(dict):
"""
Options manager for polynomial manipulation module.
Examples
========
>>> from sympy.polys.polyoptions import Options
>>> from sympy.polys.polyoptions import build_options
>>> from sympy.abc import x, y, z
>>> Options((x, y, z), {'domain': 'ZZ'})
{'auto': False, 'domain': ZZ, 'gens': (x, y, z)}
>>> build_options((x, y, z), {'domain': 'ZZ'})
{'auto': False, 'domain': ZZ, 'gens': (x, y, z)}
**Options**
* Expand --- boolean option
* Gens --- option
* Wrt --- option
* Sort --- option
* Order --- option
* Field --- boolean option
* Greedy --- boolean option
* Domain --- option
* Split --- boolean option
* Gaussian --- boolean option
* Extension --- option
* Modulus --- option
* Symmetric --- boolean option
* Strict --- boolean option
**Flags**
* Auto --- boolean flag
* Frac --- boolean flag
* Formal --- boolean flag
* Polys --- boolean flag
* Include --- boolean flag
* All --- boolean flag
* Gen --- flag
* Series --- boolean flag
"""
__order__ = None
__options__ = {} # type: Dict[str, Type[Option]]
def __init__(self, gens, args, flags=None, strict=False):
dict.__init__(self)
if gens and args.get('gens', ()):
raise OptionError(
"both '*gens' and keyword argument 'gens' supplied")
elif gens:
args = dict(args)
args['gens'] = gens
defaults = args.pop('defaults', {})
def preprocess_options(args):
for option, value in args.items():
try:
cls = self.__options__[option]
except KeyError:
raise OptionError("'%s' is not a valid option" % option)
if issubclass(cls, Flag):
if flags is None or option not in flags:
if strict:
raise OptionError("'%s' flag is not allowed in this context" % option)
if value is not None:
self[option] = cls.preprocess(value)
preprocess_options(args)
for key, value in dict(defaults).items():
if key in self:
del defaults[key]
else:
for option in self.keys():
cls = self.__options__[option]
if key in cls.excludes:
del defaults[key]
break
preprocess_options(defaults)
for option in self.keys():
cls = self.__options__[option]
for require_option in cls.requires:
if self.get(require_option) is None:
raise OptionError("'%s' option is only allowed together with '%s'" % (option, require_option))
for exclude_option in cls.excludes:
if self.get(exclude_option) is not None:
raise OptionError("'%s' option is not allowed together with '%s'" % (option, exclude_option))
for option in self.__order__:
self.__options__[option].postprocess(self)
@classmethod
def _init_dependencies_order(cls):
"""Resolve the order of options' processing. """
if cls.__order__ is None:
vertices, edges = [], set([])
for name, option in cls.__options__.items():
vertices.append(name)
for _name in option.after:
edges.add((_name, name))
for _name in option.before:
edges.add((name, _name))
try:
cls.__order__ = topological_sort((vertices, list(edges)))
except ValueError:
raise RuntimeError(
"cycle detected in sympy.polys options framework")
def clone(self, updates={}):
"""Clone ``self`` and update specified options. """
obj = dict.__new__(self.__class__)
for option, value in self.items():
obj[option] = value
for option, value in updates.items():
obj[option] = value
return obj
def __setattr__(self, attr, value):
if attr in self.__options__:
self[attr] = value
else:
super(Options, self).__setattr__(attr, value)
@property
def args(self):
args = {}
for option, value in self.items():
if value is not None and option != 'gens':
cls = self.__options__[option]
if not issubclass(cls, Flag):
args[option] = value
return args
@property
def options(self):
options = {}
for option, cls in self.__options__.items():
if not issubclass(cls, Flag):
options[option] = getattr(self, option)
return options
@property
def flags(self):
flags = {}
for option, cls in self.__options__.items():
if issubclass(cls, Flag):
flags[option] = getattr(self, option)
return flags
class Expand(BooleanOption, metaclass=OptionType):
"""``expand`` option to polynomial manipulation functions. """
option = 'expand'
requires = [] # type: List[str]
excludes = [] # type: List[str]
@classmethod
def default(cls):
return True
class Gens(Option, metaclass=OptionType):
"""``gens`` option to polynomial manipulation functions. """
option = 'gens'
requires = [] # type: List[str]
excludes = [] # type: List[str]
@classmethod
def default(cls):
return ()
@classmethod
def preprocess(cls, gens):
if isinstance(gens, Basic):
gens = (gens,)
elif len(gens) == 1 and hasattr(gens[0], '__iter__'):
gens = gens[0]
if gens == (None,):
gens = ()
elif has_dups(gens):
raise GeneratorsError("duplicated generators: %s" % str(gens))
elif any(gen.is_commutative is False for gen in gens):
raise GeneratorsError("non-commutative generators: %s" % str(gens))
return tuple(gens)
class Wrt(Option, metaclass=OptionType):
"""``wrt`` option to polynomial manipulation functions. """
option = 'wrt'
requires = [] # type: List[str]
excludes = [] # type: List[str]
_re_split = re.compile(r"\s*,\s*|\s+")
@classmethod
def preprocess(cls, wrt):
if isinstance(wrt, Basic):
return [str(wrt)]
elif isinstance(wrt, str):
wrt = wrt.strip()
if wrt.endswith(','):
raise OptionError('Bad input: missing parameter.')
if not wrt:
return []
return [ gen for gen in cls._re_split.split(wrt) ]
elif hasattr(wrt, '__getitem__'):
return list(map(str, wrt))
else:
raise OptionError("invalid argument for 'wrt' option")
class Sort(Option, metaclass=OptionType):
"""``sort`` option to polynomial manipulation functions. """
option = 'sort'
requires = [] # type: List[str]
excludes = [] # type: List[str]
@classmethod
def default(cls):
return []
@classmethod
def preprocess(cls, sort):
if isinstance(sort, str):
return [ gen.strip() for gen in sort.split('>') ]
elif hasattr(sort, '__getitem__'):
return list(map(str, sort))
else:
raise OptionError("invalid argument for 'sort' option")
class Order(Option, metaclass=OptionType):
"""``order`` option to polynomial manipulation functions. """
option = 'order'
requires = [] # type: List[str]
excludes = [] # type: List[str]
@classmethod
def default(cls):
return sympy.polys.orderings.lex
@classmethod
def preprocess(cls, order):
return sympy.polys.orderings.monomial_key(order)
class Field(BooleanOption, metaclass=OptionType):
"""``field`` option to polynomial manipulation functions. """
option = 'field'
requires = [] # type: List[str]
excludes = ['domain', 'split', 'gaussian']
class Greedy(BooleanOption, metaclass=OptionType):
"""``greedy`` option to polynomial manipulation functions. """
option = 'greedy'
requires = [] # type: List[str]
excludes = ['domain', 'split', 'gaussian', 'extension', 'modulus', 'symmetric']
class Composite(BooleanOption, metaclass=OptionType):
"""``composite`` option to polynomial manipulation functions. """
option = 'composite'
@classmethod
def default(cls):
return None
requires = [] # type: List[str]
excludes = ['domain', 'split', 'gaussian', 'extension', 'modulus', 'symmetric']
class Domain(Option, metaclass=OptionType):
"""``domain`` option to polynomial manipulation functions. """
option = 'domain'
requires = [] # type: List[str]
excludes = ['field', 'greedy', 'split', 'gaussian', 'extension']
after = ['gens']
_re_realfield = re.compile(r"^(R|RR)(_(\d+))?$")
_re_complexfield = re.compile(r"^(C|CC)(_(\d+))?$")
_re_finitefield = re.compile(r"^(FF|GF)\((\d+)\)$")
_re_polynomial = re.compile(r"^(Z|ZZ|Q|QQ|R|RR|C|CC)\[(.+)\]$")
_re_fraction = re.compile(r"^(Z|ZZ|Q|QQ)\((.+)\)$")
_re_algebraic = re.compile(r"^(Q|QQ)\<(.+)\>$")
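# Recognized string specifications (illustrative, derived from the patterns above):
#   "ZZ", "QQ", "EX", "RR", "RR_53", "FF(7)", "ZZ[x,y]", "QQ(x)", "QQ<sqrt(2)>"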
@classmethod
def preprocess(cls, domain):
if isinstance(domain, sympy.polys.domains.Domain):
return domain
elif hasattr(domain, 'to_domain'):
return domain.to_domain()
elif isinstance(domain, str):
if domain in ['Z', 'ZZ']:
return sympy.polys.domains.ZZ
if domain in ['Q', 'QQ']:
return sympy.polys.domains.QQ
if domain == 'EX':
return sympy.polys.domains.EX
r = cls._re_realfield.match(domain)
if r is not None:
_, _, prec = r.groups()
if prec is None:
return sympy.polys.domains.RR
else:
return sympy.polys.domains.RealField(int(prec))
r = cls._re_complexfield.match(domain)
if r is not None:
_, _, prec = r.groups()
if prec is None:
return sympy.polys.domains.CC
else:
return sympy.polys.domains.ComplexField(int(prec))
r = cls._re_finitefield.match(domain)
if r is not None:
return sympy.polys.domains.FF(int(r.groups()[1]))
r = cls._re_polynomial.match(domain)
if r is not None:
ground, gens = r.groups()
gens = list(map(sympify, gens.split(',')))
if ground in ['Z', 'ZZ']:
return sympy.polys.domains.ZZ.poly_ring(*gens)
elif ground in ['Q', 'QQ']:
return sympy.polys.domains.QQ.poly_ring(*gens)
elif ground in ['R', 'RR']:
return sympy.polys.domains.RR.poly_ring(*gens)
else:
return sympy.polys.domains.CC.poly_ring(*gens)
r = cls._re_fraction.match(domain)
if r is not None:
ground, gens = r.groups()
gens = list(map(sympify, gens.split(',')))
if ground in ['Z', 'ZZ']:
return sympy.polys.domains.ZZ.frac_field(*gens)
else:
return sympy.polys.domains.QQ.frac_field(*gens)
r = cls._re_algebraic.match(domain)
if r is not None:
gens = list(map(sympify, r.groups()[1].split(',')))
return sympy.polys.domains.QQ.algebraic_field(*gens)
raise OptionError('expected a valid domain specification, got %s' % domain)
@classmethod
def postprocess(cls, options):
if 'gens' in options and 'domain' in options and options['domain'].is_Composite and \
(set(options['domain'].symbols) & set(options['gens'])):
raise GeneratorsError(
"ground domain and generators interfere together")
elif ('gens' not in options or not options['gens']) and \
'domain' in options and options['domain'] == sympy.polys.domains.EX:
raise GeneratorsError("you have to provide generators because EX domain was requested")
class Split(BooleanOption, metaclass=OptionType):
"""``split`` option to polynomial manipulation functions. """
option = 'split'
requires = [] # type: List[str]
excludes = ['field', 'greedy', 'domain', 'gaussian', 'extension',
'modulus', 'symmetric']
@classmethod
def postprocess(cls, options):
if 'split' in options:
raise NotImplementedError("'split' option is not implemented yet")
class Gaussian(BooleanOption, metaclass=OptionType):
"""``gaussian`` option to polynomial manipulation functions. """
option = 'gaussian'
requires = [] # type: List[str]
excludes = ['field', 'greedy', 'domain', 'split', 'extension',
'modulus', 'symmetric']
@classmethod
def postprocess(cls, options):
if 'gaussian' in options and options['gaussian'] is True:
options['extension'] = set([S.ImaginaryUnit])
Extension.postprocess(options)
class Extension(Option, metaclass=OptionType):
"""``extension`` option to polynomial manipulation functions. """
option = 'extension'
requires = [] # type: List[str]
excludes = ['greedy', 'domain', 'split', 'gaussian', 'modulus',
'symmetric']
@classmethod
def preprocess(cls, extension):
if extension == 1:
return bool(extension)
elif extension == 0:
raise OptionError("'False' is an invalid argument for 'extension'")
else:
if not hasattr(extension, '__iter__'):
extension = set([extension])
else:
if not extension:
extension = None
else:
extension = set(extension)
return extension
@classmethod
def postprocess(cls, options):
if 'extension' in options and options['extension'] is not True:
options['domain'] = sympy.polys.domains.QQ.algebraic_field(
*options['extension'])
class Modulus(Option, metaclass=OptionType):
"""``modulus`` option to polynomial manipulation functions. """
option = 'modulus'
requires = [] # type: List[str]
excludes = ['greedy', 'split', 'domain', 'gaussian', 'extension']
@classmethod
def preprocess(cls, modulus):
modulus = sympify(modulus)
if modulus.is_Integer and modulus > 0:
return int(modulus)
else:
raise OptionError(
"'modulus' must a positive integer, got %s" % modulus)
@classmethod
def postprocess(cls, options):
if 'modulus' in options:
modulus = options['modulus']
symmetric = options.get('symmetric', True)
options['domain'] = sympy.polys.domains.FF(modulus, symmetric)
class Symmetric(BooleanOption, metaclass=OptionType):
"""``symmetric`` option to polynomial manipulation functions. """
option = 'symmetric'
requires = ['modulus']
excludes = ['greedy', 'domain', 'split', 'gaussian', 'extension']
class Strict(BooleanOption, metaclass=OptionType):
"""``strict`` option to polynomial manipulation functions. """
option = 'strict'
@classmethod
def default(cls):
return True
class Auto(BooleanOption, Flag, metaclass=OptionType):
"""``auto`` flag to polynomial manipulation functions. """
option = 'auto'
after = ['field', 'domain', 'extension', 'gaussian']
@classmethod
def default(cls):
return True
@classmethod
def postprocess(cls, options):
if ('domain' in options or 'field' in options) and 'auto' not in options:
options['auto'] = False
class Frac(BooleanOption, Flag, metaclass=OptionType):
"""``auto`` option to polynomial manipulation functions. """
option = 'frac'
@classmethod
def default(cls):
return False
class Formal(BooleanOption, Flag, metaclass=OptionType):
"""``formal`` flag to polynomial manipulation functions. """
option = 'formal'
@classmethod
def default(cls):
return False
class Polys(BooleanOption, Flag, metaclass=OptionType):
"""``polys`` flag to polynomial manipulation functions. """
option = 'polys'
class Include(BooleanOption, Flag, metaclass=OptionType):
"""``include`` flag to polynomial manipulation functions. """
option = 'include'
@classmethod
def default(cls):
return False
class All(BooleanOption, Flag, metaclass=OptionType):
"""``all`` flag to polynomial manipulation functions. """
option = 'all'
@classmethod
def default(cls):
return False
class Gen(Flag, metaclass=OptionType):
"""``gen`` flag to polynomial manipulation functions. """
option = 'gen'
@classmethod
def default(cls):
return 0
@classmethod
def preprocess(cls, gen):
if isinstance(gen, (Basic, int)):
return gen
else:
raise OptionError("invalid argument for 'gen' option")
class Series(BooleanOption, Flag, metaclass=OptionType):
"""``series`` flag to polynomial manipulation functions. """
option = 'series'
@classmethod
def default(cls):
return False
class Symbols(Flag, metaclass=OptionType):
"""``symbols`` flag to polynomial manipulation functions. """
option = 'symbols'
@classmethod
def default(cls):
return numbered_symbols('s', start=1)
@classmethod
def preprocess(cls, symbols):
if hasattr(symbols, '__iter__'):
return iter(symbols)
else:
raise OptionError("expected an iterator or iterable container, got %s" % symbols)
class Method(Flag, metaclass=OptionType):
"""``method`` flag to polynomial manipulation functions. """
option = 'method'
@classmethod
def preprocess(cls, method):
if isinstance(method, str):
return method.lower()
else:
raise OptionError("expected a string, got %s" % method)
def build_options(gens, args=None):
"""Construct options from keyword arguments or ... options. """
if args is None:
gens, args = (), gens
if len(args) != 1 or 'opt' not in args or gens:
return Options(gens, args)
else:
return args['opt']
def allowed_flags(args, flags):
"""
Allow specified flags to be used in the given context.
Examples
========
>>> from sympy.polys.polyoptions import allowed_flags
>>> from sympy.polys.domains import ZZ
>>> allowed_flags({'domain': ZZ}, [])
>>> allowed_flags({'domain': ZZ, 'frac': True}, [])
Traceback (most recent call last):
...
FlagError: 'frac' flag is not allowed in this context
>>> allowed_flags({'domain': ZZ, 'frac': True}, ['frac'])
"""
flags = set(flags)
for arg in args.keys():
try:
if Options.__options__[arg].is_Flag and not arg in flags:
raise FlagError(
"'%s' flag is not allowed in this context" % arg)
except KeyError:
raise OptionError("'%s' is not a valid option" % arg)
def set_defaults(options, **defaults):
"""Update options with default values. """
if 'defaults' not in options:
options = dict(options)
options['defaults'] = defaults
return options
Options._init_dependencies_order()
|
4fd42e3b11f4906e77b74a5c8b27aeb575dff8b80c75489ddcd90f477f31b34c | """Groebner bases algorithms. """
from __future__ import print_function, division
from sympy.core.symbol import Dummy
from sympy.polys.monomials import monomial_mul, monomial_lcm, monomial_divides, term_div
from sympy.polys.orderings import lex
from sympy.polys.polyerrors import DomainError
from sympy.polys.polyconfig import query
def groebner(seq, ring, method=None):
"""
Computes Groebner basis for a set of polynomials in `K[X]`.
Wrapper around the (default) improved Buchberger and the other algorithms
for computing Groebner bases. The choice of algorithm can be changed via
``method`` argument or :func:`sympy.polys.polyconfig.setup`, where
``method`` can be either ``buchberger`` or ``f5b``.
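Examples
========
An illustrative doctest (a sketch; the printed basis assumes lexicographic
ordering and the default ``buchberger`` method):
>>> from sympy.polys.groebnertools import groebner
>>> from sympy.polys import ring, QQ, lex
>>> R, x, y = ring("x,y", QQ, lex)
>>> groebner([x**2 + y, x*y - 1], R)
[x + y**2, y**3 + 1]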
"""
if method is None:
method = query('groebner')
_groebner_methods = {
'buchberger': _buchberger,
'f5b': _f5b,
}
try:
_groebner = _groebner_methods[method]
except KeyError:
raise ValueError("'%s' is not a valid Groebner bases algorithm (valid are 'buchberger' and 'f5b')" % method)
domain, orig = ring.domain, None
if not domain.is_Field or not domain.has_assoc_Field:
try:
orig, ring = ring, ring.clone(domain=domain.get_field())
except DomainError:
raise DomainError("can't compute a Groebner basis over %s" % domain)
else:
seq = [ s.set_ring(ring) for s in seq ]
G = _groebner(seq, ring)
if orig is not None:
G = [ g.clear_denoms()[1].set_ring(orig) for g in G ]
return G
def _buchberger(f, ring):
"""
Computes Groebner basis for a set of polynomials in `K[X]`.
Given a set of multivariate polynomials `F`, finds another
set `G`, such that Ideal `F = Ideal G` and `G` is a reduced
Groebner basis.
The resulting basis is unique and has monic generators if the
ground domain is a field. Otherwise the result is non-unique
but Groebner bases over e.g. integers can be computed (if the
input polynomials are monic).
Groebner bases can be used to choose specific generators for a
polynomial ideal. Because these bases are unique you can check
for ideal equality by comparing the Groebner bases. To see if
one polynomial lies in an ideal, divide by the elements in the
base and see if the remainder vanishes.
They can also be used to solve systems of polynomial equations
as, by choosing lexicographic ordering, you can eliminate one
variable at a time, provided that the ideal is zero-dimensional
(finite number of solutions).
Notes
=====
Algorithm used: an improved version of Buchberger's algorithm
as presented in T. Becker, V. Weispfenning, Groebner Bases: A
Computational Approach to Commutative Algebra, Springer, 1993,
page 232.
References
==========
.. [1] [Bose03]_
.. [2] [Giovini91]_
.. [3] [Ajwa95]_
.. [4] [Cox97]_
"""
order = ring.order
monomial_mul = ring.monomial_mul
monomial_div = ring.monomial_div
monomial_lcm = ring.monomial_lcm
def select(P):
# normal selection strategy
# select the pair with minimum LCM(LM(f), LM(g))
pr = min(P, key=lambda pair: order(monomial_lcm(f[pair[0]].LM, f[pair[1]].LM)))
return pr
def normal(g, J):
h = g.rem([ f[j] for j in J ])
if not h:
return None
else:
h = h.monic()
if not h in I:
I[h] = len(f)
f.append(h)
return h.LM, I[h]
def update(G, B, ih):
# update G using the set of critical pairs B and h
# [BW] page 230
h = f[ih]
mh = h.LM
# filter new pairs (h, g), g in G
C = G.copy()
D = set()
while C:
# select a pair (h, g) by popping an element from C
ig = C.pop()
g = f[ig]
mg = g.LM
LCMhg = monomial_lcm(mh, mg)
def lcm_divides(ip):
# LCM(LM(h), LM(p)) divides LCM(LM(h), LM(g))
m = monomial_lcm(mh, f[ip].LM)
return monomial_div(LCMhg, m)
# HT(h) and HT(g) disjoint: mh*mg == LCMhg
if monomial_mul(mh, mg) == LCMhg or (
not any(lcm_divides(ipx) for ipx in C) and
not any(lcm_divides(pr[1]) for pr in D)):
D.add((ih, ig))
E = set()
while D:
# select h, g from D (h the same as above)
ih, ig = D.pop()
mg = f[ig].LM
LCMhg = monomial_lcm(mh, mg)
if not monomial_mul(mh, mg) == LCMhg:
E.add((ih, ig))
# filter old pairs
B_new = set()
while B:
# select g1, g2 from B (-> CP)
ig1, ig2 = B.pop()
mg1 = f[ig1].LM
mg2 = f[ig2].LM
LCM12 = monomial_lcm(mg1, mg2)
# if HT(h) does not divide lcm(HT(g1), HT(g2))
if not monomial_div(LCM12, mh) or \
monomial_lcm(mg1, mh) == LCM12 or \
monomial_lcm(mg2, mh) == LCM12:
B_new.add((ig1, ig2))
B_new |= E
# filter polynomials
G_new = set()
while G:
ig = G.pop()
mg = f[ig].LM
if not monomial_div(mg, mh):
G_new.add(ig)
G_new.add(ih)
return G_new, B_new
# end of update ################################
if not f:
return []
# replace f with a reduced list of initial polynomials; see [BW] page 203
f1 = f[:]
while True:
f = f1[:]
f1 = []
for i in range(len(f)):
p = f[i]
r = p.rem(f[:i])
if r:
f1.append(r.monic())
if f == f1:
break
I = {} # ip = I[p]; p = f[ip]
F = set() # set of indices of polynomials
G = set() # set of indices of intermediate would-be Groebner basis
CP = set() # set of pairs of indices of critical pairs
for i, h in enumerate(f):
I[h] = i
F.add(i)
#####################################
# algorithm GROEBNERNEWS2 in [BW] page 232
while F:
# select p with minimum monomial according to the monomial ordering
h = min([f[x] for x in F], key=lambda f: order(f.LM))
ih = I[h]
F.remove(ih)
G, CP = update(G, CP, ih)
# count the number of critical pairs which reduce to zero
reductions_to_zero = 0
while CP:
ig1, ig2 = select(CP)
CP.remove((ig1, ig2))
h = spoly(f[ig1], f[ig2], ring)
# ordering divisors is on average more efficient [Cox] page 111
G1 = sorted(G, key=lambda g: order(f[g].LM))
ht = normal(h, G1)
if ht:
G, CP = update(G, CP, ht[1])
else:
reductions_to_zero += 1
######################################
# now G is a Groebner basis; reduce it
Gr = set()
for ig in G:
ht = normal(f[ig], G - set([ig]))
if ht:
Gr.add(ht[1])
Gr = [f[ig] for ig in Gr]
# order according to the monomial ordering
Gr = sorted(Gr, key=lambda f: order(f.LM), reverse=True)
return Gr
def spoly(p1, p2, ring):
"""
Compute LCM(LM(p1), LM(p2))/LM(p1)*p1 - LCM(LM(p1), LM(p2))/LM(p2)*p2
This is the S-poly provided p1 and p2 are monic
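Examples
========
An illustrative doctest (a sketch over ``QQ`` with lexicographic ordering):
>>> from sympy.polys.groebnertools import spoly
>>> from sympy.polys import ring, QQ, lex
>>> R, x, y = ring("x,y", QQ, lex)
>>> spoly(x**2 + y, x*y - 1, R)
x + y**2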
"""
LM1 = p1.LM
LM2 = p2.LM
LCM12 = ring.monomial_lcm(LM1, LM2)
m1 = ring.monomial_div(LCM12, LM1)
m2 = ring.monomial_div(LCM12, LM2)
s1 = p1.mul_monom(m1)
s2 = p2.mul_monom(m2)
s = s1 - s2
return s
# F5B
# convenience functions
def Sign(f):
return f[0]
def Polyn(f):
return f[1]
def Num(f):
return f[2]
def sig(monomial, index):
return (monomial, index)
def lbp(signature, polynomial, number):
return (signature, polynomial, number)
# signature functions
def sig_cmp(u, v, order):
"""
Compare two signatures by extending the term order to K[X]^n.
u < v iff
- the index of v is greater than the index of u
or
- the index of v is equal to the index of u and u[0] < v[0] w.r.t. order
u > v otherwise
"""
if u[1] > v[1]:
return -1
if u[1] == v[1]:
#if u[0] == v[0]:
# return 0
if order(u[0]) < order(v[0]):
return -1
return 1
def sig_key(s, order):
"""
Key for comparing two signatures.
s = (m, k), t = (n, l)
s < t iff [k > l] or [k == l and m < n]
s > t otherwise
"""
return (-s[1], order(s[0]))
def sig_mult(s, m):
"""
Multiply a signature by a monomial.
The product of a signature (m, i) and a monomial n is defined as
(m * n, i).
"""
return sig(monomial_mul(s[0], m), s[1])
# labeled polynomial functions
def lbp_sub(f, g):
"""
Subtract labeled polynomial g from f.
The signature and number of the difference of f and g are signature
and number of the maximum of f and g, w.r.t. lbp_cmp.
"""
if sig_cmp(Sign(f), Sign(g), Polyn(f).ring.order) < 0:
max_poly = g
else:
max_poly = f
ret = Polyn(f) - Polyn(g)
return lbp(Sign(max_poly), ret, Num(max_poly))
def lbp_mul_term(f, cx):
"""
Multiply a labeled polynomial by a term.
The product of a labeled polynomial (s, p, k) and a term t = (m, c) is
defined as (m * s, t * p, k).
"""
return lbp(sig_mult(Sign(f), cx[0]), Polyn(f).mul_term(cx), Num(f))
def lbp_cmp(f, g):
"""
Compare two labeled polynomials.
f < g iff
- Sign(f) < Sign(g)
or
- Sign(f) == Sign(g) and Num(f) > Num(g)
f > g otherwise
"""
if sig_cmp(Sign(f), Sign(g), Polyn(f).ring.order) == -1:
return -1
if Sign(f) == Sign(g):
if Num(f) > Num(g):
return -1
#if Num(f) == Num(g):
# return 0
return 1
def lbp_key(f):
"""
Key for comparing two labeled polynomials.
"""
return (sig_key(Sign(f), Polyn(f).ring.order), -Num(f))
# algorithm and helper functions
def critical_pair(f, g, ring):
"""
Compute the critical pair corresponding to two labeled polynomials.
A critical pair is a tuple (um, f, vm, g), where um and vm are
terms such that um * f - vm * g is the S-polynomial of f and g (so,
wlog assume um * f > vm * g).
For performance sake, a critical pair is represented as a tuple
(Sign(um * f), um, f, Sign(vm * g), vm, g), since um * f creates
a new, relatively expensive object in memory, whereas Sign(um *
f) and um are lightweight and f (in the tuple) is a reference to
an already existing object in memory.
"""
domain = ring.domain
ltf = Polyn(f).LT
ltg = Polyn(g).LT
lt = (monomial_lcm(ltf[0], ltg[0]), domain.one)
um = term_div(lt, ltf, domain)
vm = term_div(lt, ltg, domain)
# The full information is not needed (now), so only the product
# with the leading term is considered:
fr = lbp_mul_term(lbp(Sign(f), Polyn(f).leading_term(), Num(f)), um)
gr = lbp_mul_term(lbp(Sign(g), Polyn(g).leading_term(), Num(g)), vm)
# return in proper order, such that the S-polynomial is just
# u_first * f_first - u_second * f_second:
if lbp_cmp(fr, gr) == -1:
return (Sign(gr), vm, g, Sign(fr), um, f)
else:
return (Sign(fr), um, f, Sign(gr), vm, g)
def cp_cmp(c, d):
"""
Compare two critical pairs c and d.
c < d iff
- lbp(c[0], _, Num(c[2])) < lbp(d[0], _, Num(d[2])) (this
corresponds to um_c * f_c and um_d * f_d)
or
- lbp(c[0], _, Num(c[2])) == lbp(d[0], _, Num(d[2])) and
lbp(c[3], _, Num(c[5])) < lbp(d[3], _, Num(d[5])) (this
corresponds to vm_c * g_c and vm_d * g_d)
c > d otherwise
"""
zero = Polyn(c[2]).ring.zero
c0 = lbp(c[0], zero, Num(c[2]))
d0 = lbp(d[0], zero, Num(d[2]))
r = lbp_cmp(c0, d0)
if r == -1:
return -1
if r == 0:
c1 = lbp(c[3], zero, Num(c[5]))
d1 = lbp(d[3], zero, Num(d[5]))
r = lbp_cmp(c1, d1)
if r == -1:
return -1
#if r == 0:
# return 0
return 1
def cp_key(c, ring):
"""
Key for comparing critical pairs.
"""
return (lbp_key(lbp(c[0], ring.zero, Num(c[2]))), lbp_key(lbp(c[3], ring.zero, Num(c[5]))))
def s_poly(cp):
"""
Compute the S-polynomial of a critical pair.
The S-polynomial of a critical pair cp is cp[1] * cp[2] - cp[4] * cp[5].
"""
return lbp_sub(lbp_mul_term(cp[2], cp[1]), lbp_mul_term(cp[5], cp[4]))
def is_rewritable_or_comparable(sign, num, B):
"""
Check if a labeled polynomial is redundant by checking if its
signature and number imply rewritability or comparability.
(sign, num) is comparable if there exists a labeled polynomial
h in B, such that sign[1] (the index) is less than Sign(h)[1]
and sign[0] is divisible by the leading monomial of h.
(sign, num) is rewritable if there exists a labeled polynomial
h in B, such that sign[1] is equal to Sign(h)[1], num < Num(h)
and sign[0] is divisible by Sign(h)[0].
"""
for h in B:
# comparable
if sign[1] < Sign(h)[1]:
if monomial_divides(Polyn(h).LM, sign[0]):
return True
# rewritable
if sign[1] == Sign(h)[1]:
if num < Num(h):
if monomial_divides(Sign(h)[0], sign[0]):
return True
return False
def f5_reduce(f, B):
"""
F5-reduce a labeled polynomial f by B.
Continuously searches for non-zero labeled polynomial h in B, such
that the leading term lt_h of h divides the leading term lt_f of
f and Sign(lt_h * h) < Sign(f). If such a labeled polynomial h is
found, f gets replaced by f - lt_f / lt_h * h. If no such h can be
found or f is 0, f is no further F5-reducible and f gets returned.
A polynomial that is reducible in the usual sense need not be
F5-reducible, e.g.:
>>> from sympy.polys.groebnertools import lbp, sig, f5_reduce, Polyn
>>> from sympy.polys import ring, QQ, lex
>>> R, x,y,z = ring("x,y,z", QQ, lex)
>>> f = lbp(sig((1, 1, 1), 4), x, 3)
>>> g = lbp(sig((0, 0, 0), 2), x, 2)
>>> Polyn(f).rem([Polyn(g)])
0
>>> f5_reduce(f, [g])
(((1, 1, 1), 4), x, 3)
"""
order = Polyn(f).ring.order
domain = Polyn(f).ring.domain
if not Polyn(f):
return f
while True:
g = f
for h in B:
if Polyn(h):
if monomial_divides(Polyn(h).LM, Polyn(f).LM):
t = term_div(Polyn(f).LT, Polyn(h).LT, domain)
if sig_cmp(sig_mult(Sign(h), t[0]), Sign(f), order) < 0:
# The following check need not be done and is in general slower than without.
#if not is_rewritable_or_comparable(Sign(gp), Num(gp), B):
hp = lbp_mul_term(h, t)
f = lbp_sub(f, hp)
break
if g == f or not Polyn(f):
return f
def _f5b(F, ring):
"""
Computes a reduced Groebner basis for the ideal generated by F.
f5b is an implementation of the F5B algorithm by Yao Sun and
Dingkang Wang. Similarly to Buchberger's algorithm, the algorithm
proceeds by computing critical pairs, computing the S-polynomial,
reducing it and adjoining the reduced S-polynomial if it is not 0.
Unlike Buchberger's algorithm, each polynomial contains additional
information, namely a signature and a number. The signature
specifies the path of computation (i.e. from which polynomial in
the original basis was it derived and how), the number says when
the polynomial was added to the basis. With this information it
is (often) possible to decide if an S-polynomial will reduce to
0 and can be discarded.
Optimizations include: Reducing the generators before computing
a Groebner basis, removing redundant critical pairs when a new
polynomial enters the basis and sorting the critical pairs and
the current basis.
Once a Groebner basis has been found, it gets reduced.
References
==========
.. [1] Yao Sun, Dingkang Wang: "A New Proof for the Correctness of F5
(F5-Like) Algorithm", http://arxiv.org/abs/1004.0084 (specifically
v4)
.. [2] Thomas Becker, Volker Weispfenning, Groebner bases: A computational
approach to commutative algebra, 1993, p. 203, 216
"""
order = ring.order
# reduce polynomials (like in Mario Pernici's implementation) (Becker, Weispfenning, p. 203)
B = F
while True:
F = B
B = []
for i in range(len(F)):
p = F[i]
r = p.rem(F[:i])
if r:
B.append(r)
if F == B:
break
# basis
B = [lbp(sig(ring.zero_monom, i + 1), F[i], i + 1) for i in range(len(F))]
B.sort(key=lambda f: order(Polyn(f).LM), reverse=True)
# critical pairs
CP = [critical_pair(B[i], B[j], ring) for i in range(len(B)) for j in range(i + 1, len(B))]
CP.sort(key=lambda cp: cp_key(cp, ring), reverse=True)
k = len(B)
reductions_to_zero = 0
while len(CP):
cp = CP.pop()
# discard redundant critical pairs:
if is_rewritable_or_comparable(cp[0], Num(cp[2]), B):
continue
if is_rewritable_or_comparable(cp[3], Num(cp[5]), B):
continue
s = s_poly(cp)
p = f5_reduce(s, B)
p = lbp(Sign(p), Polyn(p).monic(), k + 1)
if Polyn(p):
# remove old critical pairs, that become redundant when adding p:
indices = []
for i, cp in enumerate(CP):
if is_rewritable_or_comparable(cp[0], Num(cp[2]), [p]):
indices.append(i)
elif is_rewritable_or_comparable(cp[3], Num(cp[5]), [p]):
indices.append(i)
for i in reversed(indices):
del CP[i]
# only add new critical pairs that are not made redundant by p:
for g in B:
if Polyn(g):
cp = critical_pair(p, g, ring)
if is_rewritable_or_comparable(cp[0], Num(cp[2]), [p]):
continue
elif is_rewritable_or_comparable(cp[3], Num(cp[5]), [p]):
continue
CP.append(cp)
# sort (other sorting methods/selection strategies were not as successful)
CP.sort(key=lambda cp: cp_key(cp, ring), reverse=True)
# insert p into B:
m = Polyn(p).LM
if order(m) <= order(Polyn(B[-1]).LM):
B.append(p)
else:
for i, q in enumerate(B):
if order(m) > order(Polyn(q).LM):
B.insert(i, p)
break
k += 1
#print(len(B), len(CP), "%d critical pairs removed" % len(indices))
else:
reductions_to_zero += 1
# reduce Groebner basis:
H = [Polyn(g).monic() for g in B]
H = red_groebner(H, ring)
return sorted(H, key=lambda f: order(f.LM), reverse=True)
def red_groebner(G, ring):
"""
Compute a reduced Groebner basis, following Becker, Weispfenning (1993), p. 216.
Selects a subset of generators, that already generate the ideal
and computes a reduced Groebner basis for them.
"""
def reduction(P):
"""
The actual reduction algorithm.
"""
Q = []
for i, p in enumerate(P):
h = p.rem(P[:i] + P[i + 1:])
if h:
Q.append(h)
return [p.monic() for p in Q]
F = G
H = []
while F:
f0 = F.pop()
if not any(monomial_divides(f.LM, f0.LM) for f in F + H):
H.append(f0)
# Becker, Weispfenning, p. 217: H is Groebner basis of the ideal generated by G.
return reduction(H)
def is_groebner(G, ring):
"""
Check if G is a Groebner basis.
"""
for i in range(len(G)):
for j in range(i + 1, len(G)):
s = spoly(G[i], G[j], ring)
s = s.rem(G)
if s:
return False
return True
def is_minimal(G, ring):
"""
Checks if G is a minimal Groebner basis.
"""
order = ring.order
domain = ring.domain
G.sort(key=lambda g: order(g.LM))
for i, g in enumerate(G):
if g.LC != domain.one:
return False
for h in G[:i] + G[i + 1:]:
if monomial_divides(h.LM, g.LM):
return False
return True
def is_reduced(G, ring):
"""
Checks if G is a reduced Groebner basis.
"""
order = ring.order
domain = ring.domain
G.sort(key=lambda g: order(g.LM))
for i, g in enumerate(G):
if g.LC != domain.one:
return False
for term in g.terms():
for h in G[:i] + G[i + 1:]:
if monomial_divides(h.LM, term[0]):
return False
return True
def groebner_lcm(f, g):
"""
Computes LCM of two polynomials using Groebner bases.
The LCM is computed as the unique generator of the intersection
of the two ideals generated by `f` and `g`. The approach is to
compute a Groebner basis with respect to lexicographic ordering
of `t*f` and `(1 - t)*g`, where `t` is an unrelated variable and
then to select the basis element that does not contain `t`.
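Examples
========
An illustrative doctest (a sketch over ``ZZ``):
>>> from sympy.polys.groebnertools import groebner_lcm
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> groebner_lcm(x**2 - 1, x**2 - 2*x + 1)
x**3 - x**2 - x + 1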
References
==========
.. [1] [Cox97]_
"""
if f.ring != g.ring:
raise ValueError("Values should be equal")
ring = f.ring
domain = ring.domain
if not f or not g:
return ring.zero
if len(f) <= 1 and len(g) <= 1:
monom = monomial_lcm(f.LM, g.LM)
coeff = domain.lcm(f.LC, g.LC)
return ring.term_new(monom, coeff)
fc, f = f.primitive()
gc, g = g.primitive()
lcm = domain.lcm(fc, gc)
f_terms = [ ((1,) + monom, coeff) for monom, coeff in f.terms() ]
g_terms = [ ((0,) + monom, coeff) for monom, coeff in g.terms() ] \
+ [ ((1,) + monom,-coeff) for monom, coeff in g.terms() ]
t = Dummy("t")
t_ring = ring.clone(symbols=(t,) + ring.symbols, order=lex)
F = t_ring.from_terms(f_terms)
G = t_ring.from_terms(g_terms)
basis = groebner([F, G], t_ring)
def is_independent(h, j):
return all(not monom[j] for monom in h.monoms())
H = [ h for h in basis if is_independent(h, 0) ]
h_terms = [ (monom[1:], coeff*lcm) for monom, coeff in H[0].terms() ]
h = ring.from_terms(h_terms)
return h
def groebner_gcd(f, g):
"""Computes GCD of two polynomials using Groebner bases. """
if f.ring != g.ring:
raise ValueError("Values should be equal")
domain = f.ring.domain
if not domain.is_Field:
fc, f = f.primitive()
gc, g = g.primitive()
gcd = domain.gcd(fc, gc)
H = (f*g).quo([groebner_lcm(f, g)])
if len(H) != 1:
raise ValueError("Length should be 1")
h = H[0]
if not domain.is_Field:
return gcd*h
else:
return h.monic()
|
194575a148b43f3e1ab89c7b37a78c476132696b1139b956808ff80f22fe9db8 | from sympy import Dummy
from sympy.ntheory import nextprime
from sympy.ntheory.modular import crt
from sympy.polys.domains import PolynomialRing
from sympy.polys.galoistools import (
gf_gcd, gf_from_dict, gf_gcdex, gf_div, gf_lcm)
from sympy.polys.polyerrors import ModularGCDFailed
from mpmath import sqrt
import random
def _trivial_gcd(f, g):
"""
Compute the GCD of two polynomials in trivial cases, i.e. when one
or both polynomials are zero.
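Examples
========
An illustrative doctest (a sketch; ``None`` is returned in non-trivial cases):
>>> from sympy.polys.modulargcd import _trivial_gcd
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> _trivial_gcd(R.zero, x)
(x, 0, 1)
>>> _trivial_gcd(x, x) is None
True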
"""
ring = f.ring
if not (f or g):
return ring.zero, ring.zero, ring.zero
elif not f:
if g.LC < ring.domain.zero:
return -g, ring.zero, -ring.one
else:
return g, ring.zero, ring.one
elif not g:
if f.LC < ring.domain.zero:
return -f, -ring.one, ring.zero
else:
return f, ring.one, ring.zero
return None
def _gf_gcd(fp, gp, p):
r"""
Compute the GCD of two univariate polynomials in `\mathbb{Z}_p[x]`.
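Examples
========
An illustrative doctest (a sketch; the result is monic and uses the symmetric
representation modulo `p`):
>>> from sympy.polys.modulargcd import _gf_gcd
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> _gf_gcd(x**2 - 1, x - 1, 5)
x - 1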
"""
dom = fp.ring.domain
while gp:
rem = fp
deg = gp.degree()
lcinv = dom.invert(gp.LC, p)
while True:
degrem = rem.degree()
if degrem < deg:
break
rem = (rem - gp.mul_monom((degrem - deg,)).mul_ground(lcinv * rem.LC)).trunc_ground(p)
fp = gp
gp = rem
return fp.mul_ground(dom.invert(fp.LC, p)).trunc_ground(p)
def _degree_bound_univariate(f, g):
r"""
Compute an upper bound for the degree of the GCD of two univariate
integer polynomials `f` and `g`.
The function chooses a suitable prime `p` and computes the GCD of
`f` and `g` in `\mathbb{Z}_p[x]`. The choice of `p` guarantees that
the degree in `\mathbb{Z}_p[x]` is greater than or equal to the degree
in `\mathbb{Z}[x]`.
Parameters
==========
f : PolyElement
univariate integer polynomial
g : PolyElement
univariate integer polynomial
"""
gamma = f.ring.domain.gcd(f.LC, g.LC)
p = 1
p = nextprime(p)
while gamma % p == 0:
p = nextprime(p)
fp = f.trunc_ground(p)
gp = g.trunc_ground(p)
hp = _gf_gcd(fp, gp, p)
deghp = hp.degree()
return deghp
def _chinese_remainder_reconstruction_univariate(hp, hq, p, q):
r"""
Construct a polynomial `h_{pq}` in `\mathbb{Z}_{p q}[x]` such that
.. math ::
h_{pq} = h_p \; \mathrm{mod} \, p
h_{pq} = h_q \; \mathrm{mod} \, q
for relatively prime integers `p` and `q` and polynomials
`h_p` and `h_q` in `\mathbb{Z}_p[x]` and `\mathbb{Z}_q[x]`
respectively.
The coefficients of the polynomial `h_{pq}` are computed with the
Chinese Remainder Theorem. The symmetric representation in
`\mathbb{Z}_p[x]`, `\mathbb{Z}_q[x]` and `\mathbb{Z}_{p q}[x]` is used.
It is assumed that `h_p` and `h_q` have the same degree.
Parameters
==========
hp : PolyElement
univariate integer polynomial with coefficients in `\mathbb{Z}_p`
hq : PolyElement
univariate integer polynomial with coefficients in `\mathbb{Z}_q`
p : Integer
modulus of `h_p`, relatively prime to `q`
q : Integer
modulus of `h_q`, relatively prime to `p`
Examples
========
>>> from sympy.polys.modulargcd import _chinese_remainder_reconstruction_univariate
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> p = 3
>>> q = 5
>>> hp = -x**3 - 1
>>> hq = 2*x**3 - 2*x**2 + x
>>> hpq = _chinese_remainder_reconstruction_univariate(hp, hq, p, q)
>>> hpq
2*x**3 + 3*x**2 + 6*x + 5
>>> hpq.trunc_ground(p) == hp
True
>>> hpq.trunc_ground(q) == hq
True
"""
n = hp.degree()
x = hp.ring.gens[0]
hpq = hp.ring.zero
for i in range(n+1):
hpq[(i,)] = crt([p, q], [hp.coeff(x**i), hq.coeff(x**i)], symmetric=True)[0]
hpq.strip_zero()
return hpq
def modgcd_univariate(f, g):
r"""
Computes the GCD of two polynomials in `\mathbb{Z}[x]` using a modular
algorithm.
The algorithm computes the GCD of two univariate integer polynomials
`f` and `g` by computing the GCD in `\mathbb{Z}_p[x]` for suitable
primes `p` and then reconstructing the coefficients with the Chinese
Remainder Theorem. Trial division is only made for candidates which
are very likely the desired GCD.
Parameters
==========
f : PolyElement
univariate integer polynomial
g : PolyElement
univariate integer polynomial
Returns
=======
h : PolyElement
GCD of the polynomials `f` and `g`
cff : PolyElement
cofactor of `f`, i.e. `\frac{f}{h}`
cfg : PolyElement
cofactor of `g`, i.e. `\frac{g}{h}`
Examples
========
>>> from sympy.polys.modulargcd import modgcd_univariate
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> f = x**5 - 1
>>> g = x - 1
>>> h, cff, cfg = modgcd_univariate(f, g)
>>> h, cff, cfg
(x - 1, x**4 + x**3 + x**2 + x + 1, 1)
>>> cff * h == f
True
>>> cfg * h == g
True
>>> f = 6*x**2 - 6
>>> g = 2*x**2 + 4*x + 2
>>> h, cff, cfg = modgcd_univariate(f, g)
>>> h, cff, cfg
(2*x + 2, 3*x - 3, x + 1)
>>> cff * h == f
True
>>> cfg * h == g
True
References
==========
1. [Monagan00]_
"""
assert f.ring == g.ring and f.ring.domain.is_ZZ
result = _trivial_gcd(f, g)
if result is not None:
return result
ring = f.ring
cf, f = f.primitive()
cg, g = g.primitive()
ch = ring.domain.gcd(cf, cg)
bound = _degree_bound_univariate(f, g)
if bound == 0:
return ring(ch), f.mul_ground(cf // ch), g.mul_ground(cg // ch)
gamma = ring.domain.gcd(f.LC, g.LC)
m = 1
p = 1
while True:
p = nextprime(p)
while gamma % p == 0:
p = nextprime(p)
fp = f.trunc_ground(p)
gp = g.trunc_ground(p)
hp = _gf_gcd(fp, gp, p)
deghp = hp.degree()
if deghp > bound:
continue
elif deghp < bound:
m = 1
bound = deghp
continue
hp = hp.mul_ground(gamma).trunc_ground(p)
if m == 1:
m = p
hlastm = hp
continue
hm = _chinese_remainder_reconstruction_univariate(hp, hlastm, p, m)
m *= p
if not hm == hlastm:
hlastm = hm
continue
h = hm.quo_ground(hm.content())
fquo, frem = f.div(h)
gquo, grem = g.div(h)
if not frem and not grem:
if h.LC < 0:
ch = -ch
h = h.mul_ground(ch)
cff = fquo.mul_ground(cf // ch)
cfg = gquo.mul_ground(cg // ch)
return h, cff, cfg
def _primitive(f, p):
r"""
Compute the content and the primitive part of a polynomial in
`\mathbb{Z}_p[x_0, \ldots, x_{k-2}, y] \cong \mathbb{Z}_p[y][x_0, \ldots, x_{k-2}]`.
Parameters
==========
f : PolyElement
integer polynomial in `\mathbb{Z}_p[x_0, \ldots, x_{k-2}, y]`
p : Integer
modulus of `f`
Returns
=======
contf : PolyElement
integer polynomial in `\mathbb{Z}_p[y]`, content of `f`
ppf : PolyElement
primitive part of `f`, i.e. `\frac{f}{contf}`
Examples
========
>>> from sympy.polys.modulargcd import _primitive
>>> from sympy.polys import ring, ZZ
>>> R, x, y = ring("x, y", ZZ)
>>> p = 3
>>> f = x**2*y**2 + x**2*y - y**2 - y
>>> _primitive(f, p)
(y**2 + y, x**2 - 1)
>>> R, x, y, z = ring("x, y, z", ZZ)
>>> f = x*y*z - y**2*z**2
>>> _primitive(f, p)
(z, x*y - y**2*z)
"""
ring = f.ring
dom = ring.domain
k = ring.ngens
coeffs = {}
for monom, coeff in f.iterterms():
if monom[:-1] not in coeffs:
coeffs[monom[:-1]] = {}
coeffs[monom[:-1]][monom[-1]] = coeff
cont = []
for coeff in iter(coeffs.values()):
cont = gf_gcd(cont, gf_from_dict(coeff, p, dom), p, dom)
yring = ring.clone(symbols=ring.symbols[k-1])
contf = yring.from_dense(cont).trunc_ground(p)
return contf, f.quo(contf.set_ring(ring))
def _deg(f):
r"""
Compute the degree of a multivariate polynomial
`f \in K[x_0, \ldots, x_{k-2}, y] \cong K[y][x_0, \ldots, x_{k-2}]`.
Parameters
==========
f : PolyElement
polynomial in `K[x_0, \ldots, x_{k-2}, y]`
Returns
=======
degf : Integer tuple
degree of `f` in `x_0, \ldots, x_{k-2}`
Examples
========
>>> from sympy.polys.modulargcd import _deg
>>> from sympy.polys import ring, ZZ
>>> R, x, y = ring("x, y", ZZ)
>>> f = x**2*y**2 + x**2*y - 1
>>> _deg(f)
(2,)
>>> R, x, y, z = ring("x, y, z", ZZ)
>>> f = x**2*y**2 + x**2*y - 1
>>> _deg(f)
(2, 2)
>>> f = x*y*z - y**2*z**2
>>> _deg(f)
(1, 1)
"""
k = f.ring.ngens
degf = (0,) * (k-1)
for monom in f.itermonoms():
if monom[:-1] > degf:
degf = monom[:-1]
return degf
def _LC(f):
r"""
Compute the leading coefficient of a multivariate polynomial
`f \in K[x_0, \ldots, x_{k-2}, y] \cong K[y][x_0, \ldots, x_{k-2}]`.
Parameters
==========
f : PolyElement
polynomial in `K[x_0, \ldots, x_{k-2}, y]`
Returns
=======
lcf : PolyElement
polynomial in `K[y]`, leading coefficient of `f`
Examples
========
>>> from sympy.polys.modulargcd import _LC
>>> from sympy.polys import ring, ZZ
>>> R, x, y = ring("x, y", ZZ)
>>> f = x**2*y**2 + x**2*y - 1
>>> _LC(f)
y**2 + y
>>> R, x, y, z = ring("x, y, z", ZZ)
>>> f = x**2*y**2 + x**2*y - 1
>>> _LC(f)
1
>>> f = x*y*z - y**2*z**2
>>> _LC(f)
z
"""
ring = f.ring
k = ring.ngens
yring = ring.clone(symbols=ring.symbols[k-1])
y = yring.gens[0]
degf = _deg(f)
lcf = yring.zero
for monom, coeff in f.iterterms():
if monom[:-1] == degf:
lcf += coeff*y**monom[-1]
return lcf
def _swap(f, i):
"""
Make the variable `x_i` the leading one in a multivariate polynomial `f`.
"""
ring = f.ring
fswap = ring.zero
for monom, coeff in f.iterterms():
monomswap = (monom[i],) + monom[:i] + monom[i+1:]
fswap[monomswap] = coeff
return fswap
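# Illustration (added comment, not part of the original module): a small,
# hand-checked sketch of what ``_swap`` does -- it permutes each exponent
# vector so that `x_i` becomes the leading variable, while the ring itself
# (and hence the printing order of the generators) is left unchanged.
#
#     >>> from sympy.polys import ring, ZZ
#     >>> R, x, y = ring("x, y", ZZ)
#     >>> _swap(x**2*y + x, 1)
#     x*y**2 + y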
def _degree_bound_bivariate(f, g):
r"""
Compute upper degree bounds for the GCD of two bivariate
integer polynomials `f` and `g`.
The GCD is viewed as a polynomial in `\mathbb{Z}[y][x]` and the
function returns an upper bound for its degree and one for the degree
of its content. This is done by choosing a suitable prime `p` and
computing the GCD of the contents of `f \; \mathrm{mod} \, p` and
`g \; \mathrm{mod} \, p`. The choice of `p` guarantees that the degree
of the content in `\mathbb{Z}_p[y]` is greater than or equal to the
degree in `\mathbb{Z}[y]`. To obtain the degree bound in the variable
`x`, the polynomials are evaluated at `y = a` for a suitable
`a \in \mathbb{Z}_p` and then their GCD in `\mathbb{Z}_p[x]` is
computed. If no such `a` exists, i.e. the degree in `\mathbb{Z}_p[x]`
is always smaller than the one in `\mathbb{Z}[y][x]`, then the bound is
set to the minimum of the degrees of `f` and `g` in `x`.
Parameters
==========
f : PolyElement
bivariate integer polynomial
g : PolyElement
bivariate integer polynomial
Returns
=======
xbound : Integer
upper bound for the degree of the GCD of the polynomials `f` and
`g` in the variable `x`
ycontbound : Integer
upper bound for the degree of the content of the GCD of the
polynomials `f` and `g` in the variable `y`
References
==========
1. [Monagan00]_
"""
ring = f.ring
gamma1 = ring.domain.gcd(f.LC, g.LC)
gamma2 = ring.domain.gcd(_swap(f, 1).LC, _swap(g, 1).LC)
badprimes = gamma1 * gamma2
p = 1
p = nextprime(p)
while badprimes % p == 0:
p = nextprime(p)
fp = f.trunc_ground(p)
gp = g.trunc_ground(p)
contfp, fp = _primitive(fp, p)
contgp, gp = _primitive(gp, p)
conthp = _gf_gcd(contfp, contgp, p) # polynomial in Z_p[y]
ycontbound = conthp.degree()
# polynomial in Z_p[y]
delta = _gf_gcd(_LC(fp), _LC(gp), p)
for a in range(p):
if not delta.evaluate(0, a) % p:
continue
fpa = fp.evaluate(1, a).trunc_ground(p)
gpa = gp.evaluate(1, a).trunc_ground(p)
hpa = _gf_gcd(fpa, gpa, p)
xbound = hpa.degree()
return xbound, ycontbound
return min(fp.degree(), gp.degree()), ycontbound
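# Illustration (added comment, not part of the original module): a hand-checked
# sketch of the degree bounds for two polynomials whose GCD is x + y. The
# expected result is an x-degree bound of 1 and a content bound of 0; the
# returned values are only upper bounds, so an unlucky prime may give larger
# numbers for other inputs.
#
#     >>> from sympy.polys import ring, ZZ
#     >>> R, x, y = ring("x, y", ZZ)
#     >>> f = (x + y)*(x - 2)
#     >>> g = (x + y)*(x + 3)
#     >>> _degree_bound_bivariate(f, g)
#     (1, 0)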
def _chinese_remainder_reconstruction_multivariate(hp, hq, p, q):
r"""
Construct a polynomial `h_{pq}` in
`\mathbb{Z}_{p q}[x_0, \ldots, x_{k-1}]` such that
.. math ::
h_{pq} = h_p \; \mathrm{mod} \, p
h_{pq} = h_q \; \mathrm{mod} \, q
for relatively prime integers `p` and `q` and polynomials
`h_p` and `h_q` in `\mathbb{Z}_p[x_0, \ldots, x_{k-1}]` and
`\mathbb{Z}_q[x_0, \ldots, x_{k-1}]` respectively.
The coefficients of the polynomial `h_{pq}` are computed with the
Chinese Remainder Theorem. The symmetric representation in
`\mathbb{Z}_p[x_0, \ldots, x_{k-1}]`,
`\mathbb{Z}_q[x_0, \ldots, x_{k-1}]` and
`\mathbb{Z}_{p q}[x_0, \ldots, x_{k-1}]` is used.
Parameters
==========
hp : PolyElement
multivariate integer polynomial with coefficients in `\mathbb{Z}_p`
hq : PolyElement
multivariate integer polynomial with coefficients in `\mathbb{Z}_q`
p : Integer
modulus of `h_p`, relatively prime to `q`
q : Integer
modulus of `h_q`, relatively prime to `p`
Examples
========
>>> from sympy.polys.modulargcd import _chinese_remainder_reconstruction_multivariate
>>> from sympy.polys import ring, ZZ
>>> R, x, y = ring("x, y", ZZ)
>>> p = 3
>>> q = 5
>>> hp = x**3*y - x**2 - 1
>>> hq = -x**3*y - 2*x*y**2 + 2
>>> hpq = _chinese_remainder_reconstruction_multivariate(hp, hq, p, q)
>>> hpq
4*x**3*y + 5*x**2 + 3*x*y**2 + 2
>>> hpq.trunc_ground(p) == hp
True
>>> hpq.trunc_ground(q) == hq
True
>>> R, x, y, z = ring("x, y, z", ZZ)
>>> p = 6
>>> q = 5
>>> hp = 3*x**4 - y**3*z + z
>>> hq = -2*x**4 + z
>>> hpq = _chinese_remainder_reconstruction_multivariate(hp, hq, p, q)
>>> hpq
3*x**4 + 5*y**3*z + z
>>> hpq.trunc_ground(p) == hp
True
>>> hpq.trunc_ground(q) == hq
True
"""
hpmonoms = set(hp.monoms())
hqmonoms = set(hq.monoms())
monoms = hpmonoms.intersection(hqmonoms)
hpmonoms.difference_update(monoms)
hqmonoms.difference_update(monoms)
zero = hp.ring.domain.zero
hpq = hp.ring.zero
if isinstance(hp.ring.domain, PolynomialRing):
crt_ = _chinese_remainder_reconstruction_multivariate
else:
def crt_(cp, cq, p, q):
return crt([p, q], [cp, cq], symmetric=True)[0]
for monom in monoms:
hpq[monom] = crt_(hp[monom], hq[monom], p, q)
for monom in hpmonoms:
hpq[monom] = crt_(hp[monom], zero, p, q)
for monom in hqmonoms:
hpq[monom] = crt_(zero, hq[monom], p, q)
return hpq
def _interpolate_multivariate(evalpoints, hpeval, ring, i, p, ground=False):
r"""
Reconstruct a polynomial `h_p` in `\mathbb{Z}_p[x_0, \ldots, x_{k-1}]`
from a list of evaluation points in `\mathbb{Z}_p` and a list of
polynomials in
`\mathbb{Z}_p[x_0, \ldots, x_{i-1}, x_{i+1}, \ldots, x_{k-1}]`, which
are the images of `h_p` evaluated in the variable `x_i`.
It is also possible to reconstruct a parameter of the ground domain,
i.e. if `h_p` is a polynomial over `\mathbb{Z}_p[x_0, \ldots, x_{k-1}]`.
In this case, one has to set ``ground=True``.
Parameters
==========
evalpoints : list of Integer objects
list of evaluation points in `\mathbb{Z}_p`
hpeval : list of PolyElement objects
list of polynomials in (resp. over)
`\mathbb{Z}_p[x_0, \ldots, x_{i-1}, x_{i+1}, \ldots, x_{k-1}]`,
images of `h_p` evaluated in the variable `x_i`
ring : PolyRing
`h_p` will be an element of this ring
i : Integer
index of the variable which has to be reconstructed
p : Integer
prime number, modulus of `h_p`
ground : Boolean
indicates whether `x_i` is in the ground domain, default is
``False``
Returns
=======
hp : PolyElement
interpolated polynomial in (resp. over)
`\mathbb{Z}_p[x_0, \ldots, x_{k-1}]`
"""
hp = ring.zero
if ground:
domain = ring.domain.domain
y = ring.domain.gens[i]
else:
domain = ring.domain
y = ring.gens[i]
for a, hpa in zip(evalpoints, hpeval):
numer = ring.one
denom = domain.one
for b in evalpoints:
if b == a:
continue
numer *= y - b
denom *= a - b
denom = domain.invert(denom, p)
coeff = numer.mul_ground(denom)
hp += hpa.set_ring(ring) * coeff
return hp.trunc_ground(p)
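# Illustration (added comment, not part of the original module): a hand-checked
# sketch of Lagrange interpolation in the variable x_1 = y modulo p = 5. The
# images x + 1, 2*x + 1, 3*x + 1 at y = 1, 2, 3 come from h_p = x*y + 1, which
# the interpolation should recover. In the real algorithm the images live in a
# smaller ring, but ``set_ring`` also accepts elements of the full ring here.
#
#     >>> from sympy.polys import ring, ZZ
#     >>> R, x, y = ring("x, y", ZZ)
#     >>> _interpolate_multivariate([1, 2, 3], [x + 1, 2*x + 1, 3*x + 1], R, 1, 5)
#     x*y + 1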
def modgcd_bivariate(f, g):
r"""
Computes the GCD of two polynomials in `\mathbb{Z}[x, y]` using a
modular algorithm.
The algorithm computes the GCD of two bivariate integer polynomials
`f` and `g` by calculating the GCD in `\mathbb{Z}_p[x, y]` for
suitable primes `p` and then reconstructing the coefficients with the
Chinese Remainder Theorem. To compute the bivariate GCD over
`\mathbb{Z}_p`, the polynomials `f \; \mathrm{mod} \, p` and
`g \; \mathrm{mod} \, p` are evaluated at `y = a` for certain
`a \in \mathbb{Z}_p` and then their univariate GCD in `\mathbb{Z}_p[x]`
is computed. Interpolating those yields the bivariate GCD in
`\mathbb{Z}_p[x, y]`. To verify the result in `\mathbb{Z}[x, y]`, trial
division is done, but only for candidates which are very likely the
desired GCD.
Parameters
==========
f : PolyElement
bivariate integer polynomial
g : PolyElement
bivariate integer polynomial
Returns
=======
h : PolyElement
GCD of the polynomials `f` and `g`
cff : PolyElement
cofactor of `f`, i.e. `\frac{f}{h}`
cfg : PolyElement
cofactor of `g`, i.e. `\frac{g}{h}`
Examples
========
>>> from sympy.polys.modulargcd import modgcd_bivariate
>>> from sympy.polys import ring, ZZ
>>> R, x, y = ring("x, y", ZZ)
>>> f = x**2 - y**2
>>> g = x**2 + 2*x*y + y**2
>>> h, cff, cfg = modgcd_bivariate(f, g)
>>> h, cff, cfg
(x + y, x - y, x + y)
>>> cff * h == f
True
>>> cfg * h == g
True
>>> f = x**2*y - x**2 - 4*y + 4
>>> g = x + 2
>>> h, cff, cfg = modgcd_bivariate(f, g)
>>> h, cff, cfg
(x + 2, x*y - x - 2*y + 2, 1)
>>> cff * h == f
True
>>> cfg * h == g
True
References
==========
1. [Monagan00]_
"""
assert f.ring == g.ring and f.ring.domain.is_ZZ
result = _trivial_gcd(f, g)
if result is not None:
return result
ring = f.ring
cf, f = f.primitive()
cg, g = g.primitive()
ch = ring.domain.gcd(cf, cg)
xbound, ycontbound = _degree_bound_bivariate(f, g)
if xbound == ycontbound == 0:
return ring(ch), f.mul_ground(cf // ch), g.mul_ground(cg // ch)
fswap = _swap(f, 1)
gswap = _swap(g, 1)
degyf = fswap.degree()
degyg = gswap.degree()
ybound, xcontbound = _degree_bound_bivariate(fswap, gswap)
if ybound == xcontbound == 0:
return ring(ch), f.mul_ground(cf // ch), g.mul_ground(cg // ch)
# TODO: to improve performance, choose the main variable here
gamma1 = ring.domain.gcd(f.LC, g.LC)
gamma2 = ring.domain.gcd(fswap.LC, gswap.LC)
badprimes = gamma1 * gamma2
m = 1
p = 1
while True:
p = nextprime(p)
while badprimes % p == 0:
p = nextprime(p)
fp = f.trunc_ground(p)
gp = g.trunc_ground(p)
contfp, fp = _primitive(fp, p)
contgp, gp = _primitive(gp, p)
conthp = _gf_gcd(contfp, contgp, p) # monic polynomial in Z_p[y]
degconthp = conthp.degree()
if degconthp > ycontbound:
continue
elif degconthp < ycontbound:
m = 1
ycontbound = degconthp
continue
# polynomial in Z_p[y]
delta = _gf_gcd(_LC(fp), _LC(gp), p)
degcontfp = contfp.degree()
degcontgp = contgp.degree()
degdelta = delta.degree()
N = min(degyf - degcontfp, degyg - degcontgp,
ybound - ycontbound + degdelta) + 1
if p < N:
continue
n = 0
evalpoints = []
hpeval = []
unlucky = False
for a in range(p):
deltaa = delta.evaluate(0, a)
if not deltaa % p:
continue
fpa = fp.evaluate(1, a).trunc_ground(p)
gpa = gp.evaluate(1, a).trunc_ground(p)
hpa = _gf_gcd(fpa, gpa, p) # monic polynomial in Z_p[x]
deghpa = hpa.degree()
if deghpa > xbound:
continue
elif deghpa < xbound:
m = 1
xbound = deghpa
unlucky = True
break
hpa = hpa.mul_ground(deltaa).trunc_ground(p)
evalpoints.append(a)
hpeval.append(hpa)
n += 1
if n == N:
break
if unlucky:
continue
if n < N:
continue
hp = _interpolate_multivariate(evalpoints, hpeval, ring, 1, p)
hp = _primitive(hp, p)[1]
hp = hp * conthp.set_ring(ring)
degyhp = hp.degree(1)
if degyhp > ybound:
continue
if degyhp < ybound:
m = 1
ybound = degyhp
continue
hp = hp.mul_ground(gamma1).trunc_ground(p)
if m == 1:
m = p
hlastm = hp
continue
hm = _chinese_remainder_reconstruction_multivariate(hp, hlastm, p, m)
m *= p
if not hm == hlastm:
hlastm = hm
continue
h = hm.quo_ground(hm.content())
fquo, frem = f.div(h)
gquo, grem = g.div(h)
if not frem and not grem:
if h.LC < 0:
ch = -ch
h = h.mul_ground(ch)
cff = fquo.mul_ground(cf // ch)
cfg = gquo.mul_ground(cg // ch)
return h, cff, cfg
def _modgcd_multivariate_p(f, g, p, degbound, contbound):
r"""
Compute the GCD of two polynomials in
`\mathbb{Z}_p[x_0, \ldots, x_{k-1}]`.
The algorithm reduces the problem step by step by evaluating the
polynomials `f` and `g` at `x_{k-1} = a` for suitable
`a \in \mathbb{Z}_p` and then calls itself recursively to compute the GCD
in `\mathbb{Z}_p[x_0, \ldots, x_{k-2}]`. If these recursive calls are
successful for enough evaluation points, the GCD in `k` variables is
interpolated, otherwise the algorithm returns ``None``. Every time a GCD
or a content is computed, their degrees are compared with the bounds. If
a degree greater than the bound is encountered, then the current call
returns ``None`` and a new evaluation point has to be chosen. If at some
point the degree is smaller, the corresponding bound is updated and the
algorithm raises ``ModularGCDFailed``, so that the caller can restart with
the tighter bound.
Parameters
==========
f : PolyElement
multivariate integer polynomial with coefficients in `\mathbb{Z}_p`
g : PolyElement
multivariate integer polynomial with coefficients in `\mathbb{Z}_p`
p : Integer
prime number, modulus of `f` and `g`
degbound : list of Integer objects
``degbound[i]`` is an upper bound for the degree of the GCD of `f`
and `g` in the variable `x_i`
contbound : list of Integer objects
``contbound[i]`` is an upper bound for the degree of the content of
the GCD in `\mathbb{Z}_p[x_i][x_0, \ldots, x_{i-1}]`,
``contbound[0]`` is not used and can therefore be chosen
arbitrarily.
Returns
=======
h : PolyElement
GCD of the polynomials `f` and `g` or ``None``
References
==========
1. [Monagan00]_
2. [Brown71]_
"""
ring = f.ring
k = ring.ngens
if k == 1:
h = _gf_gcd(f, g, p).trunc_ground(p)
degh = h.degree()
if degh > degbound[0]:
return None
if degh < degbound[0]:
degbound[0] = degh
raise ModularGCDFailed
return h
degyf = f.degree(k-1)
degyg = g.degree(k-1)
contf, f = _primitive(f, p)
contg, g = _primitive(g, p)
conth = _gf_gcd(contf, contg, p) # polynomial in Z_p[y]
degcontf = contf.degree()
degcontg = contg.degree()
degconth = conth.degree()
if degconth > contbound[k-1]:
return None
if degconth < contbound[k-1]:
contbound[k-1] = degconth
raise ModularGCDFailed
lcf = _LC(f)
lcg = _LC(g)
delta = _gf_gcd(lcf, lcg, p) # polynomial in Z_p[y]
evaltest = delta
for i in range(k-1):
evaltest *= _gf_gcd(_LC(_swap(f, i)), _LC(_swap(g, i)), p)
degdelta = delta.degree()
N = min(degyf - degcontf, degyg - degcontg,
degbound[k-1] - contbound[k-1] + degdelta) + 1
if p < N:
return None
n = 0
d = 0
evalpoints = []
heval = []
points = set(range(p))
while points:
a = random.sample(points, 1)[0]
points.remove(a)
if not evaltest.evaluate(0, a) % p:
continue
deltaa = delta.evaluate(0, a) % p
fa = f.evaluate(k-1, a).trunc_ground(p)
ga = g.evaluate(k-1, a).trunc_ground(p)
# polynomials in Z_p[x_0, ..., x_{k-2}]
ha = _modgcd_multivariate_p(fa, ga, p, degbound, contbound)
if ha is None:
d += 1
if d > n:
return None
continue
if ha.is_ground:
h = conth.set_ring(ring).trunc_ground(p)
return h
ha = ha.mul_ground(deltaa).trunc_ground(p)
evalpoints.append(a)
heval.append(ha)
n += 1
if n == N:
h = _interpolate_multivariate(evalpoints, heval, ring, k-1, p)
h = _primitive(h, p)[1] * conth.set_ring(ring)
degyh = h.degree(k-1)
if degyh > degbound[k-1]:
return None
if degyh < degbound[k-1]:
degbound[k-1] = degyh
raise ModularGCDFailed
return h
return None
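# Illustration (added comment, not part of the original module): a hand-checked
# sketch of a direct call for p = 5 and two polynomials with GCD x + y. The
# bound lists are hand-tuned to values that are already tight
# (degbound = [1, 1], contbound = [0, 0]); with looser bounds the routine may
# raise ``ModularGCDFailed`` instead, which ``modgcd_multivariate`` catches and
# retries with the updated bounds.
#
#     >>> from sympy.polys import ring, ZZ
#     >>> R, x, y = ring("x, y", ZZ)
#     >>> f = (x + y)*(x - 1)
#     >>> g = (x + y)*(x + 1)
#     >>> _modgcd_multivariate_p(f, g, 5, [1, 1], [0, 0])
#     x + y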
def modgcd_multivariate(f, g):
r"""
Compute the GCD of two polynomials in `\mathbb{Z}[x_0, \ldots, x_{k-1}]`
using a modular algorithm.
The algorithm computes the GCD of two multivariate integer polynomials
`f` and `g` by calculating the GCD in
`\mathbb{Z}_p[x_0, \ldots, x_{k-1}]` for suitable primes `p` and then
reconstructing the coefficients with the Chinese Remainder Theorem. To
compute the multivariate GCD over `\mathbb{Z}_p` the recursive
subroutine :func:`_modgcd_multivariate_p` is used. To verify the result in
`\mathbb{Z}[x_0, \ldots, x_{k-1}]`, trial division is done, but only for
candidates which are very likely the desired GCD.
Parameters
==========
f : PolyElement
multivariate integer polynomial
g : PolyElement
multivariate integer polynomial
Returns
=======
h : PolyElement
GCD of the polynomials `f` and `g`
cff : PolyElement
cofactor of `f`, i.e. `\frac{f}{h}`
cfg : PolyElement
cofactor of `g`, i.e. `\frac{g}{h}`
Examples
========
>>> from sympy.polys.modulargcd import modgcd_multivariate
>>> from sympy.polys import ring, ZZ
>>> R, x, y = ring("x, y", ZZ)
>>> f = x**2 - y**2
>>> g = x**2 + 2*x*y + y**2
>>> h, cff, cfg = modgcd_multivariate(f, g)
>>> h, cff, cfg
(x + y, x - y, x + y)
>>> cff * h == f
True
>>> cfg * h == g
True
>>> R, x, y, z = ring("x, y, z", ZZ)
>>> f = x*z**2 - y*z**2
>>> g = x**2*z + z
>>> h, cff, cfg = modgcd_multivariate(f, g)
>>> h, cff, cfg
(z, x*z - y*z, x**2 + 1)
>>> cff * h == f
True
>>> cfg * h == g
True
References
==========
1. [Monagan00]_
2. [Brown71]_
See also
========
_modgcd_multivariate_p
"""
assert f.ring == g.ring and f.ring.domain.is_ZZ
result = _trivial_gcd(f, g)
if result is not None:
return result
ring = f.ring
k = ring.ngens
# divide out integer content
cf, f = f.primitive()
cg, g = g.primitive()
ch = ring.domain.gcd(cf, cg)
gamma = ring.domain.gcd(f.LC, g.LC)
badprimes = ring.domain.one
for i in range(k):
badprimes *= ring.domain.gcd(_swap(f, i).LC, _swap(g, i).LC)
degbound = [min(fdeg, gdeg) for fdeg, gdeg in zip(f.degrees(), g.degrees())]
contbound = list(degbound)
m = 1
p = 1
while True:
p = nextprime(p)
while badprimes % p == 0:
p = nextprime(p)
fp = f.trunc_ground(p)
gp = g.trunc_ground(p)
try:
# monic GCD of fp, gp in Z_p[x_0, ..., x_{k-2}, y]
hp = _modgcd_multivariate_p(fp, gp, p, degbound, contbound)
except ModularGCDFailed:
m = 1
continue
if hp is None:
continue
hp = hp.mul_ground(gamma).trunc_ground(p)
if m == 1:
m = p
hlastm = hp
continue
hm = _chinese_remainder_reconstruction_multivariate(hp, hlastm, p, m)
m *= p
if not hm == hlastm:
hlastm = hm
continue
h = hm.primitive()[1]
fquo, frem = f.div(h)
gquo, grem = g.div(h)
if not frem and not grem:
if h.LC < 0:
ch = -ch
h = h.mul_ground(ch)
cff = fquo.mul_ground(cf // ch)
cfg = gquo.mul_ground(cg // ch)
return h, cff, cfg
def _gf_div(f, g, p):
r"""
Compute `\frac f g` modulo `p` for two univariate polynomials over
`\mathbb Z_p`.
"""
ring = f.ring
densequo, denserem = gf_div(f.to_dense(), g.to_dense(), p, ring.domain)
return ring.from_dense(densequo), ring.from_dense(denserem)
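# Illustration (added comment, not part of the original module): a hand-checked
# division with remainder over Z_5; note that ``gf_div`` returns coefficients
# in the non-negative range [0, p), so the quotient x - 1 appears as x + 4.
#
#     >>> from sympy.polys import ring, ZZ
#     >>> R, x = ring("x", ZZ)
#     >>> _gf_div(x**2 + 1, x + 1, 5)
#     (x + 4, 2)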
def _rational_function_reconstruction(c, p, m):
r"""
Reconstruct a rational function `\frac a b` in `\mathbb Z_p(t)` from
.. math::
c = \frac a b \; \mathrm{mod} \, m,
where `c` and `m` are polynomials in `\mathbb Z_p[t]` and `m` has
positive degree.
The algorithm is based on the Euclidean Algorithm. In general, `m` is
not irreducible, so it is possible that `b` is not invertible modulo
`m`. In that case ``None`` is returned.
Parameters
==========
c : PolyElement
univariate polynomial in `\mathbb Z[t]`
p : Integer
prime number
m : PolyElement
modulus, not necessarily irreducible
Returns
=======
frac : FracElement
either `\frac a b` in `\mathbb Z(t)` or ``None``
References
==========
1. [Hoeij04]_
"""
ring = c.ring
domain = ring.domain
M = m.degree()
N = M // 2
D = M - N - 1
r0, s0 = m, ring.zero
r1, s1 = c, ring.one
while r1.degree() > N:
quo = _gf_div(r0, r1, p)[0]
r0, r1 = r1, (r0 - quo*r1).trunc_ground(p)
s0, s1 = s1, (s0 - quo*s1).trunc_ground(p)
a, b = r1, s1
if b.degree() > D or _gf_gcd(b, m, p) != 1:
return None
lc = b.LC
if lc != 1:
lcinv = domain.invert(lc, p)
a = a.mul_ground(lcinv).trunc_ground(p)
b = b.mul_ground(lcinv).trunc_ground(p)
field = ring.to_field()
return field(a) / field(b)
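# Illustration (added comment, not part of the original module): a hand-checked
# reconstruction over Z_5 with modulus m = t**3. The input c = t**2 - t + 1 is
# the inverse of t + 1 modulo t**3, so the expected rational function is
# 1/(t + 1).
#
#     >>> from sympy.polys import ring, ZZ
#     >>> R, t = ring("t", ZZ)
#     >>> _rational_function_reconstruction(t**2 - t + 1, 5, t**3)
#     1/(t + 1)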
def _rational_reconstruction_func_coeffs(hm, p, m, ring, k):
r"""
Reconstruct every coefficient `c_h` of a polynomial `h` in
`\mathbb Z_p(t_k)[t_1, \ldots, t_{k-1}][x, z]` from the corresponding
coefficient `c_{h_m}` of a polynomial `h_m` in
`\mathbb Z_p[t_1, \ldots, t_k][x, z] \cong \mathbb Z_p[t_k][t_1, \ldots, t_{k-1}][x, z]`
such that
.. math::
c_{h_m} = c_h \; \mathrm{mod} \, m,
where `m \in \mathbb Z_p[t]`.
The reconstruction is based on the Euclidean Algorithm. In general, `m`
is not irreducible, so it is possible that this fails for some
coefficient. In that case ``None`` is returned.
Parameters
==========
hm : PolyElement
polynomial in `\mathbb Z[t_1, \ldots, t_k][x, z]`
p : Integer
prime number, modulus of `\mathbb Z_p`
m : PolyElement
modulus, polynomial in `\mathbb Z[t]`, not necessarily irreducible
ring : PolyRing
`\mathbb Z(t_k)[t_1, \ldots, t_{k-1}][x, z]`, `h` will be an
element of this ring
k : Integer
index of the parameter `t_k` which will be reconstructed
Returns
=======
h : PolyElement
reconstructed polynomial in
`\mathbb Z(t_k)[t_1, \ldots, t_{k-1}][x, z]` or ``None``
See also
========
_rational_function_reconstruction
"""
h = ring.zero
for monom, coeff in hm.iterterms():
if k == 0:
coeffh = _rational_function_reconstruction(coeff, p, m)
if not coeffh:
return None
else:
coeffh = ring.domain.zero
for mon, c in coeff.drop_to_ground(k).iterterms():
ch = _rational_function_reconstruction(c, p, m)
if not ch:
return None
coeffh[mon] = ch
h[monom] = coeffh
return h
def _gf_gcdex(f, g, p):
r"""
Extended Euclidean Algorithm for two univariate polynomials over
`\mathbb Z_p`.
Returns polynomials `s, t` and `h`, such that `h` is the GCD of `f` and
`g` and `sf + tg = h \; \mathrm{mod} \, p`.
"""
ring = f.ring
s, t, h = gf_gcdex(f.to_dense(), g.to_dense(), p, ring.domain)
return ring.from_dense(s), ring.from_dense(t), ring.from_dense(h)
def _trunc(f, minpoly, p):
r"""
Compute the reduced representation of a polynomial `f` in
`\mathbb Z_p[z] / (\check m_{\alpha}(z))[x]`
Parameters
==========
f : PolyElement
polynomial in `\mathbb Z[x, z]`
minpoly : PolyElement
polynomial `\check m_{\alpha} \in \mathbb Z[z]`, not necessarily
irreducible
p : Integer
prime number, modulus of `\mathbb Z_p`
Returns
=======
ftrunc : PolyElement
polynomial in `\mathbb Z[x, z]`, reduced modulo
`\check m_{\alpha}(z)` and `p`
"""
ring = f.ring
minpoly = minpoly.set_ring(ring)
p_ = ring.ground_new(p)
return f.trunc_ground(p).rem([minpoly, p_]).trunc_ground(p)
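# Illustration (added comment, not part of the original module): a hand-checked
# reduction modulo minpoly = z**2 - 2 and p = 5; the term x*z**3 reduces to
# 2*x*z because z**2 is replaced by 2.
#
#     >>> from sympy.polys import ring, ZZ
#     >>> R, x, z = ring("x, z", ZZ)
#     >>> _trunc(x*z**3 + z, z**2 - 2, 5)
#     2*x*z + z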
def _euclidean_algorithm(f, g, minpoly, p):
r"""
Compute the monic GCD of two univariate polynomials in
`\mathbb{Z}_p[z]/(\check m_{\alpha}(z))[x]` with the Euclidean
Algorithm.
In general, `\check m_{\alpha}(z)` is not irreducible, so it is possible
that some leading coefficient is not invertible modulo
`\check m_{\alpha}(z)`. In that case ``None`` is returned.
Parameters
==========
f, g : PolyElement
polynomials in `\mathbb Z[x, z]`
minpoly : PolyElement
polynomial in `\mathbb Z[z]`, not necessarily irreducible
p : Integer
prime number, modulus of `\mathbb Z_p`
Returns
=======
h : PolyElement
GCD of `f` and `g` in `\mathbb Z[z, x]` or ``None``, coefficients
are in `\left[ -\frac{p-1} 2, \frac{p-1} 2 \right]`
"""
ring = f.ring
f = _trunc(f, minpoly, p)
g = _trunc(g, minpoly, p)
while g:
rem = f
deg = g.degree(0) # degree in x
lcinv, _, gcd = _gf_gcdex(ring.dmp_LC(g), minpoly, p)
if not gcd == 1:
return None
while True:
degrem = rem.degree(0) # degree in x
if degrem < deg:
break
quo = (lcinv * ring.dmp_LC(rem)).set_ring(ring)
rem = _trunc(rem - g.mul_monom((degrem - deg, 0))*quo, minpoly, p)
f = g
g = rem
lcfinv = _gf_gcdex(ring.dmp_LC(f), minpoly, p)[0].set_ring(ring)
return _trunc(f * lcfinv, minpoly, p)
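# Illustration (added comment, not part of the original module): a hand-checked
# GCD computation in Z_5[z]/(z**2 - 2)[x]. Modulo the minimal polynomial,
# x**2 - 2 factors as (x - z)*(x + z), so the monic GCD with x - z is x - z
# itself. The minimal polynomial is passed as a univariate polynomial in z,
# as in the callers above.
#
#     >>> from sympy.polys import ring, ZZ
#     >>> R, x, z = ring("x, z", ZZ)
#     >>> minpoly = (z**2 - 2).drop(0)
#     >>> _euclidean_algorithm(x**2 - 2, x - z, minpoly, 5)
#     x - z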
def _trial_division(f, h, minpoly, p=None):
r"""
Check if `h` divides `f` in
`\mathbb K[t_1, \ldots, t_k][z]/(m_{\alpha}(z))`, where `\mathbb K` is
either `\mathbb Q` or `\mathbb Z_p`.
This algorithm is based on pseudo division and does not use any
fractions. By default `\mathbb K` is `\mathbb Q`, if a prime number `p`
is given, `\mathbb Z_p` is chosen instead.
Parameters
==========
f, h : PolyElement
polynomials in `\mathbb Z[t_1, \ldots, t_k][x, z]`
minpoly : PolyElement
polynomial `m_{\alpha}(z)` in `\mathbb Z[t_1, \ldots, t_k][z]`
p : Integer or None
if `p` is given, `\mathbb K` is set to `\mathbb Z_p` instead of
`\mathbb Q`, default is ``None``
Returns
=======
rem : PolyElement
remainder of `\frac f h`
References
==========
1. [Hoeij02]_
"""
ring = f.ring
zxring = ring.clone(symbols=(ring.symbols[1], ring.symbols[0]))
minpoly = minpoly.set_ring(ring)
rem = f
degrem = rem.degree()
degh = h.degree()
degm = minpoly.degree(1)
lch = _LC(h).set_ring(ring)
lcm = minpoly.LC
while rem and degrem >= degh:
# polynomial in Z[t_1, ..., t_k][z]
lcrem = _LC(rem).set_ring(ring)
rem = rem*lch - h.mul_monom((degrem - degh, 0))*lcrem
if p:
rem = rem.trunc_ground(p)
degrem = rem.degree(1)
while rem and degrem >= degm:
# polynomial in Z[t_1, ..., t_k][x]
lcrem = _LC(rem.set_ring(zxring)).set_ring(ring)
rem = rem.mul_ground(lcm) - minpoly.mul_monom((0, degrem - degm))*lcrem
if p:
rem = rem.trunc_ground(p)
degrem = rem.degree(1)
degrem = rem.degree()
return rem
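# Illustration (added comment, not part of the original module): a hand-checked
# trial division in Z[z]/(z**2 - 2)[x]. Since x**2 - 2 = (x - z)*(x + z) modulo
# the minimal polynomial, the remainder is zero, i.e. x - z divides x**2 - 2.
#
#     >>> from sympy.polys import ring, ZZ
#     >>> R, x, z = ring("x, z", ZZ)
#     >>> minpoly = (z**2 - 2).drop(0)
#     >>> _trial_division(x**2 - 2, x - z, minpoly)
#     0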
def _evaluate_ground(f, i, a):
r"""
Evaluate a polynomial `f` at `a` in the `i`-th variable of the ground
domain.
"""
ring = f.ring.clone(domain=f.ring.domain.ring.drop(i))
fa = ring.zero
for monom, coeff in f.iterterms():
fa[monom] = coeff.evaluate(i, a)
return fa
def _func_field_modgcd_p(f, g, minpoly, p):
r"""
Compute the GCD of two polynomials `f` and `g` in
`\mathbb Z_p(t_1, \ldots, t_k)[z]/(\check m_\alpha(z))[x]`.
The algorithm reduces the problem step by step by evaluating the
polynomials `f` and `g` at `t_k = a` for suitable `a \in \mathbb Z_p`
and then calls itself recursively to compute the GCD in
`\mathbb Z_p(t_1, \ldots, t_{k-1})[z]/(\check m_\alpha(z))[x]`. If these
recursive calls are successful, the GCD over `k` variables is
interpolated, otherwise the algorithm returns ``None``. After
interpolation, Rational Function Reconstruction is used to obtain the
correct coefficients. If this fails, a new evaluation point has to be
chosen, otherwise the desired polynomial is obtained by clearing
denominators. The result is verified with a fraction free trial
division.
Parameters
==========
f, g : PolyElement
polynomials in `\mathbb Z[t_1, \ldots, t_k][x, z]`
minpoly : PolyElement
polynomial in `\mathbb Z[t_1, \ldots, t_k][z]`, not necessarily
irreducible
p : Integer
prime number, modulus of `\mathbb Z_p`
Returns
=======
h : PolyElement
primitive associate in `\mathbb Z[t_1, \ldots, t_k][x, z]` of the
GCD of the polynomials `f` and `g` or ``None``, coefficients are
in `\left[ -\frac{p-1} 2, \frac{p-1} 2 \right]`
References
==========
1. [Hoeij04]_
"""
ring = f.ring
domain = ring.domain # Z[t_1, ..., t_k]
if isinstance(domain, PolynomialRing):
k = domain.ngens
else:
return _euclidean_algorithm(f, g, minpoly, p)
if k == 1:
qdomain = domain.ring.to_field()
else:
qdomain = domain.ring.drop_to_ground(k - 1)
qdomain = qdomain.clone(domain=qdomain.domain.ring.to_field())
qring = ring.clone(domain=qdomain) # = Z(t_k)[t_1, ..., t_{k-1}][x, z]
n = 1
d = 1
# polynomial in Z_p[t_1, ..., t_k][z]
gamma = ring.dmp_LC(f) * ring.dmp_LC(g)
# polynomial in Z_p[t_1, ..., t_k]
delta = minpoly.LC
evalpoints = []
heval = []
LMlist = []
points = set(range(p))
while points:
a = random.sample(points, 1)[0]
points.remove(a)
if k == 1:
test = delta.evaluate(k-1, a) % p == 0
else:
test = delta.evaluate(k-1, a).trunc_ground(p) == 0
if test:
continue
gammaa = _evaluate_ground(gamma, k-1, a)
minpolya = _evaluate_ground(minpoly, k-1, a)
if gammaa.rem([minpolya, gammaa.ring(p)]) == 0:
continue
fa = _evaluate_ground(f, k-1, a)
ga = _evaluate_ground(g, k-1, a)
# polynomial in Z_p[x, t_1, ..., t_{k-1}, z]/(minpoly)
ha = _func_field_modgcd_p(fa, ga, minpolya, p)
if ha is None:
d += 1
if d > n:
return None
continue
if ha == 1:
return ha
LM = [ha.degree()] + [0]*(k-1)
if k > 1:
for monom, coeff in ha.iterterms():
if monom[0] == LM[0] and coeff.LM > tuple(LM[1:]):
LM[1:] = coeff.LM
evalpoints_a = [a]
heval_a = [ha]
if k == 1:
m = qring.domain.get_ring().one
else:
m = qring.domain.domain.get_ring().one
t = m.ring.gens[0]
for b, hb, LMhb in zip(evalpoints, heval, LMlist):
if LMhb == LM:
evalpoints_a.append(b)
heval_a.append(hb)
m *= (t - b)
m = m.trunc_ground(p)
evalpoints.append(a)
heval.append(ha)
LMlist.append(LM)
n += 1
# polynomial in Z_p[t_1, ..., t_k][x, z]
h = _interpolate_multivariate(evalpoints_a, heval_a, ring, k-1, p, ground=True)
# polynomial in Z_p(t_k)[t_1, ..., t_{k-1}][x, z]
h = _rational_reconstruction_func_coeffs(h, p, m, qring, k-1)
if h is None:
continue
if k == 1:
dom = qring.domain.field
den = dom.ring.one
for coeff in h.itercoeffs():
den = dom.ring.from_dense(gf_lcm(den.to_dense(), coeff.denom.to_dense(),
p, dom.domain))
else:
dom = qring.domain.domain.field
den = dom.ring.one
for coeff in h.itercoeffs():
for c in coeff.itercoeffs():
den = dom.ring.from_dense(gf_lcm(den.to_dense(), c.denom.to_dense(),
p, dom.domain))
den = qring.domain_new(den.trunc_ground(p))
h = ring(h.mul_ground(den).as_expr()).trunc_ground(p)
if not _trial_division(f, h, minpoly, p) and not _trial_division(g, h, minpoly, p):
return h
return None
def _integer_rational_reconstruction(c, m, domain):
r"""
Reconstruct a rational number `\frac a b` from
.. math::
c = \frac a b \; \mathrm{mod} \, m,
where `c` and `m` are integers.
The algorithm is based on the Euclidean Algorithm. In general, `m` is
not a prime number, so it is possible that `b` is not invertible modulo
`m`. In that case ``None`` is returned.
Parameters
==========
c : Integer
`c = \frac a b \; \mathrm{mod} \, m`
m : Integer
modulus, not necessarily prime
domain : IntegerRing
`a, b, c` are elements of ``domain``
Returns
=======
frac : Rational
either `\frac a b` in `\mathbb Q` or ``None``
References
==========
1. [Wang81]_
"""
if c < 0:
c += m
r0, s0 = m, domain.zero
r1, s1 = c, domain.one
bound = sqrt(m / 2) # still correct if replaced by ZZ.sqrt(m // 2) ?
while r1 >= bound:
quo = r0 // r1
r0, r1 = r1, r0 - quo*r1
s0, s1 = s1, s0 - quo*s1
if abs(s1) >= bound:
return None
if s1 < 0:
a, b = -r1, -s1
elif s1 > 0:
a, b = r1, s1
else:
return None
field = domain.get_field()
return field(a) / field(b)
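# Illustration (added comment, not part of the original module): a hand-checked
# reconstruction of 2/3 from its image modulo 101. Since the inverse of 3
# modulo 101 is 34, the image of 2/3 is 2*34 = 68.
#
#     >>> from sympy.polys import ZZ
#     >>> _integer_rational_reconstruction(ZZ(68), ZZ(101), ZZ)
#     2/3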
def _rational_reconstruction_int_coeffs(hm, m, ring):
r"""
Reconstruct every rational coefficient `c_h` of a polynomial `h` in
`\mathbb Q[t_1, \ldots, t_k][x, z]` from the corresponding integer
coefficient `c_{h_m}` of a polynomial `h_m` in
`\mathbb Z[t_1, \ldots, t_k][x, z]` such that
.. math::
c_{h_m} = c_h \; \mathrm{mod} \, m,
where `m \in \mathbb Z`.
The reconstruction is based on the Euclidean Algorithm. In general,
`m` is not a prime number, so it is possible that this fails for some
coefficient. In that case ``None`` is returned.
Parameters
==========
hm : PolyElement
polynomial in `\mathbb Z[t_1, \ldots, t_k][x, z]`
m : Integer
modulus, not necessarily prime
ring : PolyRing
`\mathbb Q[t_1, \ldots, t_k][x, z]`, `h` will be an element of this
ring
Returns
=======
h : PolyElement
reconstructed polynomial in `\mathbb Q[t_1, \ldots, t_k][x, z]` or
``None``
See also
========
_integer_rational_reconstruction
"""
h = ring.zero
if isinstance(ring.domain, PolynomialRing):
reconstruction = _rational_reconstruction_int_coeffs
domain = ring.domain.ring
else:
reconstruction = _integer_rational_reconstruction
domain = hm.ring.domain
for monom, coeff in hm.iterterms():
coeffh = reconstruction(coeff, m, domain)
if not coeffh:
return None
h[monom] = coeffh
return h
def _func_field_modgcd_m(f, g, minpoly):
r"""
Compute the GCD of two polynomials in
`\mathbb Q(t_1, \ldots, t_k)[z]/(m_{\alpha}(z))[x]` using a modular
algorithm.
The algorithm computes the GCD of two polynomials `f` and `g` by
calculating the GCD in
`\mathbb Z_p(t_1, \ldots, t_k)[z] / (\check m_{\alpha}(z))[x]` for
suitable primes `p` and the primitive associate `\check m_{\alpha}(z)`
of `m_{\alpha}(z)`. Then the coefficients are reconstructed with the
Chinese Remainder Theorem and Rational Reconstruction. To compute the
GCD over `\mathbb Z_p(t_1, \ldots, t_k)[z] / (\check m_{\alpha})[x]`,
the recursive subroutine ``_func_field_modgcd_p`` is used. To verify the
result in `\mathbb Q(t_1, \ldots, t_k)[z] / (m_{\alpha}(z))[x]`, a
fraction free trial division is used.
Parameters
==========
f, g : PolyElement
polynomials in `\mathbb Z[t_1, \ldots, t_k][x, z]`
minpoly : PolyElement
irreducible polynomial in `\mathbb Z[t_1, \ldots, t_k][z]`
Returns
=======
h : PolyElement
the primitive associate in `\mathbb Z[t_1, \ldots, t_k][x, z]` of
the GCD of `f` and `g`
Examples
========
>>> from sympy.polys.modulargcd import _func_field_modgcd_m
>>> from sympy.polys import ring, ZZ
>>> R, x, z = ring('x, z', ZZ)
>>> minpoly = (z**2 - 2).drop(0)
>>> f = x**2 + 2*x*z + 2
>>> g = x + z
>>> _func_field_modgcd_m(f, g, minpoly)
x + z
>>> D, t = ring('t', ZZ)
>>> R, x, z = ring('x, z', D)
>>> minpoly = (z**2-3).drop(0)
>>> f = x**2 + (t + 1)*x*z + 3*t
>>> g = x*z + 3*t
>>> _func_field_modgcd_m(f, g, minpoly)
x + t*z
References
==========
1. [Hoeij04]_
See also
========
_func_field_modgcd_p
"""
ring = f.ring
domain = ring.domain
if isinstance(domain, PolynomialRing):
k = domain.ngens
QQdomain = domain.ring.clone(domain=domain.domain.get_field())
QQring = ring.clone(domain=QQdomain)
else:
k = 0
QQring = ring.clone(domain=ring.domain.get_field())
cf, f = f.primitive()
cg, g = g.primitive()
# polynomial in Z[t_1, ..., t_k][z]
gamma = ring.dmp_LC(f) * ring.dmp_LC(g)
# polynomial in Z[t_1, ..., t_k]
delta = minpoly.LC
p = 1
primes = []
hplist = []
LMlist = []
while True:
p = nextprime(p)
if gamma.trunc_ground(p) == 0:
continue
if k == 0:
test = (delta % p == 0)
else:
test = (delta.trunc_ground(p) == 0)
if test:
continue
fp = f.trunc_ground(p)
gp = g.trunc_ground(p)
minpolyp = minpoly.trunc_ground(p)
hp = _func_field_modgcd_p(fp, gp, minpolyp, p)
if hp is None:
continue
if hp == 1:
return ring.one
LM = [hp.degree()] + [0]*k
if k > 0:
for monom, coeff in hp.iterterms():
if monom[0] == LM[0] and coeff.LM > tuple(LM[1:]):
LM[1:] = coeff.LM
hm = hp
m = p
for q, hq, LMhq in zip(primes, hplist, LMlist):
if LMhq == LM:
hm = _chinese_remainder_reconstruction_multivariate(hq, hm, q, m)
m *= q
primes.append(p)
hplist.append(hp)
LMlist.append(LM)
hm = _rational_reconstruction_int_coeffs(hm, m, QQring)
if hm is None:
continue
if k == 0:
h = hm.clear_denoms()[1]
else:
den = domain.domain.one
for coeff in hm.itercoeffs():
den = domain.domain.lcm(den, coeff.clear_denoms()[0])
h = hm.mul_ground(den)
# convert back to Z[t_1, ..., t_k][x, z] from Q[t_1, ..., t_k][x, z]
h = h.set_ring(ring)
h = h.primitive()[1]
if not (_trial_division(f.mul_ground(cf), h, minpoly) or
_trial_division(g.mul_ground(cg), h, minpoly)):
return h
def _to_ZZ_poly(f, ring):
r"""
Compute an associate of a polynomial
`f \in \mathbb Q(\alpha)[x_0, \ldots, x_{n-1}]` in
`\mathbb Z[x_1, \ldots, x_{n-1}][z] / (\check m_{\alpha}(z))[x_0]`,
where `\check m_{\alpha}(z) \in \mathbb Z[z]` is the primitive associate
of the minimal polynomial `m_{\alpha}(z)` of `\alpha` over
`\mathbb Q`.
Parameters
==========
f : PolyElement
polynomial in `\mathbb Q(\alpha)[x_0, \ldots, x_{n-1}]`
ring : PolyRing
`\mathbb Z[x_1, \ldots, x_{n-1}][x_0, z]`
Returns
=======
f_ : PolyElement
associate of `f` in
`\mathbb Z[x_1, \ldots, x_{n-1}][x_0, z]`
"""
f_ = ring.zero
if isinstance(ring.domain, PolynomialRing):
domain = ring.domain.domain
else:
domain = ring.domain
den = domain.one
for coeff in f.itercoeffs():
for c in coeff.rep:
if c:
den = domain.lcm(den, c.denominator)
for monom, coeff in f.iterterms():
coeff = coeff.rep
m = ring.domain.one
if isinstance(ring.domain, PolynomialRing):
m = m.mul_monom(monom[1:])
n = len(coeff)
for i in range(n):
if coeff[i]:
c = domain(coeff[i] * den) * m
if (monom[0], n-i-1) not in f_:
f_[(monom[0], n-i-1)] = c
else:
f_[(monom[0], n-i-1)] += c
return f_
def _to_ANP_poly(f, ring):
r"""
Convert a polynomial
`f \in \mathbb Z[x_1, \ldots, x_{n-1}][z]/(\check m_{\alpha}(z))[x_0]`
to a polynomial in `\mathbb Q(\alpha)[x_0, \ldots, x_{n-1}]`,
where `\check m_{\alpha}(z) \in \mathbb Z[z]` is the primitive associate
of the minimal polynomial `m_{\alpha}(z)` of `\alpha` over
`\mathbb Q`.
Parameters
==========
f : PolyElement
polynomial in `\mathbb Z[x_1, \ldots, x_{n-1}][x_0, z]`
ring : PolyRing
`\mathbb Q(\alpha)[x_0, \ldots, x_{n-1}]`
Returns
=======
f_ : PolyElement
polynomial in `\mathbb Q(\alpha)[x_0, \ldots, x_{n-1}]`
"""
domain = ring.domain
f_ = ring.zero
if isinstance(f.ring.domain, PolynomialRing):
for monom, coeff in f.iterterms():
for mon, coef in coeff.iterterms():
m = (monom[0],) + mon
c = domain([domain.domain(coef)] + [0]*monom[1])
if m not in f_:
f_[m] = c
else:
f_[m] += c
else:
for monom, coeff in f.iterterms():
m = (monom[0],)
c = domain([domain.domain(coeff)] + [0]*monom[1])
if m not in f_:
f_[m] = c
else:
f_[m] += c
return f_
def _minpoly_from_dense(minpoly, ring):
r"""
Change representation of the minimal polynomial from ``DMP`` to
``PolyElement`` for a given ring.
"""
minpoly_ = ring.zero
for monom, coeff in minpoly.terms():
minpoly_[monom] = ring.domain(coeff)
return minpoly_
def _primitive_in_x0(f):
r"""
Compute the content in `x_0` and the primitive part of a polynomial `f`
in
`\mathbb Q(\alpha)[x_0, x_1, \ldots, x_{n-1}] \cong \mathbb Q(\alpha)[x_1, \ldots, x_{n-1}][x_0]`.
"""
fring = f.ring
ring = fring.drop_to_ground(*range(1, fring.ngens))
dom = ring.domain.ring
f_ = ring(f.as_expr())
cont = dom.zero
for coeff in f_.itercoeffs():
cont = func_field_modgcd(cont, coeff)[0]
if cont == dom.one:
return cont, f
return cont, f.quo(cont.set_ring(fring))
# TODO: add support for algebraic function fields
def func_field_modgcd(f, g):
r"""
Compute the GCD of two polynomials `f` and `g` in
`\mathbb Q(\alpha)[x_0, \ldots, x_{n-1}]` using a modular algorithm.
The algorithm first computes the primitive associate
`\check m_{\alpha}(z)` of the minimal polynomial `m_{\alpha}` in
`\mathbb{Z}[z]` and the primitive associates of `f` and `g` in
`\mathbb{Z}[x_1, \ldots, x_{n-1}][z]/(\check m_{\alpha})[x_0]`. Then it
computes the GCD in
`\mathbb Q(x_1, \ldots, x_{n-1})[z]/(m_{\alpha}(z))[x_0]`.
This is done by calculating the GCD in
`\mathbb{Z}_p(x_1, \ldots, x_{n-1})[z]/(\check m_{\alpha}(z))[x_0]` for
suitable primes `p` and then reconstructing the coefficients with the
Chinese Remainder Theorem and Rational Reconstruction. The GCD over
`\mathbb{Z}_p(x_1, \ldots, x_{n-1})[z]/(\check m_{\alpha}(z))[x_0]` is
computed with a recursive subroutine, which evaluates the polynomials at
`x_{n-1} = a` for suitable evaluation points `a \in \mathbb Z_p` and
then calls itself recursively until the ground domain does no longer
contain any parameters. For
`\mathbb{Z}_p[z]/(\check m_{\alpha}(z))[x_0]` the Euclidean Algorithm is
used. The results of those recursive calls are then interpolated and
Rational Function Reconstruction is used to obtain the correct
coefficients. The results, both in
`\mathbb Q(x_1, \ldots, x_{n-1})[z]/(m_{\alpha}(z))[x_0]` and
`\mathbb{Z}_p(x_1, \ldots, x_{n-1})[z]/(\check m_{\alpha}(z))[x_0]`, are
verified by a fraction free trial division.
Apart from the above GCD computation some GCDs in
`\mathbb Q(\alpha)[x_1, \ldots, x_{n-1}]` have to be calculated,
because treating the polynomials as univariate ones can result in
a spurious content of the GCD. For this ``func_field_modgcd`` is
called recursively.
Parameters
==========
f, g : PolyElement
polynomials in `\mathbb Q(\alpha)[x_0, \ldots, x_{n-1}]`
Returns
=======
h : PolyElement
monic GCD of the polynomials `f` and `g`
cff : PolyElement
cofactor of `f`, i.e. `\frac f h`
cfg : PolyElement
cofactor of `g`, i.e. `\frac g h`
Examples
========
>>> from sympy.polys.modulargcd import func_field_modgcd
>>> from sympy.polys import AlgebraicField, QQ, ring
>>> from sympy import sqrt
>>> A = AlgebraicField(QQ, sqrt(2))
>>> R, x = ring('x', A)
>>> f = x**2 - 2
>>> g = x + sqrt(2)
>>> h, cff, cfg = func_field_modgcd(f, g)
>>> h == x + sqrt(2)
True
>>> cff * h == f
True
>>> cfg * h == g
True
>>> R, x, y = ring('x, y', A)
>>> f = x**2 + 2*sqrt(2)*x*y + 2*y**2
>>> g = x + sqrt(2)*y
>>> h, cff, cfg = func_field_modgcd(f, g)
>>> h == x + sqrt(2)*y
True
>>> cff * h == f
True
>>> cfg * h == g
True
>>> f = x + sqrt(2)*y
>>> g = x + y
>>> h, cff, cfg = func_field_modgcd(f, g)
>>> h == R.one
True
>>> cff * h == f
True
>>> cfg * h == g
True
References
==========
1. [Hoeij04]_
"""
ring = f.ring
domain = ring.domain
n = ring.ngens
assert ring == g.ring and domain.is_Algebraic
result = _trivial_gcd(f, g)
if result is not None:
return result
z = Dummy('z')
ZZring = ring.clone(symbols=ring.symbols + (z,), domain=domain.domain.get_ring())
if n == 1:
f_ = _to_ZZ_poly(f, ZZring)
g_ = _to_ZZ_poly(g, ZZring)
minpoly = ZZring.drop(0).from_dense(domain.mod.rep)
h = _func_field_modgcd_m(f_, g_, minpoly)
h = _to_ANP_poly(h, ring)
else:
# contx0f in Q(a)[x_1, ..., x_{n-1}], f in Q(a)[x_0, ..., x_{n-1}]
contx0f, f = _primitive_in_x0(f)
contx0g, g = _primitive_in_x0(g)
contx0h = func_field_modgcd(contx0f, contx0g)[0]
ZZring_ = ZZring.drop_to_ground(*range(1, n))
f_ = _to_ZZ_poly(f, ZZring_)
g_ = _to_ZZ_poly(g, ZZring_)
minpoly = _minpoly_from_dense(domain.mod, ZZring_.drop(0))
h = _func_field_modgcd_m(f_, g_, minpoly)
h = _to_ANP_poly(h, ring)
contx0h_, h = _primitive_in_x0(h)
h *= contx0h.set_ring(ring)
f *= contx0f.set_ring(ring)
g *= contx0g.set_ring(ring)
h = h.quo_ground(h.LC)
return h, f.quo(h), g.quo(h)
|
859d023461857019763af76aeba102307b1368988e7a95795ad1111163d3bfa9 | """High-level polynomials manipulation functions. """
from __future__ import print_function, division
from sympy.core import S, Basic, Add, Mul, symbols, Dummy
from sympy.polys.polyerrors import (
PolificationFailed, ComputationFailed,
MultivariatePolynomialError, OptionError)
from sympy.polys.polyoptions import allowed_flags
from sympy.polys.polytools import (
poly_from_expr, parallel_poly_from_expr, Poly)
from sympy.polys.specialpolys import (
symmetric_poly, interpolating_poly)
from sympy.utilities import numbered_symbols, take, public
@public
def symmetrize(F, *gens, **args):
"""
Rewrite a polynomial in terms of elementary symmetric polynomials.
A symmetric polynomial is a multivariate polynomial that remains invariant
under any variable permutation, i.e., if ``f = f(x_1, x_2, ..., x_n)``,
then ``f = f(x_{i_1}, x_{i_2}, ..., x_{i_n})``, where
``(i_1, i_2, ..., i_n)`` is a permutation of ``(1, 2, ..., n)`` (an
element of the group ``S_n``).
Returns a tuple of symmetric polynomials ``(f1, f2, ..., fn)`` such that
``f = f1 + f2 + ... + fn``.
Examples
========
>>> from sympy.polys.polyfuncs import symmetrize
>>> from sympy.abc import x, y
>>> symmetrize(x**2 + y**2)
(-2*x*y + (x + y)**2, 0)
>>> symmetrize(x**2 + y**2, formal=True)
(s1**2 - 2*s2, 0, [(s1, x + y), (s2, x*y)])
>>> symmetrize(x**2 - y**2)
(-2*x*y + (x + y)**2, -2*y**2)
>>> symmetrize(x**2 - y**2, formal=True)
(s1**2 - 2*s2, -2*y**2, [(s1, x + y), (s2, x*y)])
"""
allowed_flags(args, ['formal', 'symbols'])
iterable = True
if not hasattr(F, '__iter__'):
iterable = False
F = [F]
try:
F, opt = parallel_poly_from_expr(F, *gens, **args)
except PolificationFailed as exc:
result = []
for expr in exc.exprs:
if expr.is_Number:
result.append((expr, S.Zero))
else:
raise ComputationFailed('symmetrize', len(F), exc)
if not iterable:
result, = result
if not exc.opt.formal:
return result
else:
if iterable:
return result, []
else:
return result + ([],)
polys, symbols = [], opt.symbols
gens, dom = opt.gens, opt.domain
for i in range(len(gens)):
poly = symmetric_poly(i + 1, gens, polys=True)
polys.append((next(symbols), poly.set_domain(dom)))
indices = list(range(len(gens) - 1))
weights = list(range(len(gens), 0, -1))
result = []
for f in F:
symmetric = []
if not f.is_homogeneous:
symmetric.append(f.TC())
f -= f.TC().as_poly(f.gens)
while f:
_height, _monom, _coeff = -1, None, None
for i, (monom, coeff) in enumerate(f.terms()):
if all(monom[i] >= monom[i + 1] for i in indices):
height = max([n*m for n, m in zip(weights, monom)])
if height > _height:
_height, _monom, _coeff = height, monom, coeff
if _height != -1:
monom, coeff = _monom, _coeff
else:
break
exponents = []
for m1, m2 in zip(monom, monom[1:] + (0,)):
exponents.append(m1 - m2)
term = [s**n for (s, _), n in zip(polys, exponents)]
poly = [p**n for (_, p), n in zip(polys, exponents)]
symmetric.append(Mul(coeff, *term))
product = poly[0].mul(coeff)
for p in poly[1:]:
product = product.mul(p)
f -= product
result.append((Add(*symmetric), f.as_expr()))
polys = [(s, p.as_expr()) for s, p in polys]
if not opt.formal:
for i, (sym, non_sym) in enumerate(result):
result[i] = (sym.subs(polys), non_sym)
if not iterable:
result, = result
if not opt.formal:
return result
else:
if iterable:
return result, polys
else:
return result + (polys,)
@public
def horner(f, *gens, **args):
"""
Rewrite a polynomial in Horner form.
Among other applications, evaluation of a polynomial at a point is optimal
when the polynomial is rewritten in Horner form ([1]).
Examples
========
>>> from sympy.polys.polyfuncs import horner
>>> from sympy.abc import x, y, a, b, c, d, e
>>> horner(9*x**4 + 8*x**3 + 7*x**2 + 6*x + 5)
x*(x*(x*(9*x + 8) + 7) + 6) + 5
>>> horner(a*x**4 + b*x**3 + c*x**2 + d*x + e)
e + x*(d + x*(c + x*(a*x + b)))
>>> f = 4*x**2*y**2 + 2*x**2*y + 2*x*y**2 + x*y
>>> horner(f, wrt=x)
x*(x*y*(4*y + 2) + y*(2*y + 1))
>>> horner(f, wrt=y)
y*(x*y*(4*x + 2) + x*(2*x + 1))
References
==========
[1] - https://en.wikipedia.org/wiki/Horner_scheme
"""
allowed_flags(args, [])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
return exc.expr
form, gen = S.Zero, F.gen
if F.is_univariate:
for coeff in F.all_coeffs():
form = form*gen + coeff
else:
F, gens = Poly(F, gen), gens[1:]
for coeff in F.all_coeffs():
form = form*gen + horner(coeff, *gens, **args)
return form
@public
def interpolate(data, x):
"""
Construct an interpolating polynomial for the data points
evaluated at point x (which can be symbolic or numeric).
Examples
========
>>> from sympy.polys.polyfuncs import interpolate
>>> from sympy.abc import a, b, x
A list is interpreted as though it were paired with a range starting
from 1:
>>> interpolate([1, 4, 9, 16], x)
x**2
This can be made explicit by giving a list of coordinates:
>>> interpolate([(1, 1), (2, 4), (3, 9)], x)
x**2
The (x, y) coordinates can also be given as keys and values of a
dictionary (and the points need not be equispaced):
>>> interpolate([(-1, 2), (1, 2), (2, 5)], x)
x**2 + 1
>>> interpolate({-1: 2, 1: 2, 2: 5}, x)
x**2 + 1
If the interpolation is going to be used only once then the
value of interest can be passed instead of passing a symbol:
>>> interpolate([1, 4, 9], 5)
25
Symbolic coordinates are also supported:
>>> [(i,interpolate((a, b), i)) for i in range(1, 4)]
[(1, a), (2, b), (3, -a + 2*b)]
"""
n = len(data)
if isinstance(data, dict):
if x in data:
return S(data[x])
X, Y = list(zip(*data.items()))
else:
if isinstance(data[0], tuple):
X, Y = list(zip(*data))
if x in X:
return S(Y[X.index(x)])
else:
if x in range(1, n + 1):
return S(data[x - 1])
Y = list(data)
X = list(range(1, n + 1))
try:
return interpolating_poly(n, x, X, Y).expand()
except ValueError:
d = Dummy()
return interpolating_poly(n, d, X, Y).expand().subs(d, x)
@public
def rational_interpolate(data, degnum, X=symbols('x')):
"""
Returns a rational interpolation, where the data points are elements of
any integral domain.
The first argument contains the data (as a list of coordinates). The
``degnum`` argument is the degree in the numerator of the rational
function. Setting it too high will decrease the maximal degree in the
denominator for the same amount of data.
Examples
========
>>> from sympy.polys.polyfuncs import rational_interpolate
>>> data = [(1, -210), (2, -35), (3, 105), (4, 231), (5, 350), (6, 465)]
>>> rational_interpolate(data, 2)
(105*x**2 - 525)/(x + 1)
Values do not need to be integers:
>>> from sympy import sympify
>>> x = [1, 2, 3, 4, 5, 6]
>>> y = sympify("[-1, 0, 2, 22/5, 7, 68/7]")
>>> rational_interpolate(zip(x, y), 2)
(3*x**2 - 7*x + 2)/(x + 1)
The symbol for the variable can be changed if needed:
>>> from sympy import symbols
>>> z = symbols('z')
>>> rational_interpolate(data, 2, X=z)
(105*z**2 - 525)/(z + 1)
References
==========
.. [1] Algorithm is adapted from:
http://axiom-wiki.newsynthesis.org/RationalInterpolation
"""
from sympy.matrices.dense import ones
xdata, ydata = list(zip(*data))
k = len(xdata) - degnum - 1
if k < 0:
raise OptionError("Too few values for the required degree.")
c = ones(degnum + k + 1, degnum + k + 2)
for j in range(max(degnum, k)):
for i in range(degnum + k + 1):
c[i, j + 1] = c[i, j]*xdata[i]
for j in range(k + 1):
for i in range(degnum + k + 1):
c[i, degnum + k + 1 - j] = -c[i, k - j]*ydata[i]
r = c.nullspace()[0]
return (sum(r[i] * X**i for i in range(degnum + 1))
/ sum(r[i + degnum + 1] * X**i for i in range(k + 1)))
@public
def viete(f, roots=None, *gens, **args):
"""
Generate Viete's formulas for ``f``.
Examples
========
>>> from sympy.polys.polyfuncs import viete
>>> from sympy import symbols
>>> x, a, b, c, r1, r2 = symbols('x,a:c,r1:3')
>>> viete(a*x**2 + b*x + c, [r1, r2], x)
[(r1 + r2, -b/a), (r1*r2, c/a)]
"""
allowed_flags(args, [])
if isinstance(roots, Basic):
gens, roots = (roots,) + gens, None
try:
f, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('viete', 1, exc)
if f.is_multivariate:
raise MultivariatePolynomialError(
"multivariate polynomials are not allowed")
n = f.degree()
if n < 1:
raise ValueError(
"can't derive Viete's formulas for a constant polynomial")
if roots is None:
roots = numbered_symbols('r', start=1)
roots = take(roots, n)
if n != len(roots):
raise ValueError("required %s roots, got %s" % (n, len(roots)))
lc, coeffs = f.LC(), f.all_coeffs()
result, sign = [], -1
for i, coeff in enumerate(coeffs[1:]):
poly = symmetric_poly(i + 1, roots)
coeff = sign*(coeff/lc)
result.append((poly, coeff))
sign = -sign
return result
|
418c6facbc005d4e511538b950d5fc0109b158215d69e4f3d8de28ed39e5eac4 | """Heuristic polynomial GCD algorithm (HEUGCD). """
from __future__ import print_function, division
from .polyerrors import HeuristicGCDFailed
HEU_GCD_MAX = 6
def heugcd(f, g):
"""
Heuristic polynomial GCD in ``Z[X]``.
Given polynomials ``f`` and ``g`` in ``Z[X]``, returns
their GCD and cofactors, i.e. polynomials ``h``, ``cff`` and ``cfg``
such that::
h = gcd(f, g), cff = quo(f, h) and cfg = quo(g, h)
The algorithm is purely heuristic, which means it may fail to compute
the GCD. This will be signaled by raising an exception. In this case
you will need to switch to another GCD method.
The algorithm computes the polynomial GCD by evaluating polynomials
``f`` and ``g`` at certain points and computing (fast) integer GCD
of those evaluations. The polynomial GCD is recovered from the integer
image by interpolation. The evaluation process reduces f and g variable
by variable into a large integer. The final step is to verify if the
interpolated polynomial is the correct GCD. This gives cofactors of
the input polynomials as a side effect.
Examples
========
>>> from sympy.polys.heuristicgcd import heugcd
>>> from sympy.polys import ring, ZZ
>>> R, x, y = ring("x, y", ZZ)
>>> f = x**2 + 2*x*y + y**2
>>> g = x**2 + x*y
>>> h, cff, cfg = heugcd(f, g)
>>> h, cff, cfg
(x + y, x + y, x)
>>> cff*h == f
True
>>> cfg*h == g
True
References
==========
.. [1] [Liao95]_
"""
assert f.ring == g.ring and f.ring.domain.is_ZZ
ring = f.ring
x0 = ring.gens[0]
domain = ring.domain
gcd, f, g = f.extract_ground(g)
f_norm = f.max_norm()
g_norm = g.max_norm()
B = domain(2*min(f_norm, g_norm) + 29)
x = max(min(B, 99*domain.sqrt(B)),
2*min(f_norm // abs(f.LC),
g_norm // abs(g.LC)) + 4)
for i in range(0, HEU_GCD_MAX):
ff = f.evaluate(x0, x)
gg = g.evaluate(x0, x)
if ff and gg:
if ring.ngens == 1:
h, cff, cfg = domain.cofactors(ff, gg)
else:
h, cff, cfg = heugcd(ff, gg)
h = _gcd_interpolate(h, x, ring)
h = h.primitive()[1]
cff_, r = f.div(h)
if not r:
cfg_, r = g.div(h)
if not r:
h = h.mul_ground(gcd)
return h, cff_, cfg_
cff = _gcd_interpolate(cff, x, ring)
h, r = f.div(cff)
if not r:
cfg_, r = g.div(h)
if not r:
h = h.mul_ground(gcd)
return h, cff, cfg_
cfg = _gcd_interpolate(cfg, x, ring)
h, r = g.div(cfg)
if not r:
cff_, r = f.div(h)
if not r:
h = h.mul_ground(gcd)
return h, cff_, cfg
x = 73794*x * domain.sqrt(domain.sqrt(x)) // 27011
raise HeuristicGCDFailed('no luck')
def _gcd_interpolate(h, x, ring):
"""Interpolate polynomial GCD from integer GCD. """
f, i = ring.zero, 0
# TODO: don't expose poly repr implementation details
if ring.ngens == 1:
while h:
g = h % x
if g > x // 2: g -= x
h = (h - g) // x
# f += X**i*g
if g:
f[(i,)] = g
i += 1
else:
while h:
g = h.trunc_ground(x)
h = (h - g).quo_ground(x)
# f += X**i*g
if g:
for monom, coeff in g.iterterms():
f[(i,) + monom] = coeff
i += 1
if f.LC < 0:
return -f
else:
return f
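# Illustration (added comment, not part of the original module): a hand-checked
# sketch of the interpolation step. With evaluation point x = 100, the integer
# 10203 encodes the coefficient list [3, 2, 1] (lowest degree first), so the
# recovered polynomial is x**2 + 2*x + 3.
#
#     >>> from sympy.polys import ring, ZZ
#     >>> R, x = ring("x", ZZ)
#     >>> _gcd_interpolate(ZZ(10203), ZZ(100), R)
#     x**2 + 2*x + 3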
|
f4cd594976acda095028b1cdef0a0eeaaff632f74eebfab8ec539c5a4ac4680c | """Tools and arithmetics for monomials of distributed polynomials. """
from __future__ import print_function, division
from itertools import combinations_with_replacement, product
from textwrap import dedent
from sympy.core import Mul, S, Tuple, sympify
from sympy.core.compatibility import exec_, iterable
from sympy.polys.polyerrors import ExactQuotientFailed
from sympy.polys.polyutils import PicklableWithSlots, dict_from_expr
from sympy.utilities import public
from sympy.core.compatibility import is_sequence
@public
def itermonomials(variables, max_degrees, min_degrees=None):
r"""
`max_degrees` and `min_degrees` are either both integers or both lists.
Unless otherwise specified, `min_degrees` is either 0 or [0,...,0].
A generator of all monomials `monom` is returned, such that
either
min_degree <= total_degree(monom) <= max_degree,
or
min_degrees[i] <= degree_list(monom)[i] <= max_degrees[i], for all i.
Case I:: `max_degrees` and `min_degrees` are both integers.
===========================================================
Given a set of variables `V`, a max_degree `N` and a min_degree `M`,
generate a set of monomials of total degree less than or equal to `N` and
greater than or equal to `M`. The total number of monomials in commutative
variables is huge and is given by the following formula if `M = 0`:
.. math::
\frac{(\#V + N)!}{\#V! N!}
For example if we would like to generate a dense polynomial of
a total degree `N = 50` and `M = 0`, which is the worst case, in 5
variables, assuming that exponents and all coefficients are 32-bit long
and stored in an array we would need almost 80 GiB of memory! Fortunately
most polynomials, that we will encounter, are sparse.
Examples
========
Consider monomials in commutative variables `x` and `y`
and non-commutative variables `a` and `b`::
>>> from sympy import symbols
>>> from sympy.polys.monomials import itermonomials
>>> from sympy.polys.orderings import monomial_key
>>> from sympy.abc import x, y
>>> sorted(itermonomials([x, y], 2), key=monomial_key('grlex', [y, x]))
[1, x, y, x**2, x*y, y**2]
>>> sorted(itermonomials([x, y], 3), key=monomial_key('grlex', [y, x]))
[1, x, y, x**2, x*y, y**2, x**3, x**2*y, x*y**2, y**3]
>>> a, b = symbols('a, b', commutative=False)
>>> set(itermonomials([a, b, x], 2))
{1, a, a**2, b, b**2, x, x**2, a*b, b*a, x*a, x*b}
>>> sorted(itermonomials([x, y], 2, 1), key=monomial_key('grlex', [y, x]))
[x, y, x**2, x*y, y**2]
Case II:: `max_degrees` and `min_degrees` are both lists.
=========================================================
If max_degrees = [d_1, ..., d_n] and min_degrees = [e_1, ..., e_n],
the number of monomials generated is:
(d_1 - e_1 + 1) * ... * (d_n - e_n + 1)
Example
=======
Let us generate all monomials `monom` in variables `x`, and `y`
such that [1, 2][i] <= degree_list(monom)[i] <= [2, 4][i], i = 0, 1 ::
>>> from sympy import symbols
>>> from sympy.polys.monomials import itermonomials
>>> from sympy.polys.orderings import monomial_key
>>> from itertools import product
>>> from sympy.core import Mul
>>> from sympy.abc import x, y
>>> sorted(itermonomials([x, y], [2, 4], [1, 2]), reverse=True, key=monomial_key('lex', [x, y]))
[x**2*y**4, x**2*y**3, x**2*y**2, x*y**4, x*y**3, x*y**2]
"""
n = len(variables)
if is_sequence(max_degrees):
if len(max_degrees) != n:
raise ValueError('Argument sizes do not match')
if min_degrees is None:
min_degrees = [0]*n
elif not is_sequence(min_degrees):
raise ValueError('min_degrees is not a list')
else:
if len(min_degrees) != n:
raise ValueError('Argument sizes do not match')
if any(i < 0 for i in min_degrees):
raise ValueError("min_degrees can't contain negative numbers")
total_degree = False
else:
max_degree = max_degrees
if max_degree < 0:
raise ValueError("max_degrees can't be negative")
if min_degrees is None:
min_degree = 0
else:
if min_degrees < 0:
raise ValueError("min_degrees can't be negative")
min_degree = min_degrees
total_degree = True
if total_degree:
if min_degree > max_degree:
return
if not variables or max_degree == 0:
yield S.One
return
# Force to list in case of passed tuple or other incompatible collection
variables = list(variables) + [S.One]
if all(variable.is_commutative for variable in variables):
monomials_list_comm = []
for item in combinations_with_replacement(variables, max_degree):
powers = dict()
for variable in variables:
powers[variable] = 0
for variable in item:
if variable != 1:
powers[variable] += 1
# the total degree of the monomial must reach min_degree
if sum(powers.values()) >= min_degree:
monomials_list_comm.append(Mul(*item))
for mon in set(monomials_list_comm):
yield mon
else:
monomials_list_non_comm = []
for item in product(variables, repeat=max_degree):
powers = dict()
for variable in variables:
powers[variable] = 0
for variable in item:
if variable != 1:
powers[variable] += 1
# the total degree of the monomial must reach min_degree
if sum(powers.values()) >= min_degree:
monomials_list_non_comm.append(Mul(*item))
for mon in set(monomials_list_non_comm):
yield mon
else:
if any(min_degrees[i] > max_degrees[i] for i in range(n)):
raise ValueError('min_degrees[i] must be <= max_degrees[i] for all i')
power_lists = []
for var, min_d, max_d in zip(variables, min_degrees, max_degrees):
power_lists.append([var**i for i in range(min_d, max_d + 1)])
for powers in product(*power_lists):
yield Mul(*powers)
def monomial_count(V, N):
r"""
Computes the number of monomials.
The number of monomials is given by the following formula:
.. math::
\frac{(\#V + N)!}{\#V! N!}
where `N` is a total degree and `V` is a set of variables.
Examples
========
>>> from sympy.polys.monomials import itermonomials, monomial_count
>>> from sympy.polys.orderings import monomial_key
>>> from sympy.abc import x, y
>>> monomial_count(2, 2)
6
>>> M = list(itermonomials([x, y], 2))
>>> sorted(M, key=monomial_key('grlex', [y, x]))
[1, x, y, x**2, x*y, y**2]
>>> len(M)
6
"""
from sympy import factorial
return factorial(V + N) / factorial(V) / factorial(N)
def monomial_mul(A, B):
"""
Multiplication of tuples representing monomials.
Examples
========
    Let's multiply `x**3*y**4*z` by `x*y**2`::
>>> from sympy.polys.monomials import monomial_mul
>>> monomial_mul((3, 4, 1), (1, 2, 0))
(4, 6, 1)
    which gives `x**4*y**6*z`.
"""
return tuple([ a + b for a, b in zip(A, B) ])
def monomial_div(A, B):
"""
Division of tuples representing monomials.
Examples
========
    Let's divide `x**3*y**4*z` by `x*y**2`::
>>> from sympy.polys.monomials import monomial_div
>>> monomial_div((3, 4, 1), (1, 2, 0))
(2, 2, 1)
which gives `x**2*y**2*z`. However::
>>> monomial_div((3, 4, 1), (1, 2, 2)) is None
True
`x*y**2*z**2` does not divide `x**3*y**4*z`.
"""
C = monomial_ldiv(A, B)
if all(c >= 0 for c in C):
return tuple(C)
else:
return None
def monomial_ldiv(A, B):
"""
Division of tuples representing monomials.
Examples
========
    Let's divide `x**3*y**4*z` by `x*y**2`::
>>> from sympy.polys.monomials import monomial_ldiv
>>> monomial_ldiv((3, 4, 1), (1, 2, 0))
(2, 2, 1)
which gives `x**2*y**2*z`.
>>> monomial_ldiv((3, 4, 1), (1, 2, 2))
(2, 2, -1)
which gives `x**2*y**2*z**-1`.
"""
return tuple([ a - b for a, b in zip(A, B) ])
def monomial_pow(A, n):
"""Return the n-th pow of the monomial. """
return tuple([ a*n for a in A ])
def monomial_gcd(A, B):
"""
Greatest common divisor of tuples representing monomials.
Examples
========
    Let's compute the GCD of `x*y**4*z` and `x**3*y**2`::
>>> from sympy.polys.monomials import monomial_gcd
>>> monomial_gcd((1, 4, 1), (3, 2, 0))
(1, 2, 0)
which gives `x*y**2`.
"""
return tuple([ min(a, b) for a, b in zip(A, B) ])
def monomial_lcm(A, B):
"""
Least common multiple of tuples representing monomials.
Examples
========
    Let's compute the LCM of `x*y**4*z` and `x**3*y**2`::
>>> from sympy.polys.monomials import monomial_lcm
>>> monomial_lcm((1, 4, 1), (3, 2, 0))
(3, 4, 1)
which gives `x**3*y**4*z`.
"""
return tuple([ max(a, b) for a, b in zip(A, B) ])
def monomial_divides(A, B):
"""
Does there exist a monomial X such that XA == B?
Examples
========
>>> from sympy.polys.monomials import monomial_divides
>>> monomial_divides((1, 2), (3, 4))
True
>>> monomial_divides((1, 2), (0, 2))
False
"""
return all(a <= b for a, b in zip(A, B))
def monomial_max(*monoms):
"""
Returns maximal degree for each variable in a set of monomials.
Examples
========
Consider monomials `x**3*y**4*z**5`, `y**5*z` and `x**6*y**3*z**9`.
    We wish to find out what the maximal degree is for each of the
    variables `x`, `y` and `z`::
>>> from sympy.polys.monomials import monomial_max
>>> monomial_max((3,4,5), (0,5,1), (6,3,9))
(6, 5, 9)
"""
M = list(monoms[0])
for N in monoms[1:]:
for i, n in enumerate(N):
M[i] = max(M[i], n)
return tuple(M)
def monomial_min(*monoms):
"""
Returns minimal degree for each variable in a set of monomials.
Examples
========
Consider monomials `x**3*y**4*z**5`, `y**5*z` and `x**6*y**3*z**9`.
    We wish to find out what the minimal degree is for each of the
    variables `x`, `y` and `z`::
>>> from sympy.polys.monomials import monomial_min
>>> monomial_min((3,4,5), (0,5,1), (6,3,9))
(0, 3, 1)
"""
M = list(monoms[0])
for N in monoms[1:]:
for i, n in enumerate(N):
M[i] = min(M[i], n)
return tuple(M)
def monomial_deg(M):
"""
Returns the total degree of a monomial.
Examples
========
    The total degree of `x*y**2` is 3:
>>> from sympy.polys.monomials import monomial_deg
>>> monomial_deg((1, 2))
3
"""
return sum(M)
def term_div(a, b, domain):
"""Division of two terms in over a ring/field. """
a_lm, a_lc = a
b_lm, b_lc = b
monom = monomial_div(a_lm, b_lm)
if domain.is_Field:
if monom is not None:
return monom, domain.quo(a_lc, b_lc)
else:
return None
else:
if not (monom is None or a_lc % b_lc):
return monom, domain.quo(a_lc, b_lc)
else:
return None
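# Illustrative sketch (not part of the original module): how ``term_div`` behaves
# over a ring versus a field. The helper name ``_term_div_example`` is hypothetical
# and exists only for demonstration.
def _term_div_example():
    from sympy.polys.domains import ZZ, QQ
    # Over ZZ the coefficient must divide exactly: (x**3*y, 4) / (x, 2) -> (x**2*y, 2).
    over_ring = term_div(((3, 1), ZZ(4)), ((1, 0), ZZ(2)), ZZ)
    # Over QQ any nonzero coefficient divides: (x**3*y, 4) / (x, 3) -> (x**2*y, 4/3).
    over_field = term_div(((3, 1), QQ(4)), ((1, 0), QQ(3)), QQ)
    return over_ring, over_field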
class MonomialOps(object):
"""Code generator of fast monomial arithmetic functions. """
def __init__(self, ngens):
self.ngens = ngens
def _build(self, code, name):
ns = {}
exec_(code, ns)
return ns[name]
def _vars(self, name):
return [ "%s%s" % (name, i) for i in range(self.ngens) ]
def mul(self):
name = "monomial_mul"
template = dedent("""\
def %(name)s(A, B):
(%(A)s,) = A
(%(B)s,) = B
return (%(AB)s,)
""")
A = self._vars("a")
B = self._vars("b")
AB = [ "%s + %s" % (a, b) for a, b in zip(A, B) ]
code = template % dict(name=name, A=", ".join(A), B=", ".join(B), AB=", ".join(AB))
return self._build(code, name)
def pow(self):
name = "monomial_pow"
template = dedent("""\
def %(name)s(A, k):
(%(A)s,) = A
return (%(Ak)s,)
""")
A = self._vars("a")
Ak = [ "%s*k" % a for a in A ]
code = template % dict(name=name, A=", ".join(A), Ak=", ".join(Ak))
return self._build(code, name)
def mulpow(self):
name = "monomial_mulpow"
template = dedent("""\
def %(name)s(A, B, k):
(%(A)s,) = A
(%(B)s,) = B
return (%(ABk)s,)
""")
A = self._vars("a")
B = self._vars("b")
ABk = [ "%s + %s*k" % (a, b) for a, b in zip(A, B) ]
code = template % dict(name=name, A=", ".join(A), B=", ".join(B), ABk=", ".join(ABk))
return self._build(code, name)
def ldiv(self):
name = "monomial_ldiv"
template = dedent("""\
def %(name)s(A, B):
(%(A)s,) = A
(%(B)s,) = B
return (%(AB)s,)
""")
A = self._vars("a")
B = self._vars("b")
AB = [ "%s - %s" % (a, b) for a, b in zip(A, B) ]
code = template % dict(name=name, A=", ".join(A), B=", ".join(B), AB=", ".join(AB))
return self._build(code, name)
def div(self):
name = "monomial_div"
template = dedent("""\
def %(name)s(A, B):
(%(A)s,) = A
(%(B)s,) = B
%(RAB)s
return (%(R)s,)
""")
A = self._vars("a")
B = self._vars("b")
RAB = [ "r%(i)s = a%(i)s - b%(i)s\n if r%(i)s < 0: return None" % dict(i=i) for i in range(self.ngens) ]
R = self._vars("r")
code = template % dict(name=name, A=", ".join(A), B=", ".join(B), RAB="\n ".join(RAB), R=", ".join(R))
return self._build(code, name)
def lcm(self):
name = "monomial_lcm"
template = dedent("""\
def %(name)s(A, B):
(%(A)s,) = A
(%(B)s,) = B
return (%(AB)s,)
""")
A = self._vars("a")
B = self._vars("b")
AB = [ "%s if %s >= %s else %s" % (a, a, b, b) for a, b in zip(A, B) ]
code = template % dict(name=name, A=", ".join(A), B=", ".join(B), AB=", ".join(AB))
return self._build(code, name)
def gcd(self):
name = "monomial_gcd"
template = dedent("""\
def %(name)s(A, B):
(%(A)s,) = A
(%(B)s,) = B
return (%(AB)s,)
""")
A = self._vars("a")
B = self._vars("b")
AB = [ "%s if %s <= %s else %s" % (a, a, b, b) for a, b in zip(A, B) ]
code = template % dict(name=name, A=", ".join(A), B=", ".join(B), AB=", ".join(AB))
return self._build(code, name)
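# Illustrative sketch (not part of the original module): ``MonomialOps`` builds
# specialized arithmetic functions for a fixed number of generators by filling in
# the templates above and exec-ing them. The helper name below is hypothetical.
def _monomial_ops_example():
    ops = MonomialOps(3)
    mul = ops.mul()    # a generated function of two exponent 3-tuples
    lcm = ops.lcm()
    # Expected: mul((1, 2, 3), (0, 1, 2)) == (1, 3, 5)
    #           lcm((1, 2, 3), (0, 1, 2)) == (1, 2, 3)
    return mul((1, 2, 3), (0, 1, 2)), lcm((1, 2, 3), (0, 1, 2))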
@public
class Monomial(PicklableWithSlots):
"""Class representing a monomial, i.e. a product of powers. """
__slots__ = ('exponents', 'gens')
def __init__(self, monom, gens=None):
if not iterable(monom):
rep, gens = dict_from_expr(sympify(monom), gens=gens)
if len(rep) == 1 and list(rep.values())[0] == 1:
monom = list(rep.keys())[0]
else:
raise ValueError("Expected a monomial got {}".format(monom))
self.exponents = tuple(map(int, monom))
self.gens = gens
def rebuild(self, exponents, gens=None):
return self.__class__(exponents, gens or self.gens)
def __len__(self):
return len(self.exponents)
def __iter__(self):
return iter(self.exponents)
def __getitem__(self, item):
return self.exponents[item]
def __hash__(self):
return hash((self.__class__.__name__, self.exponents, self.gens))
def __str__(self):
if self.gens:
return "*".join([ "%s**%s" % (gen, exp) for gen, exp in zip(self.gens, self.exponents) ])
else:
return "%s(%s)" % (self.__class__.__name__, self.exponents)
def as_expr(self, *gens):
"""Convert a monomial instance to a SymPy expression. """
gens = gens or self.gens
if not gens:
raise ValueError(
"can't convert %s to an expression without generators" % self)
return Mul(*[ gen**exp for gen, exp in zip(gens, self.exponents) ])
def __eq__(self, other):
if isinstance(other, Monomial):
exponents = other.exponents
elif isinstance(other, (tuple, Tuple)):
exponents = other
else:
return False
return self.exponents == exponents
def __ne__(self, other):
return not self == other
def __mul__(self, other):
if isinstance(other, Monomial):
exponents = other.exponents
elif isinstance(other, (tuple, Tuple)):
exponents = other
else:
raise NotImplementedError
return self.rebuild(monomial_mul(self.exponents, exponents))
def __div__(self, other):
if isinstance(other, Monomial):
exponents = other.exponents
elif isinstance(other, (tuple, Tuple)):
exponents = other
else:
raise NotImplementedError
result = monomial_div(self.exponents, exponents)
if result is not None:
return self.rebuild(result)
else:
raise ExactQuotientFailed(self, Monomial(other))
__floordiv__ = __truediv__ = __div__
def __pow__(self, other):
n = int(other)
if not n:
return self.rebuild([0]*len(self))
elif n > 0:
exponents = self.exponents
for i in range(1, n):
exponents = monomial_mul(exponents, self.exponents)
return self.rebuild(exponents)
else:
raise ValueError("a non-negative integer expected, got %s" % other)
def gcd(self, other):
"""Greatest common divisor of monomials. """
if isinstance(other, Monomial):
exponents = other.exponents
elif isinstance(other, (tuple, Tuple)):
exponents = other
else:
raise TypeError(
"an instance of Monomial class expected, got %s" % other)
return self.rebuild(monomial_gcd(self.exponents, exponents))
def lcm(self, other):
"""Least common multiple of monomials. """
if isinstance(other, Monomial):
exponents = other.exponents
elif isinstance(other, (tuple, Tuple)):
exponents = other
else:
raise TypeError(
"an instance of Monomial class expected, got %s" % other)
return self.rebuild(monomial_lcm(self.exponents, exponents))
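# Illustrative sketch (not part of the original module): basic ``Monomial``
# arithmetic on exponent tuples. The helper name below is hypothetical.
def _monomial_class_example():
    from sympy.abc import x, y
    m = Monomial((3, 4), (x, y))    # represents x**3*y**4
    n = Monomial((1, 2), (x, y))    # represents x*y**2
    # Expected: (m*n).exponents == (4, 6) and (m/n).exponents == (2, 2)
    return (m*n).exponents, (m/n).exponents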
|
0107c7e27934ea832e17ce77ef9fa840a65d61e5f2c2ea96c324a54615aca318 | """Sparse rational function fields. """
from __future__ import print_function, division
from typing import Any, Dict
from operator import add, mul, lt, le, gt, ge
from sympy.core.compatibility import is_sequence, reduce
from sympy.core.expr import Expr
from sympy.core.mod import Mod
from sympy.core.numbers import Exp1
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.core.sympify import CantSympify, sympify
from sympy.functions.elementary.exponential import ExpBase
from sympy.polys.domains.domainelement import DomainElement
from sympy.polys.domains.fractionfield import FractionField
from sympy.polys.domains.polynomialring import PolynomialRing
from sympy.polys.constructor import construct_domain
from sympy.polys.orderings import lex
from sympy.polys.polyerrors import CoercionFailed
from sympy.polys.polyoptions import build_options
from sympy.polys.polyutils import _parallel_dict_from_expr
from sympy.polys.rings import PolyElement
from sympy.printing.defaults import DefaultPrinting
from sympy.utilities import public
from sympy.utilities.magic import pollute
@public
def field(symbols, domain, order=lex):
"""Construct new rational function field returning (field, x1, ..., xn). """
_field = FracField(symbols, domain, order)
return (_field,) + _field.gens
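# Illustrative sketch (not part of the original module): a typical use of ``field``,
# unpacking the returned tuple into the field and its generators. The names below
# are hypothetical.
def _field_example():
    from sympy.polys.domains import QQ
    K, x, y = field("x,y", QQ)
    f = (x + y)/(x - y)    # a FracElement of K
    # Expected: f.numer is x + y and f.denom is x - y in K.ring.
    return K, f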
@public
def xfield(symbols, domain, order=lex):
"""Construct new rational function field returning (field, (x1, ..., xn)). """
_field = FracField(symbols, domain, order)
return (_field, _field.gens)
@public
def vfield(symbols, domain, order=lex):
"""Construct new rational function field and inject generators into global namespace. """
_field = FracField(symbols, domain, order)
pollute([ sym.name for sym in _field.symbols ], _field.gens)
return _field
@public
def sfield(exprs, *symbols, **options):
"""Construct a field deriving generators and domain
from options and input expressions.
Parameters
==========
exprs : :class:`Expr` or sequence of :class:`Expr` (sympifiable)
symbols : sequence of :class:`Symbol`/:class:`Expr`
options : keyword arguments understood by :class:`Options`
Examples
========
>>> from sympy.core import symbols
>>> from sympy.functions import exp, log
>>> from sympy.polys.fields import sfield
>>> x = symbols("x")
>>> K, f = sfield((x*log(x) + 4*x**2)*exp(1/x + log(x)/3)/x**2)
>>> K
Rational function field in x, exp(1/x), log(x), x**(1/3) over ZZ with lex order
>>> f
(4*x**2*(exp(1/x)) + x*(exp(1/x))*(log(x)))/((x**(1/3))**5)
"""
single = False
if not is_sequence(exprs):
exprs, single = [exprs], True
exprs = list(map(sympify, exprs))
opt = build_options(symbols, options)
numdens = []
for expr in exprs:
numdens.extend(expr.as_numer_denom())
reps, opt = _parallel_dict_from_expr(numdens, opt)
if opt.domain is None:
# NOTE: this is inefficient because construct_domain() automatically
# performs conversion to the target domain. It shouldn't do this.
coeffs = sum([list(rep.values()) for rep in reps], [])
opt.domain, _ = construct_domain(coeffs, opt=opt)
_field = FracField(opt.gens, opt.domain, opt.order)
fracs = []
for i in range(0, len(reps), 2):
fracs.append(_field(tuple(reps[i:i+2])))
if single:
return (_field, fracs[0])
else:
return (_field, fracs)
_field_cache = {} # type: Dict[Any, Any]
class FracField(DefaultPrinting):
"""Multivariate distributed rational function field. """
def __new__(cls, symbols, domain, order=lex):
from sympy.polys.rings import PolyRing
ring = PolyRing(symbols, domain, order)
symbols = ring.symbols
ngens = ring.ngens
domain = ring.domain
order = ring.order
_hash_tuple = (cls.__name__, symbols, ngens, domain, order)
obj = _field_cache.get(_hash_tuple)
if obj is None:
obj = object.__new__(cls)
obj._hash_tuple = _hash_tuple
obj._hash = hash(_hash_tuple)
obj.ring = ring
obj.dtype = type("FracElement", (FracElement,), {"field": obj})
obj.symbols = symbols
obj.ngens = ngens
obj.domain = domain
obj.order = order
obj.zero = obj.dtype(ring.zero)
obj.one = obj.dtype(ring.one)
obj.gens = obj._gens()
for symbol, generator in zip(obj.symbols, obj.gens):
if isinstance(symbol, Symbol):
name = symbol.name
if not hasattr(obj, name):
setattr(obj, name, generator)
_field_cache[_hash_tuple] = obj
return obj
def _gens(self):
"""Return a list of polynomial generators. """
return tuple([ self.dtype(gen) for gen in self.ring.gens ])
def __getnewargs__(self):
return (self.symbols, self.domain, self.order)
def __hash__(self):
return self._hash
def __eq__(self, other):
return isinstance(other, FracField) and \
(self.symbols, self.ngens, self.domain, self.order) == \
(other.symbols, other.ngens, other.domain, other.order)
def __ne__(self, other):
return not self == other
def raw_new(self, numer, denom=None):
return self.dtype(numer, denom)
def new(self, numer, denom=None):
if denom is None: denom = self.ring.one
numer, denom = numer.cancel(denom)
return self.raw_new(numer, denom)
def domain_new(self, element):
return self.domain.convert(element)
def ground_new(self, element):
try:
return self.new(self.ring.ground_new(element))
except CoercionFailed:
domain = self.domain
if not domain.is_Field and domain.has_assoc_Field:
ring = self.ring
ground_field = domain.get_field()
element = ground_field.convert(element)
numer = ring.ground_new(ground_field.numer(element))
denom = ring.ground_new(ground_field.denom(element))
return self.raw_new(numer, denom)
else:
raise
def field_new(self, element):
if isinstance(element, FracElement):
if self == element.field:
return element
else:
raise NotImplementedError("conversion")
elif isinstance(element, PolyElement):
denom, numer = element.clear_denoms()
numer = numer.set_ring(self.ring)
denom = self.ring.ground_new(denom)
return self.raw_new(numer, denom)
elif isinstance(element, tuple) and len(element) == 2:
numer, denom = list(map(self.ring.ring_new, element))
return self.new(numer, denom)
elif isinstance(element, str):
raise NotImplementedError("parsing")
elif isinstance(element, Expr):
return self.from_expr(element)
else:
return self.ground_new(element)
__call__ = field_new
def _rebuild_expr(self, expr, mapping):
domain = self.domain
powers = tuple((gen, gen.as_base_exp()) for gen in mapping.keys()
if gen.is_Pow or isinstance(gen, ExpBase))
def _rebuild(expr):
generator = mapping.get(expr)
if generator is not None:
return generator
elif expr.is_Add:
return reduce(add, list(map(_rebuild, expr.args)))
elif expr.is_Mul:
return reduce(mul, list(map(_rebuild, expr.args)))
elif expr.is_Pow or isinstance(expr, (ExpBase, Exp1)):
b, e = expr.as_base_exp()
# look for bg**eg whose integer power may be b**e
for gen, (bg, eg) in powers:
if bg == b and Mod(e, eg) == 0:
return mapping.get(gen)**int(e/eg)
if e.is_Integer and e is not S.One:
return _rebuild(b)**int(e)
try:
return domain.convert(expr)
except CoercionFailed:
if not domain.is_Field and domain.has_assoc_Field:
return domain.get_field().convert(expr)
else:
raise
return _rebuild(sympify(expr))
def from_expr(self, expr):
mapping = dict(list(zip(self.symbols, self.gens)))
try:
frac = self._rebuild_expr(expr, mapping)
except CoercionFailed:
raise ValueError("expected an expression convertible to a rational function in %s, got %s" % (self, expr))
else:
return self.field_new(frac)
def to_domain(self):
return FractionField(self)
def to_ring(self):
from sympy.polys.rings import PolyRing
return PolyRing(self.symbols, self.domain, self.order)
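# Illustrative sketch (not part of the original module): ``FracField.from_expr``
# (reached via ``__call__`` for Expr input) rebuilds a SymPy expression into a
# field element. The helper name below is hypothetical.
def _from_expr_example():
    from sympy.polys.domains import ZZ
    from sympy.abc import x
    K, X = field("x", ZZ)
    f = K.from_expr(x**2/(x + 1))
    # Expected: f == X**2/(X + 1), i.e. numerator x**2 and denominator x + 1.
    return f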
class FracElement(DomainElement, DefaultPrinting, CantSympify):
"""Element of multivariate distributed rational function field. """
def __init__(self, numer, denom=None):
if denom is None:
denom = self.field.ring.one
elif not denom:
raise ZeroDivisionError("zero denominator")
self.numer = numer
self.denom = denom
def raw_new(f, numer, denom):
return f.__class__(numer, denom)
def new(f, numer, denom):
return f.raw_new(*numer.cancel(denom))
def to_poly(f):
if f.denom != 1:
raise ValueError("f.denom should be 1")
return f.numer
def parent(self):
return self.field.to_domain()
def __getnewargs__(self):
return (self.field, self.numer, self.denom)
_hash = None
def __hash__(self):
_hash = self._hash
if _hash is None:
self._hash = _hash = hash((self.field, self.numer, self.denom))
return _hash
def copy(self):
return self.raw_new(self.numer.copy(), self.denom.copy())
def set_field(self, new_field):
if self.field == new_field:
return self
else:
new_ring = new_field.ring
numer = self.numer.set_ring(new_ring)
denom = self.denom.set_ring(new_ring)
return new_field.new(numer, denom)
def as_expr(self, *symbols):
return self.numer.as_expr(*symbols)/self.denom.as_expr(*symbols)
def __eq__(f, g):
if isinstance(g, FracElement) and f.field == g.field:
return f.numer == g.numer and f.denom == g.denom
else:
return f.numer == g and f.denom == f.field.ring.one
def __ne__(f, g):
return not f == g
def __nonzero__(f):
return bool(f.numer)
__bool__ = __nonzero__
def sort_key(self):
return (self.denom.sort_key(), self.numer.sort_key())
def _cmp(f1, f2, op):
if isinstance(f2, f1.field.dtype):
return op(f1.sort_key(), f2.sort_key())
else:
return NotImplemented
def __lt__(f1, f2):
return f1._cmp(f2, lt)
def __le__(f1, f2):
return f1._cmp(f2, le)
def __gt__(f1, f2):
return f1._cmp(f2, gt)
def __ge__(f1, f2):
return f1._cmp(f2, ge)
def __pos__(f):
"""Negate all coefficients in ``f``. """
return f.raw_new(f.numer, f.denom)
def __neg__(f):
"""Negate all coefficients in ``f``. """
return f.raw_new(-f.numer, f.denom)
def _extract_ground(self, element):
domain = self.field.domain
try:
element = domain.convert(element)
except CoercionFailed:
if not domain.is_Field and domain.has_assoc_Field:
ground_field = domain.get_field()
try:
element = ground_field.convert(element)
except CoercionFailed:
pass
else:
return -1, ground_field.numer(element), ground_field.denom(element)
return 0, None, None
else:
return 1, element, None
def __add__(f, g):
"""Add rational functions ``f`` and ``g``. """
field = f.field
if not g:
return f
elif not f:
return g
elif isinstance(g, field.dtype):
if f.denom == g.denom:
return f.new(f.numer + g.numer, f.denom)
else:
return f.new(f.numer*g.denom + f.denom*g.numer, f.denom*g.denom)
elif isinstance(g, field.ring.dtype):
return f.new(f.numer + f.denom*g, f.denom)
else:
if isinstance(g, FracElement):
if isinstance(field.domain, FractionField) and field.domain.field == g.field:
pass
elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field:
return g.__radd__(f)
else:
return NotImplemented
elif isinstance(g, PolyElement):
if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring:
pass
else:
return g.__radd__(f)
return f.__radd__(g)
def __radd__(f, c):
if isinstance(c, f.field.ring.dtype):
return f.new(f.numer + f.denom*c, f.denom)
op, g_numer, g_denom = f._extract_ground(c)
if op == 1:
return f.new(f.numer + f.denom*g_numer, f.denom)
elif not op:
return NotImplemented
else:
return f.new(f.numer*g_denom + f.denom*g_numer, f.denom*g_denom)
def __sub__(f, g):
"""Subtract rational functions ``f`` and ``g``. """
field = f.field
if not g:
return f
elif not f:
return -g
elif isinstance(g, field.dtype):
if f.denom == g.denom:
return f.new(f.numer - g.numer, f.denom)
else:
return f.new(f.numer*g.denom - f.denom*g.numer, f.denom*g.denom)
elif isinstance(g, field.ring.dtype):
return f.new(f.numer - f.denom*g, f.denom)
else:
if isinstance(g, FracElement):
if isinstance(field.domain, FractionField) and field.domain.field == g.field:
pass
elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field:
return g.__rsub__(f)
else:
return NotImplemented
elif isinstance(g, PolyElement):
if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring:
pass
else:
return g.__rsub__(f)
op, g_numer, g_denom = f._extract_ground(g)
if op == 1:
return f.new(f.numer - f.denom*g_numer, f.denom)
elif not op:
return NotImplemented
else:
return f.new(f.numer*g_denom - f.denom*g_numer, f.denom*g_denom)
def __rsub__(f, c):
if isinstance(c, f.field.ring.dtype):
return f.new(-f.numer + f.denom*c, f.denom)
op, g_numer, g_denom = f._extract_ground(c)
if op == 1:
return f.new(-f.numer + f.denom*g_numer, f.denom)
elif not op:
return NotImplemented
else:
return f.new(-f.numer*g_denom + f.denom*g_numer, f.denom*g_denom)
def __mul__(f, g):
"""Multiply rational functions ``f`` and ``g``. """
field = f.field
if not f or not g:
return field.zero
elif isinstance(g, field.dtype):
return f.new(f.numer*g.numer, f.denom*g.denom)
elif isinstance(g, field.ring.dtype):
return f.new(f.numer*g, f.denom)
else:
if isinstance(g, FracElement):
if isinstance(field.domain, FractionField) and field.domain.field == g.field:
pass
elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field:
return g.__rmul__(f)
else:
return NotImplemented
elif isinstance(g, PolyElement):
if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring:
pass
else:
return g.__rmul__(f)
return f.__rmul__(g)
def __rmul__(f, c):
if isinstance(c, f.field.ring.dtype):
return f.new(f.numer*c, f.denom)
op, g_numer, g_denom = f._extract_ground(c)
if op == 1:
return f.new(f.numer*g_numer, f.denom)
elif not op:
return NotImplemented
else:
return f.new(f.numer*g_numer, f.denom*g_denom)
def __truediv__(f, g):
"""Computes quotient of fractions ``f`` and ``g``. """
field = f.field
if not g:
raise ZeroDivisionError
elif isinstance(g, field.dtype):
return f.new(f.numer*g.denom, f.denom*g.numer)
elif isinstance(g, field.ring.dtype):
return f.new(f.numer, f.denom*g)
else:
if isinstance(g, FracElement):
if isinstance(field.domain, FractionField) and field.domain.field == g.field:
pass
elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field:
return g.__rtruediv__(f)
else:
return NotImplemented
elif isinstance(g, PolyElement):
if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring:
pass
else:
return g.__rtruediv__(f)
op, g_numer, g_denom = f._extract_ground(g)
if op == 1:
return f.new(f.numer, f.denom*g_numer)
elif not op:
return NotImplemented
else:
return f.new(f.numer*g_denom, f.denom*g_numer)
__div__ = __truediv__
def __rtruediv__(f, c):
if not f:
raise ZeroDivisionError
elif isinstance(c, f.field.ring.dtype):
return f.new(f.denom*c, f.numer)
op, g_numer, g_denom = f._extract_ground(c)
if op == 1:
return f.new(f.denom*g_numer, f.numer)
elif not op:
return NotImplemented
else:
return f.new(f.denom*g_numer, f.numer*g_denom)
__rdiv__ = __rtruediv__
def __pow__(f, n):
"""Raise ``f`` to a non-negative power ``n``. """
if n >= 0:
return f.raw_new(f.numer**n, f.denom**n)
elif not f:
raise ZeroDivisionError
else:
return f.raw_new(f.denom**-n, f.numer**-n)
def diff(f, x):
"""Computes partial derivative in ``x``.
Examples
========
>>> from sympy.polys.fields import field
>>> from sympy.polys.domains import ZZ
>>> _, x, y, z = field("x,y,z", ZZ)
>>> ((x**2 + y)/(z + 1)).diff(x)
2*x/(z + 1)
"""
x = x.to_poly()
return f.new(f.numer.diff(x)*f.denom - f.numer*f.denom.diff(x), f.denom**2)
def __call__(f, *values):
if 0 < len(values) <= f.field.ngens:
return f.evaluate(list(zip(f.field.gens, values)))
else:
raise ValueError("expected at least 1 and at most %s values, got %s" % (f.field.ngens, len(values)))
def evaluate(f, x, a=None):
if isinstance(x, list) and a is None:
x = [ (X.to_poly(), a) for X, a in x ]
numer, denom = f.numer.evaluate(x), f.denom.evaluate(x)
else:
x = x.to_poly()
numer, denom = f.numer.evaluate(x, a), f.denom.evaluate(x, a)
field = numer.ring.to_field()
return field.new(numer, denom)
def subs(f, x, a=None):
if isinstance(x, list) and a is None:
x = [ (X.to_poly(), a) for X, a in x ]
numer, denom = f.numer.subs(x), f.denom.subs(x)
else:
x = x.to_poly()
numer, denom = f.numer.subs(x, a), f.denom.subs(x, a)
return f.new(numer, denom)
def compose(f, x, a=None):
raise NotImplementedError
|
02b061bc2606a0cc177ee6d93b0eae49533b53a9af92b6bd8891735cc84bc78f | """Low-level linear systems solver. """
from __future__ import print_function, division
from sympy.matrices import MutableDenseMatrix, zeros
class RawMatrix(MutableDenseMatrix):
_sympify = staticmethod(lambda x: x)
def eqs_to_matrix(eqs, ring):
"""Transform from equations to matrix form. """
xs = ring.gens
M = zeros(len(eqs), len(xs)+1, cls=RawMatrix)
for j, e_j in enumerate(eqs):
for i, x_i in enumerate(xs):
M[j, i] = e_j.coeff(x_i)
M[j, -1] = -e_j.coeff(1)
return M
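# Illustrative sketch (not part of the original module): the augmented matrix built
# by ``eqs_to_matrix`` has one row per equation, one column per generator and a
# final column holding the negated constant terms. The helper name is hypothetical.
def _eqs_to_matrix_example():
    from sympy.polys.rings import ring
    from sympy.polys.domains import QQ
    R, x, y = ring("x,y", QQ)
    # x + y - 3 and 2*x - y should become the rows [1, 1, 3] and [2, -1, 0].
    return eqs_to_matrix([x + y - 3, 2*x - y], R)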
def solve_lin_sys(eqs, ring, _raw=True):
"""Solve a system of linear equations.
If ``_raw`` is False, the keys and values in the returned dictionary
will be of type Expr (and the unit of the field will be removed from
    the keys); otherwise the low-level polys types will be returned, e.g.
PolyElement: PythonRational.
"""
as_expr = not _raw
assert ring.domain.is_Field
# transform from equations to matrix form
matrix = eqs_to_matrix(eqs, ring)
# solve by row-reduction
echelon, pivots = matrix.rref(iszerofunc=lambda x: not x, simplify=lambda x: x)
# construct the returnable form of the solutions
keys = ring.symbols if as_expr else ring.gens
if pivots[-1] == len(keys):
return None
if len(pivots) == len(keys):
sol = []
for s in echelon[:, -1]:
a = ring.ground_new(s)
if as_expr:
a = a.as_expr()
sol.append(a)
sols = dict(zip(keys, sol))
else:
sols = {}
g = ring.gens
_g = [[-i] for i in g]
for i, p in enumerate(pivots):
vect = RawMatrix(_g[p + 1:] + [[ring.one]])
v = (echelon[i, p + 1:]*vect)[0]
if as_expr:
v = v.as_expr()
sols[keys[p]] = v
return sols
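# Illustrative sketch (not part of the original module): solving a small system
# with ``solve_lin_sys`` over QQ. The helper name below is hypothetical.
def _solve_lin_sys_example():
    from sympy.polys.rings import ring
    from sympy.polys.domains import QQ
    R, x, y = ring("x,y", QQ)
    # x + y = 3 and x - y = 1 have the unique solution x = 2, y = 1, so the returned
    # mapping should send x to 2 and y to 1 (low-level polys types, since _raw=True).
    return solve_lin_sys([x + y - 3, x - y - 1], R)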
|
c21463f55f84e1dd7b17ab3e6d2478d753debea7de3f6556e91a521f53a915d2 | """Algorithms for partial fraction decomposition of rational functions. """
from __future__ import print_function, division
from sympy.core import S, Add, sympify, Function, Lambda, Dummy
from sympy.core.basic import preorder_traversal
from sympy.polys import Poly, RootSum, cancel, factor
from sympy.polys.polyerrors import PolynomialError
from sympy.polys.polyoptions import allowed_flags, set_defaults
from sympy.polys.polytools import parallel_poly_from_expr
from sympy.utilities import numbered_symbols, take, xthreaded, public
@xthreaded
@public
def apart(f, x=None, full=False, **options):
"""
Compute partial fraction decomposition of a rational function.
Given a rational function ``f``, computes the partial fraction
decomposition of ``f``. Two algorithms are available: One is based on the
    undetermined coefficients method, the other is Bronstein's full partial
fraction decomposition algorithm.
The undetermined coefficients method (selected by ``full=False``) uses
polynomial factorization (and therefore accepts the same options as
    factor) for the denominator. By default it works over the rational
    numbers; therefore, decomposition of denominators with non-rational roots
    (e.g. irrational or complex roots) is not supported by default (see options
    of factor).
Bronstein's algorithm can be selected by using ``full=True`` and allows a
decomposition of denominators with non-rational roots. A human-readable
result can be obtained via ``doit()`` (see examples below).
Examples
========
>>> from sympy.polys.partfrac import apart
>>> from sympy.abc import x, y
By default, using the undetermined coefficients method:
>>> apart(y/(x + 2)/(x + 1), x)
-y/(x + 2) + y/(x + 1)
The undetermined coefficients method does not provide a result when the
    denominator's roots are not rational:
>>> apart(y/(x**2 + x + 1), x)
y/(x**2 + x + 1)
You can choose Bronstein's algorithm by setting ``full=True``:
>>> apart(y/(x**2 + x + 1), x, full=True)
RootSum(_w**2 + _w + 1, Lambda(_a, (-2*_a*y/3 - y/3)/(-_a + x)))
Calling ``doit()`` yields a human-readable result:
>>> apart(y/(x**2 + x + 1), x, full=True).doit()
(-y/3 - 2*y*(-1/2 - sqrt(3)*I/2)/3)/(x + 1/2 + sqrt(3)*I/2) + (-y/3 -
2*y*(-1/2 + sqrt(3)*I/2)/3)/(x + 1/2 - sqrt(3)*I/2)
See Also
========
apart_list, assemble_partfrac_list
"""
allowed_flags(options, [])
f = sympify(f)
if f.is_Atom:
return f
else:
P, Q = f.as_numer_denom()
_options = options.copy()
options = set_defaults(options, extension=True)
try:
(P, Q), opt = parallel_poly_from_expr((P, Q), x, **options)
except PolynomialError as msg:
if f.is_commutative:
raise PolynomialError(msg)
# non-commutative
if f.is_Mul:
c, nc = f.args_cnc(split_1=False)
nc = f.func(*nc)
if c:
c = apart(f.func._from_args(c), x=x, full=full, **_options)
return c*nc
else:
return nc
elif f.is_Add:
c = []
nc = []
for i in f.args:
if i.is_commutative:
c.append(i)
else:
try:
nc.append(apart(i, x=x, full=full, **_options))
except NotImplementedError:
nc.append(i)
return apart(f.func(*c), x=x, full=full, **_options) + f.func(*nc)
else:
reps = []
pot = preorder_traversal(f)
next(pot)
for e in pot:
try:
reps.append((e, apart(e, x=x, full=full, **_options)))
pot.skip() # this was handled successfully
except NotImplementedError:
pass
return f.xreplace(dict(reps))
if P.is_multivariate:
fc = f.cancel()
if fc != f:
return apart(fc, x=x, full=full, **_options)
raise NotImplementedError(
"multivariate partial fraction decomposition")
common, P, Q = P.cancel(Q)
poly, P = P.div(Q, auto=True)
P, Q = P.rat_clear_denoms(Q)
if Q.degree() <= 1:
partial = P/Q
else:
if not full:
partial = apart_undetermined_coeffs(P, Q)
else:
partial = apart_full_decomposition(P, Q)
terms = S.Zero
for term in Add.make_args(partial):
if term.has(RootSum):
terms += term
else:
terms += factor(term)
return common*(poly.as_expr() + terms)
def apart_undetermined_coeffs(P, Q):
"""Partial fractions via method of undetermined coefficients. """
X = numbered_symbols(cls=Dummy)
partial, symbols = [], []
_, factors = Q.factor_list()
for f, k in factors:
n, q = f.degree(), Q
for i in range(1, k + 1):
coeffs, q = take(X, n), q.quo(f)
partial.append((coeffs, q, f, i))
symbols.extend(coeffs)
dom = Q.get_domain().inject(*symbols)
F = Poly(0, Q.gen, domain=dom)
for i, (coeffs, q, f, k) in enumerate(partial):
h = Poly(coeffs, Q.gen, domain=dom)
partial[i] = (h, f, k)
q = q.set_domain(dom)
F += h*q
system, result = [], S.Zero
for (k,), coeff in F.terms():
system.append(coeff - P.nth(k))
from sympy.solvers import solve
solution = solve(system, symbols)
for h, f, k in partial:
h = h.as_expr().subs(solution)
result += h/f.as_expr()**k
return result
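# Illustrative sketch (not part of the original module): the undetermined
# coefficients idea behind ``apart_undetermined_coeffs``, worked on a toy example
# with plain SymPy objects. The helper name below is hypothetical.
def _undetermined_coeffs_idea():
    from sympy import symbols, solve
    A, B = symbols('A B')
    # Ansatz: 1/(x*(x + 1)) == A/x + B/(x + 1), i.e. 1 == A*(x + 1) + B*x.
    # Matching the coefficients of x**1 and x**0 gives A + B = 0 and A = 1.
    sol = solve([A + B, A - 1], [A, B])
    # Expected: {A: 1, B: -1}, i.e. 1/(x*(x + 1)) == 1/x - 1/(x + 1).
    return sol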
def apart_full_decomposition(P, Q):
"""
Bronstein's full partial fraction decomposition algorithm.
    Given a univariate rational function ``f``, compute its full partial
    fraction decomposition into fractions with linear denominators, performing
    only GCD operations over the algebraic closure of the initial ground
    domain of definition.
Note that no factorization of the initial denominator of ``f`` is
performed. The final decomposition is formed in terms of a sum of
:class:`RootSum` instances.
References
==========
.. [1] [Bronstein93]_
"""
return assemble_partfrac_list(apart_list(P/Q, P.gens[0]))
@public
def apart_list(f, x=None, dummies=None, **options):
"""
Compute partial fraction decomposition of a rational function
and return the result in structured form.
Given a rational function ``f`` compute the partial fraction decomposition
of ``f``. Only Bronstein's full partial fraction decomposition algorithm
is supported by this method. The return value is highly structured and
perfectly suited for further algorithmic treatment rather than being
human-readable. The function returns a tuple holding three elements:
* The first item is the common coefficient, free of the variable `x` used
for decomposition. (It is an element of the base field `K`.)
* The second item is the polynomial part of the decomposition. This can be
the zero polynomial. (It is an element of `K[x]`.)
* The third part itself is a list of quadruples. Each quadruple
has the following elements in this order:
- The (not necessarily irreducible) polynomial `D` whose roots `w_i` appear
        in the linear denominator of a set of related fraction terms. (This item
can also be a list of explicit roots. However, at the moment ``apart_list``
never returns a result this way, but the related ``assemble_partfrac_list``
function accepts this format as input.)
- The numerator of the fraction, written as a function of the root `w`
- The linear denominator of the fraction *excluding its power exponent*,
written as a function of the root `w`.
- The power to which the denominator has to be raised.
    One can always rebuild a plain expression by using the function ``assemble_partfrac_list``.
Examples
========
A first example:
>>> from sympy.polys.partfrac import apart_list, assemble_partfrac_list
>>> from sympy.abc import x, t
>>> f = (2*x**3 - 2*x) / (x**2 - 2*x + 1)
>>> pfd = apart_list(f)
>>> pfd
(1,
Poly(2*x + 4, x, domain='ZZ'),
[(Poly(_w - 1, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1)])
>>> assemble_partfrac_list(pfd)
2*x + 4 + 4/(x - 1)
Second example:
>>> f = (-2*x - 2*x**2) / (3*x**2 - 6*x)
>>> pfd = apart_list(f)
>>> pfd
(-1,
Poly(2/3, x, domain='QQ'),
[(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 2), Lambda(_a, -_a + x), 1)])
>>> assemble_partfrac_list(pfd)
-2/3 - 2/(x - 2)
Another example, showing symbolic parameters:
>>> pfd = apart_list(t/(x**2 + x + t), x)
>>> pfd
(1,
Poly(0, x, domain='ZZ[t]'),
[(Poly(_w**2 + _w + t, _w, domain='ZZ[t]'),
Lambda(_a, -2*_a*t/(4*t - 1) - t/(4*t - 1)),
Lambda(_a, -_a + x),
1)])
>>> assemble_partfrac_list(pfd)
RootSum(_w**2 + _w + t, Lambda(_a, (-2*_a*t/(4*t - 1) - t/(4*t - 1))/(-_a + x)))
This example is taken from Bronstein's original paper:
>>> f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2)
>>> pfd = apart_list(f)
>>> pfd
(1,
Poly(0, x, domain='ZZ'),
[(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1),
(Poly(_w**2 - 1, _w, domain='ZZ'), Lambda(_a, -3*_a - 6), Lambda(_a, -_a + x), 2),
(Poly(_w + 1, _w, domain='ZZ'), Lambda(_a, -4), Lambda(_a, -_a + x), 1)])
>>> assemble_partfrac_list(pfd)
-4/(x + 1) - 3/(x + 1)**2 - 9/(x - 1)**2 + 4/(x - 2)
See also
========
apart, assemble_partfrac_list
References
==========
.. [1] [Bronstein93]_
"""
allowed_flags(options, [])
f = sympify(f)
if f.is_Atom:
return f
else:
P, Q = f.as_numer_denom()
options = set_defaults(options, extension=True)
(P, Q), opt = parallel_poly_from_expr((P, Q), x, **options)
if P.is_multivariate:
raise NotImplementedError(
"multivariate partial fraction decomposition")
common, P, Q = P.cancel(Q)
poly, P = P.div(Q, auto=True)
P, Q = P.rat_clear_denoms(Q)
polypart = poly
if dummies is None:
def dummies(name):
d = Dummy(name)
while True:
yield d
dummies = dummies("w")
rationalpart = apart_list_full_decomposition(P, Q, dummies)
return (common, polypart, rationalpart)
def apart_list_full_decomposition(P, Q, dummygen):
"""
Bronstein's full partial fraction decomposition algorithm.
    Given a univariate rational function ``f``, compute its full partial
    fraction decomposition into fractions with linear denominators, performing
    only GCD operations over the algebraic closure of the initial ground
    domain of definition.
Note that no factorization of the initial denominator of ``f`` is
performed. The final decomposition is formed in terms of a sum of
:class:`RootSum` instances.
References
==========
.. [1] [Bronstein93]_
"""
f, x, U = P/Q, P.gen, []
u = Function('u')(x)
a = Dummy('a')
partial = []
for d, n in Q.sqf_list_include(all=True):
b = d.as_expr()
U += [ u.diff(x, n - 1) ]
h = cancel(f*b**n) / u**n
H, subs = [h], []
for j in range(1, n):
H += [ H[-1].diff(x) / j ]
for j in range(1, n + 1):
subs += [ (U[j - 1], b.diff(x, j) / j) ]
for j in range(0, n):
P, Q = cancel(H[j]).as_numer_denom()
for i in range(0, j + 1):
P = P.subs(*subs[j - i])
Q = Q.subs(*subs[0])
P = Poly(P, x)
Q = Poly(Q, x)
G = P.gcd(d)
D = d.quo(G)
B, g = Q.half_gcdex(D)
b = (P * B.quo(g)).rem(D)
Dw = D.subs(x, next(dummygen))
numer = Lambda(a, b.as_expr().subs(x, a))
denom = Lambda(a, (x - a))
exponent = n-j
partial.append((Dw, numer, denom, exponent))
return partial
@public
def assemble_partfrac_list(partial_list):
r"""Reassemble a full partial fraction decomposition
from a structured result obtained by the function ``apart_list``.
Examples
========
This example is taken from Bronstein's original paper:
>>> from sympy.polys.partfrac import apart_list, assemble_partfrac_list
>>> from sympy.abc import x, y
>>> f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2)
>>> pfd = apart_list(f)
>>> pfd
(1,
Poly(0, x, domain='ZZ'),
[(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1),
(Poly(_w**2 - 1, _w, domain='ZZ'), Lambda(_a, -3*_a - 6), Lambda(_a, -_a + x), 2),
(Poly(_w + 1, _w, domain='ZZ'), Lambda(_a, -4), Lambda(_a, -_a + x), 1)])
>>> assemble_partfrac_list(pfd)
-4/(x + 1) - 3/(x + 1)**2 - 9/(x - 1)**2 + 4/(x - 2)
If we happen to know some roots we can provide them easily inside the structure:
>>> pfd = apart_list(2/(x**2-2))
>>> pfd
(1,
Poly(0, x, domain='ZZ'),
[(Poly(_w**2 - 2, _w, domain='ZZ'),
Lambda(_a, _a/2),
Lambda(_a, -_a + x),
1)])
>>> pfda = assemble_partfrac_list(pfd)
>>> pfda
RootSum(_w**2 - 2, Lambda(_a, _a/(-_a + x)))/2
>>> pfda.doit()
-sqrt(2)/(2*(x + sqrt(2))) + sqrt(2)/(2*(x - sqrt(2)))
>>> from sympy import Dummy, Poly, Lambda, sqrt
>>> a = Dummy("a")
>>> pfd = (1, Poly(0, x, domain='ZZ'), [([sqrt(2),-sqrt(2)], Lambda(a, a/2), Lambda(a, -a + x), 1)])
>>> assemble_partfrac_list(pfd)
-sqrt(2)/(2*(x + sqrt(2))) + sqrt(2)/(2*(x - sqrt(2)))
See Also
========
apart, apart_list
"""
# Common factor
common = partial_list[0]
# Polynomial part
polypart = partial_list[1]
pfd = polypart.as_expr()
# Rational parts
for r, nf, df, ex in partial_list[2]:
if isinstance(r, Poly):
            # Assemble in case the roots are given implicitly by a polynomial
an, nu = nf.variables, nf.expr
ad, de = df.variables, df.expr
# Hack to make dummies equal because Lambda created new Dummies
de = de.subs(ad[0], an[0])
func = Lambda(tuple(an), nu/de**ex)
pfd += RootSum(r, func, auto=False, quadratic=False)
else:
# Assemble in case the roots are given explicitly by a list of algebraic numbers
for root in r:
pfd += nf(root)/df(root)**ex
return common*pfd
|
f1e6edf71be79ea41ab2447d080562181f9c46329d1fc324a937d9f84d50524d | """Real and complex root isolation and refinement algorithms. """
from __future__ import print_function, division
from sympy.polys.densearith import (
dup_neg, dup_rshift, dup_rem)
from sympy.polys.densebasic import (
dup_LC, dup_TC, dup_degree,
dup_strip, dup_reverse,
dup_convert,
dup_terms_gcd)
from sympy.polys.densetools import (
dup_clear_denoms,
dup_mirror, dup_scale, dup_shift,
dup_transform,
dup_diff,
dup_eval, dmp_eval_in,
dup_sign_variations,
dup_real_imag)
from sympy.polys.factortools import (
dup_factor_list)
from sympy.polys.polyerrors import (
RefinementFailed,
DomainError)
from sympy.polys.sqfreetools import (
dup_sqf_part, dup_sqf_list)
def dup_sturm(f, K):
"""
Computes the Sturm sequence of ``f`` in ``F[x]``.
Given a univariate, square-free polynomial ``f(x)`` returns the
associated Sturm sequence ``f_0(x), ..., f_n(x)`` defined by::
f_0(x), f_1(x) = f(x), f'(x)
f_n = -rem(f_{n-2}(x), f_{n-1}(x))
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> R.dup_sturm(x**3 - 2*x**2 + x - 3)
[x**3 - 2*x**2 + x - 3, 3*x**2 - 4*x + 1, 2/9*x + 25/9, -2079/4]
References
==========
.. [1] [Davenport88]_
"""
if not K.is_Field:
raise DomainError("can't compute Sturm sequence over %s" % K)
f = dup_sqf_part(f, K)
sturm = [f, dup_diff(f, 1, K)]
while sturm[-1]:
s = dup_rem(sturm[-2], sturm[-1], K)
sturm.append(dup_neg(s, K))
return sturm[:-1]
def dup_root_upper_bound(f, K):
"""Compute the LMQ upper bound for the positive roots of `f`;
LMQ (Local Max Quadratic) was developed by Akritas-Strzebonski-Vigklas.
References
==========
.. [1] Alkiviadis G. Akritas: "Linear and Quadratic Complexity Bounds on the
Values of the Positive Roots of Polynomials"
Journal of Universal Computer Science, Vol. 15, No. 3, 523-537, 2009.
"""
n, P = len(f), []
t = n * [K.one]
if dup_LC(f, K) < 0:
f = dup_neg(f, K)
f = list(reversed(f))
for i in range(0, n):
if f[i] >= 0:
continue
a, QL = K.log(-f[i], 2), []
for j in range(i + 1, n):
if f[j] <= 0:
continue
q = t[j] + a - K.log(f[j], 2)
QL.append([q // (j - i) , j])
if not QL:
continue
q = min(QL)
t[q[1]] = t[q[1]] + 1
P.append(q[0])
if not P:
return None
else:
return K.get_field()(2)**(max(P) + 1)
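# Illustrative sketch (not part of the original module): the LMQ bound on a small
# dense polynomial. The helper name below is hypothetical.
def _lmq_bound_example():
    from sympy.polys.domains import ZZ
    f = [ZZ(1), ZZ(0), ZZ(-4)]    # x**2 - 4, whose positive root is 2
    # The result is a power of two bounding the positive roots from above (4 here);
    # dup_root_lower_bound then gives 1/bound computed on the reversed polynomial.
    return dup_root_upper_bound(f, ZZ)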
def dup_root_lower_bound(f, K):
"""Compute the LMQ lower bound for the positive roots of `f`;
LMQ (Local Max Quadratic) was developed by Akritas-Strzebonski-Vigklas.
References
==========
.. [1] Alkiviadis G. Akritas: "Linear and Quadratic Complexity Bounds on the
Values of the Positive Roots of Polynomials"
Journal of Universal Computer Science, Vol. 15, No. 3, 523-537, 2009.
"""
bound = dup_root_upper_bound(dup_reverse(f), K)
if bound is not None:
return 1/bound
else:
return None
def _mobius_from_interval(I, field):
"""Convert an open interval to a Mobius transform. """
s, t = I
a, c = field.numer(s), field.denom(s)
b, d = field.numer(t), field.denom(t)
return a, b, c, d
def _mobius_to_interval(M, field):
"""Convert a Mobius transform to an open interval. """
a, b, c, d = M
s, t = field(a, c), field(b, d)
if s <= t:
return (s, t)
else:
return (t, s)
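# Illustrative sketch (not part of the original module): the Mobius data (a, b, c, d)
# used throughout this file encodes the interval with endpoints a/c and b/d. The
# helper name below is hypothetical.
def _mobius_roundtrip_example():
    from sympy.polys.domains import QQ
    M = _mobius_from_interval((QQ(1, 2), QQ(2)), QQ)
    # Expected: M == (1, 2, 2, 1), and converting back recovers the interval (1/2, 2).
    return M, _mobius_to_interval(M, QQ)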
def dup_step_refine_real_root(f, M, K, fast=False):
"""One step of positive real root refinement algorithm. """
a, b, c, d = M
if a == b and c == d:
return f, (a, b, c, d)
A = dup_root_lower_bound(f, K)
if A is not None:
A = K(int(A))
else:
A = K.zero
if fast and A > 16:
f = dup_scale(f, A, K)
a, c, A = A*a, A*c, K.one
if A >= K.one:
f = dup_shift(f, A, K)
b, d = A*a + b, A*c + d
if not dup_eval(f, K.zero, K):
return f, (b, b, d, d)
f, g = dup_shift(f, K.one, K), f
a1, b1, c1, d1 = a, a + b, c, c + d
if not dup_eval(f, K.zero, K):
return f, (b1, b1, d1, d1)
k = dup_sign_variations(f, K)
if k == 1:
a, b, c, d = a1, b1, c1, d1
else:
f = dup_shift(dup_reverse(g), K.one, K)
if not dup_eval(f, K.zero, K):
f = dup_rshift(f, 1, K)
a, b, c, d = b, a + b, d, c + d
return f, (a, b, c, d)
def dup_inner_refine_real_root(f, M, K, eps=None, steps=None, disjoint=None, fast=False, mobius=False):
"""Refine a positive root of `f` given a Mobius transform or an interval. """
F = K.get_field()
if len(M) == 2:
a, b, c, d = _mobius_from_interval(M, F)
else:
a, b, c, d = M
while not c:
f, (a, b, c, d) = dup_step_refine_real_root(f, (a, b, c,
d), K, fast=fast)
if eps is not None and steps is not None:
for i in range(0, steps):
if abs(F(a, c) - F(b, d)) >= eps:
f, (a, b, c, d) = dup_step_refine_real_root(f, (a, b, c, d), K, fast=fast)
else:
break
else:
if eps is not None:
while abs(F(a, c) - F(b, d)) >= eps:
f, (a, b, c, d) = dup_step_refine_real_root(f, (a, b, c, d), K, fast=fast)
if steps is not None:
for i in range(0, steps):
f, (a, b, c, d) = dup_step_refine_real_root(f, (a, b, c, d), K, fast=fast)
if disjoint is not None:
while True:
u, v = _mobius_to_interval((a, b, c, d), F)
if v <= disjoint or disjoint <= u:
break
else:
f, (a, b, c, d) = dup_step_refine_real_root(f, (a, b, c, d), K, fast=fast)
if not mobius:
return _mobius_to_interval((a, b, c, d), F)
else:
return f, (a, b, c, d)
def dup_outer_refine_real_root(f, s, t, K, eps=None, steps=None, disjoint=None, fast=False):
"""Refine a positive root of `f` given an interval `(s, t)`. """
a, b, c, d = _mobius_from_interval((s, t), K.get_field())
f = dup_transform(f, dup_strip([a, b]),
dup_strip([c, d]), K)
if dup_sign_variations(f, K) != 1:
raise RefinementFailed("there should be exactly one root in (%s, %s) interval" % (s, t))
return dup_inner_refine_real_root(f, (a, b, c, d), K, eps=eps, steps=steps, disjoint=disjoint, fast=fast)
def dup_refine_real_root(f, s, t, K, eps=None, steps=None, disjoint=None, fast=False):
"""Refine real root's approximating interval to the given precision. """
if K.is_QQ:
(_, f), K = dup_clear_denoms(f, K, convert=True), K.get_ring()
elif not K.is_ZZ:
raise DomainError("real root refinement not supported over %s" % K)
if s == t:
return (s, t)
if s > t:
s, t = t, s
negative = False
if s < 0:
if t <= 0:
f, s, t, negative = dup_mirror(f, K), -t, -s, True
else:
raise ValueError("can't refine a real root in (%s, %s)" % (s, t))
if negative and disjoint is not None:
if disjoint < 0:
disjoint = -disjoint
else:
disjoint = None
s, t = dup_outer_refine_real_root(
f, s, t, K, eps=eps, steps=steps, disjoint=disjoint, fast=fast)
if negative:
return (-t, -s)
else:
return ( s, t)
def dup_inner_isolate_real_roots(f, K, eps=None, fast=False):
"""Internal function for isolation positive roots up to given precision.
References
==========
    .. [1] Alkiviadis G. Akritas and Adam W. Strzebonski: A Comparative Study
           of Two Real Root Isolation Methods. Nonlinear Analysis: Modelling
           and Control, Vol. 10, No. 4, 297-304, 2005.
    .. [2] Alkiviadis G. Akritas, Adam W. Strzebonski and Panagiotis S. Vigklas:
           Improving the Performance of the Continued Fractions Method Using
           New Bounds of Positive Roots. Nonlinear Analysis: Modelling and
           Control, Vol. 13, No. 3, 265-279, 2008.
"""
a, b, c, d = K.one, K.zero, K.zero, K.one
k = dup_sign_variations(f, K)
if k == 0:
return []
if k == 1:
roots = [dup_inner_refine_real_root(
f, (a, b, c, d), K, eps=eps, fast=fast, mobius=True)]
else:
roots, stack = [], [(a, b, c, d, f, k)]
while stack:
a, b, c, d, f, k = stack.pop()
A = dup_root_lower_bound(f, K)
if A is not None:
A = K(int(A))
else:
A = K.zero
if fast and A > 16:
f = dup_scale(f, A, K)
a, c, A = A*a, A*c, K.one
if A >= K.one:
f = dup_shift(f, A, K)
b, d = A*a + b, A*c + d
if not dup_TC(f, K):
roots.append((f, (b, b, d, d)))
f = dup_rshift(f, 1, K)
k = dup_sign_variations(f, K)
if k == 0:
continue
if k == 1:
roots.append(dup_inner_refine_real_root(
f, (a, b, c, d), K, eps=eps, fast=fast, mobius=True))
continue
f1 = dup_shift(f, K.one, K)
a1, b1, c1, d1, r = a, a + b, c, c + d, 0
if not dup_TC(f1, K):
roots.append((f1, (b1, b1, d1, d1)))
f1, r = dup_rshift(f1, 1, K), 1
k1 = dup_sign_variations(f1, K)
k2 = k - k1 - r
a2, b2, c2, d2 = b, a + b, d, c + d
if k2 > 1:
f2 = dup_shift(dup_reverse(f), K.one, K)
if not dup_TC(f2, K):
f2 = dup_rshift(f2, 1, K)
k2 = dup_sign_variations(f2, K)
else:
f2 = None
if k1 < k2:
a1, a2, b1, b2 = a2, a1, b2, b1
c1, c2, d1, d2 = c2, c1, d2, d1
f1, f2, k1, k2 = f2, f1, k2, k1
if not k1:
continue
if f1 is None:
f1 = dup_shift(dup_reverse(f), K.one, K)
if not dup_TC(f1, K):
f1 = dup_rshift(f1, 1, K)
if k1 == 1:
roots.append(dup_inner_refine_real_root(
f1, (a1, b1, c1, d1), K, eps=eps, fast=fast, mobius=True))
else:
stack.append((a1, b1, c1, d1, f1, k1))
if not k2:
continue
if f2 is None:
f2 = dup_shift(dup_reverse(f), K.one, K)
if not dup_TC(f2, K):
f2 = dup_rshift(f2, 1, K)
if k2 == 1:
roots.append(dup_inner_refine_real_root(
f2, (a2, b2, c2, d2), K, eps=eps, fast=fast, mobius=True))
else:
stack.append((a2, b2, c2, d2, f2, k2))
return roots
def _discard_if_outside_interval(f, M, inf, sup, K, negative, fast, mobius):
"""Discard an isolating interval if outside ``(inf, sup)``. """
F = K.get_field()
while True:
u, v = _mobius_to_interval(M, F)
if negative:
u, v = -v, -u
if (inf is None or u >= inf) and (sup is None or v <= sup):
if not mobius:
return u, v
else:
return f, M
elif (sup is not None and u > sup) or (inf is not None and v < inf):
return None
else:
f, M = dup_step_refine_real_root(f, M, K, fast=fast)
def dup_inner_isolate_positive_roots(f, K, eps=None, inf=None, sup=None, fast=False, mobius=False):
"""Iteratively compute disjoint positive root isolation intervals. """
if sup is not None and sup < 0:
return []
roots = dup_inner_isolate_real_roots(f, K, eps=eps, fast=fast)
F, results = K.get_field(), []
if inf is not None or sup is not None:
for f, M in roots:
result = _discard_if_outside_interval(f, M, inf, sup, K, False, fast, mobius)
if result is not None:
results.append(result)
elif not mobius:
for f, M in roots:
u, v = _mobius_to_interval(M, F)
results.append((u, v))
else:
results = roots
return results
def dup_inner_isolate_negative_roots(f, K, inf=None, sup=None, eps=None, fast=False, mobius=False):
"""Iteratively compute disjoint negative root isolation intervals. """
if inf is not None and inf >= 0:
return []
roots = dup_inner_isolate_real_roots(dup_mirror(f, K), K, eps=eps, fast=fast)
F, results = K.get_field(), []
if inf is not None or sup is not None:
for f, M in roots:
result = _discard_if_outside_interval(f, M, inf, sup, K, True, fast, mobius)
if result is not None:
results.append(result)
elif not mobius:
for f, M in roots:
u, v = _mobius_to_interval(M, F)
results.append((-v, -u))
else:
results = roots
return results
def _isolate_zero(f, K, inf, sup, basis=False, sqf=False):
"""Handle special case of CF algorithm when ``f`` is homogeneous. """
j, f = dup_terms_gcd(f, K)
if j > 0:
F = K.get_field()
if (inf is None or inf <= 0) and (sup is None or 0 <= sup):
if not sqf:
if not basis:
return [((F.zero, F.zero), j)], f
else:
return [((F.zero, F.zero), j, [K.one, K.zero])], f
else:
return [(F.zero, F.zero)], f
return [], f
def dup_isolate_real_roots_sqf(f, K, eps=None, inf=None, sup=None, fast=False, blackbox=False):
"""Isolate real roots of a square-free polynomial using the Vincent-Akritas-Strzebonski (VAS) CF approach.
References
==========
.. [1] Alkiviadis G. Akritas and Adam W. Strzebonski: A Comparative
Study of Two Real Root Isolation Methods. Nonlinear Analysis:
Modelling and Control, Vol. 10, No. 4, 297-304, 2005.
.. [2] Alkiviadis G. Akritas, Adam W. Strzebonski and Panagiotis S.
Vigklas: Improving the Performance of the Continued Fractions
Method Using New Bounds of Positive Roots. Nonlinear Analysis:
Modelling and Control, Vol. 13, No. 3, 265-279, 2008.
"""
if K.is_QQ:
(_, f), K = dup_clear_denoms(f, K, convert=True), K.get_ring()
elif not K.is_ZZ:
raise DomainError("isolation of real roots not supported over %s" % K)
if dup_degree(f) <= 0:
return []
I_zero, f = _isolate_zero(f, K, inf, sup, basis=False, sqf=True)
I_neg = dup_inner_isolate_negative_roots(f, K, eps=eps, inf=inf, sup=sup, fast=fast)
I_pos = dup_inner_isolate_positive_roots(f, K, eps=eps, inf=inf, sup=sup, fast=fast)
roots = sorted(I_neg + I_zero + I_pos)
if not blackbox:
return roots
else:
return [ RealInterval((a, b), f, K) for (a, b) in roots ]
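# Illustrative sketch (not part of the original module): isolating the two real
# roots of x**2 - 2 with the square-free variant. The helper name is hypothetical.
def _isolate_real_roots_sqf_example():
    from sympy.polys.domains import ZZ
    f = [ZZ(1), ZZ(0), ZZ(-2)]    # x**2 - 2, with roots -sqrt(2) and sqrt(2)
    # Expected: two disjoint rational intervals, [(-2, -1), (1, 2)] for this input.
    return dup_isolate_real_roots_sqf(f, ZZ)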
def dup_isolate_real_roots(f, K, eps=None, inf=None, sup=None, basis=False, fast=False):
"""Isolate real roots using Vincent-Akritas-Strzebonski (VAS) continued fractions approach.
References
==========
.. [1] Alkiviadis G. Akritas and Adam W. Strzebonski: A Comparative
Study of Two Real Root Isolation Methods. Nonlinear Analysis:
Modelling and Control, Vol. 10, No. 4, 297-304, 2005.
.. [2] Alkiviadis G. Akritas, Adam W. Strzebonski and Panagiotis S.
Vigklas: Improving the Performance of the Continued Fractions
Method Using New Bounds of Positive Roots.
Nonlinear Analysis: Modelling and Control, Vol. 13, No. 3, 265-279, 2008.
"""
if K.is_QQ:
(_, f), K = dup_clear_denoms(f, K, convert=True), K.get_ring()
elif not K.is_ZZ:
raise DomainError("isolation of real roots not supported over %s" % K)
if dup_degree(f) <= 0:
return []
I_zero, f = _isolate_zero(f, K, inf, sup, basis=basis, sqf=False)
_, factors = dup_sqf_list(f, K)
if len(factors) == 1:
((f, k),) = factors
I_neg = dup_inner_isolate_negative_roots(f, K, eps=eps, inf=inf, sup=sup, fast=fast)
I_pos = dup_inner_isolate_positive_roots(f, K, eps=eps, inf=inf, sup=sup, fast=fast)
I_neg = [ ((u, v), k) for u, v in I_neg ]
I_pos = [ ((u, v), k) for u, v in I_pos ]
else:
I_neg, I_pos = _real_isolate_and_disjoin(factors, K,
eps=eps, inf=inf, sup=sup, basis=basis, fast=fast)
return sorted(I_neg + I_zero + I_pos)
def dup_isolate_real_roots_list(polys, K, eps=None, inf=None, sup=None, strict=False, basis=False, fast=False):
"""Isolate real roots of a list of square-free polynomial using Vincent-Akritas-Strzebonski (VAS) CF approach.
References
==========
.. [1] Alkiviadis G. Akritas and Adam W. Strzebonski: A Comparative
Study of Two Real Root Isolation Methods. Nonlinear Analysis:
Modelling and Control, Vol. 10, No. 4, 297-304, 2005.
.. [2] Alkiviadis G. Akritas, Adam W. Strzebonski and Panagiotis S.
Vigklas: Improving the Performance of the Continued Fractions
Method Using New Bounds of Positive Roots.
Nonlinear Analysis: Modelling and Control, Vol. 13, No. 3, 265-279, 2008.
"""
if K.is_QQ:
K, F, polys = K.get_ring(), K, polys[:]
for i, p in enumerate(polys):
polys[i] = dup_clear_denoms(p, F, K, convert=True)[1]
elif not K.is_ZZ:
raise DomainError("isolation of real roots not supported over %s" % K)
zeros, factors_dict = False, {}
if (inf is None or inf <= 0) and (sup is None or 0 <= sup):
zeros, zero_indices = True, {}
for i, p in enumerate(polys):
j, p = dup_terms_gcd(p, K)
if zeros and j > 0:
zero_indices[i] = j
for f, k in dup_factor_list(p, K)[1]:
f = tuple(f)
if f not in factors_dict:
factors_dict[f] = {i: k}
else:
factors_dict[f][i] = k
factors_list = []
for f, indices in factors_dict.items():
factors_list.append((list(f), indices))
I_neg, I_pos = _real_isolate_and_disjoin(factors_list, K, eps=eps,
inf=inf, sup=sup, strict=strict, basis=basis, fast=fast)
F = K.get_field()
if not zeros or not zero_indices:
I_zero = []
else:
if not basis:
I_zero = [((F.zero, F.zero), zero_indices)]
else:
I_zero = [((F.zero, F.zero), zero_indices, [K.one, K.zero])]
return sorted(I_neg + I_zero + I_pos)
def _disjoint_p(M, N, strict=False):
"""Check if Mobius transforms define disjoint intervals. """
a1, b1, c1, d1 = M
a2, b2, c2, d2 = N
a1d1, b1c1 = a1*d1, b1*c1
a2d2, b2c2 = a2*d2, b2*c2
if a1d1 == b1c1 and a2d2 == b2c2:
return True
if a1d1 > b1c1:
a1, c1, b1, d1 = b1, d1, a1, c1
if a2d2 > b2c2:
a2, c2, b2, d2 = b2, d2, a2, c2
if not strict:
return a2*d1 >= c2*b1 or b2*c1 <= d2*a1
else:
return a2*d1 > c2*b1 or b2*c1 < d2*a1
def _real_isolate_and_disjoin(factors, K, eps=None, inf=None, sup=None, strict=False, basis=False, fast=False):
"""Isolate real roots of a list of polynomials and disjoin intervals. """
I_pos, I_neg = [], []
for i, (f, k) in enumerate(factors):
for F, M in dup_inner_isolate_positive_roots(f, K, eps=eps, inf=inf, sup=sup, fast=fast, mobius=True):
I_pos.append((F, M, k, f))
for G, N in dup_inner_isolate_negative_roots(f, K, eps=eps, inf=inf, sup=sup, fast=fast, mobius=True):
I_neg.append((G, N, k, f))
for i, (f, M, k, F) in enumerate(I_pos):
for j, (g, N, m, G) in enumerate(I_pos[i + 1:]):
while not _disjoint_p(M, N, strict=strict):
f, M = dup_inner_refine_real_root(f, M, K, steps=1, fast=fast, mobius=True)
g, N = dup_inner_refine_real_root(g, N, K, steps=1, fast=fast, mobius=True)
I_pos[i + j + 1] = (g, N, m, G)
I_pos[i] = (f, M, k, F)
for i, (f, M, k, F) in enumerate(I_neg):
for j, (g, N, m, G) in enumerate(I_neg[i + 1:]):
while not _disjoint_p(M, N, strict=strict):
f, M = dup_inner_refine_real_root(f, M, K, steps=1, fast=fast, mobius=True)
g, N = dup_inner_refine_real_root(g, N, K, steps=1, fast=fast, mobius=True)
I_neg[i + j + 1] = (g, N, m, G)
I_neg[i] = (f, M, k, F)
if strict:
for i, (f, M, k, F) in enumerate(I_neg):
if not M[0]:
while not M[0]:
f, M = dup_inner_refine_real_root(f, M, K, steps=1, fast=fast, mobius=True)
I_neg[i] = (f, M, k, F)
break
for j, (g, N, m, G) in enumerate(I_pos):
if not N[0]:
while not N[0]:
g, N = dup_inner_refine_real_root(g, N, K, steps=1, fast=fast, mobius=True)
I_pos[j] = (g, N, m, G)
break
field = K.get_field()
I_neg = [ (_mobius_to_interval(M, field), k, f) for (_, M, k, f) in I_neg ]
I_pos = [ (_mobius_to_interval(M, field), k, f) for (_, M, k, f) in I_pos ]
if not basis:
I_neg = [ ((-v, -u), k) for ((u, v), k, _) in I_neg ]
I_pos = [ (( u, v), k) for ((u, v), k, _) in I_pos ]
else:
I_neg = [ ((-v, -u), k, f) for ((u, v), k, f) in I_neg ]
I_pos = [ (( u, v), k, f) for ((u, v), k, f) in I_pos ]
return I_neg, I_pos
def dup_count_real_roots(f, K, inf=None, sup=None):
"""Returns the number of distinct real roots of ``f`` in ``[inf, sup]``. """
if dup_degree(f) <= 0:
return 0
if not K.is_Field:
R, K = K, K.get_field()
f = dup_convert(f, R, K)
sturm = dup_sturm(f, K)
if inf is None:
signs_inf = dup_sign_variations([ dup_LC(s, K)*(-1)**dup_degree(s) for s in sturm ], K)
else:
signs_inf = dup_sign_variations([ dup_eval(s, inf, K) for s in sturm ], K)
if sup is None:
signs_sup = dup_sign_variations([ dup_LC(s, K) for s in sturm ], K)
else:
signs_sup = dup_sign_variations([ dup_eval(s, sup, K) for s in sturm ], K)
count = abs(signs_inf - signs_sup)
if inf is not None and not dup_eval(f, inf, K):
count += 1
return count
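# Illustrative sketch (hypothetical input; assumes ``QQ`` from
# sympy.polys.domains): for f = x**2 - 2, i.e. [QQ(1), QQ(0), QQ(-2)], the
# difference of sign variations of the Sturm sequence at the interval ends
# gives the number of distinct real roots:
#
#   dup_count_real_roots([QQ(1), QQ(0), QQ(-2)], QQ)           # expected: 2
#   dup_count_real_roots([QQ(1), QQ(0), QQ(-2)], QQ, inf=0)    # expected: 1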
OO = 'OO' # Origin of (re, im) coordinate system
Q1 = 'Q1' # Quadrant #1 (++): re > 0 and im > 0
Q2 = 'Q2' # Quadrant #2 (-+): re < 0 and im > 0
Q3 = 'Q3' # Quadrant #3 (--): re < 0 and im < 0
Q4 = 'Q4' # Quadrant #4 (+-): re > 0 and im < 0
A1 = 'A1' # Axis #1 (+0): re > 0 and im = 0
A2 = 'A2' # Axis #2 (0+): re = 0 and im > 0
A3 = 'A3' # Axis #3 (-0): re < 0 and im = 0
A4 = 'A4' # Axis #4 (0-): re = 0 and im < 0
_rules_simple = {
# Q --> Q (same) => no change
(Q1, Q1): 0,
(Q2, Q2): 0,
(Q3, Q3): 0,
(Q4, Q4): 0,
# A -- CCW --> Q => +1/4 (CCW)
(A1, Q1): 1,
(A2, Q2): 1,
(A3, Q3): 1,
(A4, Q4): 1,
# A -- CW --> Q => -1/4 (CCW)
(A1, Q4): 2,
(A2, Q1): 2,
(A3, Q2): 2,
(A4, Q3): 2,
# Q -- CCW --> A => +1/4 (CCW)
(Q1, A2): 3,
(Q2, A3): 3,
(Q3, A4): 3,
(Q4, A1): 3,
# Q -- CW --> A => -1/4 (CCW)
(Q1, A1): 4,
(Q2, A2): 4,
(Q3, A3): 4,
(Q4, A4): 4,
# Q -- CCW --> Q => +1/2 (CCW)
(Q1, Q2): +5,
(Q2, Q3): +5,
(Q3, Q4): +5,
(Q4, Q1): +5,
# Q -- CW --> Q => -1/2 (CW)
(Q1, Q4): -5,
(Q2, Q1): -5,
(Q3, Q2): -5,
(Q4, Q3): -5,
}
_rules_ambiguous = {
# A -- CCW --> Q => { +1/4 (CCW), -9/4 (CW) }
(A1, OO, Q1): -1,
(A2, OO, Q2): -1,
(A3, OO, Q3): -1,
(A4, OO, Q4): -1,
# A -- CW --> Q => { -1/4 (CCW), +7/4 (CW) }
(A1, OO, Q4): -2,
(A2, OO, Q1): -2,
(A3, OO, Q2): -2,
(A4, OO, Q3): -2,
# Q -- CCW --> A => { +1/4 (CCW), -9/4 (CW) }
(Q1, OO, A2): -3,
(Q2, OO, A3): -3,
(Q3, OO, A4): -3,
(Q4, OO, A1): -3,
# Q -- CW --> A => { -1/4 (CCW), +7/4 (CW) }
(Q1, OO, A1): -4,
(Q2, OO, A2): -4,
(Q3, OO, A3): -4,
(Q4, OO, A4): -4,
# A -- OO --> A => { +1 (CCW), -1 (CW) }
(A1, A3): 7,
(A2, A4): 7,
(A3, A1): 7,
(A4, A2): 7,
(A1, OO, A3): 7,
(A2, OO, A4): 7,
(A3, OO, A1): 7,
(A4, OO, A2): 7,
# Q -- DIA --> Q => { +1 (CCW), -1 (CW) }
(Q1, Q3): 8,
(Q2, Q4): 8,
(Q3, Q1): 8,
(Q4, Q2): 8,
(Q1, OO, Q3): 8,
(Q2, OO, Q4): 8,
(Q3, OO, Q1): 8,
(Q4, OO, Q2): 8,
# A --- R ---> A => { +1/2 (CCW), -3/2 (CW) }
(A1, A2): 9,
(A2, A3): 9,
(A3, A4): 9,
(A4, A1): 9,
(A1, OO, A2): 9,
(A2, OO, A3): 9,
(A3, OO, A4): 9,
(A4, OO, A1): 9,
# A --- L ---> A => { +3/2 (CCW), -1/2 (CW) }
(A1, A4): 10,
(A2, A1): 10,
(A3, A2): 10,
(A4, A3): 10,
(A1, OO, A4): 10,
(A2, OO, A1): 10,
(A3, OO, A2): 10,
(A4, OO, A3): 10,
# Q --- 1 ---> A => { +3/4 (CCW), -5/4 (CW) }
(Q1, A3): 11,
(Q2, A4): 11,
(Q3, A1): 11,
(Q4, A2): 11,
(Q1, OO, A3): 11,
(Q2, OO, A4): 11,
(Q3, OO, A1): 11,
(Q4, OO, A2): 11,
# Q --- 2 ---> A => { +5/4 (CCW), -3/4 (CW) }
(Q1, A4): 12,
(Q2, A1): 12,
(Q3, A2): 12,
(Q4, A3): 12,
(Q1, OO, A4): 12,
(Q2, OO, A1): 12,
(Q3, OO, A2): 12,
(Q4, OO, A3): 12,
# A --- 1 ---> Q => { +5/4 (CCW), -3/4 (CW) }
(A1, Q3): 13,
(A2, Q4): 13,
(A3, Q1): 13,
(A4, Q2): 13,
(A1, OO, Q3): 13,
(A2, OO, Q4): 13,
(A3, OO, Q1): 13,
(A4, OO, Q2): 13,
# A --- 2 ---> Q => { +3/4 (CCW), -5/4 (CW) }
(A1, Q2): 14,
(A2, Q3): 14,
(A3, Q4): 14,
(A4, Q1): 14,
(A1, OO, Q2): 14,
(A2, OO, Q3): 14,
(A3, OO, Q4): 14,
(A4, OO, Q1): 14,
# Q --> OO --> Q => { +1/2 (CCW), -3/2 (CW) }
(Q1, OO, Q2): 15,
(Q2, OO, Q3): 15,
(Q3, OO, Q4): 15,
(Q4, OO, Q1): 15,
# Q --> OO --> Q => { +3/2 (CCW), -1/2 (CW) }
(Q1, OO, Q4): 16,
(Q2, OO, Q1): 16,
(Q3, OO, Q2): 16,
(Q4, OO, Q3): 16,
# A --> OO --> A => { +2 (CCW), 0 (CW) }
(A1, OO, A1): 17,
(A2, OO, A2): 17,
(A3, OO, A3): 17,
(A4, OO, A4): 17,
# Q --> OO --> Q => { +2 (CCW), 0 (CW) }
(Q1, OO, Q1): 18,
(Q2, OO, Q2): 18,
(Q3, OO, Q3): 18,
(Q4, OO, Q4): 18,
}
_values = {
0: [( 0, 1)],
1: [(+1, 4)],
2: [(-1, 4)],
3: [(+1, 4)],
4: [(-1, 4)],
-1: [(+9, 4), (+1, 4)],
-2: [(+7, 4), (-1, 4)],
-3: [(+9, 4), (+1, 4)],
-4: [(+7, 4), (-1, 4)],
+5: [(+1, 2)],
-5: [(-1, 2)],
7: [(+1, 1), (-1, 1)],
8: [(+1, 1), (-1, 1)],
9: [(+1, 2), (-3, 2)],
10: [(+3, 2), (-1, 2)],
11: [(+3, 4), (-5, 4)],
12: [(+5, 4), (-3, 4)],
13: [(+5, 4), (-3, 4)],
14: [(+3, 4), (-5, 4)],
15: [(+1, 2), (-3, 2)],
16: [(+3, 2), (-1, 2)],
17: [(+2, 1), ( 0, 1)],
18: [(+2, 1), ( 0, 1)],
}
def _classify_point(re, im):
"""Return the half-axis (or origin) on which (re, im) point is located. """
if not re and not im:
return OO
if not re:
if im > 0:
return A2
else:
return A4
elif not im:
if re > 0:
return A1
else:
return A3
def _intervals_to_quadrants(intervals, f1, f2, s, t, F):
"""Generate a sequence of extended quadrants from a list of critical points. """
if not intervals:
return []
Q = []
if not f1:
(a, b), _, _ = intervals[0]
if a == b == s:
if len(intervals) == 1:
if dup_eval(f2, t, F) > 0:
return [OO, A2]
else:
return [OO, A4]
else:
(a, _), _, _ = intervals[1]
if dup_eval(f2, (s + a)/2, F) > 0:
Q.extend([OO, A2])
f2_sgn = +1
else:
Q.extend([OO, A4])
f2_sgn = -1
intervals = intervals[1:]
else:
if dup_eval(f2, s, F) > 0:
Q.append(A2)
f2_sgn = +1
else:
Q.append(A4)
f2_sgn = -1
for (a, _), indices, _ in intervals:
Q.append(OO)
if indices[1] % 2 == 1:
f2_sgn = -f2_sgn
if a != t:
if f2_sgn > 0:
Q.append(A2)
else:
Q.append(A4)
return Q
if not f2:
(a, b), _, _ = intervals[0]
if a == b == s:
if len(intervals) == 1:
if dup_eval(f1, t, F) > 0:
return [OO, A1]
else:
return [OO, A3]
else:
(a, _), _, _ = intervals[1]
if dup_eval(f1, (s + a)/2, F) > 0:
Q.extend([OO, A1])
f1_sgn = +1
else:
Q.extend([OO, A3])
f1_sgn = -1
intervals = intervals[1:]
else:
if dup_eval(f1, s, F) > 0:
Q.append(A1)
f1_sgn = +1
else:
Q.append(A3)
f1_sgn = -1
for (a, _), indices, _ in intervals:
Q.append(OO)
if indices[0] % 2 == 1:
f1_sgn = -f1_sgn
if a != t:
if f1_sgn > 0:
Q.append(A1)
else:
Q.append(A3)
return Q
re = dup_eval(f1, s, F)
im = dup_eval(f2, s, F)
if not re or not im:
Q.append(_classify_point(re, im))
if len(intervals) == 1:
re = dup_eval(f1, t, F)
im = dup_eval(f2, t, F)
else:
(a, _), _, _ = intervals[1]
re = dup_eval(f1, (s + a)/2, F)
im = dup_eval(f2, (s + a)/2, F)
intervals = intervals[1:]
if re > 0:
f1_sgn = +1
else:
f1_sgn = -1
if im > 0:
f2_sgn = +1
else:
f2_sgn = -1
sgn = {
(+1, +1): Q1,
(-1, +1): Q2,
(-1, -1): Q3,
(+1, -1): Q4,
}
Q.append(sgn[(f1_sgn, f2_sgn)])
for (a, b), indices, _ in intervals:
if a == b:
re = dup_eval(f1, a, F)
im = dup_eval(f2, a, F)
cls = _classify_point(re, im)
if cls is not None:
Q.append(cls)
if 0 in indices:
if indices[0] % 2 == 1:
f1_sgn = -f1_sgn
if 1 in indices:
if indices[1] % 2 == 1:
f2_sgn = -f2_sgn
if not (a == b and b == t):
Q.append(sgn[(f1_sgn, f2_sgn)])
return Q
def _traverse_quadrants(Q_L1, Q_L2, Q_L3, Q_L4, exclude=None):
"""Transform sequences of quadrants to a sequence of rules. """
if exclude is True:
edges = [1, 1, 0, 0]
corners = {
(0, 1): 1,
(1, 2): 1,
(2, 3): 0,
(3, 0): 1,
}
else:
edges = [0, 0, 0, 0]
corners = {
(0, 1): 0,
(1, 2): 0,
(2, 3): 0,
(3, 0): 0,
}
if exclude is not None and exclude is not True:
exclude = set(exclude)
for i, edge in enumerate(['S', 'E', 'N', 'W']):
if edge in exclude:
edges[i] = 1
for i, corner in enumerate(['SW', 'SE', 'NE', 'NW']):
if corner in exclude:
corners[((i - 1) % 4, i)] = 1
QQ, rules = [Q_L1, Q_L2, Q_L3, Q_L4], []
for i, Q in enumerate(QQ):
if not Q:
continue
if Q[-1] == OO:
Q = Q[:-1]
if Q[0] == OO:
j, Q = (i - 1) % 4, Q[1:]
qq = (QQ[j][-2], OO, Q[0])
if qq in _rules_ambiguous:
rules.append((_rules_ambiguous[qq], corners[(j, i)]))
else:
raise NotImplementedError("3 element rule (corner): " + str(qq))
q1, k = Q[0], 1
while k < len(Q):
q2, k = Q[k], k + 1
if q2 != OO:
qq = (q1, q2)
if qq in _rules_simple:
rules.append((_rules_simple[qq], 0))
elif qq in _rules_ambiguous:
rules.append((_rules_ambiguous[qq], edges[i]))
else:
raise NotImplementedError("2 element rule (inside): " + str(qq))
else:
qq, k = (q1, q2, Q[k]), k + 1
if qq in _rules_ambiguous:
rules.append((_rules_ambiguous[qq], edges[i]))
else:
raise NotImplementedError("3 element rule (edge): " + str(qq))
q1 = qq[-1]
return rules
def _reverse_intervals(intervals):
"""Reverse intervals for traversal from right to left and from top to bottom. """
return [ ((b, a), indices, f) for (a, b), indices, f in reversed(intervals) ]
def _winding_number(T, field):
"""Compute the winding number of the input polynomial, i.e. the number of roots. """
return int(sum([ field(*_values[t][i]) for t, i in T ]) / field(2))
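# Illustrative sketch (hypothetical traversal; assumes ``QQ`` from
# sympy.polys.domains): a full counter-clockwise walk Q1 -> Q2 -> Q3 -> Q4 -> Q1
# produces four unambiguous +1/2 rules, i.e. T = [(+5, 0)]*4, so the angle
# increments (in units of pi) sum to 2 and exactly one root is counted:
#
#   _winding_number([(+5, 0)]*4, QQ)   # expected: 1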
def dup_count_complex_roots(f, K, inf=None, sup=None, exclude=None):
"""Count all roots in [u + v*I, s + t*I] rectangle using Collins-Krandick algorithm. """
if not K.is_ZZ and not K.is_QQ:
raise DomainError("complex root counting is not supported over %s" % K)
if K.is_ZZ:
R, F = K, K.get_field()
else:
R, F = K.get_ring(), K
f = dup_convert(f, K, F)
if inf is None or sup is None:
_, lc = dup_degree(f), abs(dup_LC(f, F))
B = 2*max([ F.quo(abs(c), lc) for c in f ])
if inf is None:
(u, v) = (-B, -B)
else:
(u, v) = inf
if sup is None:
(s, t) = (+B, +B)
else:
(s, t) = sup
f1, f2 = dup_real_imag(f, F)
f1L1F = dmp_eval_in(f1, v, 1, 1, F)
f2L1F = dmp_eval_in(f2, v, 1, 1, F)
_, f1L1R = dup_clear_denoms(f1L1F, F, R, convert=True)
_, f2L1R = dup_clear_denoms(f2L1F, F, R, convert=True)
f1L2F = dmp_eval_in(f1, s, 0, 1, F)
f2L2F = dmp_eval_in(f2, s, 0, 1, F)
_, f1L2R = dup_clear_denoms(f1L2F, F, R, convert=True)
_, f2L2R = dup_clear_denoms(f2L2F, F, R, convert=True)
f1L3F = dmp_eval_in(f1, t, 1, 1, F)
f2L3F = dmp_eval_in(f2, t, 1, 1, F)
_, f1L3R = dup_clear_denoms(f1L3F, F, R, convert=True)
_, f2L3R = dup_clear_denoms(f2L3F, F, R, convert=True)
f1L4F = dmp_eval_in(f1, u, 0, 1, F)
f2L4F = dmp_eval_in(f2, u, 0, 1, F)
_, f1L4R = dup_clear_denoms(f1L4F, F, R, convert=True)
_, f2L4R = dup_clear_denoms(f2L4F, F, R, convert=True)
S_L1 = [f1L1R, f2L1R]
S_L2 = [f1L2R, f2L2R]
S_L3 = [f1L3R, f2L3R]
S_L4 = [f1L4R, f2L4R]
I_L1 = dup_isolate_real_roots_list(S_L1, R, inf=u, sup=s, fast=True, basis=True, strict=True)
I_L2 = dup_isolate_real_roots_list(S_L2, R, inf=v, sup=t, fast=True, basis=True, strict=True)
I_L3 = dup_isolate_real_roots_list(S_L3, R, inf=u, sup=s, fast=True, basis=True, strict=True)
I_L4 = dup_isolate_real_roots_list(S_L4, R, inf=v, sup=t, fast=True, basis=True, strict=True)
I_L3 = _reverse_intervals(I_L3)
I_L4 = _reverse_intervals(I_L4)
Q_L1 = _intervals_to_quadrants(I_L1, f1L1F, f2L1F, u, s, F)
Q_L2 = _intervals_to_quadrants(I_L2, f1L2F, f2L2F, v, t, F)
Q_L3 = _intervals_to_quadrants(I_L3, f1L3F, f2L3F, s, u, F)
Q_L4 = _intervals_to_quadrants(I_L4, f1L4F, f2L4F, t, v, F)
T = _traverse_quadrants(Q_L1, Q_L2, Q_L3, Q_L4, exclude=exclude)
return _winding_number(T, F)
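# Illustrative sketch (hypothetical input; assumes ``ZZ`` from
# sympy.polys.domains): for f = x**2 + 1 over ZZ both roots I and -I lie
# inside the default bounding square, so
#
#   dup_count_complex_roots([ZZ(1), ZZ(0), ZZ(1)], ZZ)   # expected: 2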
def _vertical_bisection(N, a, b, I, Q, F1, F2, f1, f2, F):
"""Vertical bisection step in Collins-Krandick root isolation algorithm. """
(u, v), (s, t) = a, b
I_L1, I_L2, I_L3, I_L4 = I
Q_L1, Q_L2, Q_L3, Q_L4 = Q
f1L1F, f1L2F, f1L3F, f1L4F = F1
f2L1F, f2L2F, f2L3F, f2L4F = F2
x = (u + s) / 2
f1V = dmp_eval_in(f1, x, 0, 1, F)
f2V = dmp_eval_in(f2, x, 0, 1, F)
I_V = dup_isolate_real_roots_list([f1V, f2V], F, inf=v, sup=t, fast=True, strict=True, basis=True)
I_L1_L, I_L1_R = [], []
I_L2_L, I_L2_R = I_V, I_L2
I_L3_L, I_L3_R = [], []
I_L4_L, I_L4_R = I_L4, _reverse_intervals(I_V)
for I in I_L1:
(a, b), indices, h = I
if a == b:
if a == x:
I_L1_L.append(I)
I_L1_R.append(I)
elif a < x:
I_L1_L.append(I)
else:
I_L1_R.append(I)
else:
if b <= x:
I_L1_L.append(I)
elif a >= x:
I_L1_R.append(I)
else:
a, b = dup_refine_real_root(h, a, b, F.get_ring(), disjoint=x, fast=True)
if b <= x:
I_L1_L.append(((a, b), indices, h))
if a >= x:
I_L1_R.append(((a, b), indices, h))
for I in I_L3:
(b, a), indices, h = I
if a == b:
if a == x:
I_L3_L.append(I)
I_L3_R.append(I)
elif a < x:
I_L3_L.append(I)
else:
I_L3_R.append(I)
else:
if b <= x:
I_L3_L.append(I)
elif a >= x:
I_L3_R.append(I)
else:
a, b = dup_refine_real_root(h, a, b, F.get_ring(), disjoint=x, fast=True)
if b <= x:
I_L3_L.append(((b, a), indices, h))
if a >= x:
I_L3_R.append(((b, a), indices, h))
Q_L1_L = _intervals_to_quadrants(I_L1_L, f1L1F, f2L1F, u, x, F)
Q_L2_L = _intervals_to_quadrants(I_L2_L, f1V, f2V, v, t, F)
Q_L3_L = _intervals_to_quadrants(I_L3_L, f1L3F, f2L3F, x, u, F)
Q_L4_L = Q_L4
Q_L1_R = _intervals_to_quadrants(I_L1_R, f1L1F, f2L1F, x, s, F)
Q_L2_R = Q_L2
Q_L3_R = _intervals_to_quadrants(I_L3_R, f1L3F, f2L3F, s, x, F)
Q_L4_R = _intervals_to_quadrants(I_L4_R, f1V, f2V, t, v, F)
T_L = _traverse_quadrants(Q_L1_L, Q_L2_L, Q_L3_L, Q_L4_L, exclude=True)
T_R = _traverse_quadrants(Q_L1_R, Q_L2_R, Q_L3_R, Q_L4_R, exclude=True)
N_L = _winding_number(T_L, F)
N_R = _winding_number(T_R, F)
I_L = (I_L1_L, I_L2_L, I_L3_L, I_L4_L)
Q_L = (Q_L1_L, Q_L2_L, Q_L3_L, Q_L4_L)
I_R = (I_L1_R, I_L2_R, I_L3_R, I_L4_R)
Q_R = (Q_L1_R, Q_L2_R, Q_L3_R, Q_L4_R)
F1_L = (f1L1F, f1V, f1L3F, f1L4F)
F2_L = (f2L1F, f2V, f2L3F, f2L4F)
F1_R = (f1L1F, f1L2F, f1L3F, f1V)
F2_R = (f2L1F, f2L2F, f2L3F, f2V)
a, b = (u, v), (x, t)
c, d = (x, v), (s, t)
D_L = (N_L, a, b, I_L, Q_L, F1_L, F2_L)
D_R = (N_R, c, d, I_R, Q_R, F1_R, F2_R)
return D_L, D_R
def _horizontal_bisection(N, a, b, I, Q, F1, F2, f1, f2, F):
"""Horizontal bisection step in Collins-Krandick root isolation algorithm. """
(u, v), (s, t) = a, b
I_L1, I_L2, I_L3, I_L4 = I
Q_L1, Q_L2, Q_L3, Q_L4 = Q
f1L1F, f1L2F, f1L3F, f1L4F = F1
f2L1F, f2L2F, f2L3F, f2L4F = F2
y = (v + t) / 2
f1H = dmp_eval_in(f1, y, 1, 1, F)
f2H = dmp_eval_in(f2, y, 1, 1, F)
I_H = dup_isolate_real_roots_list([f1H, f2H], F, inf=u, sup=s, fast=True, strict=True, basis=True)
I_L1_B, I_L1_U = I_L1, I_H
I_L2_B, I_L2_U = [], []
I_L3_B, I_L3_U = _reverse_intervals(I_H), I_L3
I_L4_B, I_L4_U = [], []
for I in I_L2:
(a, b), indices, h = I
if a == b:
if a == y:
I_L2_B.append(I)
I_L2_U.append(I)
elif a < y:
I_L2_B.append(I)
else:
I_L2_U.append(I)
else:
if b <= y:
I_L2_B.append(I)
elif a >= y:
I_L2_U.append(I)
else:
a, b = dup_refine_real_root(h, a, b, F.get_ring(), disjoint=y, fast=True)
if b <= y:
I_L2_B.append(((a, b), indices, h))
if a >= y:
I_L2_U.append(((a, b), indices, h))
for I in I_L4:
(b, a), indices, h = I
if a == b:
if a == y:
I_L4_B.append(I)
I_L4_U.append(I)
elif a < y:
I_L4_B.append(I)
else:
I_L4_U.append(I)
else:
if b <= y:
I_L4_B.append(I)
elif a >= y:
I_L4_U.append(I)
else:
a, b = dup_refine_real_root(h, a, b, F.get_ring(), disjoint=y, fast=True)
if b <= y:
I_L4_B.append(((b, a), indices, h))
if a >= y:
I_L4_U.append(((b, a), indices, h))
Q_L1_B = Q_L1
Q_L2_B = _intervals_to_quadrants(I_L2_B, f1L2F, f2L2F, v, y, F)
Q_L3_B = _intervals_to_quadrants(I_L3_B, f1H, f2H, s, u, F)
Q_L4_B = _intervals_to_quadrants(I_L4_B, f1L4F, f2L4F, y, v, F)
Q_L1_U = _intervals_to_quadrants(I_L1_U, f1H, f2H, u, s, F)
Q_L2_U = _intervals_to_quadrants(I_L2_U, f1L2F, f2L2F, y, t, F)
Q_L3_U = Q_L3
Q_L4_U = _intervals_to_quadrants(I_L4_U, f1L4F, f2L4F, t, y, F)
T_B = _traverse_quadrants(Q_L1_B, Q_L2_B, Q_L3_B, Q_L4_B, exclude=True)
T_U = _traverse_quadrants(Q_L1_U, Q_L2_U, Q_L3_U, Q_L4_U, exclude=True)
N_B = _winding_number(T_B, F)
N_U = _winding_number(T_U, F)
I_B = (I_L1_B, I_L2_B, I_L3_B, I_L4_B)
Q_B = (Q_L1_B, Q_L2_B, Q_L3_B, Q_L4_B)
I_U = (I_L1_U, I_L2_U, I_L3_U, I_L4_U)
Q_U = (Q_L1_U, Q_L2_U, Q_L3_U, Q_L4_U)
F1_B = (f1L1F, f1L2F, f1H, f1L4F)
F2_B = (f2L1F, f2L2F, f2H, f2L4F)
F1_U = (f1H, f1L2F, f1L3F, f1L4F)
F2_U = (f2H, f2L2F, f2L3F, f2L4F)
a, b = (u, v), (s, y)
c, d = (u, y), (s, t)
D_B = (N_B, a, b, I_B, Q_B, F1_B, F2_B)
D_U = (N_U, c, d, I_U, Q_U, F1_U, F2_U)
return D_B, D_U
def _depth_first_select(rectangles):
"""Find a rectangle of minimum area for bisection. """
min_area, j = None, None
for i, (_, (u, v), (s, t), _, _, _, _) in enumerate(rectangles):
area = (s - u)*(t - v)
if min_area is None or area < min_area:
min_area, j = area, i
return rectangles.pop(j)
def _rectangle_small_p(a, b, eps):
"""Return ``True`` if the given rectangle is small enough. """
(u, v), (s, t) = a, b
if eps is not None:
return s - u < eps and t - v < eps
else:
return True
def dup_isolate_complex_roots_sqf(f, K, eps=None, inf=None, sup=None, blackbox=False):
"""Isolate complex roots of a square-free polynomial using Collins-Krandick algorithm. """
if not K.is_ZZ and not K.is_QQ:
raise DomainError("isolation of complex roots is not supported over %s" % K)
if dup_degree(f) <= 0:
return []
if K.is_ZZ:
F = K.get_field()
else:
F = K
f = dup_convert(f, K, F)
lc = abs(dup_LC(f, F))
B = 2*max([ F.quo(abs(c), lc) for c in f ])
(u, v), (s, t) = (-B, F.zero), (B, B)
if inf is not None:
u = inf
if sup is not None:
s = sup
if v < 0 or t <= v or s <= u:
raise ValueError("not a valid complex isolation rectangle")
f1, f2 = dup_real_imag(f, F)
f1L1 = dmp_eval_in(f1, v, 1, 1, F)
f2L1 = dmp_eval_in(f2, v, 1, 1, F)
f1L2 = dmp_eval_in(f1, s, 0, 1, F)
f2L2 = dmp_eval_in(f2, s, 0, 1, F)
f1L3 = dmp_eval_in(f1, t, 1, 1, F)
f2L3 = dmp_eval_in(f2, t, 1, 1, F)
f1L4 = dmp_eval_in(f1, u, 0, 1, F)
f2L4 = dmp_eval_in(f2, u, 0, 1, F)
S_L1 = [f1L1, f2L1]
S_L2 = [f1L2, f2L2]
S_L3 = [f1L3, f2L3]
S_L4 = [f1L4, f2L4]
I_L1 = dup_isolate_real_roots_list(S_L1, F, inf=u, sup=s, fast=True, strict=True, basis=True)
I_L2 = dup_isolate_real_roots_list(S_L2, F, inf=v, sup=t, fast=True, strict=True, basis=True)
I_L3 = dup_isolate_real_roots_list(S_L3, F, inf=u, sup=s, fast=True, strict=True, basis=True)
I_L4 = dup_isolate_real_roots_list(S_L4, F, inf=v, sup=t, fast=True, strict=True, basis=True)
I_L3 = _reverse_intervals(I_L3)
I_L4 = _reverse_intervals(I_L4)
Q_L1 = _intervals_to_quadrants(I_L1, f1L1, f2L1, u, s, F)
Q_L2 = _intervals_to_quadrants(I_L2, f1L2, f2L2, v, t, F)
Q_L3 = _intervals_to_quadrants(I_L3, f1L3, f2L3, s, u, F)
Q_L4 = _intervals_to_quadrants(I_L4, f1L4, f2L4, t, v, F)
T = _traverse_quadrants(Q_L1, Q_L2, Q_L3, Q_L4)
N = _winding_number(T, F)
if not N:
return []
I = (I_L1, I_L2, I_L3, I_L4)
Q = (Q_L1, Q_L2, Q_L3, Q_L4)
F1 = (f1L1, f1L2, f1L3, f1L4)
F2 = (f2L1, f2L2, f2L3, f2L4)
rectangles, roots = [(N, (u, v), (s, t), I, Q, F1, F2)], []
while rectangles:
N, (u, v), (s, t), I, Q, F1, F2 = _depth_first_select(rectangles)
if s - u > t - v:
D_L, D_R = _vertical_bisection(N, (u, v), (s, t), I, Q, F1, F2, f1, f2, F)
N_L, a, b, I_L, Q_L, F1_L, F2_L = D_L
N_R, c, d, I_R, Q_R, F1_R, F2_R = D_R
if N_L >= 1:
if N_L == 1 and _rectangle_small_p(a, b, eps):
roots.append(ComplexInterval(a, b, I_L, Q_L, F1_L, F2_L, f1, f2, F))
else:
rectangles.append(D_L)
if N_R >= 1:
if N_R == 1 and _rectangle_small_p(c, d, eps):
roots.append(ComplexInterval(c, d, I_R, Q_R, F1_R, F2_R, f1, f2, F))
else:
rectangles.append(D_R)
else:
D_B, D_U = _horizontal_bisection(N, (u, v), (s, t), I, Q, F1, F2, f1, f2, F)
N_B, a, b, I_B, Q_B, F1_B, F2_B = D_B
N_U, c, d, I_U, Q_U, F1_U, F2_U = D_U
if N_B >= 1:
if N_B == 1 and _rectangle_small_p(a, b, eps):
roots.append(ComplexInterval(
a, b, I_B, Q_B, F1_B, F2_B, f1, f2, F))
else:
rectangles.append(D_B)
if N_U >= 1:
if N_U == 1 and _rectangle_small_p(c, d, eps):
roots.append(ComplexInterval(
c, d, I_U, Q_U, F1_U, F2_U, f1, f2, F))
else:
rectangles.append(D_U)
_roots, roots = sorted(roots, key=lambda r: (r.ax, r.ay)), []
for root in _roots:
roots.extend([root.conjugate(), root])
if blackbox:
return roots
else:
return [ r.as_tuple() for r in roots ]
def dup_isolate_all_roots_sqf(f, K, eps=None, inf=None, sup=None, fast=False, blackbox=False):
"""Isolate real and complex roots of a square-free polynomial ``f``. """
return (
dup_isolate_real_roots_sqf( f, K, eps=eps, inf=inf, sup=sup, fast=fast, blackbox=blackbox),
dup_isolate_complex_roots_sqf(f, K, eps=eps, inf=inf, sup=sup, blackbox=blackbox))
def dup_isolate_all_roots(f, K, eps=None, inf=None, sup=None, fast=False):
"""Isolate real and complex roots of a non-square-free polynomial ``f``. """
if not K.is_ZZ and not K.is_QQ:
raise DomainError("isolation of real and complex roots is not supported over %s" % K)
_, factors = dup_sqf_list(f, K)
if len(factors) == 1:
((f, k),) = factors
real_part, complex_part = dup_isolate_all_roots_sqf(
f, K, eps=eps, inf=inf, sup=sup, fast=fast)
real_part = [ ((a, b), k) for (a, b) in real_part ]
complex_part = [ ((a, b), k) for (a, b) in complex_part ]
return real_part, complex_part
else:
raise NotImplementedError( "only trivial square-free polynomials are supported")
class RealInterval(object):
"""A fully qualified representation of a real isolation interval. """
def __init__(self, data, f, dom):
"""Initialize new real interval with complete information. """
if len(data) == 2:
s, t = data
self.neg = False
if s < 0:
if t <= 0:
f, s, t, self.neg = dup_mirror(f, dom), -t, -s, True
else:
raise ValueError("can't refine a real root in (%s, %s)" % (s, t))
a, b, c, d = _mobius_from_interval((s, t), dom.get_field())
f = dup_transform(f, dup_strip([a, b]),
dup_strip([c, d]), dom)
self.mobius = a, b, c, d
else:
self.mobius = data[:-1]
self.neg = data[-1]
self.f, self.dom = f, dom
@property
def func(self):
return RealInterval
@property
def args(self):
i = self
return (i.mobius + (i.neg,), i.f, i.dom)
def __eq__(self, other):
if type(other) != type(self):
return False
return self.args == other.args
@property
def a(self):
"""Return the position of the left end. """
field = self.dom.get_field()
a, b, c, d = self.mobius
if not self.neg:
if a*d < b*c:
return field(a, c)
return field(b, d)
else:
if a*d > b*c:
return -field(a, c)
return -field(b, d)
@property
def b(self):
"""Return the position of the right end. """
was = self.neg
self.neg = not was
rv = -self.a
self.neg = was
return rv
@property
def dx(self):
"""Return width of the real isolating interval. """
return self.b - self.a
@property
def center(self):
"""Return the center of the real isolating interval. """
return (self.a + self.b)/2
def as_tuple(self):
"""Return tuple representation of real isolating interval. """
return (self.a, self.b)
def __repr__(self):
return "(%s, %s)" % (self.a, self.b)
def is_disjoint(self, other):
"""Return ``True`` if two isolation intervals are disjoint. """
if isinstance(other, RealInterval):
return (self.b <= other.a or other.b <= self.a)
assert isinstance(other, ComplexInterval)
return (self.b <= other.ax or other.bx <= self.a
or other.ay*other.by > 0)
def _inner_refine(self):
"""Internal one step real root refinement procedure. """
if self.mobius is None:
return self
f, mobius = dup_inner_refine_real_root(
self.f, self.mobius, self.dom, steps=1, mobius=True)
return RealInterval(mobius + (self.neg,), f, self.dom)
def refine_disjoint(self, other):
"""Refine an isolating interval until it is disjoint with another one. """
expr = self
while not expr.is_disjoint(other):
expr, other = expr._inner_refine(), other._inner_refine()
return expr, other
def refine_size(self, dx):
"""Refine an isolating interval until it is of sufficiently small size. """
expr = self
while not (expr.dx < dx):
expr = expr._inner_refine()
return expr
def refine_step(self, steps=1):
"""Perform several steps of real root refinement algorithm. """
expr = self
for _ in range(steps):
expr = expr._inner_refine()
return expr
def refine(self):
"""Perform one step of real root refinement algorithm. """
return self._inner_refine()
class ComplexInterval(object):
"""A fully qualified representation of a complex isolation interval.
The printed form is shown as (ax, bx) x (ay, by) where (ax, ay)
and (bx, by) are the coordinates of the southwest and northeast
corners of the interval's rectangle, respectively.
Examples
========
>>> from sympy import CRootOf, Rational, S
>>> from sympy.abc import x
>>> CRootOf.clear_cache() # for doctest reproducibility
>>> root = CRootOf(x**10 - 2*x + 3, 9)
>>> i = root._get_interval(); i
(3/64, 3/32) x (9/8, 75/64)
The real part of the root lies within the printed range (3/64, 3/32)
while the imaginary part lies within (9/8, 75/64):
>>> root.n(3)
0.0766 + 1.14*I
The width of the ranges in the x and y directions on the complex
plane are:
>>> i.dx, i.dy
(3/64, 3/64)
The center of the range is
>>> i.center
(9/128, 147/128)
The northeast coordinate of the rectangle bounding the root in the
complex plane is given by attribute b and the x and y components
are accessed by bx and by:
>>> i.b, i.bx, i.by
((3/32, 75/64), 3/32, 75/64)
The southwest coordinate is similarly given by i.a
>>> i.a, i.ax, i.ay
((3/64, 9/8), 3/64, 9/8)
Although the interval prints to show only the real and imaginary
range of the root, all the information of the underlying root
is contained as properties of the interval.
For example, an interval with a nonpositive imaginary range is
considered to be the conjugate. Since the y values of this interval
are in the positive range [9/8, 75/64], it is not the conjugate:
>>> i.conj
False
The conjugate's interval is
>>> ic = i.conjugate(); ic
(3/64, 3/32) x (-75/64, -9/8)
NOTE: the values printed still represent the x and y range
in which the root -- conjugate, in this case -- is located,
but the underlying a and b values of a root and its conjugate
are the same:
>>> assert i.a == ic.a and i.b == ic.b
What changes are the reported coordinates of the bounding rectangle:
>>> (i.ax, i.ay), (i.bx, i.by)
((3/64, 9/8), (3/32, 75/64))
>>> (ic.ax, ic.ay), (ic.bx, ic.by)
((3/64, -75/64), (3/32, -9/8))
The interval can be refined once:
>>> i # for reference, this is the current interval
(3/64, 3/32) x (9/8, 75/64)
>>> i.refine()
(3/64, 3/32) x (9/8, 147/128)
Several refinement steps can be taken:
>>> i.refine_step(2) # 2 steps
(9/128, 3/32) x (9/8, 147/128)
It is also possible to refine to a given tolerance:
>>> tol = min(i.dx, i.dy)/2
>>> i.refine_size(tol)
(9/128, 21/256) x (9/8, 291/256)
A disjoint interval is one whose bounding rectangle does not
overlap with another. An interval, necessarily, is not disjoint with
itself, but any interval is disjoint with a conjugate since the
conjugate rectangle will always be in the lower half of the complex
plane and the non-conjugate in the upper half:
>>> i.is_disjoint(i), i.is_disjoint(i.conjugate())
(False, True)
The following interval j is not disjoint from i:
>>> close = CRootOf(x**10 - 2*x + 300/S(101), 9)
>>> j = close._get_interval(); j
(75/1616, 75/808) x (225/202, 1875/1616)
>>> i.is_disjoint(j)
False
The two can be made disjoint, however:
>>> newi, newj = i.refine_disjoint(j)
>>> newi
(39/512, 159/2048) x (2325/2048, 4653/4096)
>>> newj
(3975/51712, 2025/25856) x (29325/25856, 117375/103424)
Even though the real ranges overlap, the imaginary do not, so
the roots have been resolved as distinct. Intervals are disjoint
when either the real or imaginary component of the intervals is
distinct. In the case above, the real components have not been
resolved (so we don't know, yet, which root has the smaller real
part) but the imaginary part of ``close`` is larger than ``root``:
>>> close.n(3)
0.0771 + 1.13*I
>>> root.n(3)
0.0766 + 1.14*I
"""
def __init__(self, a, b, I, Q, F1, F2, f1, f2, dom, conj=False):
"""Initialize new complex interval with complete information. """
# a and b are the SW and NE corner of the bounding interval,
# (ax, ay) and (bx, by), respectively, for the NON-CONJUGATE
# root (the one with the positive imaginary part); when working
# with the conjugate, the stored a and b values are unchanged, but
# the reported ay and by are reversed and have opposite sign
self.a, self.b = a, b
self.I, self.Q = I, Q
self.f1, self.F1 = f1, F1
self.f2, self.F2 = f2, F2
self.dom = dom
self.conj = conj
@property
def func(self):
return ComplexInterval
@property
def args(self):
i = self
return (i.a, i.b, i.I, i.Q, i.F1, i.F2, i.f1, i.f2, i.dom, i.conj)
def __eq__(self, other):
if type(other) != type(self):
return False
return self.args == other.args
@property
def ax(self):
"""Return ``x`` coordinate of south-western corner. """
return self.a[0]
@property
def ay(self):
"""Return ``y`` coordinate of south-western corner. """
if not self.conj:
return self.a[1]
else:
return -self.b[1]
@property
def bx(self):
"""Return ``x`` coordinate of north-eastern corner. """
return self.b[0]
@property
def by(self):
"""Return ``y`` coordinate of north-eastern corner. """
if not self.conj:
return self.b[1]
else:
return -self.a[1]
@property
def dx(self):
"""Return width of the complex isolating interval. """
return self.b[0] - self.a[0]
@property
def dy(self):
"""Return height of the complex isolating interval. """
return self.b[1] - self.a[1]
@property
def center(self):
"""Return the center of the complex isolating interval. """
return ((self.ax + self.bx)/2, (self.ay + self.by)/2)
def as_tuple(self):
"""Return tuple representation of the complex isolating
interval's SW and NE corners, respectively. """
return ((self.ax, self.ay), (self.bx, self.by))
def __repr__(self):
return "(%s, %s) x (%s, %s)" % (self.ax, self.bx, self.ay, self.by)
def conjugate(self):
"""This complex interval really is located in lower half-plane. """
return ComplexInterval(self.a, self.b, self.I, self.Q,
self.F1, self.F2, self.f1, self.f2, self.dom, conj=True)
def is_disjoint(self, other):
"""Return ``True`` if two isolation intervals are disjoint. """
if isinstance(other, RealInterval):
return other.is_disjoint(self)
if self.conj != other.conj: # above and below real axis
return True
re_distinct = (self.bx <= other.ax or other.bx <= self.ax)
if re_distinct:
return True
im_distinct = (self.by <= other.ay or other.by <= self.ay)
return im_distinct
def _inner_refine(self):
"""Internal one step complex root refinement procedure. """
(u, v), (s, t) = self.a, self.b
I, Q = self.I, self.Q
f1, F1 = self.f1, self.F1
f2, F2 = self.f2, self.F2
dom = self.dom
if s - u > t - v:
D_L, D_R = _vertical_bisection(1, (u, v), (s, t), I, Q, F1, F2, f1, f2, dom)
if D_L[0] == 1:
_, a, b, I, Q, F1, F2 = D_L
else:
_, a, b, I, Q, F1, F2 = D_R
else:
D_B, D_U = _horizontal_bisection(1, (u, v), (s, t), I, Q, F1, F2, f1, f2, dom)
if D_B[0] == 1:
_, a, b, I, Q, F1, F2 = D_B
else:
_, a, b, I, Q, F1, F2 = D_U
return ComplexInterval(a, b, I, Q, F1, F2, f1, f2, dom, self.conj)
def refine_disjoint(self, other):
"""Refine an isolating interval until it is disjoint with another one. """
expr = self
while not expr.is_disjoint(other):
expr, other = expr._inner_refine(), other._inner_refine()
return expr, other
def refine_size(self, dx, dy=None):
"""Refine an isolating interval until it is of sufficiently small size. """
if dy is None:
dy = dx
expr = self
while not (expr.dx < dx and expr.dy < dy):
expr = expr._inner_refine()
return expr
def refine_step(self, steps=1):
"""Perform several steps of complex root refinement algorithm. """
expr = self
for _ in range(steps):
expr = expr._inner_refine()
return expr
def refine(self):
"""Perform one step of complex root refinement algorithm. """
return self._inner_refine()
"""Efficient functions for generating orthogonal polynomials. """
from __future__ import print_function, division
from sympy import Dummy
from sympy.polys.constructor import construct_domain
from sympy.polys.densearith import (
dup_mul, dup_mul_ground, dup_lshift, dup_sub, dup_add
)
from sympy.polys.domains import ZZ, QQ
from sympy.polys.polyclasses import DMP
from sympy.polys.polytools import Poly, PurePoly
from sympy.utilities import public
def dup_jacobi(n, a, b, K):
"""Low-level implementation of Jacobi polynomials. """
seq = [[K.one], [(a + b + K(2))/K(2), (a - b)/K(2)]]
for i in range(2, n + 1):
den = K(i)*(a + b + i)*(a + b + K(2)*i - K(2))
f0 = (a + b + K(2)*i - K.one) * (a*a - b*b) / (K(2)*den)
f1 = (a + b + K(2)*i - K.one) * (a + b + K(2)*i - K(2)) * (a + b + K(2)*i) / (K(2)*den)
f2 = (a + i - K.one)*(b + i - K.one)*(a + b + K(2)*i) / den
p0 = dup_mul_ground(seq[-1], f0, K)
p1 = dup_mul_ground(dup_lshift(seq[-1], 1, K), f1, K)
p2 = dup_mul_ground(seq[-2], f2, K)
seq.append(dup_sub(dup_add(p0, p1, K), p2, K))
return seq[n]
@public
def jacobi_poly(n, a, b, x=None, polys=False):
"""Generates Jacobi polynomial of degree `n` in `x`.
Parameters
==========
n : int
Degree of the polynomial.
a
The first parameter of the Jacobi polynomial; together with ``b`` it
decides the minimal domain for the list of coefficients.
b
The second parameter of the Jacobi polynomial.
x : optional
polys : bool, optional
``polys=True`` returns a Poly object, otherwise
(default) returns an expression.
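Examples
========
A small usage sketch; with ``a = b = 0`` the Jacobi polynomial reduces
to the Legendre polynomial `P_2`:
>>> from sympy.polys.orthopolys import jacobi_poly
>>> from sympy.abc import x
>>> jacobi_poly(2, 0, 0, x)
3*x**2/2 - 1/2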
"""
if n < 0:
raise ValueError("can't generate Jacobi polynomial of degree %s" % n)
K, v = construct_domain([a, b], field=True)
poly = DMP(dup_jacobi(int(n), v[0], v[1], K), K)
if x is not None:
poly = Poly.new(poly, x)
else:
poly = PurePoly.new(poly, Dummy('x'))
return poly if polys else poly.as_expr()
def dup_gegenbauer(n, a, K):
"""Low-level implementation of Gegenbauer polynomials. """
seq = [[K.one], [K(2)*a, K.zero]]
for i in range(2, n + 1):
f1 = K(2) * (i + a - K.one) / i
f2 = (i + K(2)*a - K(2)) / i
p1 = dup_mul_ground(dup_lshift(seq[-1], 1, K), f1, K)
p2 = dup_mul_ground(seq[-2], f2, K)
seq.append(dup_sub(p1, p2, K))
return seq[n]
def gegenbauer_poly(n, a, x=None, polys=False):
"""Generates Gegenbauer polynomial of degree `n` in `x`.
Parameters
==========
n : int
Degree of the polynomial.
x : optional
a
Parameter of the Gegenbauer polynomial; decides the minimal domain
for the list of coefficients.
polys : bool, optional
``polys=True`` returns a Poly object, otherwise
(default) returns an expression.
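Examples
========
A small usage sketch; with ``a = 1`` the Gegenbauer polynomial reduces
to the Chebyshev polynomial of the second kind `U_3`:
>>> from sympy.polys.orthopolys import gegenbauer_poly
>>> from sympy.abc import x
>>> gegenbauer_poly(3, 1, x)
8*x**3 - 4*x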
"""
if n < 0:
raise ValueError(
"can't generate Gegenbauer polynomial of degree %s" % n)
K, a = construct_domain(a, field=True)
poly = DMP(dup_gegenbauer(int(n), a, K), K)
if x is not None:
poly = Poly.new(poly, x)
else:
poly = PurePoly.new(poly, Dummy('x'))
return poly if polys else poly.as_expr()
def dup_chebyshevt(n, K):
"""Low-level implementation of Chebyshev polynomials of the 1st kind. """
seq = [[K.one], [K.one, K.zero]]
for i in range(2, n + 1):
a = dup_mul_ground(dup_lshift(seq[-1], 1, K), K(2), K)
seq.append(dup_sub(a, seq[-2], K))
return seq[n]
@public
def chebyshevt_poly(n, x=None, polys=False):
"""Generates Chebyshev polynomial of the first kind of degree `n` in `x`.
Parameters
==========
n : int
Degree of the polynomial.
x : optional
polys : bool, optional
``polys=True`` returns a Poly object, otherwise
(default) returns an expression.
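Examples
========
A small usage sketch; the result is the classical expansion of `T_4`:
>>> from sympy.polys.orthopolys import chebyshevt_poly
>>> from sympy.abc import x
>>> chebyshevt_poly(4, x)
8*x**4 - 8*x**2 + 1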
"""
if n < 0:
raise ValueError(
"can't generate 1st kind Chebyshev polynomial of degree %s" % n)
poly = DMP(dup_chebyshevt(int(n), ZZ), ZZ)
if x is not None:
poly = Poly.new(poly, x)
else:
poly = PurePoly.new(poly, Dummy('x'))
return poly if polys else poly.as_expr()
def dup_chebyshevu(n, K):
"""Low-level implementation of Chebyshev polynomials of the 2nd kind. """
seq = [[K.one], [K(2), K.zero]]
for i in range(2, n + 1):
a = dup_mul_ground(dup_lshift(seq[-1], 1, K), K(2), K)
seq.append(dup_sub(a, seq[-2], K))
return seq[n]
@public
def chebyshevu_poly(n, x=None, polys=False):
"""Generates Chebyshev polynomial of the second kind of degree `n` in `x`.
Parameters
==========
n : int
Degree of the polynomial.
x : optional
polys : bool, optional
``polys=True`` returns a Poly object, otherwise
(default) returns an expression.
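Examples
========
A small usage sketch; the result is the classical expansion of `U_3`:
>>> from sympy.polys.orthopolys import chebyshevu_poly
>>> from sympy.abc import x
>>> chebyshevu_poly(3, x)
8*x**3 - 4*x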
"""
if n < 0:
raise ValueError(
"can't generate 2nd kind Chebyshev polynomial of degree %s" % n)
poly = DMP(dup_chebyshevu(int(n), ZZ), ZZ)
if x is not None:
poly = Poly.new(poly, x)
else:
poly = PurePoly.new(poly, Dummy('x'))
return poly if polys else poly.as_expr()
def dup_hermite(n, K):
"""Low-level implementation of Hermite polynomials. """
seq = [[K.one], [K(2), K.zero]]
for i in range(2, n + 1):
a = dup_lshift(seq[-1], 1, K)
b = dup_mul_ground(seq[-2], K(i - 1), K)
c = dup_mul_ground(dup_sub(a, b, K), K(2), K)
seq.append(c)
return seq[n]
@public
def hermite_poly(n, x=None, polys=False):
"""Generates Hermite polynomial of degree `n` in `x`.
Parameters
==========
n : int
Degree of the polynomial.
x : optional
polys : bool, optional
``polys=True`` returns a Poly object, otherwise
(default) returns an expression.
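Examples
========
A small usage sketch; the result is the physicists' Hermite polynomial `H_3`:
>>> from sympy.polys.orthopolys import hermite_poly
>>> from sympy.abc import x
>>> hermite_poly(3, x)
8*x**3 - 12*x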
"""
if n < 0:
raise ValueError("can't generate Hermite polynomial of degree %s" % n)
poly = DMP(dup_hermite(int(n), ZZ), ZZ)
if x is not None:
poly = Poly.new(poly, x)
else:
poly = PurePoly.new(poly, Dummy('x'))
return poly if polys else poly.as_expr()
def dup_legendre(n, K):
"""Low-level implementation of Legendre polynomials. """
seq = [[K.one], [K.one, K.zero]]
for i in range(2, n + 1):
a = dup_mul_ground(dup_lshift(seq[-1], 1, K), K(2*i - 1, i), K)
b = dup_mul_ground(seq[-2], K(i - 1, i), K)
seq.append(dup_sub(a, b, K))
return seq[n]
@public
def legendre_poly(n, x=None, polys=False):
"""Generates Legendre polynomial of degree `n` in `x`.
Parameters
==========
n : int
Degree of the polynomial.
x : optional
polys : bool, optional
``polys=True`` returns a Poly object, otherwise
(default) returns an expression.
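Examples
========
A small usage sketch; the result is the Legendre polynomial `P_3`:
>>> from sympy.polys.orthopolys import legendre_poly
>>> from sympy.abc import x
>>> legendre_poly(3, x)
5*x**3/2 - 3*x/2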
"""
if n < 0:
raise ValueError("can't generate Legendre polynomial of degree %s" % n)
poly = DMP(dup_legendre(int(n), QQ), QQ)
if x is not None:
poly = Poly.new(poly, x)
else:
poly = PurePoly.new(poly, Dummy('x'))
return poly if polys else poly.as_expr()
def dup_laguerre(n, alpha, K):
"""Low-level implementation of Laguerre polynomials. """
seq = [[K.zero], [K.one]]
for i in range(1, n + 1):
a = dup_mul(seq[-1], [-K.one/i, alpha/i + K(2*i - 1)/i], K)
b = dup_mul_ground(seq[-2], alpha/i + K(i - 1)/i, K)
seq.append(dup_sub(a, b, K))
return seq[-1]
@public
def laguerre_poly(n, x=None, alpha=None, polys=False):
"""Generates Laguerre polynomial of degree `n` in `x`.
Parameters
==========
n : int
Degree of the polynomial.
x : optional
alpha
Parameter of the Laguerre polynomial; decides the minimal domain
for the list of coefficients.
polys : bool, optional
``polys=True`` returns a Poly object, otherwise
(default) returns an expression.
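Examples
========
A small usage sketch; with the default ``alpha = 0`` the result is the
Laguerre polynomial `L_2`:
>>> from sympy.polys.orthopolys import laguerre_poly
>>> from sympy.abc import x
>>> laguerre_poly(2, x)
x**2/2 - 2*x + 1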
"""
if n < 0:
raise ValueError("can't generate Laguerre polynomial of degree %s" % n)
if alpha is not None:
K, alpha = construct_domain(
alpha, field=True) # XXX: ground_field=True
else:
K, alpha = QQ, QQ(0)
poly = DMP(dup_laguerre(int(n), alpha, K), K)
if x is not None:
poly = Poly.new(poly, x)
else:
poly = PurePoly.new(poly, Dummy('x'))
return poly if polys else poly.as_expr()
def dup_spherical_bessel_fn(n, K):
""" Low-level implementation of fn(n, x) """
seq = [[K.one], [K.one, K.zero]]
for i in range(2, n + 1):
a = dup_mul_ground(dup_lshift(seq[-1], 1, K), K(2*i - 1), K)
seq.append(dup_sub(a, seq[-2], K))
return dup_lshift(seq[n], 1, K)
def dup_spherical_bessel_fn_minus(n, K):
""" Low-level implementation of fn(-n, x) """
seq = [[K.one, K.zero], [K.zero]]
for i in range(2, n + 1):
a = dup_mul_ground(dup_lshift(seq[-1], 1, K), K(3 - 2*i), K)
seq.append(dup_sub(a, seq[-2], K))
return seq[n]
def spherical_bessel_fn(n, x=None, polys=False):
"""
Coefficients for the spherical Bessel functions.
Those are only needed in the jn() function.
The coefficients are calculated from:
fn(0, z) = 1/z
fn(1, z) = 1/z**2
fn(n-1, z) + fn(n+1, z) == (2*n+1)/z * fn(n, z)
Parameters
==========
n : int
Degree of the polynomial.
x : optional
polys : bool, optional
``polys=True`` returns a Poly object, otherwise
(default) returns an expression.
Examples
========
>>> from sympy.polys.orthopolys import spherical_bessel_fn as fn
>>> from sympy import Symbol
>>> z = Symbol("z")
>>> fn(1, z)
z**(-2)
>>> fn(2, z)
-1/z + 3/z**3
>>> fn(3, z)
-6/z**2 + 15/z**4
>>> fn(4, z)
1/z - 45/z**3 + 105/z**5
"""
if n < 0:
dup = dup_spherical_bessel_fn_minus(-int(n), ZZ)
else:
dup = dup_spherical_bessel_fn(int(n), ZZ)
poly = DMP(dup, ZZ)
if x is not None:
poly = Poly.new(poly, 1/x)
else:
poly = PurePoly.new(poly, 1/Dummy('x'))
return poly if polys else poly.as_expr()
"""
This module contains functions for two multivariate resultants. These
are:
- Dixon's resultant.
- Macaulay's resultant.
Multivariate resultants are used to identify whether a multivariate
system has common roots. That is when the resultant is equal to zero.
"""
from sympy import IndexedBase, Matrix, Mul, Poly
from sympy import rem, prod, degree_list, diag, simplify
from sympy.polys.monomials import itermonomials, monomial_deg
from sympy.polys.orderings import monomial_key
from sympy.polys.polytools import poly_from_expr, total_degree
from sympy.functions.combinatorial.factorials import binomial
from itertools import combinations_with_replacement
from sympy.utilities.exceptions import SymPyDeprecationWarning
class DixonResultant():
"""
A class for retrieving the Dixon's resultant of a multivariate
system.
Examples
========
>>> from sympy.core import symbols
>>> from sympy.polys.multivariate_resultants import DixonResultant
>>> x, y = symbols('x, y')
>>> p = x + y
>>> q = x ** 2 + y ** 3
>>> h = x ** 2 + y
>>> dixon = DixonResultant(variables=[x, y], polynomials=[p, q, h])
>>> poly = dixon.get_dixon_polynomial()
>>> matrix = dixon.get_dixon_matrix(polynomial=poly)
>>> matrix
Matrix([
[ 0, 0, -1, 0, -1],
[ 0, -1, 0, -1, 0],
[-1, 0, 1, 0, 0],
[ 0, -1, 0, 0, 1],
[-1, 0, 0, 1, 0]])
>>> matrix.det()
0
See Also
========
Notebook in examples: sympy/example/notebooks.
References
==========
.. [1] [Kapur1994]_
.. [2] [Palancz08]_
"""
def __init__(self, polynomials, variables):
"""
A class that takes two lists, a list of polynomials and a list of
variables, and computes the Dixon matrix of the multivariate system.
Parameters
==========
polynomials : list of polynomials
A list of m n-degree polynomials
variables: list
A list of all n variables
"""
self.polynomials = polynomials
self.variables = variables
self.n = len(self.variables)
self.m = len(self.polynomials)
a = IndexedBase("alpha")
# A list of n alpha variables (the replacing variables)
self.dummy_variables = [a[i] for i in range(self.n)]
# A list of the d_max of each variable.
self._max_degrees = [max(degree_list(poly)[i] for poly in self.polynomials)
for i in range(self.n)]
@property
def max_degrees(self):
SymPyDeprecationWarning(feature="max_degrees",
issue=17763,
deprecated_since_version="1.5").warn()
return self._max_degrees
def get_dixon_polynomial(self):
r"""
Returns
=======
dixon_polynomial: polynomial
Dixon's polynomial is calculated as:
delta = Delta(A) / ((x_1 - a_1) ... (x_n - a_n)) where,
A = |p_1(x_1,... x_n), ..., p_n(x_1,... x_n)|
|p_1(a_1,... x_n), ..., p_n(a_1,... x_n)|
|... , ..., ...|
|p_1(a_1,... a_n), ..., p_n(a_1,... a_n)|
"""
if self.m != (self.n + 1):
raise ValueError('Method invalid for given combination.')
# First row
rows = [self.polynomials]
temp = list(self.variables)
for idx in range(self.n):
temp[idx] = self.dummy_variables[idx]
substitution = {var: t for var, t in zip(self.variables, temp)}
rows.append([f.subs(substitution) for f in self.polynomials])
A = Matrix(rows)
terms = zip(self.variables, self.dummy_variables)
product_of_differences = Mul(*[a - b for a, b in terms])
dixon_polynomial = (A.det() / product_of_differences).factor()
return poly_from_expr(dixon_polynomial, self.dummy_variables)[0]
def get_upper_degree(self):
SymPyDeprecationWarning(feature="get_upper_degree",
useinstead="get_max_degrees",
issue=17763,
deprecated_since_version="1.5").warn()
list_of_products = [self.variables[i] ** self._max_degrees[i]
for i in range(self.n)]
product = prod(list_of_products)
product = Poly(product).monoms()
return monomial_deg(*product)
def get_max_degrees(self, polynomial):
r"""
Returns a list of the maximum degree of each variable appearing
in the coefficients of the Dixon polynomial. The coefficients are
viewed as polys in x_1, ... , x_n.
"""
deg_lists = [degree_list(Poly(poly, self.variables))
for poly in polynomial.coeffs()]
max_degrees = [max(degs) for degs in zip(*deg_lists)]
return max_degrees
def get_dixon_matrix(self, polynomial):
r"""
Construct the Dixon matrix from the coefficients of polynomial
\alpha. Each coefficient is viewed as a polynomial of x_1, ...,
x_n.
"""
max_degrees = self.get_max_degrees(polynomial)
# list of column headers of the Dixon matrix.
monomials = itermonomials(self.variables, max_degrees)
monomials = sorted(monomials, reverse=True,
key=monomial_key('lex', self.variables))
dixon_matrix = Matrix([[Poly(c, *self.variables).coeff_monomial(m)
for m in monomials]
for c in polynomial.coeffs()])
# remove columns if needed
if dixon_matrix.shape[0] != dixon_matrix.shape[1]:
keep = [column for column in range(dixon_matrix.shape[-1])
if any([element != 0 for element
in dixon_matrix[:, column]])]
dixon_matrix = dixon_matrix[:, keep]
return dixon_matrix
def KSY_precondition(self, matrix):
"""
Test for the validity of the Kapur-Saxena-Yang precondition.
The precondition requires that the column corresponding to the
monomial 1 = x_1 ^ 0 * x_2 ^ 0 * ... * x_n ^ 0 is not a linear
combination of the remaining ones. In sympy notation this is
the last column. For the precondition to hold the last non-zero
row of the rref matrix should be of the form [0, 0, ..., 1].
"""
if matrix.is_zero_matrix:
return False
m, n = matrix.shape
# simplify the matrix and keep only its non-zero rows
matrix = simplify(matrix.rref()[0])
rows = [i for i in range(m) if any(matrix[i, j] != 0 for j in range(n))]
matrix = matrix[rows,:]
condition = Matrix([[0]*(n-1) + [1]])
if matrix[-1,:] == condition:
return True
else:
return False
def delete_zero_rows_and_columns(self, matrix):
"""Remove the zero rows and columns of the matrix."""
rows = [
i for i in range(matrix.rows) if not matrix.row(i).is_zero_matrix]
cols = [
j for j in range(matrix.cols) if not matrix.col(j).is_zero_matrix]
return matrix[rows, cols]
def product_leading_entries(self, matrix):
"""Calculate the product of the leading entries of the matrix."""
res = 1
for row in range(matrix.rows):
for el in matrix.row(row):
if el != 0:
res = res * el
break
return res
def get_KSY_Dixon_resultant(self, matrix):
"""Calculate the Kapur-Saxena-Yang approach to the Dixon Resultant."""
matrix = self.delete_zero_rows_and_columns(matrix)
_, U, _ = matrix.LUdecomposition()
matrix = self.delete_zero_rows_and_columns(simplify(U))
return self.product_leading_entries(matrix)
class MacaulayResultant():
"""
A class for calculating the Macaulay resultant. Note that the
polynomials must be homogenized and their coefficients must be
given as symbols.
Examples
========
>>> from sympy.core import symbols
>>> from sympy.polys.multivariate_resultants import MacaulayResultant
>>> x, y, z = symbols('x, y, z')
>>> a_0, a_1, a_2 = symbols('a_0, a_1, a_2')
>>> b_0, b_1, b_2 = symbols('b_0, b_1, b_2')
>>> c_0, c_1, c_2,c_3, c_4 = symbols('c_0, c_1, c_2, c_3, c_4')
>>> f = a_0 * y - a_1 * x + a_2 * z
>>> g = b_1 * x ** 2 + b_0 * y ** 2 - b_2 * z ** 2
>>> h = c_0 * y * z ** 2 - c_1 * x ** 3 + c_2 * x ** 2 * z - c_3 * x * z ** 2 + c_4 * z ** 3
>>> mac = MacaulayResultant(polynomials=[f, g, h], variables=[x, y, z])
>>> mac.monomial_set
[x**4, x**3*y, x**3*z, x**2*y**2, x**2*y*z, x**2*z**2, x*y**3,
x*y**2*z, x*y*z**2, x*z**3, y**4, y**3*z, y**2*z**2, y*z**3, z**4]
>>> matrix = mac.get_matrix()
>>> submatrix = mac.get_submatrix(matrix)
>>> submatrix
Matrix([
[-a_1, a_0, a_2, 0],
[ 0, -a_1, 0, 0],
[ 0, 0, -a_1, 0],
[ 0, 0, 0, -a_1]])
See Also
========
Notebook in examples: sympy/example/notebooks.
References
==========
.. [1] [Bruce97]_
.. [2] [Stiller96]_
"""
def __init__(self, polynomials, variables):
"""
Parameters
==========
variables: list
A list of all n variables
polynomials : list of sympy polynomials
A list of m n-degree polynomials
"""
self.polynomials = polynomials
self.variables = variables
self.n = len(variables)
# A list of the d_max of each polynomial.
self.degrees = [total_degree(poly, *self.variables) for poly
in self.polynomials]
self.degree_m = self._get_degree_m()
self.monomials_size = self.get_size()
# The set T of all possible monomials of degree degree_m
self.monomial_set = self.get_monomials_of_certain_degree(self.degree_m)
def _get_degree_m(self):
r"""
Returns
=======
degree_m: int
The degree_m is calculated as `1 + \sum_{i=1}^{n} (d_i - 1)`,
where `d_i` is the degree of the i-th polynomial.
"""
return 1 + sum(d - 1 for d in self.degrees)
def get_size(self):
r"""
Returns
=======
size: int
The size of set T. Set T is the set of all possible
monomials of the n variables for degree equal to the
degree_m
"""
return binomial(self.degree_m + self.n - 1, self.n - 1)
def get_monomials_of_certain_degree(self, degree):
"""
Returns
=======
monomials: list
A list of monomials of a certain degree.
"""
monomials = [Mul(*monomial) for monomial
in combinations_with_replacement(self.variables,
degree)]
return sorted(monomials, reverse=True,
key=monomial_key('lex', self.variables))
def get_row_coefficients(self):
"""
Returns
=======
row_coefficients: list
The row coefficients of Macaulay's matrix
"""
row_coefficients = []
divisible = []
for i in range(self.n):
if i == 0:
degree = self.degree_m - self.degrees[i]
monomial = self.get_monomials_of_certain_degree(degree)
row_coefficients.append(monomial)
else:
divisible.append(self.variables[i - 1] **
self.degrees[i - 1])
degree = self.degree_m - self.degrees[i]
poss_rows = self.get_monomials_of_certain_degree(degree)
for div in divisible:
for p in poss_rows:
if rem(p, div) == 0:
poss_rows = [item for item in poss_rows
if item != p]
row_coefficients.append(poss_rows)
return row_coefficients
def get_matrix(self):
"""
Returns
=======
macaulay_matrix: Matrix
The Macaulay numerator matrix
"""
rows = []
row_coefficients = self.get_row_coefficients()
for i in range(self.n):
for multiplier in row_coefficients[i]:
coefficients = []
poly = Poly(self.polynomials[i] * multiplier,
*self.variables)
for mono in self.monomial_set:
coefficients.append(poly.coeff_monomial(mono))
rows.append(coefficients)
macaulay_matrix = Matrix(rows)
return macaulay_matrix
def get_reduced_nonreduced(self):
r"""
Returns
=======
reduced: list
A list of the reduced monomials
non_reduced: list
A list of the monomials that are not reduced
Definition
==========
A polynomial is said to be reduced in x_i, if its degree (the
maximum degree of its monomials) in x_i is less than d_i. A
polynomial that is reduced in all variables but one is said
simply to be reduced.
"""
divisible = []
for m in self.monomial_set:
temp = []
for i, v in enumerate(self.variables):
temp.append(bool(total_degree(m, v) >= self.degrees[i]))
divisible.append(temp)
reduced = [i for i, r in enumerate(divisible)
if sum(r) < self.n - 1]
non_reduced = [i for i, r in enumerate(divisible)
if sum(r) >= self.n -1]
return reduced, non_reduced
def get_submatrix(self, matrix):
r"""
Returns
=======
macaulay_submatrix: Matrix
The Macaulay denominator matrix. Columns that are non reduced are kept.
The row which contains one of the a_{i}s is dropped. a_{i}s
are the coefficients of x_i ^ {d_i}.
"""
reduced, non_reduced = self.get_reduced_nonreduced()
# if reduced == [], then det(matrix) should be 1
if reduced == []:
return diag([1])
# reduced != []
reduction_set = [v ** self.degrees[i] for i, v
in enumerate(self.variables)]
ais = list([self.polynomials[i].coeff(reduction_set[i])
for i in range(self.n)])
reduced_matrix = matrix[:, reduced]
keep = []
for row in range(reduced_matrix.rows):
check = [ai in reduced_matrix[row, :] for ai in ais]
if True not in check:
keep.append(row)
return matrix[keep, non_reduced]
"""Useful utilities for higher level polynomial classes. """
from __future__ import print_function, division
from sympy.core import (S, Add, Mul, Pow, Eq, Expr,
expand_mul, expand_multinomial)
from sympy.core.exprtools import decompose_power, decompose_power_rat
from sympy.polys.polyerrors import PolynomialError, GeneratorsError
from sympy.polys.polyoptions import build_options
import re
_gens_order = {
'a': 301, 'b': 302, 'c': 303, 'd': 304,
'e': 305, 'f': 306, 'g': 307, 'h': 308,
'i': 309, 'j': 310, 'k': 311, 'l': 312,
'm': 313, 'n': 314, 'o': 315, 'p': 216,
'q': 217, 'r': 218, 's': 219, 't': 220,
'u': 221, 'v': 222, 'w': 223, 'x': 124,
'y': 125, 'z': 126,
}
_max_order = 1000
_re_gen = re.compile(r"^(.+?)(\d*)$")
def _nsort(roots, separated=False):
"""Sort the numerical roots putting the real roots first, then sorting
according to real and imaginary parts. If ``separated`` is True, then
the real and imaginary roots will be returned in two lists, respectively.
This routine tries to avoid issue 6137 by separating the roots into real
and imaginary parts before evaluation. In addition, the sorting will raise
an error if any computation cannot be done with precision.
"""
if not all(r.is_number for r in roots):
raise NotImplementedError
# see issue 6137:
# get the real part of the evaluated real and imaginary parts of each root
key = [[i.n(2).as_real_imag()[0] for i in r.as_real_imag()] for r in roots]
# make sure the parts were computed with precision
if any(i._prec == 1 for k in key for i in k):
raise NotImplementedError("could not compute root with precision")
# insert a key to indicate if the root has an imaginary part
key = [(1 if i else 0, r, i) for r, i in key]
key = sorted(zip(key, roots))
# return the real and imaginary roots separately if desired
if separated:
r = []
i = []
for (im, _, _), v in key:
if im:
i.append(v)
else:
r.append(v)
return r, i
_, roots = zip(*key)
return list(roots)
def _sort_gens(gens, **args):
"""Sort generators in a reasonably intelligent way. """
opt = build_options(args)
gens_order, wrt = {}, None
if opt is not None:
gens_order, wrt = {}, opt.wrt
for i, gen in enumerate(opt.sort):
gens_order[gen] = i + 1
def order_key(gen):
gen = str(gen)
if wrt is not None:
try:
return (-len(wrt) + wrt.index(gen), gen, 0)
except ValueError:
pass
name, index = _re_gen.match(gen).groups()
if index:
index = int(index)
else:
index = 0
try:
return ( gens_order[name], name, index)
except KeyError:
pass
try:
return (_gens_order[name], name, index)
except KeyError:
pass
return (_max_order, name, index)
try:
gens = sorted(gens, key=order_key)
except TypeError: # pragma: no cover
pass
return tuple(gens)
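# Illustrative sketch (hypothetical generators): with the default ordering
# above, ``x``, ``y``, ``z`` come first, then ``p``..``w``, then ``a``..``o``,
# so for Symbols a, q, x
#
#   _sort_gens([a, x, q])   # expected: (x, q, a)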
def _unify_gens(f_gens, g_gens):
"""Unify generators in a reasonably intelligent way. """
f_gens = list(f_gens)
g_gens = list(g_gens)
if f_gens == g_gens:
return tuple(f_gens)
gens, common, k = [], [], 0
for gen in f_gens:
if gen in g_gens:
common.append(gen)
for i, gen in enumerate(g_gens):
if gen in common:
g_gens[i], k = common[k], k + 1
for gen in common:
i = f_gens.index(gen)
gens.extend(f_gens[:i])
f_gens = f_gens[i + 1:]
i = g_gens.index(gen)
gens.extend(g_gens[:i])
g_gens = g_gens[i + 1:]
gens.append(gen)
gens.extend(f_gens)
gens.extend(g_gens)
return tuple(gens)
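# Illustrative sketch (hypothetical generators): common generators keep their
# relative position and the remaining ones are merged around them, e.g. for
# Symbols x, y, z
#
#   _unify_gens([x, y], [y, z])   # expected: (x, y, z)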
def _analyze_gens(gens):
"""Support for passing generators as `*gens` and `[gens]`. """
if len(gens) == 1 and hasattr(gens[0], '__iter__'):
return tuple(gens[0])
else:
return tuple(gens)
def _sort_factors(factors, **args):
"""Sort low-level factors in increasing 'complexity' order. """
def order_if_multiple_key(factor):
(f, n) = factor
return (len(f), n, f)
def order_no_multiple_key(f):
return (len(f), f)
if args.get('multiple', True):
return sorted(factors, key=order_if_multiple_key)
else:
return sorted(factors, key=order_no_multiple_key)
illegal = [S.NaN, S.Infinity, S.NegativeInfinity, S.ComplexInfinity]
finf = [float(i) for i in illegal[1:3]]
def _not_a_coeff(expr):
"""Do not treat NaN and infinities as valid polynomial coefficients. """
if expr in illegal or expr in finf:
return True
if type(expr) is float and float(expr) != expr:
return True # nan
return # could be
def _parallel_dict_from_expr_if_gens(exprs, opt):
"""Transform expressions into a multinomial form given generators. """
k, indices = len(opt.gens), {}
for i, g in enumerate(opt.gens):
indices[g] = i
polys = []
for expr in exprs:
poly = {}
if expr.is_Equality:
expr = expr.lhs - expr.rhs
for term in Add.make_args(expr):
coeff, monom = [], [0]*k
for factor in Mul.make_args(term):
if not _not_a_coeff(factor) and factor.is_Number:
coeff.append(factor)
else:
try:
if opt.series is False:
base, exp = decompose_power(factor)
if exp < 0:
exp, base = -exp, Pow(base, -S.One)
else:
base, exp = decompose_power_rat(factor)
monom[indices[base]] = exp
except KeyError:
if not factor.free_symbols.intersection(opt.gens):
coeff.append(factor)
else:
raise PolynomialError("%s contains an element of "
"the set of generators." % factor)
monom = tuple(monom)
if monom in poly:
poly[monom] += Mul(*coeff)
else:
poly[monom] = Mul(*coeff)
polys.append(poly)
return polys, opt.gens
def _parallel_dict_from_expr_no_gens(exprs, opt):
"""Transform expressions into a multinomial form and figure out generators. """
if opt.domain is not None:
def _is_coeff(factor):
return factor in opt.domain
elif opt.extension is True:
def _is_coeff(factor):
return factor.is_algebraic
elif opt.greedy is not False:
def _is_coeff(factor):
return False
else:
def _is_coeff(factor):
return factor.is_number
gens, reprs = set([]), []
for expr in exprs:
terms = []
if expr.is_Equality:
expr = expr.lhs - expr.rhs
for term in Add.make_args(expr):
coeff, elements = [], {}
for factor in Mul.make_args(term):
if not _not_a_coeff(factor) and (factor.is_Number or _is_coeff(factor)):
coeff.append(factor)
else:
if opt.series is False:
base, exp = decompose_power(factor)
if exp < 0:
exp, base = -exp, Pow(base, -S.One)
else:
base, exp = decompose_power_rat(factor)
elements[base] = elements.setdefault(base, 0) + exp
gens.add(base)
terms.append((coeff, elements))
reprs.append(terms)
gens = _sort_gens(gens, opt=opt)
k, indices = len(gens), {}
for i, g in enumerate(gens):
indices[g] = i
polys = []
for terms in reprs:
poly = {}
for coeff, term in terms:
monom = [0]*k
for base, exp in term.items():
monom[indices[base]] = exp
monom = tuple(monom)
if monom in poly:
poly[monom] += Mul(*coeff)
else:
poly[monom] = Mul(*coeff)
polys.append(poly)
return polys, tuple(gens)
def _dict_from_expr_if_gens(expr, opt):
"""Transform an expression into a multinomial form given generators. """
(poly,), gens = _parallel_dict_from_expr_if_gens((expr,), opt)
return poly, gens
def _dict_from_expr_no_gens(expr, opt):
"""Transform an expression into a multinomial form and figure out generators. """
(poly,), gens = _parallel_dict_from_expr_no_gens((expr,), opt)
return poly, gens
def parallel_dict_from_expr(exprs, **args):
"""Transform expressions into a multinomial form. """
reps, opt = _parallel_dict_from_expr(exprs, build_options(args))
return reps, opt.gens
def _parallel_dict_from_expr(exprs, opt):
"""Transform expressions into a multinomial form. """
if opt.expand is not False:
exprs = [ expr.expand() for expr in exprs ]
if any(expr.is_commutative is False for expr in exprs):
raise PolynomialError('non-commutative expressions are not supported')
if opt.gens:
reps, gens = _parallel_dict_from_expr_if_gens(exprs, opt)
else:
reps, gens = _parallel_dict_from_expr_no_gens(exprs, opt)
return reps, opt.clone({'gens': gens})
def dict_from_expr(expr, **args):
"""Transform an expression into a multinomial form. """
rep, opt = _dict_from_expr(expr, build_options(args))
return rep, opt.gens
def _dict_from_expr(expr, opt):
"""Transform an expression into a multinomial form. """
if expr.is_commutative is False:
raise PolynomialError('non-commutative expressions are not supported')
def _is_expandable_pow(expr):
return (expr.is_Pow and expr.exp.is_positive and expr.exp.is_Integer
and expr.base.is_Add)
if opt.expand is not False:
if not isinstance(expr, (Expr, Eq)):
raise PolynomialError('expression must be of type Expr')
expr = expr.expand()
# TODO: Integrate this into expand() itself
while any(_is_expandable_pow(i) or i.is_Mul and
any(_is_expandable_pow(j) for j in i.args) for i in
Add.make_args(expr)):
expr = expand_multinomial(expr)
while any(i.is_Mul and any(j.is_Add for j in i.args) for i in Add.make_args(expr)):
expr = expand_mul(expr)
if opt.gens:
rep, gens = _dict_from_expr_if_gens(expr, opt)
else:
rep, gens = _dict_from_expr_no_gens(expr, opt)
return rep, opt.clone({'gens': gens})
def expr_from_dict(rep, *gens):
"""Convert a multinomial form into an expression. """
result = []
for monom, coeff in rep.items():
term = [coeff]
for g, m in zip(gens, monom):
if m:
term.append(Pow(g, m))
result.append(Mul(*term))
return Add(*result)
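# Round-trip sketch relating ``dict_from_expr`` and ``expr_from_dict``
# (assuming x, y from sympy.abc); monomials are exponent tuples over the
# sorted generators, so one should get:
#
#     >>> from sympy.abc import x, y
#     >>> rep, gens = dict_from_expr(x**2*y + 3)
#     >>> gens
#     (x, y)
#     >>> rep == {(2, 1): 1, (0, 0): 3}
#     True
#     >>> expr_from_dict(rep, *gens)
#     x**2*y + 3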
parallel_dict_from_basic = parallel_dict_from_expr
dict_from_basic = dict_from_expr
basic_from_dict = expr_from_dict
def _dict_reorder(rep, gens, new_gens):
"""Reorder levels using dict representation. """
gens = list(gens)
monoms = rep.keys()
coeffs = rep.values()
new_monoms = [ [] for _ in range(len(rep)) ]
used_indices = set()
for gen in new_gens:
try:
j = gens.index(gen)
used_indices.add(j)
for M, new_M in zip(monoms, new_monoms):
new_M.append(M[j])
except ValueError:
for new_M in new_monoms:
new_M.append(0)
for i, _ in enumerate(gens):
if i not in used_indices:
for monom in monoms:
if monom[i]:
raise GeneratorsError("unable to drop generators")
return map(tuple, new_monoms), coeffs
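# Sketch (assuming x, y are the current generators): reordering generators
# permutes the exponent positions, e.g. a representation ``{(1, 2): 3}`` over
# ``(x, y)`` reordered to ``(y, x)`` should yield the monomial ``(2, 1)`` with
# the same coefficient; dropping a generator that actually occurs raises
# ``GeneratorsError``.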
class PicklableWithSlots(object):
"""
Mixin class that allows pickling of objects with ``__slots__``.
Examples
========
First define a class that mixes :class:`PicklableWithSlots` in::
>>> from sympy.polys.polyutils import PicklableWithSlots
>>> class Some(PicklableWithSlots):
... __slots__ = ('foo', 'bar')
...
... def __init__(self, foo, bar):
... self.foo = foo
... self.bar = bar
To make :mod:`pickle` happy in doctests we have to use these hacks::
>>> from sympy.core.compatibility import builtins
>>> builtins.Some = Some
>>> from sympy.polys import polyutils
>>> polyutils.Some = Some
Next let's see if we can create an instance, pickle it and unpickle it::
>>> some = Some('abc', 10)
>>> some.foo, some.bar
('abc', 10)
>>> from pickle import dumps, loads
>>> some2 = loads(dumps(some))
>>> some2.foo, some2.bar
('abc', 10)
"""
__slots__ = ()
def __getstate__(self, cls=None):
if cls is None:
# This is the case for the instance that gets pickled
cls = self.__class__
d = {}
# Get all data that should be stored from super classes
for c in cls.__bases__:
if hasattr(c, "__getstate__"):
d.update(c.__getstate__(self, c))
# Get all information that should be stored from cls and return the dict
for name in cls.__slots__:
if hasattr(self, name):
d[name] = getattr(self, name)
return d
def __setstate__(self, d):
# All values that were pickled are now assigned to a fresh instance
for name, value in d.items():
try:
setattr(self, name, value)
except AttributeError: # This is needed in cases like Rational :> Half
pass
|
cddd45209b4a3869b6061e4a93181de95a8c0ada4e0585d9df5f3c2b4954e730 | """Computational algebraic field theory. """
from __future__ import print_function, division
from sympy import (
S, Rational, AlgebraicNumber, GoldenRatio, TribonacciConstant,
Add, Mul, sympify, Dummy, expand_mul, I, pi
)
from sympy.functions import sqrt, cbrt
from sympy.core.compatibility import reduce
from sympy.core.exprtools import Factors
from sympy.core.function import _mexpand
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.trigonometric import cos, sin
from sympy.ntheory import sieve
from sympy.ntheory.factor_ import divisors
from sympy.polys.domains import ZZ, QQ
from sympy.polys.orthopolys import dup_chebyshevt
from sympy.polys.polyerrors import (
IsomorphismFailed,
CoercionFailed,
NotAlgebraic,
GeneratorsError,
)
from sympy.polys.polytools import (
Poly, PurePoly, invert, factor_list, groebner, resultant,
degree, poly_from_expr, parallel_poly_from_expr, lcm
)
from sympy.polys.polyutils import dict_from_expr, expr_from_dict
from sympy.polys.ring_series import rs_compose_add
from sympy.polys.rings import ring
from sympy.polys.rootoftools import CRootOf
from sympy.polys.specialpolys import cyclotomic_poly
from sympy.printing.lambdarepr import LambdaPrinter
from sympy.printing.pycode import PythonCodePrinter, MpmathPrinter
from sympy.simplify.radsimp import _split_gcd
from sympy.simplify.simplify import _is_sum_surds
from sympy.utilities import (
numbered_symbols, variations, lambdify, public, sift
)
from mpmath import pslq, mp
def _choose_factor(factors, x, v, dom=QQ, prec=200, bound=5):
"""
Return a factor having root ``v``
It is assumed that one of the factors has root ``v``.
"""
if isinstance(factors[0], tuple):
factors = [f[0] for f in factors]
if len(factors) == 1:
return factors[0]
points = {x:v}
symbols = dom.symbols if hasattr(dom, 'symbols') else []
t = QQ(1, 10)
for n in range(bound**len(symbols)):
prec1 = 10
n_temp = n
for s in symbols:
points[s] = n_temp % bound
n_temp = n_temp // bound
while True:
candidates = []
eps = t**(prec1 // 2)
for f in factors:
if abs(f.as_expr().evalf(prec1, points)) < eps:
candidates.append(f)
if candidates:
factors = candidates
if len(factors) == 1:
return factors[0]
if prec1 > prec:
break
prec1 *= 2
raise NotImplementedError("multiple candidates for the minimal polynomial of %s" % v)
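# Minimal sketch of the selection step (assuming x from sympy.abc): only the
# candidate that vanishes at ``v`` survives the numerical filter, so the call
# below should return ``x - 1``:
#
#     >>> from sympy import S
#     >>> from sympy.abc import x
#     >>> _choose_factor([x - 1, x + 1], x, S.One)
#     x - 1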
def _separate_sq(p):
"""
Helper function for ``_minimal_polynomial_sq``.
It selects a rational ``g`` such that the polynomial ``p``
consists of a sum of terms whose surds squared have gcd equal to ``g``
and a sum of terms with surds squared coprime to ``g``;
then it takes the field norm to eliminate ``sqrt(g)``.
See simplify.simplify.split_surds and polytools.sqf_norm.
Examples
========
>>> from sympy import sqrt
>>> from sympy.abc import x
>>> from sympy.polys.numberfields import _separate_sq
>>> p= -x + sqrt(2) + sqrt(3) + sqrt(7)
>>> p = _separate_sq(p); p
-x**2 + 2*sqrt(3)*x + 2*sqrt(7)*x - 2*sqrt(21) - 8
>>> p = _separate_sq(p); p
-x**4 + 4*sqrt(7)*x**3 - 32*x**2 + 8*sqrt(7)*x + 20
>>> p = _separate_sq(p); p
-x**8 + 48*x**6 - 536*x**4 + 1728*x**2 - 400
"""
from sympy.utilities.iterables import sift
def is_sqrt(expr):
return expr.is_Pow and expr.exp is S.Half
# p = c1*sqrt(q1) + ... + cn*sqrt(qn) -> a = [(c1, q1), .., (cn, qn)]
a = []
for y in p.args:
if not y.is_Mul:
if is_sqrt(y):
a.append((S.One, y**2))
elif y.is_Atom:
a.append((y, S.One))
elif y.is_Pow and y.exp.is_integer:
a.append((y, S.One))
else:
raise NotImplementedError
continue
T, F = sift(y.args, is_sqrt, binary=True)
a.append((Mul(*F), Mul(*T)**2))
a.sort(key=lambda z: z[1])
if a[-1][1] is S.One:
# there are no surds
return p
surds = [z for y, z in a]
for i in range(len(surds)):
if surds[i] != 1:
break
g, b1, b2 = _split_gcd(*surds[i:])
a1 = []
a2 = []
for y, z in a:
if z in b1:
a1.append(y*z**S.Half)
else:
a2.append(y*z**S.Half)
p1 = Add(*a1)
p2 = Add(*a2)
p = _mexpand(p1**2) - _mexpand(p2**2)
return p
def _minimal_polynomial_sq(p, n, x):
"""
Returns the minimal polynomial for the ``nth-root`` of a sum of surds
or ``None`` if it fails.
Parameters
==========
p : sum of surds
n : positive integer
x : variable of the returned polynomial
Examples
========
>>> from sympy.polys.numberfields import _minimal_polynomial_sq
>>> from sympy import sqrt
>>> from sympy.abc import x
>>> q = 1 + sqrt(2) + sqrt(3)
>>> _minimal_polynomial_sq(q, 3, x)
x**12 - 4*x**9 - 4*x**6 + 16*x**3 - 8
"""
from sympy.simplify.simplify import _is_sum_surds
p = sympify(p)
n = sympify(n)
if not n.is_Integer or not n > 0 or not _is_sum_surds(p):
return None
pn = p**Rational(1, n)
# eliminate the square roots
p -= x
while 1:
p1 = _separate_sq(p)
if p1 is p:
p = p1.subs({x:x**n})
break
else:
p = p1
# _separate_sq eliminates field extensions in a minimal way, so that
# if n = 1 then `p = constant*(minimal_polynomial(p))`
# if n > 1 it contains the minimal polynomial as a factor.
if n == 1:
p1 = Poly(p)
if p.coeff(x**p1.degree(x)) < 0:
p = -p
p = p.primitive()[1]
return p
# by construction `p` has root `pn`
# the minimal polynomial is the factor vanishing in x = pn
factors = factor_list(p)[1]
result = _choose_factor(factors, x, pn)
return result
def _minpoly_op_algebraic_element(op, ex1, ex2, x, dom, mp1=None, mp2=None):
"""
return the minimal polynomial for ``op(ex1, ex2)``
Parameters
==========
op : operation ``Add`` or ``Mul``
ex1, ex2 : expressions for the algebraic elements
x : indeterminate of the polynomials
dom: ground domain
mp1, mp2 : minimal polynomials for ``ex1`` and ``ex2`` or None
Examples
========
>>> from sympy import sqrt, Add, Mul, QQ
>>> from sympy.polys.numberfields import _minpoly_op_algebraic_element
>>> from sympy.abc import x, y
>>> p1 = sqrt(sqrt(2) + 1)
>>> p2 = sqrt(sqrt(2) - 1)
>>> _minpoly_op_algebraic_element(Mul, p1, p2, x, QQ)
x - 1
>>> q1 = sqrt(y)
>>> q2 = 1 / y
>>> _minpoly_op_algebraic_element(Add, q1, q2, x, QQ.frac_field(y))
x**2*y**2 - 2*x*y - y**3 + 1
References
==========
.. [1] https://en.wikipedia.org/wiki/Resultant
.. [2] I.M. Isaacs, Proc. Amer. Math. Soc. 25 (1970), 638
"Degrees of sums in a separable field extension".
"""
y = Dummy(str(x))
if mp1 is None:
mp1 = _minpoly_compose(ex1, x, dom)
if mp2 is None:
mp2 = _minpoly_compose(ex2, y, dom)
else:
mp2 = mp2.subs({x: y})
if op is Add:
# mp1a = mp1.subs({x: x - y})
if dom == QQ:
R, X = ring('X', QQ)
p1 = R(dict_from_expr(mp1)[0])
p2 = R(dict_from_expr(mp2)[0])
else:
(p1, p2), _ = parallel_poly_from_expr((mp1, x - y), x, y)
r = p1.compose(p2)
mp1a = r.as_expr()
elif op is Mul:
mp1a = _muly(mp1, x, y)
else:
raise NotImplementedError('option not available')
if op is Mul or dom != QQ:
r = resultant(mp1a, mp2, gens=[y, x])
else:
r = rs_compose_add(p1, p2)
r = expr_from_dict(r.as_expr_dict(), x)
deg1 = degree(mp1, x)
deg2 = degree(mp2, y)
if op is Mul and deg1 == 1 or deg2 == 1:
# if deg1 = 1, then mp1 = x - a; mp1a = x - y - a;
# r = mp2(x - a), so that `r` is irreducible
return r
r = Poly(r, x, domain=dom)
_, factors = r.factor_list()
res = _choose_factor(factors, x, op(ex1, ex2), dom)
return res.as_expr()
def _invertx(p, x):
"""
Returns ``expand_mul(x**degree(p, x)*p.subs(x, 1/x))``
"""
p1 = poly_from_expr(p, x)[0]
n = degree(p1)
a = [c * x**(n - i) for (i,), c in p1.terms()]
return Add(*a)
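# Sketch (assuming x from sympy.abc): the coefficient list is simply reversed,
# so the call below should give ``3*x**2 + 2*x + 1``:
#
#     >>> from sympy.abc import x
#     >>> _invertx(x**2 + 2*x + 3, x)
#     3*x**2 + 2*x + 1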
def _muly(p, x, y):
"""
Returns ``_mexpand(y**deg*p.subs({x:x / y}))``
"""
p1 = poly_from_expr(p, x)[0]
n = degree(p1)
a = [c * x**i * y**(n - i) for (i,), c in p1.terms()]
return Add(*a)
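# Sketch (assuming x, y from sympy.abc): each term is homogenized with ``y``,
# so the call below should give ``x**2 + 2*x*y + 3*y**2``:
#
#     >>> from sympy.abc import x, y
#     >>> _muly(x**2 + 2*x + 3, x, y)
#     x**2 + 2*x*y + 3*y**2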
def _minpoly_pow(ex, pw, x, dom, mp=None):
"""
Returns ``minpoly(ex**pw, x)``
Parameters
==========
ex : algebraic element
pw : rational number
x : indeterminate of the polynomial
dom: ground domain
mp : minimal polynomial of ``ex`` or None
Examples
========
>>> from sympy import sqrt, QQ, Rational
>>> from sympy.polys.numberfields import _minpoly_pow, minpoly
>>> from sympy.abc import x, y
>>> p = sqrt(1 + sqrt(2))
>>> _minpoly_pow(p, 2, x, QQ)
x**2 - 2*x - 1
>>> minpoly(p**2, x)
x**2 - 2*x - 1
>>> _minpoly_pow(y, Rational(1, 3), x, QQ.frac_field(y))
x**3 - y
>>> minpoly(y**Rational(1, 3), x)
x**3 - y
"""
pw = sympify(pw)
if not mp:
mp = _minpoly_compose(ex, x, dom)
if not pw.is_rational:
raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex)
if pw < 0:
if mp == x:
raise ZeroDivisionError('%s is zero' % ex)
mp = _invertx(mp, x)
if pw == -1:
return mp
pw = -pw
ex = 1/ex
y = Dummy(str(x))
mp = mp.subs({x: y})
n, d = pw.as_numer_denom()
res = Poly(resultant(mp, x**d - y**n, gens=[y]), x, domain=dom)
_, factors = res.factor_list()
res = _choose_factor(factors, x, ex**pw, dom)
return res.as_expr()
def _minpoly_add(x, dom, *a):
"""
Returns ``minpoly(Add(*a), x)`` computed over the domain ``dom``.
"""
mp = _minpoly_op_algebraic_element(Add, a[0], a[1], x, dom)
p = a[0] + a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Add, p, px, x, dom, mp1=mp)
p = p + px
return mp
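# For orientation (assuming x from sympy.abc): adding sqrt(2) and sqrt(3)
# should reproduce the classical minimal polynomial of sqrt(2) + sqrt(3):
#
#     >>> from sympy import sqrt, QQ
#     >>> from sympy.abc import x
#     >>> _minpoly_add(x, QQ, sqrt(2), sqrt(3))
#     x**4 - 10*x**2 + 1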
def _minpoly_mul(x, dom, *a):
"""
Returns ``minpoly(Mul(*a), x)`` computed over the domain ``dom``.
"""
mp = _minpoly_op_algebraic_element(Mul, a[0], a[1], x, dom)
p = a[0] * a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Mul, p, px, x, dom, mp1=mp)
p = p * px
return mp
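# Similarly, the product sqrt(2)*sqrt(3) = sqrt(6) should give:
#
#     >>> from sympy import sqrt, QQ
#     >>> from sympy.abc import x
#     >>> _minpoly_mul(x, QQ, sqrt(2), sqrt(3))
#     x**2 - 6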
def _minpoly_sin(ex, x):
"""
Returns the minimal polynomial of ``sin(ex)``
see http://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
if c.is_rational:
n = c.q
q = sympify(n)
if q.is_prime:
# for a = pi*p/q with q odd prime, using chebyshevt
# write sin(q*a) = mp(sin(a))*sin(a);
# the roots of mp(x) are sin(pi*p/q) for p = 1,..., q - 1
a = dup_chebyshevt(n, ZZ)
return Add(*[x**(n - i - 1)*a[i] for i in range(n)])
if c.p == 1:
if q == 9:
return 64*x**6 - 96*x**4 + 36*x**2 - 3
if n % 2 == 1:
# for a = pi*p/q with q odd, use
# sin(q*a) = 0 to see that the minimal polynomial must be
# a factor of dup_chebyshevt(n, ZZ)
a = dup_chebyshevt(n, ZZ)
a = [x**(n - i)*a[i] for i in range(n + 1)]
r = Add(*a)
_, factors = factor_list(r)
res = _choose_factor(factors, x, ex)
return res
expr = ((1 - cos(2*c*pi))/2)**S.Half
res = _minpoly_compose(expr, x, QQ)
return res
raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex)
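# Example of the prime-denominator branch above (assuming x from sympy.abc):
# sin(pi/7) should lead to the degree six Chebyshev-derived polynomial:
#
#     >>> from sympy import sin, pi
#     >>> from sympy.abc import x
#     >>> minimal_polynomial(sin(pi/7), x)
#     64*x**6 - 112*x**4 + 56*x**2 - 7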
def _minpoly_cos(ex, x):
"""
Returns the minimal polynomial of ``cos(ex)``
see http://mathworld.wolfram.com/TrigonometryAngles.html
"""
from sympy import sqrt
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
if c.is_rational:
if c.p == 1:
if c.q == 7:
return 8*x**3 - 4*x**2 - 4*x + 1
if c.q == 9:
return 8*x**3 - 6*x - 1
elif c.p == 2:
q = sympify(c.q)
if q.is_prime:
s = _minpoly_sin(ex, x)
return _mexpand(s.subs({x:sqrt((1 - x)/2)}))
# for a = pi*p/q, cos(q*a) = T_q(cos(a)) = (-1)**p
n = int(c.q)
a = dup_chebyshevt(n, ZZ)
a = [x**(n - i)*a[i] for i in range(n + 1)]
r = Add(*a) - (-1)**c.p
_, factors = factor_list(r)
res = _choose_factor(factors, x, ex)
return res
raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex)
def _minpoly_exp(ex, x):
"""
Returns the minimal polynomial of ``exp(ex)``
"""
c, a = ex.args[0].as_coeff_Mul()
q = sympify(c.q)
if a == I*pi:
if c.is_rational:
if c.p == 1 or c.p == -1:
if q == 3:
return x**2 - x + 1
if q == 4:
return x**4 + 1
if q == 6:
return x**4 - x**2 + 1
if q == 8:
return x**8 + 1
if q == 9:
return x**6 - x**3 + 1
if q == 10:
return x**8 - x**6 + x**4 - x**2 + 1
if q.is_prime:
s = 0
for i in range(q):
s += (-x)**i
return s
# x**(2*q) = product(factors)
factors = [cyclotomic_poly(i, x) for i in divisors(2*q)]
mp = _choose_factor(factors, x, ex)
return mp
else:
raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex)
raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex)
def _minpoly_rootof(ex, x):
"""
Returns the minimal polynomial of a ``CRootOf`` object.
"""
p = ex.expr
p = p.subs({ex.poly.gens[0]:x})
_, factors = factor_list(p, x)
result = _choose_factor(factors, x, ex)
return result
def _minpoly_compose(ex, x, dom):
"""
Computes the minimal polynomial of an algebraic element
using operations on minimal polynomials
Examples
========
>>> from sympy import minimal_polynomial, sqrt, Rational
>>> from sympy.abc import x, y
>>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), x, compose=True)
x**2 - 2*x - 1
>>> minimal_polynomial(sqrt(y) + 1/y, x, compose=True)
x**2*y**2 - 2*x*y - y**3 + 1
"""
if ex.is_Rational:
return ex.q*x - ex.p
if ex is I:
_, factors = factor_list(x**2 + 1, x, domain=dom)
return x**2 + 1 if len(factors) == 1 else x - I
if ex is GoldenRatio:
_, factors = factor_list(x**2 - x - 1, x, domain=dom)
if len(factors) == 1:
return x**2 - x - 1
else:
return _choose_factor(factors, x, (1 + sqrt(5))/2, dom=dom)
if ex is TribonacciConstant:
_, factors = factor_list(x**3 - x**2 - x - 1, x, domain=dom)
if len(factors) == 1:
return x**3 - x**2 - x - 1
else:
fac = (1 + cbrt(19 - 3*sqrt(33)) + cbrt(19 + 3*sqrt(33))) / 3
return _choose_factor(factors, x, fac, dom=dom)
if hasattr(dom, 'symbols') and ex in dom.symbols:
return x - ex
if dom.is_QQ and _is_sum_surds(ex):
# eliminate the square roots
ex -= x
while 1:
ex1 = _separate_sq(ex)
if ex1 is ex:
return ex
else:
ex = ex1
if ex.is_Add:
res = _minpoly_add(x, dom, *ex.args)
elif ex.is_Mul:
f = Factors(ex).factors
r = sift(f.items(), lambda itx: itx[0].is_Rational and itx[1].is_Rational)
if r[True] and dom == QQ:
ex1 = Mul(*[bx**ex for bx, ex in r[False] + r[None]])
r1 = dict(r[True])
dens = [y.q for y in r1.values()]
lcmdens = reduce(lcm, dens, 1)
neg1 = S.NegativeOne
expn1 = r1.pop(neg1, S.Zero)
nums = [base**(y.p*lcmdens // y.q) for base, y in r1.items()]
ex2 = Mul(*nums)
mp1 = minimal_polynomial(ex1, x)
# use the fact that in SymPy canonicalization products of integers
# raised to rational powers are organized in relatively prime
# bases, and that in ``base**(n/d)`` a perfect power is
# simplified with the root
# Powers of -1 have to be treated separately to preserve sign.
mp2 = ex2.q*x**lcmdens - ex2.p*neg1**(expn1*lcmdens)
ex2 = neg1**expn1 * ex2**Rational(1, lcmdens)
res = _minpoly_op_algebraic_element(Mul, ex1, ex2, x, dom, mp1=mp1, mp2=mp2)
else:
res = _minpoly_mul(x, dom, *ex.args)
elif ex.is_Pow:
res = _minpoly_pow(ex.base, ex.exp, x, dom)
elif ex.__class__ is sin:
res = _minpoly_sin(ex, x)
elif ex.__class__ is cos:
res = _minpoly_cos(ex, x)
elif ex.__class__ is exp:
res = _minpoly_exp(ex, x)
elif ex.__class__ is CRootOf:
res = _minpoly_rootof(ex, x)
else:
raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex)
return res
@public
def minimal_polynomial(ex, x=None, compose=True, polys=False, domain=None):
"""
Computes the minimal polynomial of an algebraic element.
Parameters
==========
ex : Expr
Element or expression whose minimal polynomial is to be calculated.
x : Symbol, optional
Independent variable of the minimal polynomial
compose : boolean, optional (default=True)
Method to use for computing minimal polynomial. If ``compose=True``
(default) then ``_minpoly_compose`` is used, if ``compose=False`` then
groebner bases are used.
polys : boolean, optional (default=False)
If ``True`` returns a ``Poly`` object else an ``Expr`` object.
domain : Domain, optional
Ground domain
Notes
=====
By default (``compose=True``) the minimal polynomials of the subexpressions of ``ex``
are computed, then the arithmetic operations on them are performed using the resultant
and factorization.
If ``compose=False``, a bottom-up algorithm is used with ``groebner``.
The default algorithm stalls less frequently.
If no ground domain is given, it will be generated automatically from the expression.
Examples
========
>>> from sympy import minimal_polynomial, sqrt, solve, QQ
>>> from sympy.abc import x, y
>>> minimal_polynomial(sqrt(2), x)
x**2 - 2
>>> minimal_polynomial(sqrt(2), x, domain=QQ.algebraic_field(sqrt(2)))
x - sqrt(2)
>>> minimal_polynomial(sqrt(2) + sqrt(3), x)
x**4 - 10*x**2 + 1
>>> minimal_polynomial(solve(x**3 + x + 3)[0], x)
x**3 + x + 3
>>> minimal_polynomial(sqrt(y), x)
x**2 - y
"""
from sympy.polys.polytools import degree
from sympy.polys.domains import FractionField
from sympy.core.basic import preorder_traversal
ex = sympify(ex)
if ex.is_number:
# not sure if it's always needed but try it for numbers (issue 8354)
ex = _mexpand(ex, recursive=True)
for expr in preorder_traversal(ex):
if expr.is_AlgebraicNumber:
compose = False
break
if x is not None:
x, cls = sympify(x), Poly
else:
x, cls = Dummy('x'), PurePoly
if not domain:
if ex.free_symbols:
domain = FractionField(QQ, list(ex.free_symbols))
else:
domain = QQ
if hasattr(domain, 'symbols') and x in domain.symbols:
raise GeneratorsError("the variable %s is an element of the ground "
"domain %s" % (x, domain))
if compose:
result = _minpoly_compose(ex, x, domain)
result = result.primitive()[1]
c = result.coeff(x**degree(result, x))
if c.is_negative:
result = expand_mul(-result)
return cls(result, x, field=True) if polys else result.collect(x)
if not domain.is_QQ:
raise NotImplementedError("groebner method only works for QQ")
result = _minpoly_groebner(ex, x, cls)
return cls(result, x, field=True) if polys else result.collect(x)
def _minpoly_groebner(ex, x, cls):
"""
Computes the minimal polynomial of an algebraic number
using Groebner bases
Examples
========
>>> from sympy import minimal_polynomial, sqrt, Rational
>>> from sympy.abc import x
>>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), x, compose=False)
x**2 - 2*x - 1
"""
from sympy.polys.polytools import degree
from sympy.core.function import expand_multinomial
generator = numbered_symbols('a', cls=Dummy)
mapping, symbols = {}, {}
def update_mapping(ex, exp, base=None):
a = next(generator)
symbols[ex] = a
if base is not None:
mapping[ex] = a**exp + base
else:
mapping[ex] = exp.as_expr(a)
return a
def bottom_up_scan(ex):
if ex.is_Atom:
if ex is S.ImaginaryUnit:
if ex not in mapping:
return update_mapping(ex, 2, 1)
else:
return symbols[ex]
elif ex.is_Rational:
return ex
elif ex.is_Add:
return Add(*[ bottom_up_scan(g) for g in ex.args ])
elif ex.is_Mul:
return Mul(*[ bottom_up_scan(g) for g in ex.args ])
elif ex.is_Pow:
if ex.exp.is_Rational:
if ex.exp < 0 and ex.base.is_Add:
coeff, terms = ex.base.as_coeff_add()
elt, _ = primitive_element(terms, polys=True)
alg = ex.base - coeff
# XXX: turn this into eval()
inverse = invert(elt.gen + coeff, elt).as_expr()
base = inverse.subs(elt.gen, alg).expand()
if ex.exp == -1:
return bottom_up_scan(base)
else:
ex = base**(-ex.exp)
if not ex.exp.is_Integer:
base, exp = (
ex.base**ex.exp.p).expand(), Rational(1, ex.exp.q)
else:
base, exp = ex.base, ex.exp
base = bottom_up_scan(base)
expr = base**exp
if expr not in mapping:
return update_mapping(expr, 1/exp, -base)
else:
return symbols[expr]
elif ex.is_AlgebraicNumber:
if ex.root not in mapping:
return update_mapping(ex.root, ex.minpoly)
else:
return symbols[ex.root]
raise NotAlgebraic("%s doesn't seem to be an algebraic number" % ex)
def simpler_inverse(ex):
"""
Returns True if the minimal polynomial algorithm is likely to
work better with the inverse of ``ex``.
"""
if ex.is_Pow:
if (1/ex.exp).is_integer and ex.exp < 0:
if ex.base.is_Add:
return True
if ex.is_Mul:
hit = True
for p in ex.args:
if p.is_Add:
return False
if p.is_Pow:
if p.base.is_Add and p.exp > 0:
return False
if hit:
return True
return False
inverted = False
ex = expand_multinomial(ex)
if ex.is_AlgebraicNumber:
return ex.minpoly.as_expr(x)
elif ex.is_Rational:
result = ex.q*x - ex.p
else:
inverted = simpler_inverse(ex)
if inverted:
ex = ex**-1
res = None
if ex.is_Pow and (1/ex.exp).is_Integer:
n = 1/ex.exp
res = _minimal_polynomial_sq(ex.base, n, x)
elif _is_sum_surds(ex):
res = _minimal_polynomial_sq(ex, S.One, x)
if res is not None:
result = res
if res is None:
bus = bottom_up_scan(ex)
F = [x - bus] + list(mapping.values())
G = groebner(F, list(symbols.values()) + [x], order='lex')
_, factors = factor_list(G[-1])
# by construction G[-1] has root `ex`
result = _choose_factor(factors, x, ex)
if inverted:
result = _invertx(result, x)
if result.coeff(x**degree(result, x)) < 0:
result = expand_mul(-result)
return result
minpoly = minimal_polynomial
def _coeffs_generator(n):
"""Generate coefficients for `primitive_element()`. """
for coeffs in variations([1, -1, 2, -2, 3, -3], n, repetition=True):
# Two linear combinations with coeffs of opposite signs are
# opposites of each other. Hence it suffices to test only one.
if coeffs[0] > 0:
yield list(coeffs)
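# Sketch of the stream for ``n = 2``: only combinations whose first entry is
# positive survive the filter, so the generator should start roughly like
# [1, 1], [1, -1], [1, 2], [1, -2], [1, 3], [1, -3], [2, 1], ...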
@public
def primitive_element(extension, x=None, **args):
"""Construct a common number field for all extensions. """
if not extension:
raise ValueError("can't compute primitive element for empty extension")
if x is not None:
x, cls = sympify(x), Poly
else:
x, cls = Dummy('x'), PurePoly
if not args.get('ex', False):
gen, coeffs = extension[0], [1]
# XXX when minimal_polynomial is extended to work
# with AlgebraicNumbers this test can be removed
if isinstance(gen, AlgebraicNumber):
g = gen.minpoly.replace(x)
else:
g = minimal_polynomial(gen, x, polys=True)
for ext in extension[1:]:
_, factors = factor_list(g, extension=ext)
g = _choose_factor(factors, x, gen)
s, _, g = g.sqf_norm()
gen += s*ext
coeffs.append(s)
if not args.get('polys', False):
return g.as_expr(), coeffs
else:
return cls(g), coeffs
generator = numbered_symbols('y', cls=Dummy)
F, Y = [], []
for ext in extension:
y = next(generator)
if ext.is_Poly:
if ext.is_univariate:
f = ext.as_expr(y)
else:
raise ValueError("expected minimal polynomial, got %s" % ext)
else:
f = minpoly(ext, y)
F.append(f)
Y.append(y)
coeffs_generator = args.get('coeffs', _coeffs_generator)
for coeffs in coeffs_generator(len(Y)):
f = x - sum([ c*y for c, y in zip(coeffs, Y)])
G = groebner(F + [f], Y + [x], order='lex', field=True)
H, g = G[:-1], cls(G[-1], x, domain='QQ')
for i, (h, y) in enumerate(zip(H, Y)):
try:
H[i] = Poly(y - h, x,
domain='QQ').all_coeffs() # XXX: composite=False
except CoercionFailed: # pragma: no cover
break # G is not a triangular set
else:
break
else: # pragma: no cover
raise RuntimeError("ran out of coefficient configurations")
_, g = g.clear_denoms()
if not args.get('polys', False):
return g.as_expr(), coeffs, H
else:
return g, coeffs, H
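# Typical usage (assuming x from sympy.abc): a primitive element for
# QQ(sqrt(2), sqrt(3)) should be sqrt(2) + sqrt(3), i.e. coefficients [1, 1]:
#
#     >>> from sympy import sqrt
#     >>> from sympy.abc import x
#     >>> primitive_element([sqrt(2), sqrt(3)], x)
#     (x**4 - 10*x**2 + 1, [1, 1])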
def is_isomorphism_possible(a, b):
"""Returns `True` if there is a chance for isomorphism. """
n = a.minpoly.degree()
m = b.minpoly.degree()
if m % n != 0:
return False
if n == m:
return True
da = a.minpoly.discriminant()
db = b.minpoly.discriminant()
i, k, half = 1, m//n, db//2
while True:
p = sieve[i]
P = p**k
if P > half:
break
if ((da % p) % 2) and not (db % P):
return False
i += 1
return True
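# For instance, an element of degree 2 (such as sqrt(2)) cannot embed into the
# field generated by an element of degree 3 (such as cbrt(2)): 3 % 2 != 0, so
# the function returns False before the discriminant sieve is even consulted.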
def field_isomorphism_pslq(a, b):
"""Construct field isomorphism using PSLQ algorithm. """
if not a.root.is_real or not b.root.is_real:
raise NotImplementedError("PSLQ doesn't support complex coefficients")
f = a.minpoly
g = b.minpoly.replace(f.gen)
n, m, prev = 100, b.minpoly.degree(), None
for i in range(1, 5):
A = a.root.evalf(n)
B = b.root.evalf(n)
basis = [1, B] + [ B**i for i in range(2, m) ] + [A]
dps, mp.dps = mp.dps, n
coeffs = pslq(basis, maxcoeff=int(1e10), maxsteps=1000)
mp.dps = dps
if coeffs is None:
break
if coeffs != prev:
prev = coeffs
else:
break
coeffs = [S(c)/coeffs[-1] for c in coeffs[:-1]]
while not coeffs[-1]:
coeffs.pop()
coeffs = list(reversed(coeffs))
h = Poly(coeffs, f.gen, domain='QQ')
if f.compose(h).rem(g).is_zero:
d, approx = len(coeffs) - 1, 0
for i, coeff in enumerate(coeffs):
approx += coeff*B**(d - i)
if A*approx < 0:
return [ -c for c in coeffs ]
else:
return coeffs
elif f.compose(-h).rem(g).is_zero:
return [ -c for c in coeffs ]
else:
n *= 2
return None
def field_isomorphism_factor(a, b):
"""Construct field isomorphism via factorization. """
_, factors = factor_list(a.minpoly, extension=b)
for f, _ in factors:
if f.degree() == 1:
coeffs = f.rep.TC().to_sympy_list()
d, terms = len(coeffs) - 1, []
for i, coeff in enumerate(coeffs):
terms.append(coeff*b.root**(d - i))
root = Add(*terms)
if (a.root - root).evalf(chop=True) == 0:
return coeffs
if (a.root + root).evalf(chop=True) == 0:
return [-c for c in coeffs]
return None
@public
def field_isomorphism(a, b, **args):
"""Construct an isomorphism between two number fields. """
a, b = sympify(a), sympify(b)
if not a.is_AlgebraicNumber:
a = AlgebraicNumber(a)
if not b.is_AlgebraicNumber:
b = AlgebraicNumber(b)
if a == b:
return a.coeffs()
n = a.minpoly.degree()
m = b.minpoly.degree()
if n == 1:
return [a.root]
if m % n != 0:
return None
if args.get('fast', True):
try:
result = field_isomorphism_pslq(a, b)
if result is not None:
return result
except NotImplementedError:
pass
return field_isomorphism_factor(a, b)
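# For example, sqrt(2) lies in QQ(sqrt(2) + sqrt(3)): writing b = sqrt(2) +
# sqrt(3), one has sqrt(2) = b**3/2 - 9*b/2, so (highest power first) the
# coefficients should come out as:
#
#     >>> from sympy import sqrt
#     >>> field_isomorphism(sqrt(2), sqrt(2) + sqrt(3))
#     [1/2, 0, -9/2, 0]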
@public
def to_number_field(extension, theta=None, **args):
"""Express `extension` in the field generated by `theta`. """
gen = args.get('gen')
if hasattr(extension, '__iter__'):
extension = list(extension)
else:
extension = [extension]
if len(extension) == 1 and type(extension[0]) is tuple:
return AlgebraicNumber(extension[0])
minpoly, coeffs = primitive_element(extension, gen, polys=True)
root = sum([ coeff*ext for coeff, ext in zip(coeffs, extension) ])
if theta is None:
return AlgebraicNumber((minpoly, root))
else:
theta = sympify(theta)
if not theta.is_AlgebraicNumber:
theta = AlgebraicNumber(theta, gen=gen)
coeffs = field_isomorphism(root, theta)
if coeffs is not None:
return AlgebraicNumber(theta, coeffs)
else:
raise IsomorphismFailed(
"%s is not in a subfield of %s" % (root, theta.root))
class IntervalPrinter(MpmathPrinter, LambdaPrinter):
"""Use ``lambda`` printer but print numbers as ``mpi`` intervals. """
def _print_Integer(self, expr):
return "mpi('%s')" % super(PythonCodePrinter, self)._print_Integer(expr)
def _print_Rational(self, expr):
return "mpi('%s')" % super(PythonCodePrinter, self)._print_Rational(expr)
def _print_Half(self, expr):
return "mpi('%s')" % super(PythonCodePrinter, self)._print_Rational(expr)
def _print_Pow(self, expr):
return super(MpmathPrinter, self)._print_Pow(expr, rational=True)
@public
def isolate(alg, eps=None, fast=False):
"""Give a rational isolating interval for an algebraic number. """
alg = sympify(alg)
if alg.is_Rational:
return (alg, alg)
elif not alg.is_real:
raise NotImplementedError(
"complex algebraic numbers are not supported")
func = lambdify((), alg, modules="mpmath", printer=IntervalPrinter())
poly = minpoly(alg, polys=True)
intervals = poly.intervals(sqf=True)
dps, done = mp.dps, False
try:
while not done:
alg = func()
for a, b in intervals:
if a <= alg.a and alg.b <= b:
done = True
break
else:
mp.dps *= 2
finally:
mp.dps = dps
if eps is not None:
a, b = poly.refine_root(a, b, eps=eps, fast=fast)
return (a, b)
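# A small sketch of the expected behaviour: for sqrt(2) the square-free
# intervals of x**2 - 2 should give the isolating interval (1, 2), which
# ``eps`` can then refine further:
#
#     >>> from sympy import sqrt
#     >>> isolate(sqrt(2))
#     (1, 2)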
|
f4cc6c1ce1a4941a0cb50e2650160a8878d015f0b18602e2798e0bd84153f7e4 | """Euclidean algorithms, GCDs, LCMs and polynomial remainder sequences. """
from __future__ import print_function, division
from sympy.ntheory import nextprime
from sympy.polys.densearith import (
dup_sub_mul,
dup_neg, dmp_neg,
dmp_add,
dmp_sub,
dup_mul, dmp_mul,
dmp_pow,
dup_div, dmp_div,
dup_rem,
dup_quo, dmp_quo,
dup_prem, dmp_prem,
dup_mul_ground, dmp_mul_ground,
dmp_mul_term,
dup_quo_ground, dmp_quo_ground,
dup_max_norm, dmp_max_norm)
from sympy.polys.densebasic import (
dup_strip, dmp_raise,
dmp_zero, dmp_one, dmp_ground,
dmp_one_p, dmp_zero_p,
dmp_zeros,
dup_degree, dmp_degree, dmp_degree_in,
dup_LC, dmp_LC, dmp_ground_LC,
dmp_multi_deflate, dmp_inflate,
dup_convert, dmp_convert,
dmp_apply_pairs)
from sympy.polys.densetools import (
dup_clear_denoms, dmp_clear_denoms,
dup_diff, dmp_diff,
dup_eval, dmp_eval, dmp_eval_in,
dup_trunc, dmp_ground_trunc,
dup_monic, dmp_ground_monic,
dup_primitive, dmp_ground_primitive,
dup_extract, dmp_ground_extract)
from sympy.polys.galoistools import (
gf_int, gf_crt)
from sympy.polys.polyconfig import query
from sympy.polys.polyerrors import (
MultivariatePolynomialError,
HeuristicGCDFailed,
HomomorphismFailed,
NotInvertible,
DomainError)
def dup_half_gcdex(f, g, K):
"""
Half extended Euclidean algorithm in `F[x]`.
Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
>>> g = x**3 + x**2 - 4*x - 4
>>> R.dup_half_gcdex(f, g)
(-1/5*x + 3/5, x + 1)
"""
if not K.is_Field:
raise DomainError("can't compute half extended GCD over %s" % K)
a, b = [K.one], []
while g:
q, r = dup_div(f, g, K)
f, g = g, r
a, b = b, dup_sub_mul(a, q, b, K)
a = dup_quo_ground(a, dup_LC(f, K), K)
f = dup_monic(f, K)
return a, f
def dmp_half_gcdex(f, g, u, K):
"""
Half extended Euclidean algorithm in `F[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
"""
if not u:
return dup_half_gcdex(f, g, K)
else:
raise MultivariatePolynomialError(f, g)
def dup_gcdex(f, g, K):
"""
Extended Euclidean algorithm in `F[x]`.
Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
>>> g = x**3 + x**2 - 4*x - 4
>>> R.dup_gcdex(f, g)
(-1/5*x + 3/5, 1/5*x**2 - 6/5*x + 2, x + 1)
"""
s, h = dup_half_gcdex(f, g, K)
F = dup_sub_mul(h, s, f, K)
t = dup_quo(F, g, K)
return s, t, h
def dmp_gcdex(f, g, u, K):
"""
Extended Euclidean algorithm in `F[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
"""
if not u:
return dup_gcdex(f, g, K)
else:
raise MultivariatePolynomialError(f, g)
def dup_invert(f, g, K):
"""
Compute multiplicative inverse of `f` modulo `g` in `F[x]`.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> f = x**2 - 1
>>> g = 2*x - 1
>>> h = x - 1
>>> R.dup_invert(f, g)
-4/3
>>> R.dup_invert(f, h)
Traceback (most recent call last):
...
NotInvertible: zero divisor
"""
s, h = dup_half_gcdex(f, g, K)
if h == [K.one]:
return dup_rem(s, g, K)
else:
raise NotInvertible("zero divisor")
def dmp_invert(f, g, u, K):
"""
Compute multiplicative inverse of `f` modulo `g` in `F[X]`.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
"""
if not u:
return dup_invert(f, g, K)
else:
raise MultivariatePolynomialError(f, g)
def dup_euclidean_prs(f, g, K):
"""
Euclidean polynomial remainder sequence (PRS) in `K[x]`.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5
>>> g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21
>>> prs = R.dup_euclidean_prs(f, g)
>>> prs[0]
x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5
>>> prs[1]
3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21
>>> prs[2]
-5/9*x**4 + 1/9*x**2 - 1/3
>>> prs[3]
-117/25*x**2 - 9*x + 441/25
>>> prs[4]
233150/19773*x - 102500/6591
>>> prs[5]
-1288744821/543589225
"""
prs = [f, g]
h = dup_rem(f, g, K)
while h:
prs.append(h)
f, g = g, h
h = dup_rem(f, g, K)
return prs
def dmp_euclidean_prs(f, g, u, K):
"""
Euclidean polynomial remainder sequence (PRS) in `K[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
"""
if not u:
return dup_euclidean_prs(f, g, K)
else:
raise MultivariatePolynomialError(f, g)
def dup_primitive_prs(f, g, K):
"""
Primitive polynomial remainder sequence (PRS) in `K[x]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5
>>> g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21
>>> prs = R.dup_primitive_prs(f, g)
>>> prs[0]
x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5
>>> prs[1]
3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21
>>> prs[2]
-5*x**4 + x**2 - 3
>>> prs[3]
13*x**2 + 25*x - 49
>>> prs[4]
4663*x - 6150
>>> prs[5]
1
"""
prs = [f, g]
_, h = dup_primitive(dup_prem(f, g, K), K)
while h:
prs.append(h)
f, g = g, h
_, h = dup_primitive(dup_prem(f, g, K), K)
return prs
def dmp_primitive_prs(f, g, u, K):
"""
Primitive polynomial remainder sequence (PRS) in `K[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
"""
if not u:
return dup_primitive_prs(f, g, K)
else:
raise MultivariatePolynomialError(f, g)
def dup_inner_subresultants(f, g, K):
"""
Subresultant PRS algorithm in `K[x]`.
Computes the subresultant polynomial remainder sequence (PRS)
and the non-zero scalar subresultants of `f` and `g`.
By [1] Thm. 3, these are the constants ``-c`` (the minus sign is used
to optimize computation of the sign).
The first subdeterminant is set to 1 by convention to match
the polynomial and the scalar subdeterminants.
If ``deg(f) < deg(g)``, the subresultants of ``(g, f)`` are computed.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_inner_subresultants(x**2 + 1, x**2 - 1)
([x**2 + 1, x**2 - 1, -2], [1, 1, 4])
References
==========
.. [1] W.S. Brown, The Subresultant PRS Algorithm.
ACM Transaction of Mathematical Software 4 (1978) 237-249
"""
n = dup_degree(f)
m = dup_degree(g)
if n < m:
f, g = g, f
n, m = m, n
if not f:
return [], []
if not g:
return [f], [K.one]
R = [f, g]
d = n - m
b = (-K.one)**(d + 1)
h = dup_prem(f, g, K)
h = dup_mul_ground(h, b, K)
lc = dup_LC(g, K)
c = lc**d
# Conventional first scalar subdeterminant is 1
S = [K.one, c]
c = -c
while h:
k = dup_degree(h)
R.append(h)
f, g, m, d = g, h, k, m - k
b = -lc * c**d
h = dup_prem(f, g, K)
h = dup_quo_ground(h, b, K)
lc = dup_LC(g, K)
if d > 1: # abnormal case
q = c**(d - 1)
c = K.quo((-lc)**d, q)
else:
c = -lc
S.append(-c)
return R, S
def dup_subresultants(f, g, K):
"""
Computes subresultant PRS of two polynomials in `K[x]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_subresultants(x**2 + 1, x**2 - 1)
[x**2 + 1, x**2 - 1, -2]
"""
return dup_inner_subresultants(f, g, K)[0]
def dup_prs_resultant(f, g, K):
"""
Resultant algorithm in `K[x]` using subresultant PRS.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_prs_resultant(x**2 + 1, x**2 - 1)
(4, [x**2 + 1, x**2 - 1, -2])
"""
if not f or not g:
return (K.zero, [])
R, S = dup_inner_subresultants(f, g, K)
if dup_degree(R[-1]) > 0:
return (K.zero, R)
return S[-1], R
def dup_resultant(f, g, K, includePRS=False):
"""
Computes resultant of two polynomials in `K[x]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_resultant(x**2 + 1, x**2 - 1)
4
"""
if includePRS:
return dup_prs_resultant(f, g, K)
return dup_prs_resultant(f, g, K)[0]
def dmp_inner_subresultants(f, g, u, K):
"""
Subresultant PRS algorithm in `K[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 3*x**2*y - y**3 - 4
>>> g = x**2 + x*y**3 - 9
>>> a = 3*x*y**4 + y**3 - 27*y + 4
>>> b = -3*y**10 - 12*y**7 + y**6 - 54*y**4 + 8*y**3 + 729*y**2 - 216*y + 16
>>> prs = [f, g, a, b]
>>> sres = [[1], [1], [3, 0, 0, 0, 0], [-3, 0, 0, -12, 1, 0, -54, 8, 729, -216, 16]]
>>> R.dmp_inner_subresultants(f, g) == (prs, sres)
True
"""
if not u:
return dup_inner_subresultants(f, g, K)
n = dmp_degree(f, u)
m = dmp_degree(g, u)
if n < m:
f, g = g, f
n, m = m, n
if dmp_zero_p(f, u):
return [], []
v = u - 1
if dmp_zero_p(g, u):
return [f], [dmp_ground(K.one, v)]
R = [f, g]
d = n - m
b = dmp_pow(dmp_ground(-K.one, v), d + 1, v, K)
h = dmp_prem(f, g, u, K)
h = dmp_mul_term(h, b, 0, u, K)
lc = dmp_LC(g, K)
c = dmp_pow(lc, d, v, K)
S = [dmp_ground(K.one, v), c]
c = dmp_neg(c, v, K)
while not dmp_zero_p(h, u):
k = dmp_degree(h, u)
R.append(h)
f, g, m, d = g, h, k, m - k
b = dmp_mul(dmp_neg(lc, v, K),
dmp_pow(c, d, v, K), v, K)
h = dmp_prem(f, g, u, K)
h = [ dmp_quo(ch, b, v, K) for ch in h ]
lc = dmp_LC(g, K)
if d > 1:
p = dmp_pow(dmp_neg(lc, v, K), d, v, K)
q = dmp_pow(c, d - 1, v, K)
c = dmp_quo(p, q, v, K)
else:
c = dmp_neg(lc, v, K)
S.append(dmp_neg(c, v, K))
return R, S
def dmp_subresultants(f, g, u, K):
"""
Computes subresultant PRS of two polynomials in `K[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 3*x**2*y - y**3 - 4
>>> g = x**2 + x*y**3 - 9
>>> a = 3*x*y**4 + y**3 - 27*y + 4
>>> b = -3*y**10 - 12*y**7 + y**6 - 54*y**4 + 8*y**3 + 729*y**2 - 216*y + 16
>>> R.dmp_subresultants(f, g) == [f, g, a, b]
True
"""
return dmp_inner_subresultants(f, g, u, K)[0]
def dmp_prs_resultant(f, g, u, K):
"""
Resultant algorithm in `K[X]` using subresultant PRS.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 3*x**2*y - y**3 - 4
>>> g = x**2 + x*y**3 - 9
>>> a = 3*x*y**4 + y**3 - 27*y + 4
>>> b = -3*y**10 - 12*y**7 + y**6 - 54*y**4 + 8*y**3 + 729*y**2 - 216*y + 16
>>> res, prs = R.dmp_prs_resultant(f, g)
>>> res == b # resultant has n-1 variables
False
>>> res == b.drop(x)
True
>>> prs == [f, g, a, b]
True
"""
if not u:
return dup_prs_resultant(f, g, K)
if dmp_zero_p(f, u) or dmp_zero_p(g, u):
return (dmp_zero(u - 1), [])
R, S = dmp_inner_subresultants(f, g, u, K)
if dmp_degree(R[-1], u) > 0:
return (dmp_zero(u - 1), R)
return S[-1], R
def dmp_zz_modular_resultant(f, g, p, u, K):
"""
Compute resultant of `f` and `g` modulo a prime `p`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = x + y + 2
>>> g = 2*x*y + x + 3
>>> R.dmp_zz_modular_resultant(f, g, 5)
-2*y**2 + 1
"""
if not u:
return gf_int(dup_prs_resultant(f, g, K)[0] % p, p)
v = u - 1
n = dmp_degree(f, u)
m = dmp_degree(g, u)
N = dmp_degree_in(f, 1, u)
M = dmp_degree_in(g, 1, u)
B = n*M + m*N
D, a = [K.one], -K.one
r = dmp_zero(v)
while dup_degree(D) <= B:
while True:
a += K.one
if a == p:
raise HomomorphismFailed('no luck')
F = dmp_eval_in(f, gf_int(a, p), 1, u, K)
if dmp_degree(F, v) == n:
G = dmp_eval_in(g, gf_int(a, p), 1, u, K)
if dmp_degree(G, v) == m:
break
R = dmp_zz_modular_resultant(F, G, p, v, K)
e = dmp_eval(r, a, v, K)
if not v:
R = dup_strip([R])
e = dup_strip([e])
else:
R = [R]
e = [e]
d = K.invert(dup_eval(D, a, K), p)
d = dup_mul_ground(D, d, K)
d = dmp_raise(d, v, 0, K)
c = dmp_mul(d, dmp_sub(R, e, v, K), v, K)
r = dmp_add(r, c, v, K)
r = dmp_ground_trunc(r, p, v, K)
D = dup_mul(D, [K.one, -a], K)
D = dup_trunc(D, p, K)
return r
def _collins_crt(r, R, P, p, K):
"""Wrapper of CRT for Collins's resultant algorithm. """
return gf_int(gf_crt([r, R], [P, p], K), P*p)
def dmp_zz_collins_resultant(f, g, u, K):
"""
Collins's modular resultant algorithm in `Z[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = x + y + 2
>>> g = 2*x*y + x + 3
>>> R.dmp_zz_collins_resultant(f, g)
-2*y**2 - 5*y + 1
"""
n = dmp_degree(f, u)
m = dmp_degree(g, u)
if n < 0 or m < 0:
return dmp_zero(u - 1)
A = dmp_max_norm(f, u, K)
B = dmp_max_norm(g, u, K)
a = dmp_ground_LC(f, u, K)
b = dmp_ground_LC(g, u, K)
v = u - 1
B = K(2)*K.factorial(K(n + m))*A**m*B**n
r, p, P = dmp_zero(v), K.one, K.one
while P <= B:
p = K(nextprime(p))
while not (a % p) or not (b % p):
p = K(nextprime(p))
F = dmp_ground_trunc(f, p, u, K)
G = dmp_ground_trunc(g, p, u, K)
try:
R = dmp_zz_modular_resultant(F, G, p, u, K)
except HomomorphismFailed:
continue
if K.is_one(P):
r = R
else:
r = dmp_apply_pairs(r, R, _collins_crt, (P, p, K), v, K)
P *= p
return r
def dmp_qq_collins_resultant(f, g, u, K0):
"""
Collins's modular resultant algorithm in `Q[X]`.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x,y = ring("x,y", QQ)
>>> f = QQ(1,2)*x + y + QQ(2,3)
>>> g = 2*x*y + x + 3
>>> R.dmp_qq_collins_resultant(f, g)
-2*y**2 - 7/3*y + 5/6
"""
n = dmp_degree(f, u)
m = dmp_degree(g, u)
if n < 0 or m < 0:
return dmp_zero(u - 1)
K1 = K0.get_ring()
cf, f = dmp_clear_denoms(f, u, K0, K1)
cg, g = dmp_clear_denoms(g, u, K0, K1)
f = dmp_convert(f, u, K0, K1)
g = dmp_convert(g, u, K0, K1)
r = dmp_zz_collins_resultant(f, g, u, K1)
r = dmp_convert(r, u - 1, K1, K0)
c = K0.convert(cf**m * cg**n, K1)
return dmp_quo_ground(r, c, u - 1, K0)
def dmp_resultant(f, g, u, K, includePRS=False):
"""
Computes resultant of two polynomials in `K[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 3*x**2*y - y**3 - 4
>>> g = x**2 + x*y**3 - 9
>>> R.dmp_resultant(f, g)
-3*y**10 - 12*y**7 + y**6 - 54*y**4 + 8*y**3 + 729*y**2 - 216*y + 16
"""
if not u:
return dup_resultant(f, g, K, includePRS=includePRS)
if includePRS:
return dmp_prs_resultant(f, g, u, K)
if K.is_Field:
if K.is_QQ and query('USE_COLLINS_RESULTANT'):
return dmp_qq_collins_resultant(f, g, u, K)
else:
if K.is_ZZ and query('USE_COLLINS_RESULTANT'):
return dmp_zz_collins_resultant(f, g, u, K)
return dmp_prs_resultant(f, g, u, K)[0]
def dup_discriminant(f, K):
"""
Computes discriminant of a polynomial in `K[x]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_discriminant(x**2 + 2*x + 3)
-8
"""
d = dup_degree(f)
if d <= 0:
return K.zero
else:
s = (-1)**((d*(d - 1)) // 2)
c = dup_LC(f, K)
r = dup_resultant(f, dup_diff(f, 1, K), K)
return K.quo(r, c*K(s))
def dmp_discriminant(f, u, K):
"""
Computes discriminant of a polynomial in `K[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y,z,t = ring("x,y,z,t", ZZ)
>>> R.dmp_discriminant(x**2*y + x*z + t)
-4*y*t + z**2
"""
if not u:
return dup_discriminant(f, K)
d, v = dmp_degree(f, u), u - 1
if d <= 0:
return dmp_zero(v)
else:
s = (-1)**((d*(d - 1)) // 2)
c = dmp_LC(f, K)
r = dmp_resultant(f, dmp_diff(f, 1, u, K), u, K)
c = dmp_mul_ground(c, K(s), v, K)
return dmp_quo(r, c, v, K)
def _dup_rr_trivial_gcd(f, g, K):
"""Handle trivial cases in GCD algorithm over a ring. """
if not (f or g):
return [], [], []
elif not f:
if K.is_nonnegative(dup_LC(g, K)):
return g, [], [K.one]
else:
return dup_neg(g, K), [], [-K.one]
elif not g:
if K.is_nonnegative(dup_LC(f, K)):
return f, [K.one], []
else:
return dup_neg(f, K), [-K.one], []
return None
def _dup_ff_trivial_gcd(f, g, K):
"""Handle trivial cases in GCD algorithm over a field. """
if not (f or g):
return [], [], []
elif not f:
return dup_monic(g, K), [], [dup_LC(g, K)]
elif not g:
return dup_monic(f, K), [dup_LC(f, K)], []
else:
return None
def _dmp_rr_trivial_gcd(f, g, u, K):
"""Handle trivial cases in GCD algorithm over a ring. """
zero_f = dmp_zero_p(f, u)
zero_g = dmp_zero_p(g, u)
if_contain_one = dmp_one_p(f, u, K) or dmp_one_p(g, u, K)
if zero_f and zero_g:
return tuple(dmp_zeros(3, u, K))
elif zero_f:
if K.is_nonnegative(dmp_ground_LC(g, u, K)):
return g, dmp_zero(u), dmp_one(u, K)
else:
return dmp_neg(g, u, K), dmp_zero(u), dmp_ground(-K.one, u)
elif zero_g:
if K.is_nonnegative(dmp_ground_LC(f, u, K)):
return f, dmp_one(u, K), dmp_zero(u)
else:
return dmp_neg(f, u, K), dmp_ground(-K.one, u), dmp_zero(u)
elif if_contain_one:
return dmp_one(u, K), f, g
elif query('USE_SIMPLIFY_GCD'):
return _dmp_simplify_gcd(f, g, u, K)
else:
return None
def _dmp_ff_trivial_gcd(f, g, u, K):
"""Handle trivial cases in GCD algorithm over a field. """
zero_f = dmp_zero_p(f, u)
zero_g = dmp_zero_p(g, u)
if zero_f and zero_g:
return tuple(dmp_zeros(3, u, K))
elif zero_f:
return (dmp_ground_monic(g, u, K),
dmp_zero(u),
dmp_ground(dmp_ground_LC(g, u, K), u))
elif zero_g:
return (dmp_ground_monic(f, u, K),
dmp_ground(dmp_ground_LC(f, u, K), u),
dmp_zero(u))
elif query('USE_SIMPLIFY_GCD'):
return _dmp_simplify_gcd(f, g, u, K)
else:
return None
def _dmp_simplify_gcd(f, g, u, K):
"""Try to eliminate `x_0` from GCD computation in `K[X]`. """
df = dmp_degree(f, u)
dg = dmp_degree(g, u)
if df > 0 and dg > 0:
return None
if not (df or dg):
F = dmp_LC(f, K)
G = dmp_LC(g, K)
else:
if not df:
F = dmp_LC(f, K)
G = dmp_content(g, u, K)
else:
F = dmp_content(f, u, K)
G = dmp_LC(g, K)
v = u - 1
h = dmp_gcd(F, G, v, K)
cff = [ dmp_quo(cf, h, v, K) for cf in f ]
cfg = [ dmp_quo(cg, h, v, K) for cg in g ]
return [h], cff, cfg
def dup_rr_prs_gcd(f, g, K):
"""
Computes polynomial GCD using subresultants over a ring.
Returns ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, ``cff = quo(f, h)``,
and ``cfg = quo(g, h)``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_rr_prs_gcd(x**2 - 1, x**2 - 3*x + 2)
(x - 1, x + 1, x - 2)
"""
result = _dup_rr_trivial_gcd(f, g, K)
if result is not None:
return result
fc, F = dup_primitive(f, K)
gc, G = dup_primitive(g, K)
c = K.gcd(fc, gc)
h = dup_subresultants(F, G, K)[-1]
_, h = dup_primitive(h, K)
if K.is_negative(dup_LC(h, K)):
c = -c
h = dup_mul_ground(h, c, K)
cff = dup_quo(f, h, K)
cfg = dup_quo(g, h, K)
return h, cff, cfg
def dup_ff_prs_gcd(f, g, K):
"""
Computes polynomial GCD using subresultants over a field.
Returns ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, ``cff = quo(f, h)``,
and ``cfg = quo(g, h)``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> R.dup_ff_prs_gcd(x**2 - 1, x**2 - 3*x + 2)
(x - 1, x + 1, x - 2)
"""
result = _dup_ff_trivial_gcd(f, g, K)
if result is not None:
return result
h = dup_subresultants(f, g, K)[-1]
h = dup_monic(h, K)
cff = dup_quo(f, h, K)
cfg = dup_quo(g, h, K)
return h, cff, cfg
def dmp_rr_prs_gcd(f, g, u, K):
"""
Computes polynomial GCD using subresultants over a ring.
Returns ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, ``cff = quo(f, h)``,
and ``cfg = quo(g, h)``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y, = ring("x,y", ZZ)
>>> f = x**2 + 2*x*y + y**2
>>> g = x**2 + x*y
>>> R.dmp_rr_prs_gcd(f, g)
(x + y, x + y, x)
"""
if not u:
return dup_rr_prs_gcd(f, g, K)
result = _dmp_rr_trivial_gcd(f, g, u, K)
if result is not None:
return result
fc, F = dmp_primitive(f, u, K)
gc, G = dmp_primitive(g, u, K)
h = dmp_subresultants(F, G, u, K)[-1]
c, _, _ = dmp_rr_prs_gcd(fc, gc, u - 1, K)
if K.is_negative(dmp_ground_LC(h, u, K)):
h = dmp_neg(h, u, K)
_, h = dmp_primitive(h, u, K)
h = dmp_mul_term(h, c, 0, u, K)
cff = dmp_quo(f, h, u, K)
cfg = dmp_quo(g, h, u, K)
return h, cff, cfg
def dmp_ff_prs_gcd(f, g, u, K):
"""
Computes polynomial GCD using subresultants over a field.
Returns ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, ``cff = quo(f, h)``,
and ``cfg = quo(g, h)``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x,y, = ring("x,y", QQ)
>>> f = QQ(1,2)*x**2 + x*y + QQ(1,2)*y**2
>>> g = x**2 + x*y
>>> R.dmp_ff_prs_gcd(f, g)
(x + y, 1/2*x + 1/2*y, x)
"""
if not u:
return dup_ff_prs_gcd(f, g, K)
result = _dmp_ff_trivial_gcd(f, g, u, K)
if result is not None:
return result
fc, F = dmp_primitive(f, u, K)
gc, G = dmp_primitive(g, u, K)
h = dmp_subresultants(F, G, u, K)[-1]
c, _, _ = dmp_ff_prs_gcd(fc, gc, u - 1, K)
_, h = dmp_primitive(h, u, K)
h = dmp_mul_term(h, c, 0, u, K)
h = dmp_ground_monic(h, u, K)
cff = dmp_quo(f, h, u, K)
cfg = dmp_quo(g, h, u, K)
return h, cff, cfg
HEU_GCD_MAX = 6
def _dup_zz_gcd_interpolate(h, x, K):
"""Interpolate polynomial GCD from integer GCD. """
f = []
while h:
g = h % x
if g > x // 2:
g -= x
f.insert(0, g)
h = (h - g) // x
return f
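# Small sketch of the base-``x`` digit recovery used by the heuristic GCD
# below: digits larger than ``x//2`` are wrapped into the negative range so
# the coefficients stay balanced, e.g. with the plain integer ground type:
#
#     >>> from sympy.polys.domains import ZZ
#     >>> _dup_zz_gcd_interpolate(ZZ(21), ZZ(10), ZZ)   # represents 2*x + 1
#     [2, 1]
#     >>> _dup_zz_gcd_interpolate(ZZ(19), ZZ(10), ZZ)   # represents 2*x - 1
#     [2, -1]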
def dup_zz_heu_gcd(f, g, K):
"""
Heuristic polynomial GCD in `Z[x]`.
Given univariate polynomials `f` and `g` in `Z[x]`, returns
their GCD and cofactors, i.e. polynomials ``h``, ``cff`` and ``cfg``
such that::
h = gcd(f, g), cff = quo(f, h) and cfg = quo(g, h)
The algorithm is purely heuristic which means it may fail to compute
the GCD. This will be signaled by raising an exception. In this case
you will need to switch to another GCD method.
The algorithm computes the polynomial GCD by evaluating polynomials
f and g at certain points and computing (fast) integer GCD of those
evaluations. The polynomial GCD is recovered from the integer image
by interpolation. The final step is to verify if the result is the
correct GCD. This gives cofactors as a side effect.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_zz_heu_gcd(x**2 - 1, x**2 - 3*x + 2)
(x - 1, x + 1, x - 2)
References
==========
.. [1] [Liao95]_
"""
result = _dup_rr_trivial_gcd(f, g, K)
if result is not None:
return result
df = dup_degree(f)
dg = dup_degree(g)
gcd, f, g = dup_extract(f, g, K)
if df == 0 or dg == 0:
return [gcd], f, g
f_norm = dup_max_norm(f, K)
g_norm = dup_max_norm(g, K)
B = K(2*min(f_norm, g_norm) + 29)
x = max(min(B, 99*K.sqrt(B)),
2*min(f_norm // abs(dup_LC(f, K)),
g_norm // abs(dup_LC(g, K))) + 2)
for i in range(0, HEU_GCD_MAX):
ff = dup_eval(f, x, K)
gg = dup_eval(g, x, K)
if ff and gg:
h = K.gcd(ff, gg)
cff = ff // h
cfg = gg // h
h = _dup_zz_gcd_interpolate(h, x, K)
h = dup_primitive(h, K)[1]
cff_, r = dup_div(f, h, K)
if not r:
cfg_, r = dup_div(g, h, K)
if not r:
h = dup_mul_ground(h, gcd, K)
return h, cff_, cfg_
cff = _dup_zz_gcd_interpolate(cff, x, K)
h, r = dup_div(f, cff, K)
if not r:
cfg_, r = dup_div(g, h, K)
if not r:
h = dup_mul_ground(h, gcd, K)
return h, cff, cfg_
cfg = _dup_zz_gcd_interpolate(cfg, x, K)
h, r = dup_div(g, cfg, K)
if not r:
cff_, r = dup_div(f, h, K)
if not r:
h = dup_mul_ground(h, gcd, K)
return h, cff_, cfg
x = 73794*x * K.sqrt(K.sqrt(x)) // 27011
raise HeuristicGCDFailed('no luck')
def _dmp_zz_gcd_interpolate(h, x, v, K):
"""Interpolate polynomial GCD from integer GCD. """
f = []
while not dmp_zero_p(h, v):
g = dmp_ground_trunc(h, x, v, K)
f.insert(0, g)
h = dmp_sub(h, g, v, K)
h = dmp_quo_ground(h, x, v, K)
if K.is_negative(dmp_ground_LC(f, v + 1, K)):
return dmp_neg(f, v + 1, K)
else:
return f
def dmp_zz_heu_gcd(f, g, u, K):
"""
Heuristic polynomial GCD in `Z[X]`.
Given multivariate polynomials `f` and `g` in `Z[X]`, returns
their GCD and cofactors, i.e. polynomials ``h``, ``cff`` and ``cfg``
such that::
h = gcd(f, g), cff = quo(f, h) and cfg = quo(g, h)
The algorithm is purely heuristic which means it may fail to compute
the GCD. This will be signaled by raising an exception. In this case
you will need to switch to another GCD method.
The algorithm computes the polynomial GCD by evaluating polynomials
f and g at certain points and computing (fast) integer GCD of those
evaluations. The polynomial GCD is recovered from the integer image
by interpolation. The evaluation process reduces f and g variable by
variable into a large integer. The final step is to verify if the
interpolated polynomial is the correct GCD. This gives cofactors of
the input polynomials as a side effect.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y, = ring("x,y", ZZ)
>>> f = x**2 + 2*x*y + y**2
>>> g = x**2 + x*y
>>> R.dmp_zz_heu_gcd(f, g)
(x + y, x + y, x)
References
==========
.. [1] [Liao95]_
"""
if not u:
return dup_zz_heu_gcd(f, g, K)
result = _dmp_rr_trivial_gcd(f, g, u, K)
if result is not None:
return result
gcd, f, g = dmp_ground_extract(f, g, u, K)
f_norm = dmp_max_norm(f, u, K)
g_norm = dmp_max_norm(g, u, K)
B = K(2*min(f_norm, g_norm) + 29)
x = max(min(B, 99*K.sqrt(B)),
2*min(f_norm // abs(dmp_ground_LC(f, u, K)),
g_norm // abs(dmp_ground_LC(g, u, K))) + 2)
for i in range(0, HEU_GCD_MAX):
ff = dmp_eval(f, x, u, K)
gg = dmp_eval(g, x, u, K)
v = u - 1
if not (dmp_zero_p(ff, v) or dmp_zero_p(gg, v)):
h, cff, cfg = dmp_zz_heu_gcd(ff, gg, v, K)
h = _dmp_zz_gcd_interpolate(h, x, v, K)
h = dmp_ground_primitive(h, u, K)[1]
cff_, r = dmp_div(f, h, u, K)
if dmp_zero_p(r, u):
cfg_, r = dmp_div(g, h, u, K)
if dmp_zero_p(r, u):
h = dmp_mul_ground(h, gcd, u, K)
return h, cff_, cfg_
cff = _dmp_zz_gcd_interpolate(cff, x, v, K)
h, r = dmp_div(f, cff, u, K)
if dmp_zero_p(r, u):
cfg_, r = dmp_div(g, h, u, K)
if dmp_zero_p(r, u):
h = dmp_mul_ground(h, gcd, u, K)
return h, cff, cfg_
cfg = _dmp_zz_gcd_interpolate(cfg, x, v, K)
h, r = dmp_div(g, cfg, u, K)
if dmp_zero_p(r, u):
cff_, r = dmp_div(f, h, u, K)
if dmp_zero_p(r, u):
h = dmp_mul_ground(h, gcd, u, K)
return h, cff_, cfg
x = 73794*x * K.sqrt(K.sqrt(x)) // 27011
raise HeuristicGCDFailed('no luck')
def dup_qq_heu_gcd(f, g, K0):
"""
Heuristic polynomial GCD in `Q[x]`.
Returns ``(h, cff, cfg)`` such that ``h = gcd(f, g)``,
``cff = quo(f, h)``, and ``cfg = quo(g, h)``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> f = QQ(1,2)*x**2 + QQ(7,4)*x + QQ(3,2)
>>> g = QQ(1,2)*x**2 + x
>>> R.dup_qq_heu_gcd(f, g)
(x + 2, 1/2*x + 3/4, 1/2*x)
"""
result = _dup_ff_trivial_gcd(f, g, K0)
if result is not None:
return result
K1 = K0.get_ring()
cf, f = dup_clear_denoms(f, K0, K1)
cg, g = dup_clear_denoms(g, K0, K1)
f = dup_convert(f, K0, K1)
g = dup_convert(g, K0, K1)
h, cff, cfg = dup_zz_heu_gcd(f, g, K1)
h = dup_convert(h, K1, K0)
c = dup_LC(h, K0)
h = dup_monic(h, K0)
cff = dup_convert(cff, K1, K0)
cfg = dup_convert(cfg, K1, K0)
cff = dup_mul_ground(cff, K0.quo(c, cf), K0)
cfg = dup_mul_ground(cfg, K0.quo(c, cg), K0)
return h, cff, cfg
def dmp_qq_heu_gcd(f, g, u, K0):
"""
Heuristic polynomial GCD in `Q[X]`.
    Returns ``(h, cff, cfg)`` such that ``h = gcd(f, g)``,
``cff = quo(f, h)``, and ``cfg = quo(g, h)``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x,y, = ring("x,y", QQ)
>>> f = QQ(1,4)*x**2 + x*y + y**2
>>> g = QQ(1,2)*x**2 + x*y
>>> R.dmp_qq_heu_gcd(f, g)
(x + 2*y, 1/4*x + 1/2*y, 1/2*x)
"""
result = _dmp_ff_trivial_gcd(f, g, u, K0)
if result is not None:
return result
K1 = K0.get_ring()
cf, f = dmp_clear_denoms(f, u, K0, K1)
cg, g = dmp_clear_denoms(g, u, K0, K1)
f = dmp_convert(f, u, K0, K1)
g = dmp_convert(g, u, K0, K1)
h, cff, cfg = dmp_zz_heu_gcd(f, g, u, K1)
h = dmp_convert(h, u, K1, K0)
c = dmp_ground_LC(h, u, K0)
h = dmp_ground_monic(h, u, K0)
cff = dmp_convert(cff, u, K1, K0)
cfg = dmp_convert(cfg, u, K1, K0)
cff = dmp_mul_ground(cff, K0.quo(c, cf), u, K0)
cfg = dmp_mul_ground(cfg, K0.quo(c, cg), u, K0)
return h, cff, cfg
def dup_inner_gcd(f, g, K):
"""
Computes polynomial GCD and cofactors of `f` and `g` in `K[x]`.
    Returns ``(h, cff, cfg)`` such that ``h = gcd(f, g)``,
``cff = quo(f, h)``, and ``cfg = quo(g, h)``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_inner_gcd(x**2 - 1, x**2 - 3*x + 2)
(x - 1, x + 1, x - 2)
"""
if not K.is_Exact:
try:
exact = K.get_exact()
except DomainError:
return [K.one], f, g
f = dup_convert(f, K, exact)
g = dup_convert(g, K, exact)
h, cff, cfg = dup_inner_gcd(f, g, exact)
h = dup_convert(h, exact, K)
cff = dup_convert(cff, exact, K)
cfg = dup_convert(cfg, exact, K)
return h, cff, cfg
elif K.is_Field:
if K.is_QQ and query('USE_HEU_GCD'):
try:
return dup_qq_heu_gcd(f, g, K)
except HeuristicGCDFailed:
pass
return dup_ff_prs_gcd(f, g, K)
else:
if K.is_ZZ and query('USE_HEU_GCD'):
try:
return dup_zz_heu_gcd(f, g, K)
except HeuristicGCDFailed:
pass
return dup_rr_prs_gcd(f, g, K)
def _dmp_inner_gcd(f, g, u, K):
"""Helper function for `dmp_inner_gcd()`. """
if not K.is_Exact:
try:
exact = K.get_exact()
except DomainError:
return dmp_one(u, K), f, g
f = dmp_convert(f, u, K, exact)
g = dmp_convert(g, u, K, exact)
h, cff, cfg = _dmp_inner_gcd(f, g, u, exact)
h = dmp_convert(h, u, exact, K)
cff = dmp_convert(cff, u, exact, K)
cfg = dmp_convert(cfg, u, exact, K)
return h, cff, cfg
elif K.is_Field:
if K.is_QQ and query('USE_HEU_GCD'):
try:
return dmp_qq_heu_gcd(f, g, u, K)
except HeuristicGCDFailed:
pass
return dmp_ff_prs_gcd(f, g, u, K)
else:
if K.is_ZZ and query('USE_HEU_GCD'):
try:
return dmp_zz_heu_gcd(f, g, u, K)
except HeuristicGCDFailed:
pass
return dmp_rr_prs_gcd(f, g, u, K)
def dmp_inner_gcd(f, g, u, K):
"""
Computes polynomial GCD and cofactors of `f` and `g` in `K[X]`.
    Returns ``(h, cff, cfg)`` such that ``h = gcd(f, g)``,
``cff = quo(f, h)``, and ``cfg = quo(g, h)``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y, = ring("x,y", ZZ)
>>> f = x**2 + 2*x*y + y**2
>>> g = x**2 + x*y
>>> R.dmp_inner_gcd(f, g)
(x + y, x + y, x)
"""
if not u:
return dup_inner_gcd(f, g, K)
J, (f, g) = dmp_multi_deflate((f, g), u, K)
h, cff, cfg = _dmp_inner_gcd(f, g, u, K)
return (dmp_inflate(h, J, u, K),
dmp_inflate(cff, J, u, K),
dmp_inflate(cfg, J, u, K))
def dup_gcd(f, g, K):
"""
Computes polynomial GCD of `f` and `g` in `K[x]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_gcd(x**2 - 1, x**2 - 3*x + 2)
x - 1
"""
return dup_inner_gcd(f, g, K)[0]
def dmp_gcd(f, g, u, K):
"""
Computes polynomial GCD of `f` and `g` in `K[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y, = ring("x,y", ZZ)
>>> f = x**2 + 2*x*y + y**2
>>> g = x**2 + x*y
>>> R.dmp_gcd(f, g)
x + y
"""
return dmp_inner_gcd(f, g, u, K)[0]
def dup_rr_lcm(f, g, K):
"""
Computes polynomial LCM over a ring in `K[x]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_rr_lcm(x**2 - 1, x**2 - 3*x + 2)
x**3 - 2*x**2 - x + 2
"""
fc, f = dup_primitive(f, K)
gc, g = dup_primitive(g, K)
c = K.lcm(fc, gc)
h = dup_quo(dup_mul(f, g, K),
dup_gcd(f, g, K), K)
return dup_mul_ground(h, c, K)
def dup_ff_lcm(f, g, K):
"""
Computes polynomial LCM over a field in `K[x]`.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> f = QQ(1,2)*x**2 + QQ(7,4)*x + QQ(3,2)
>>> g = QQ(1,2)*x**2 + x
>>> R.dup_ff_lcm(f, g)
x**3 + 7/2*x**2 + 3*x
"""
h = dup_quo(dup_mul(f, g, K),
dup_gcd(f, g, K), K)
return dup_monic(h, K)
def dup_lcm(f, g, K):
"""
Computes polynomial LCM of `f` and `g` in `K[x]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_lcm(x**2 - 1, x**2 - 3*x + 2)
x**3 - 2*x**2 - x + 2
"""
if K.is_Field:
return dup_ff_lcm(f, g, K)
else:
return dup_rr_lcm(f, g, K)
def dmp_rr_lcm(f, g, u, K):
"""
Computes polynomial LCM over a ring in `K[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y, = ring("x,y", ZZ)
>>> f = x**2 + 2*x*y + y**2
>>> g = x**2 + x*y
>>> R.dmp_rr_lcm(f, g)
x**3 + 2*x**2*y + x*y**2
"""
fc, f = dmp_ground_primitive(f, u, K)
gc, g = dmp_ground_primitive(g, u, K)
c = K.lcm(fc, gc)
h = dmp_quo(dmp_mul(f, g, u, K),
dmp_gcd(f, g, u, K), u, K)
return dmp_mul_ground(h, c, u, K)
def dmp_ff_lcm(f, g, u, K):
"""
Computes polynomial LCM over a field in `K[X]`.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x,y, = ring("x,y", QQ)
>>> f = QQ(1,4)*x**2 + x*y + y**2
>>> g = QQ(1,2)*x**2 + x*y
>>> R.dmp_ff_lcm(f, g)
x**3 + 4*x**2*y + 4*x*y**2
"""
h = dmp_quo(dmp_mul(f, g, u, K),
dmp_gcd(f, g, u, K), u, K)
return dmp_ground_monic(h, u, K)
def dmp_lcm(f, g, u, K):
"""
Computes polynomial LCM of `f` and `g` in `K[X]`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y, = ring("x,y", ZZ)
>>> f = x**2 + 2*x*y + y**2
>>> g = x**2 + x*y
>>> R.dmp_lcm(f, g)
x**3 + 2*x**2*y + x*y**2
"""
if not u:
return dup_lcm(f, g, K)
if K.is_Field:
return dmp_ff_lcm(f, g, u, K)
else:
return dmp_rr_lcm(f, g, u, K)
def dmp_content(f, u, K):
"""
Returns GCD of multivariate coefficients.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y, = ring("x,y", ZZ)
>>> R.dmp_content(2*x*y + 6*x + 4*y + 12)
2*y + 6
"""
cont, v = dmp_LC(f, K), u - 1
if dmp_zero_p(f, u):
return cont
for c in f[1:]:
cont = dmp_gcd(cont, c, v, K)
if dmp_one_p(cont, v, K):
break
if K.is_negative(dmp_ground_LC(cont, v, K)):
return dmp_neg(cont, v, K)
else:
return cont
def dmp_primitive(f, u, K):
"""
Returns multivariate content and a primitive polynomial.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y, = ring("x,y", ZZ)
>>> R.dmp_primitive(2*x*y + 6*x + 4*y + 12)
(2*y + 6, x + 2)
"""
cont, v = dmp_content(f, u, K), u - 1
if dmp_zero_p(f, u) or dmp_one_p(cont, v, K):
return cont, f
else:
return cont, [ dmp_quo(c, cont, v, K) for c in f ]
def dup_cancel(f, g, K, include=True):
"""
Cancel common factors in a rational function `f/g`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_cancel(2*x**2 - 2, x**2 - 2*x + 1)
(2*x + 2, x - 1)
"""
return dmp_cancel(f, g, 0, K, include=include)
def dmp_cancel(f, g, u, K, include=True):
"""
Cancel common factors in a rational function `f/g`.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_cancel(2*x**2 - 2, x**2 - 2*x + 1)
(2*x + 2, x - 1)
"""
K0 = None
if K.is_Field and K.has_assoc_Ring:
K0, K = K, K.get_ring()
cq, f = dmp_clear_denoms(f, u, K0, K, convert=True)
cp, g = dmp_clear_denoms(g, u, K0, K, convert=True)
else:
cp, cq = K.one, K.one
_, p, q = dmp_inner_gcd(f, g, u, K)
if K0 is not None:
_, cp, cq = K.cofactors(cp, cq)
p = dmp_convert(p, u, K, K0)
q = dmp_convert(q, u, K, K0)
K = K0
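    # normalize signs so that leading coefficients are non-negative, moving
    # any sign change into the numerator's content `cp`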
p_neg = K.is_negative(dmp_ground_LC(p, u, K))
q_neg = K.is_negative(dmp_ground_LC(q, u, K))
if p_neg and q_neg:
p, q = dmp_neg(p, u, K), dmp_neg(q, u, K)
elif p_neg:
cp, p = -cp, dmp_neg(p, u, K)
elif q_neg:
cp, q = -cp, dmp_neg(q, u, K)
if not include:
return cp, cq, p, q
p = dmp_mul_ground(p, cp, u, K)
q = dmp_mul_ground(q, cq, u, K)
return p, q
|
23b2aa01b16446a20ee4bf685aa7e9b7f0bc84cee6aea985b35c8ba96323d182 | """Polynomial factorization routines in characteristic zero. """
from __future__ import print_function, division
from sympy.polys.galoistools import (
gf_from_int_poly, gf_to_int_poly,
gf_lshift, gf_add_mul, gf_mul,
gf_div, gf_rem,
gf_gcdex,
gf_sqf_p,
gf_factor_sqf, gf_factor)
from sympy.polys.densebasic import (
dup_LC, dmp_LC, dmp_ground_LC,
dup_TC,
dup_convert, dmp_convert,
dup_degree, dmp_degree,
dmp_degree_in, dmp_degree_list,
dmp_from_dict,
dmp_zero_p,
dmp_one,
dmp_nest, dmp_raise,
dup_strip,
dmp_ground,
dup_inflate,
dmp_exclude, dmp_include,
dmp_inject, dmp_eject,
dup_terms_gcd, dmp_terms_gcd)
from sympy.polys.densearith import (
dup_neg, dmp_neg,
dup_add, dmp_add,
dup_sub, dmp_sub,
dup_mul, dmp_mul,
dup_sqr,
dmp_pow,
dup_div, dmp_div,
dup_quo, dmp_quo,
dmp_expand,
dmp_add_mul,
dup_sub_mul, dmp_sub_mul,
dup_lshift,
dup_max_norm, dmp_max_norm,
dup_l1_norm,
dup_mul_ground, dmp_mul_ground,
dup_quo_ground, dmp_quo_ground)
from sympy.polys.densetools import (
dup_clear_denoms, dmp_clear_denoms,
dup_trunc, dmp_ground_trunc,
dup_content,
dup_monic, dmp_ground_monic,
dup_primitive, dmp_ground_primitive,
dmp_eval_tail,
dmp_eval_in, dmp_diff_eval_in,
dmp_compose,
dup_shift, dup_mirror)
from sympy.polys.euclidtools import (
dmp_primitive,
dup_inner_gcd, dmp_inner_gcd)
from sympy.polys.sqfreetools import (
dup_sqf_p,
dup_sqf_norm, dmp_sqf_norm,
dup_sqf_part, dmp_sqf_part)
from sympy.polys.polyutils import _sort_factors
from sympy.polys.polyconfig import query
from sympy.polys.polyerrors import (
ExtraneousFactors, DomainError, CoercionFailed, EvaluationFailed)
from sympy.ntheory import nextprime, isprime, factorint
from sympy.utilities import subsets
from math import ceil as _ceil, log as _log
def dup_trial_division(f, factors, K):
"""
Determine multiplicities of factors for a univariate polynomial
using trial division.
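    For example (illustrative), trial-dividing ``x**3 + 2*x**2 + x`` by the
    candidate factors ``[x, x + 1]`` yields ``[(x, 1), (x + 1, 2)]``.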
"""
result = []
for factor in factors:
k = 0
while True:
q, r = dup_div(f, factor, K)
if not r:
f, k = q, k + 1
else:
break
result.append((factor, k))
return _sort_factors(result)
def dmp_trial_division(f, factors, u, K):
"""
Determine multiplicities of factors for a multivariate polynomial
using trial division.
"""
result = []
for factor in factors:
k = 0
while True:
q, r = dmp_div(f, factor, u, K)
if dmp_zero_p(r, u):
f, k = q, k + 1
else:
break
result.append((factor, k))
return _sort_factors(result)
def dup_zz_mignotte_bound(f, K):
"""Mignotte bound for univariate polynomials in `K[x]`. """
a = dup_max_norm(f, K)
b = abs(dup_LC(f, K))
n = dup_degree(f)
return K.sqrt(K(n + 1))*2**n*a*b
def dmp_zz_mignotte_bound(f, u, K):
"""Mignotte bound for multivariate polynomials in `K[X]`. """
a = dmp_max_norm(f, u, K)
b = abs(dmp_ground_LC(f, u, K))
n = sum(dmp_degree_list(f, u))
return K.sqrt(K(n + 1))*2**n*a*b
def dup_zz_hensel_step(m, f, g, h, s, t, K):
"""
One step in Hensel lifting in `Z[x]`.
Given positive integer `m` and `Z[x]` polynomials `f`, `g`, `h`, `s`
and `t` such that::
f = g*h (mod m)
s*g + t*h = 1 (mod m)
lc(f) is not a zero divisor (mod m)
lc(h) = 1
deg(f) = deg(g) + deg(h)
deg(s) < deg(h)
deg(t) < deg(g)
returns polynomials `G`, `H`, `S` and `T`, such that::
f = G*H (mod m**2)
S*G + T*H = 1 (mod m**2)
References
==========
.. [1] [Gathen99]_
"""
M = m**2
e = dup_sub_mul(f, g, h, K)
e = dup_trunc(e, M, K)
q, r = dup_div(dup_mul(s, e, K), h, K)
q = dup_trunc(q, M, K)
r = dup_trunc(r, M, K)
u = dup_add(dup_mul(t, e, K), dup_mul(q, g, K), K)
G = dup_trunc(dup_add(g, u, K), M, K)
H = dup_trunc(dup_add(h, r, K), M, K)
u = dup_add(dup_mul(s, G, K), dup_mul(t, H, K), K)
b = dup_trunc(dup_sub(u, [K.one], K), M, K)
c, d = dup_div(dup_mul(s, b, K), H, K)
c = dup_trunc(c, M, K)
d = dup_trunc(d, M, K)
u = dup_add(dup_mul(t, b, K), dup_mul(c, G, K), K)
S = dup_trunc(dup_sub(s, d, K), M, K)
T = dup_trunc(dup_sub(t, u, K), M, K)
return G, H, S, T
def dup_zz_hensel_lift(p, f, f_list, l, K):
"""
Multifactor Hensel lifting in `Z[x]`.
Given a prime `p`, polynomial `f` over `Z[x]` such that `lc(f)`
is a unit modulo `p`, monic pair-wise coprime polynomials `f_i`
over `Z[x]` satisfying::
f = lc(f) f_1 ... f_r (mod p)
and a positive integer `l`, returns a list of monic polynomials
`F_1`, `F_2`, ..., `F_r` satisfying::
f = lc(f) F_1 ... F_r (mod p**l)
F_i = f_i (mod p), i = 1..r
References
==========
.. [1] [Gathen99]_
"""
r = len(f_list)
lc = dup_LC(f, K)
if r == 1:
F = dup_mul_ground(f, K.gcdex(lc, p**l)[0], K)
return [ dup_trunc(F, p**l, K) ]
m = p
k = r // 2
d = int(_ceil(_log(l, 2)))
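    # split the modular factors into two halves, lift the corresponding pair
    # of partial products g, h through d quadratic steps (so the modulus
    # reaches p**(2**d) >= p**l), then recurse on each half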
g = gf_from_int_poly([lc], p)
for f_i in f_list[:k]:
g = gf_mul(g, gf_from_int_poly(f_i, p), p, K)
h = gf_from_int_poly(f_list[k], p)
for f_i in f_list[k + 1:]:
h = gf_mul(h, gf_from_int_poly(f_i, p), p, K)
s, t, _ = gf_gcdex(g, h, p, K)
g = gf_to_int_poly(g, p)
h = gf_to_int_poly(h, p)
s = gf_to_int_poly(s, p)
t = gf_to_int_poly(t, p)
for _ in range(1, d + 1):
(g, h, s, t), m = dup_zz_hensel_step(m, f, g, h, s, t, K), m**2
return dup_zz_hensel_lift(p, g, f_list[:k], l, K) \
+ dup_zz_hensel_lift(p, h, f_list[k:], l, K)
def _test_pl(fc, q, pl):
if q > pl // 2:
q = q - pl
if not q:
return True
return fc % q == 0
def dup_zz_zassenhaus(f, K):
"""Factor primitive square-free polynomials in `Z[x]`. """
n = dup_degree(f)
if n == 1:
return [f]
fc = f[-1]
A = dup_max_norm(f, K)
b = dup_LC(f, K)
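    # B bounds the coefficients of any factor of f (Mignotte-style bound);
    # C, gamma and `bound` delimit the search for a prime p modulo which
    # f stays square-free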
B = int(abs(K.sqrt(K(n + 1))*2**n*A*b))
C = int((n + 1)**(2*n)*A**(2*n - 1))
gamma = int(_ceil(2*_log(C, 2)))
bound = int(2*gamma*_log(gamma))
a = []
    # choose a prime number `p` such that `f` is square-free in Z_p
    # if there are many factors in Z_p, choose among a few different `p`
    # the one with the fewest factors
for px in range(3, bound + 1):
if not isprime(px) or b % px == 0:
continue
px = K.convert(px)
F = gf_from_int_poly(f, px)
if not gf_sqf_p(F, px, K):
continue
fsqfx = gf_factor_sqf(F, px, K)[1]
a.append((px, fsqfx))
if len(fsqfx) < 15 or len(a) > 4:
break
p, fsqf = min(a, key=lambda x: len(x[1]))
l = int(_ceil(_log(2*B + 1, p)))
modular = [gf_to_int_poly(ff, p) for ff in fsqf]
g = dup_zz_hensel_lift(p, f, modular, l, K)
sorted_T = range(len(g))
T = set(sorted_T)
factors, s = [], 1
pl = p**l
while 2*s <= len(T):
for S in subsets(sorted_T, s):
            # lift the constant coefficient of the product `G` of the factors
            # in the subset `S`; if it does not divide `fc`, `G` does
            # not divide the input polynomial
if b == 1:
q = 1
for i in S:
q = q*g[i][-1]
q = q % pl
if not _test_pl(fc, q, pl):
continue
else:
G = [b]
for i in S:
G = dup_mul(G, g[i], K)
G = dup_trunc(G, pl, K)
G = dup_primitive(G, K)[1]
q = G[-1]
if q and fc % q != 0:
continue
H = [b]
S = set(S)
T_S = T - S
if b == 1:
G = [b]
for i in S:
G = dup_mul(G, g[i], K)
G = dup_trunc(G, pl, K)
for i in T_S:
H = dup_mul(H, g[i], K)
H = dup_trunc(H, pl, K)
G_norm = dup_l1_norm(G, K)
H_norm = dup_l1_norm(H, K)
if G_norm*H_norm <= B:
T = T_S
sorted_T = [i for i in sorted_T if i not in S]
G = dup_primitive(G, K)[1]
f = dup_primitive(H, K)[1]
factors.append(G)
b = dup_LC(f, K)
break
else:
s += 1
return factors + [f]
def dup_zz_irreducible_p(f, K):
"""Test irreducibility using Eisenstein's criterion. """
lc = dup_LC(f, K)
tc = dup_TC(f, K)
e_fc = dup_content(f[1:], K)
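    # Eisenstein's criterion: f is irreducible if some prime p divides all
    # non-leading coefficients, does not divide the leading coefficient and
    # p**2 does not divide the constant term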
if e_fc:
e_ff = factorint(int(e_fc))
for p in e_ff.keys():
if (lc % p) and (tc % p**2):
return True
def dup_cyclotomic_p(f, K, irreducible=False):
"""
Efficiently test if ``f`` is a cyclotomic polynomial.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> f = x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1
>>> R.dup_cyclotomic_p(f)
False
>>> g = x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1
>>> R.dup_cyclotomic_p(g)
True
"""
if K.is_QQ:
try:
K0, K = K, K.get_ring()
f = dup_convert(f, K0, K)
except CoercionFailed:
return False
elif not K.is_ZZ:
return False
lc = dup_LC(f, K)
tc = dup_TC(f, K)
if lc != 1 or (tc != -1 and tc != 1):
return False
if not irreducible:
coeff, factors = dup_factor_list(f, K)
if coeff != K.one or factors != [(f, 1)]:
return False
n = dup_degree(f)
g, h = [], []
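    # Graeffe transform: writing f(x) = g(x**2) + x*h(x**2), the polynomial
    # F(y) = g(y)**2 - y*h(y)**2 has the squares of the roots of f as roots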
for i in range(n, -1, -2):
g.insert(0, f[i])
for i in range(n - 1, -1, -2):
h.insert(0, f[i])
g = dup_sqr(dup_strip(g), K)
h = dup_sqr(dup_strip(h), K)
F = dup_sub(g, dup_lshift(h, 1, K), K)
if K.is_negative(dup_LC(F, K)):
F = dup_neg(F, K)
if F == f:
return True
g = dup_mirror(f, K)
if K.is_negative(dup_LC(g, K)):
g = dup_neg(g, K)
if F == g and dup_cyclotomic_p(g, K):
return True
G = dup_sqf_part(F, K)
if dup_sqr(G, K) == F and dup_cyclotomic_p(G, K):
return True
return False
def dup_zz_cyclotomic_poly(n, K):
"""Efficiently generate n-th cyclotomic polynomial. """
h = [K.one, -K.one]
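    # start from Phi_1(x) = x - 1 and use Phi_{m*p}(x) = Phi_m(x**p)/Phi_m(x)
    # for each new prime p, then Phi_{m*p**k}(x) = Phi_{m*p}(x**(p**(k - 1)))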
for p, k in factorint(n).items():
h = dup_quo(dup_inflate(h, p, K), h, K)
h = dup_inflate(h, p**(k - 1), K)
return h
def _dup_cyclotomic_decompose(n, K):
H = [[K.one, -K.one]]
for p, k in factorint(n).items():
Q = [ dup_quo(dup_inflate(h, p, K), h, K) for h in H ]
H.extend(Q)
for i in range(1, k):
Q = [ dup_inflate(q, p, K) for q in Q ]
H.extend(Q)
return H
def dup_zz_cyclotomic_factor(f, K):
"""
Efficiently factor polynomials `x**n - 1` and `x**n + 1` in `Z[x]`.
Given a univariate polynomial `f` in `Z[x]` returns a list of factors
of `f`, provided that `f` is in the form `x**n - 1` or `x**n + 1` for
`n >= 1`. Otherwise returns None.
Factorization is performed using cyclotomic decomposition of `f`,
    which makes this method much faster than any other direct factorization
approach (e.g. Zassenhaus's).
References
==========
.. [1] [Weisstein09]_
"""
lc_f, tc_f = dup_LC(f, K), dup_TC(f, K)
if dup_degree(f) <= 0:
return None
if lc_f != 1 or tc_f not in [-1, 1]:
return None
if any(bool(cf) for cf in f[1:-1]):
return None
n = dup_degree(f)
F = _dup_cyclotomic_decompose(n, K)
if not K.is_one(tc_f):
return F
else:
H = []
for h in _dup_cyclotomic_decompose(2*n, K):
if h not in F:
H.append(h)
return H
def dup_zz_factor_sqf(f, K):
"""Factor square-free (non-primitive) polynomials in `Z[x]`. """
cont, g = dup_primitive(f, K)
n = dup_degree(g)
if dup_LC(g, K) < 0:
cont, g = -cont, dup_neg(g, K)
if n <= 0:
return cont, []
elif n == 1:
return cont, [g]
if query('USE_IRREDUCIBLE_IN_FACTOR'):
if dup_zz_irreducible_p(g, K):
return cont, [g]
factors = None
if query('USE_CYCLOTOMIC_FACTOR'):
factors = dup_zz_cyclotomic_factor(g, K)
if factors is None:
factors = dup_zz_zassenhaus(g, K)
return cont, _sort_factors(factors, multiple=False)
def dup_zz_factor(f, K):
"""
Factor (non square-free) polynomials in `Z[x]`.
Given a univariate polynomial `f` in `Z[x]` computes its complete
factorization `f_1, ..., f_n` into irreducibles over integers::
f = content(f) f_1**k_1 ... f_n**k_n
The factorization is computed by reducing the input polynomial
into a primitive square-free polynomial and factoring it using
Zassenhaus algorithm. Trial division is used to recover the
multiplicities of factors.
The result is returned as a tuple consisting of::
        (content(f), [(f_1, k_1), ..., (f_n, k_n)])
Examples
========
Consider the polynomial `f = 2*x**4 - 2`::
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_zz_factor(2*x**4 - 2)
(2, [(x - 1, 1), (x + 1, 1), (x**2 + 1, 1)])
    As a result we get the following factorization::
f = 2 (x - 1) (x + 1) (x**2 + 1)
    Note that this is a complete factorization over the integers;
    over the Gaussian integers, however, the last factor can be split further.
    By default, polynomials `x**n - 1` and `x**n + 1` are factored
    using cyclotomic decomposition to speed up computations. To
    disable this behaviour set cyclotomic=False.
References
==========
.. [1] [Gathen99]_
"""
cont, g = dup_primitive(f, K)
n = dup_degree(g)
if dup_LC(g, K) < 0:
cont, g = -cont, dup_neg(g, K)
if n <= 0:
return cont, []
elif n == 1:
return cont, [(g, 1)]
if query('USE_IRREDUCIBLE_IN_FACTOR'):
if dup_zz_irreducible_p(g, K):
return cont, [(g, 1)]
g = dup_sqf_part(g, K)
H = None
if query('USE_CYCLOTOMIC_FACTOR'):
H = dup_zz_cyclotomic_factor(g, K)
if H is None:
H = dup_zz_zassenhaus(g, K)
factors = dup_trial_division(f, H, K)
return cont, factors
def dmp_zz_wang_non_divisors(E, cs, ct, K):
"""Wang/EEZ: Compute a set of valid divisors. """
result = [ cs*ct ]
for q in E:
q = abs(q)
for r in reversed(result):
while r != 1:
r = K.gcd(r, q)
q = q // r
if K.is_one(q):
return None
result.append(q)
return result[1:]
def dmp_zz_wang_test_points(f, T, ct, A, u, K):
"""Wang/EEZ: Test evaluation points for suitability. """
if not dmp_eval_tail(dmp_LC(f, K), A, u - 1, K):
raise EvaluationFailed('no luck')
g = dmp_eval_tail(f, A, u, K)
if not dup_sqf_p(g, K):
raise EvaluationFailed('no luck')
c, h = dup_primitive(g, K)
if K.is_negative(dup_LC(h, K)):
c, h = -c, dup_neg(h, K)
v = u - 1
E = [ dmp_eval_tail(t, A, v, K) for t, _ in T ]
D = dmp_zz_wang_non_divisors(E, c, ct, K)
if D is not None:
return c, h, E
else:
raise EvaluationFailed('no luck')
def dmp_zz_wang_lead_coeffs(f, T, cs, E, H, A, u, K):
"""Wang/EEZ: Compute correct leading coefficients. """
C, J, v = [], [0]*len(E), u - 1
for h in H:
c = dmp_one(v, K)
d = dup_LC(h, K)*cs
for i in reversed(range(len(E))):
k, e, (t, _) = 0, E[i], T[i]
while not (d % e):
d, k = d//e, k + 1
if k != 0:
c, J[i] = dmp_mul(c, dmp_pow(t, k, v, K), v, K), 1
C.append(c)
if any(not j for j in J):
raise ExtraneousFactors # pragma: no cover
CC, HH = [], []
for c, h in zip(C, H):
d = dmp_eval_tail(c, A, v, K)
lc = dup_LC(h, K)
if K.is_one(cs):
cc = lc//d
else:
g = K.gcd(lc, d)
d, cc = d//g, lc//g
h, cs = dup_mul_ground(h, d, K), cs//d
c = dmp_mul_ground(c, cc, v, K)
CC.append(c)
HH.append(h)
if K.is_one(cs):
return f, HH, CC
CCC, HHH = [], []
for c, h in zip(CC, HH):
CCC.append(dmp_mul_ground(c, cs, v, K))
HHH.append(dmp_mul_ground(h, cs, 0, K))
f = dmp_mul_ground(f, cs**(len(H) - 1), u, K)
return f, HHH, CCC
def dup_zz_diophantine(F, m, p, K):
"""Wang/EEZ: Solve univariate Diophantine equations. """
if len(F) == 2:
a, b = F
f = gf_from_int_poly(a, p)
g = gf_from_int_poly(b, p)
s, t, G = gf_gcdex(g, f, p, K)
s = gf_lshift(s, m, K)
t = gf_lshift(t, m, K)
q, s = gf_div(s, f, p, K)
t = gf_add_mul(t, q, g, p, K)
s = gf_to_int_poly(s, p)
t = gf_to_int_poly(t, p)
result = [s, t]
else:
G = [F[-1]]
for f in reversed(F[1:-1]):
G.insert(0, dup_mul(f, G[0], K))
S, T = [], [[1]]
for f, g in zip(F, G):
t, s = dmp_zz_diophantine([g, f], T[-1], [], 0, p, 1, K)
T.append(t)
S.append(s)
result, S = [], S + [T[-1]]
for s, f in zip(S, F):
s = gf_from_int_poly(s, p)
f = gf_from_int_poly(f, p)
r = gf_rem(gf_lshift(s, m, K), f, p, K)
s = gf_to_int_poly(r, p)
result.append(s)
return result
def dmp_zz_diophantine(F, c, A, d, p, u, K):
"""Wang/EEZ: Solve multivariate Diophantine equations. """
if not A:
S = [ [] for _ in F ]
n = dup_degree(c)
for i, coeff in enumerate(c):
if not coeff:
continue
T = dup_zz_diophantine(F, n - i, p, K)
for j, (s, t) in enumerate(zip(S, T)):
t = dup_mul_ground(t, coeff, K)
S[j] = dup_trunc(dup_add(s, t, K), p, K)
else:
n = len(A)
e = dmp_expand(F, u, K)
a, A = A[-1], A[:-1]
B, G = [], []
for f in F:
B.append(dmp_quo(e, f, u, K))
G.append(dmp_eval_in(f, a, n, u, K))
C = dmp_eval_in(c, a, n, u, K)
v = u - 1
S = dmp_zz_diophantine(G, C, A, d, p, v, K)
S = [ dmp_raise(s, 1, v, K) for s in S ]
for s, b in zip(S, B):
c = dmp_sub_mul(c, s, b, u, K)
c = dmp_ground_trunc(c, p, u, K)
m = dmp_nest([K.one, -a], n, K)
M = dmp_one(n, K)
for k in K.map(range(0, d)):
if dmp_zero_p(c, u):
break
M = dmp_mul(M, m, u, K)
C = dmp_diff_eval_in(c, k + 1, a, n, u, K)
if not dmp_zero_p(C, v):
C = dmp_quo_ground(C, K.factorial(k + 1), v, K)
T = dmp_zz_diophantine(G, C, A, d, p, v, K)
for i, t in enumerate(T):
T[i] = dmp_mul(dmp_raise(t, 1, v, K), M, u, K)
for i, (s, t) in enumerate(zip(S, T)):
S[i] = dmp_add(s, t, u, K)
for t, b in zip(T, B):
c = dmp_sub_mul(c, t, b, u, K)
c = dmp_ground_trunc(c, p, u, K)
S = [ dmp_ground_trunc(s, p, u, K) for s in S ]
return S
def dmp_zz_wang_hensel_lifting(f, H, LC, A, p, u, K):
"""Wang/EEZ: Parallel Hensel lifting algorithm. """
S, n, v = [f], len(A), u - 1
H = list(H)
for i, a in enumerate(reversed(A[1:])):
s = dmp_eval_in(S[0], a, n - i, u - i, K)
S.insert(0, dmp_ground_trunc(s, p, v - i, K))
d = max(dmp_degree_list(f, u)[1:])
for j, s, a in zip(range(2, n + 2), S, A):
G, w = list(H), j - 1
I, J = A[:j - 2], A[j - 1:]
for i, (h, lc) in enumerate(zip(H, LC)):
lc = dmp_ground_trunc(dmp_eval_tail(lc, J, v, K), p, w - 1, K)
H[i] = [lc] + dmp_raise(h[1:], 1, w - 1, K)
m = dmp_nest([K.one, -a], w, K)
M = dmp_one(w, K)
c = dmp_sub(s, dmp_expand(H, w, K), w, K)
dj = dmp_degree_in(s, w, w)
for k in K.map(range(0, dj)):
if dmp_zero_p(c, w):
break
M = dmp_mul(M, m, w, K)
C = dmp_diff_eval_in(c, k + 1, a, w, w, K)
if not dmp_zero_p(C, w - 1):
C = dmp_quo_ground(C, K.factorial(k + 1), w - 1, K)
T = dmp_zz_diophantine(G, C, I, d, p, w - 1, K)
for i, (h, t) in enumerate(zip(H, T)):
h = dmp_add_mul(h, dmp_raise(t, 1, w - 1, K), M, w, K)
H[i] = dmp_ground_trunc(h, p, w, K)
h = dmp_sub(s, dmp_expand(H, w, K), w, K)
c = dmp_ground_trunc(h, p, w, K)
if dmp_expand(H, u, K) != f:
raise ExtraneousFactors # pragma: no cover
else:
return H
def dmp_zz_wang(f, u, K, mod=None, seed=None):
"""
Factor primitive square-free polynomials in `Z[X]`.
Given a multivariate polynomial `f` in `Z[x_1,...,x_n]`, which is
primitive and square-free in `x_1`, computes factorization of `f` into
irreducibles over integers.
The procedure is based on Wang's Enhanced Extended Zassenhaus
algorithm. The algorithm works by viewing `f` as a univariate polynomial
in `Z[x_2,...,x_n][x_1]`, for which an evaluation mapping is computed::
x_2 -> a_2, ..., x_n -> a_n
where `a_i`, for `i = 2, ..., n`, are carefully chosen integers. The
mapping is used to transform `f` into a univariate polynomial in `Z[x_1]`,
which can be factored efficiently using Zassenhaus algorithm. The last
step is to lift univariate factors to obtain true multivariate
factors. For this purpose a parallel Hensel lifting procedure is used.
The parameter ``seed`` is passed to _randint and can be used to seed randint
(when an integer) or (for testing purposes) can be a sequence of numbers.
References
==========
.. [1] [Wang78]_
.. [2] [Geddes92]_
"""
from sympy.testing.randtest import _randint
randint = _randint(seed)
ct, T = dmp_zz_factor(dmp_LC(f, K), u - 1, K)
b = dmp_zz_mignotte_bound(f, u, K)
p = K(nextprime(b))
if mod is None:
if u == 1:
mod = 2
else:
mod = 1
history, configs, A, r = set([]), [], [K.zero]*u, None
try:
cs, s, E = dmp_zz_wang_test_points(f, T, ct, A, u, K)
_, H = dup_zz_factor_sqf(s, K)
r = len(H)
if r == 1:
return [f]
configs = [(s, cs, E, H, A)]
except EvaluationFailed:
pass
eez_num_configs = query('EEZ_NUMBER_OF_CONFIGS')
eez_num_tries = query('EEZ_NUMBER_OF_TRIES')
eez_mod_step = query('EEZ_MODULUS_STEP')
while len(configs) < eez_num_configs:
for _ in range(eez_num_tries):
A = [ K(randint(-mod, mod)) for _ in range(u) ]
if tuple(A) not in history:
history.add(tuple(A))
else:
continue
try:
cs, s, E = dmp_zz_wang_test_points(f, T, ct, A, u, K)
except EvaluationFailed:
continue
_, H = dup_zz_factor_sqf(s, K)
rr = len(H)
if r is not None:
if rr != r: # pragma: no cover
if rr < r:
configs, r = [], rr
else:
continue
else:
r = rr
if r == 1:
return [f]
configs.append((s, cs, E, H, A))
if len(configs) == eez_num_configs:
break
else:
mod += eez_mod_step
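    # among the collected configurations pick the one whose univariate
    # image `s` has the smallest coefficient max-norm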
s_norm, s_arg, i = None, 0, 0
for s, _, _, _, _ in configs:
_s_norm = dup_max_norm(s, K)
if s_norm is not None:
if _s_norm < s_norm:
s_norm = _s_norm
s_arg = i
else:
s_norm = _s_norm
i += 1
_, cs, E, H, A = configs[s_arg]
orig_f = f
try:
f, H, LC = dmp_zz_wang_lead_coeffs(f, T, cs, E, H, A, u, K)
factors = dmp_zz_wang_hensel_lifting(f, H, LC, A, p, u, K)
except ExtraneousFactors: # pragma: no cover
if query('EEZ_RESTART_IF_NEEDED'):
return dmp_zz_wang(orig_f, u, K, mod + 1)
else:
raise ExtraneousFactors(
"we need to restart algorithm with better parameters")
result = []
for f in factors:
_, f = dmp_ground_primitive(f, u, K)
if K.is_negative(dmp_ground_LC(f, u, K)):
f = dmp_neg(f, u, K)
result.append(f)
return result
def dmp_zz_factor(f, u, K):
"""
Factor (non square-free) polynomials in `Z[X]`.
    Given a multivariate polynomial `f` in `Z[X]` computes its complete
factorization `f_1, ..., f_n` into irreducibles over integers::
f = content(f) f_1**k_1 ... f_n**k_n
The factorization is computed by reducing the input polynomial
into a primitive square-free polynomial and factoring it using
Enhanced Extended Zassenhaus (EEZ) algorithm. Trial division
is used to recover the multiplicities of factors.
The result is returned as a tuple consisting of::
        (content(f), [(f_1, k_1), ..., (f_n, k_n)])
Consider polynomial `f = 2*(x**2 - y**2)`::
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_zz_factor(2*x**2 - 2*y**2)
(2, [(x - y, 1), (x + y, 1)])
    As a result we get the following factorization::
f = 2 (x - y) (x + y)
References
==========
.. [1] [Gathen99]_
"""
if not u:
return dup_zz_factor(f, K)
if dmp_zero_p(f, u):
return K.zero, []
cont, g = dmp_ground_primitive(f, u, K)
if dmp_ground_LC(g, u, K) < 0:
cont, g = -cont, dmp_neg(g, u, K)
if all(d <= 0 for d in dmp_degree_list(g, u)):
return cont, []
G, g = dmp_primitive(g, u, K)
factors = []
if dmp_degree(g, u) > 0:
g = dmp_sqf_part(g, u, K)
H = dmp_zz_wang(g, u, K)
factors = dmp_trial_division(f, H, u, K)
for g, k in dmp_zz_factor(G, u - 1, K)[1]:
factors.insert(0, ([g], k))
return cont, _sort_factors(factors)
def dup_ext_factor(f, K):
"""Factor univariate polynomials over algebraic number fields. """
n, lc = dup_degree(f), dup_LC(f, K)
f = dup_monic(f, K)
if n <= 0:
return lc, []
if n == 1:
return lc, [(f, 1)]
f, F = dup_sqf_part(f, K), f
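    # Trager-style factorization: factor a square-free norm of f over the
    # base domain, then recover the factors over K via GCDs with the
    # shifted polynomial g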
s, g, r = dup_sqf_norm(f, K)
factors = dup_factor_list_include(r, K.dom)
if len(factors) == 1:
return lc, [(f, n//dup_degree(f))]
H = s*K.unit
for i, (factor, _) in enumerate(factors):
h = dup_convert(factor, K.dom, K)
h, _, g = dup_inner_gcd(h, g, K)
h = dup_shift(h, H, K)
factors[i] = h
factors = dup_trial_division(F, factors, K)
return lc, factors
def dmp_ext_factor(f, u, K):
"""Factor multivariate polynomials over algebraic number fields. """
if not u:
return dup_ext_factor(f, K)
lc = dmp_ground_LC(f, u, K)
f = dmp_ground_monic(f, u, K)
if all(d <= 0 for d in dmp_degree_list(f, u)):
return lc, []
f, F = dmp_sqf_part(f, u, K), f
s, g, r = dmp_sqf_norm(f, u, K)
factors = dmp_factor_list_include(r, u, K.dom)
if len(factors) == 1:
factors = [f]
else:
H = dmp_raise([K.one, s*K.unit], u, 0, K)
for i, (factor, _) in enumerate(factors):
h = dmp_convert(factor, u, K.dom, K)
h, _, g = dmp_inner_gcd(h, g, u, K)
h = dmp_compose(h, H, u, K)
factors[i] = h
return lc, dmp_trial_division(F, factors, u, K)
def dup_gf_factor(f, K):
"""Factor univariate polynomials over finite fields. """
f = dup_convert(f, K, K.dom)
coeff, factors = gf_factor(f, K.mod, K.dom)
for i, (f, k) in enumerate(factors):
factors[i] = (dup_convert(f, K.dom, K), k)
return K.convert(coeff, K.dom), factors
def dmp_gf_factor(f, u, K):
"""Factor multivariate polynomials over finite fields. """
raise NotImplementedError('multivariate polynomials over finite fields')
def dup_factor_list(f, K0):
"""Factor univariate polynomials into irreducibles in `K[x]`. """
j, f = dup_terms_gcd(f, K0)
cont, f = dup_primitive(f, K0)
if K0.is_FiniteField:
coeff, factors = dup_gf_factor(f, K0)
elif K0.is_Algebraic:
coeff, factors = dup_ext_factor(f, K0)
else:
if not K0.is_Exact:
K0_inexact, K0 = K0, K0.get_exact()
f = dup_convert(f, K0_inexact, K0)
else:
K0_inexact = None
if K0.is_Field:
K = K0.get_ring()
denom, f = dup_clear_denoms(f, K0, K)
f = dup_convert(f, K0, K)
else:
K = K0
if K.is_ZZ:
coeff, factors = dup_zz_factor(f, K)
elif K.is_Poly:
f, u = dmp_inject(f, 0, K)
coeff, factors = dmp_factor_list(f, u, K.dom)
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_eject(f, u, K), k)
coeff = K.convert(coeff, K.dom)
else: # pragma: no cover
raise DomainError('factorization not supported over %s' % K0)
if K0.is_Field:
for i, (f, k) in enumerate(factors):
factors[i] = (dup_convert(f, K, K0), k)
coeff = K0.convert(coeff, K)
coeff = K0.quo(coeff, denom)
if K0_inexact:
for i, (f, k) in enumerate(factors):
max_norm = dup_max_norm(f, K0)
f = dup_quo_ground(f, max_norm, K0)
f = dup_convert(f, K0, K0_inexact)
factors[i] = (f, k)
coeff = K0.mul(coeff, K0.pow(max_norm, k))
coeff = K0_inexact.convert(coeff, K0)
K0 = K0_inexact
if j:
factors.insert(0, ([K0.one, K0.zero], j))
return coeff*cont, _sort_factors(factors)
def dup_factor_list_include(f, K):
"""Factor univariate polynomials into irreducibles in `K[x]`. """
coeff, factors = dup_factor_list(f, K)
if not factors:
return [(dup_strip([coeff]), 1)]
else:
g = dup_mul_ground(factors[0][0], coeff, K)
return [(g, factors[0][1])] + factors[1:]
def dmp_factor_list(f, u, K0):
"""Factor multivariate polynomials into irreducibles in `K[X]`. """
if not u:
return dup_factor_list(f, K0)
J, f = dmp_terms_gcd(f, u, K0)
cont, f = dmp_ground_primitive(f, u, K0)
if K0.is_FiniteField: # pragma: no cover
coeff, factors = dmp_gf_factor(f, u, K0)
elif K0.is_Algebraic:
coeff, factors = dmp_ext_factor(f, u, K0)
else:
if not K0.is_Exact:
K0_inexact, K0 = K0, K0.get_exact()
f = dmp_convert(f, u, K0_inexact, K0)
else:
K0_inexact = None
if K0.is_Field:
K = K0.get_ring()
denom, f = dmp_clear_denoms(f, u, K0, K)
f = dmp_convert(f, u, K0, K)
else:
K = K0
if K.is_ZZ:
levels, f, v = dmp_exclude(f, u, K)
coeff, factors = dmp_zz_factor(f, v, K)
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_include(f, levels, v, K), k)
elif K.is_Poly:
f, v = dmp_inject(f, u, K)
coeff, factors = dmp_factor_list(f, v, K.dom)
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_eject(f, v, K), k)
coeff = K.convert(coeff, K.dom)
else: # pragma: no cover
raise DomainError('factorization not supported over %s' % K0)
if K0.is_Field:
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_convert(f, u, K, K0), k)
coeff = K0.convert(coeff, K)
coeff = K0.quo(coeff, denom)
if K0_inexact:
for i, (f, k) in enumerate(factors):
max_norm = dmp_max_norm(f, u, K0)
f = dmp_quo_ground(f, max_norm, u, K0)
f = dmp_convert(f, u, K0, K0_inexact)
factors[i] = (f, k)
coeff = K0.mul(coeff, K0.pow(max_norm, k))
coeff = K0_inexact.convert(coeff, K0)
K0 = K0_inexact
for i, j in enumerate(reversed(J)):
if not j:
continue
term = {(0,)*(u - i) + (1,) + (0,)*i: K0.one}
factors.insert(0, (dmp_from_dict(term, u, K0), j))
return coeff*cont, _sort_factors(factors)
def dmp_factor_list_include(f, u, K):
"""Factor multivariate polynomials into irreducibles in `K[X]`. """
if not u:
return dup_factor_list_include(f, K)
coeff, factors = dmp_factor_list(f, u, K)
if not factors:
return [(dmp_ground(coeff, u), 1)]
else:
g = dmp_mul_ground(factors[0][0], coeff, u, K)
return [(g, factors[0][1])] + factors[1:]
def dup_irreducible_p(f, K):
"""
Returns ``True`` if a univariate polynomial ``f`` has no factors
over its domain.
"""
return dmp_irreducible_p(f, 0, K)
def dmp_irreducible_p(f, u, K):
"""
Returns ``True`` if a multivariate polynomial ``f`` has no factors
over its domain.
"""
_, factors = dmp_factor_list(f, u, K)
if not factors:
return True
elif len(factors) > 1:
return False
else:
_, k = factors[0]
return k == 1
|
6eb7476df6564b27a79f776ac87512061ae973482ee15ab785cc3899d6e9a7eb | r"""
Sparse distributed elements of free modules over multivariate (generalized)
polynomial rings.
This code and its data structures are very much like the distributed
polynomials, except that the first "exponent" of the monomial is
a module generator index. That is, the multi-exponent ``(i, e_1, ..., e_n)``
represents the "monomial" `x_1^{e_1} \cdots x_n^{e_n} f_i` of the free module
`F` generated by `f_1, \ldots, f_r` over (a localization of) the ring
`K[x_1, \ldots, x_n]`. A module element is simply stored as a list of terms
ordered by the monomial order. Here a term is a pair of a multi-exponent and a
coefficient. In general, this coefficient should never be zero (since it can
then be omitted). The zero module element is stored as an empty list.
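For instance (purely illustrative), with two variables `x, y` the element
`3 x y f_2 + y^2 f_1` is stored in ``lex`` order as
``[((2, 1, 1), 3), ((1, 0, 2), 1)]``.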
The main routines are ``sdm_nf_mora`` and ``sdm_groebner`` which can be used
to compute, respectively, weak normal forms and standard bases. They work with
arbitrary (not necessarily global) monomial orders.
In general, product orders have to be used to construct valid monomial orders
for modules. However, ``lex`` can be used as-is.
Note that the "level" (number of variables, i.e. parameter u+1 in
distributedpolys.py) is never needed in this code.
The main reference for this file is [SCA],
"A Singular Introduction to Commutative Algebra".
"""
from __future__ import print_function, division
from itertools import permutations
from sympy.polys.monomials import (
monomial_mul, monomial_lcm, monomial_div, monomial_deg
)
from sympy.polys.polytools import Poly
from sympy.polys.polyutils import parallel_dict_from_expr
from sympy import S, sympify
# Additional monomial tools.
def sdm_monomial_mul(M, X):
"""
Multiply tuple ``X`` representing a monomial of `K[X]` into the tuple
``M`` representing a monomial of `F`.
Examples
========
Multiplying `xy^3` into `x f_1` yields `x^2 y^3 f_1`:
>>> from sympy.polys.distributedmodules import sdm_monomial_mul
>>> sdm_monomial_mul((1, 1, 0), (1, 3))
(1, 2, 3)
"""
return (M[0],) + monomial_mul(X, M[1:])
def sdm_monomial_deg(M):
"""
Return the total degree of ``M``.
Examples
========
For example, the total degree of `x^2 y f_5` is 3:
>>> from sympy.polys.distributedmodules import sdm_monomial_deg
>>> sdm_monomial_deg((5, 2, 1))
3
"""
return monomial_deg(M[1:])
def sdm_monomial_lcm(A, B):
r"""
Return the "least common multiple" of ``A`` and ``B``.
    If `A = M e_j` and `B = N e_j`, where `M` and `N` are polynomial monomials,
    this returns `\lcm(M, N) e_j`. Note that ``A`` and ``B`` may involve
    distinct monomials.
    Otherwise, i.e. if ``A`` and ``B`` involve different generators, the
    result is undefined.
Examples
========
>>> from sympy.polys.distributedmodules import sdm_monomial_lcm
>>> sdm_monomial_lcm((1, 2, 3), (1, 0, 5))
(1, 2, 5)
"""
return (A[0],) + monomial_lcm(A[1:], B[1:])
def sdm_monomial_divides(A, B):
"""
Does there exist a (polynomial) monomial X such that XA = B?
Examples
========
Positive examples:
In the following examples, the monomial is given in terms of x, y and the
generator(s), f_1, f_2 etc. The tuple form of that monomial is used in
the call to sdm_monomial_divides.
Note: the generator appears last in the expression but first in the tuple
and other factors appear in the same order that they appear in the monomial
expression.
`A = f_1` divides `B = f_1`
>>> from sympy.polys.distributedmodules import sdm_monomial_divides
>>> sdm_monomial_divides((1, 0, 0), (1, 0, 0))
True
`A = f_1` divides `B = x^2 y f_1`
>>> sdm_monomial_divides((1, 0, 0), (1, 2, 1))
True
`A = xy f_5` divides `B = x^2 y f_5`
>>> sdm_monomial_divides((5, 1, 1), (5, 2, 1))
True
Negative examples:
`A = f_1` does not divide `B = f_2`
>>> sdm_monomial_divides((1, 0, 0), (2, 0, 0))
False
`A = x f_1` does not divide `B = f_1`
>>> sdm_monomial_divides((1, 1, 0), (1, 0, 0))
False
`A = xy^2 f_5` does not divide `B = y f_5`
>>> sdm_monomial_divides((5, 1, 2), (5, 0, 1))
False
"""
return A[0] == B[0] and all(a <= b for a, b in zip(A[1:], B[1:]))
# The actual distributed modules code.
def sdm_LC(f, K):
"""Returns the leading coeffcient of ``f``. """
if not f:
return K.zero
else:
return f[0][1]
def sdm_to_dict(f):
"""Make a dictionary from a distributed polynomial. """
return dict(f)
def sdm_from_dict(d, O):
"""
Create an sdm from a dictionary.
Here ``O`` is the monomial order to use.
Examples
========
>>> from sympy.polys.distributedmodules import sdm_from_dict
>>> from sympy.polys import QQ, lex
>>> dic = {(1, 1, 0): QQ(1), (1, 0, 0): QQ(2), (0, 1, 0): QQ(0)}
>>> sdm_from_dict(dic, lex)
[((1, 1, 0), 1), ((1, 0, 0), 2)]
"""
return sdm_strip(sdm_sort(list(d.items()), O))
def sdm_sort(f, O):
"""Sort terms in ``f`` using the given monomial order ``O``. """
return sorted(f, key=lambda term: O(term[0]), reverse=True)
def sdm_strip(f):
"""Remove terms with zero coefficients from ``f`` in ``K[X]``. """
return [ (monom, coeff) for monom, coeff in f if coeff ]
def sdm_add(f, g, O, K):
"""
Add two module elements ``f``, ``g``.
Addition is done over the ground field ``K``, monomials are ordered
according to ``O``.
Examples
========
All examples use lexicographic order.
`(xy f_1) + (f_2) = f_2 + xy f_1`
>>> from sympy.polys.distributedmodules import sdm_add
>>> from sympy.polys import lex, QQ
>>> sdm_add([((1, 1, 1), QQ(1))], [((2, 0, 0), QQ(1))], lex, QQ)
[((2, 0, 0), 1), ((1, 1, 1), 1)]
    `(xy f_1) + (-xy f_1) = 0`
>>> sdm_add([((1, 1, 1), QQ(1))], [((1, 1, 1), QQ(-1))], lex, QQ)
[]
`(f_1) + (2f_1) = 3f_1`
>>> sdm_add([((1, 0, 0), QQ(1))], [((1, 0, 0), QQ(2))], lex, QQ)
[((1, 0, 0), 3)]
`(yf_1) + (xf_1) = xf_1 + yf_1`
>>> sdm_add([((1, 0, 1), QQ(1))], [((1, 1, 0), QQ(1))], lex, QQ)
[((1, 1, 0), 1), ((1, 0, 1), 1)]
"""
h = dict(f)
for monom, c in g:
if monom in h:
coeff = h[monom] + c
if not coeff:
del h[monom]
else:
h[monom] = coeff
else:
h[monom] = c
return sdm_from_dict(h, O)
def sdm_LM(f):
r"""
Returns the leading monomial of ``f``.
Only valid if `f \ne 0`.
Examples
========
>>> from sympy.polys.distributedmodules import sdm_LM, sdm_from_dict
>>> from sympy.polys import QQ, lex
>>> dic = {(1, 2, 3): QQ(1), (4, 0, 0): QQ(1), (4, 0, 1): QQ(1)}
>>> sdm_LM(sdm_from_dict(dic, lex))
(4, 0, 1)
"""
return f[0][0]
def sdm_LT(f):
r"""
Returns the leading term of ``f``.
Only valid if `f \ne 0`.
Examples
========
>>> from sympy.polys.distributedmodules import sdm_LT, sdm_from_dict
>>> from sympy.polys import QQ, lex
>>> dic = {(1, 2, 3): QQ(1), (4, 0, 0): QQ(2), (4, 0, 1): QQ(3)}
>>> sdm_LT(sdm_from_dict(dic, lex))
((4, 0, 1), 3)
"""
return f[0]
def sdm_mul_term(f, term, O, K):
"""
Multiply a distributed module element ``f`` by a (polynomial) term ``term``.
Multiplication of coefficients is done over the ground field ``K``, and
monomials are ordered according to ``O``.
Examples
========
`0 f_1 = 0`
>>> from sympy.polys.distributedmodules import sdm_mul_term
>>> from sympy.polys import lex, QQ
>>> sdm_mul_term([((1, 0, 0), QQ(1))], ((0, 0), QQ(0)), lex, QQ)
[]
`x 0 = 0`
>>> sdm_mul_term([], ((1, 0), QQ(1)), lex, QQ)
[]
`(x) (f_1) = xf_1`
>>> sdm_mul_term([((1, 0, 0), QQ(1))], ((1, 0), QQ(1)), lex, QQ)
[((1, 1, 0), 1)]
`(2xy) (3x f_1 + 4y f_2) = 8xy^2 f_2 + 6x^2y f_1`
>>> f = [((2, 0, 1), QQ(4)), ((1, 1, 0), QQ(3))]
>>> sdm_mul_term(f, ((1, 1), QQ(2)), lex, QQ)
[((2, 1, 2), 8), ((1, 2, 1), 6)]
"""
X, c = term
if not f or not c:
return []
else:
if K.is_one(c):
return [ (sdm_monomial_mul(f_M, X), f_c) for f_M, f_c in f ]
else:
return [ (sdm_monomial_mul(f_M, X), f_c * c) for f_M, f_c in f ]
def sdm_zero():
"""Return the zero module element."""
return []
def sdm_deg(f):
"""
Degree of ``f``.
This is the maximum of the degrees of all its monomials.
Invalid if ``f`` is zero.
Examples
========
>>> from sympy.polys.distributedmodules import sdm_deg
>>> sdm_deg([((1, 2, 3), 1), ((10, 0, 1), 1), ((2, 3, 4), 4)])
7
"""
return max(sdm_monomial_deg(M[0]) for M in f)
# Conversion
def sdm_from_vector(vec, O, K, **opts):
"""
Create an sdm from an iterable of expressions.
Coefficients are created in the ground field ``K``, and terms are ordered
according to monomial order ``O``. Named arguments are passed on to the
polys conversion code and can be used to specify for example generators.
Examples
========
>>> from sympy.polys.distributedmodules import sdm_from_vector
>>> from sympy.abc import x, y, z
>>> from sympy.polys import QQ, lex
>>> sdm_from_vector([x**2+y**2, 2*z], lex, QQ)
[((1, 0, 0, 1), 2), ((0, 2, 0, 0), 1), ((0, 0, 2, 0), 1)]
"""
dics, gens = parallel_dict_from_expr(sympify(vec), **opts)
dic = {}
for i, d in enumerate(dics):
for k, v in d.items():
dic[(i,) + k] = K.convert(v)
return sdm_from_dict(dic, O)
def sdm_to_vector(f, gens, K, n=None):
"""
Convert sdm ``f`` into a list of polynomial expressions.
The generators for the polynomial ring are specified via ``gens``. The rank
of the module is guessed, or passed via ``n``. The ground field is assumed
to be ``K``.
Examples
========
>>> from sympy.polys.distributedmodules import sdm_to_vector
>>> from sympy.abc import x, y, z
>>> from sympy.polys import QQ, lex
>>> f = [((1, 0, 0, 1), QQ(2)), ((0, 2, 0, 0), QQ(1)), ((0, 0, 2, 0), QQ(1))]
>>> sdm_to_vector(f, [x, y, z], QQ)
[x**2 + y**2, 2*z]
"""
dic = sdm_to_dict(f)
dics = {}
for k, v in dic.items():
dics.setdefault(k[0], []).append((k[1:], v))
n = n or len(dics)
res = []
for k in range(n):
if k in dics:
res.append(Poly(dict(dics[k]), gens=gens, domain=K).as_expr())
else:
res.append(S.Zero)
return res
# Algorithms.
def sdm_spoly(f, g, O, K, phantom=None):
"""
Compute the generalized s-polynomial of ``f`` and ``g``.
The ground field is assumed to be ``K``, and monomials ordered according to
``O``.
This is invalid if either of ``f`` or ``g`` is zero.
If the leading terms of `f` and `g` involve different basis elements of
`F`, their s-poly is defined to be zero. Otherwise it is a certain linear
combination of `f` and `g` in which the leading terms cancel.
See [SCA, defn 2.3.6] for details.
If ``phantom`` is not ``None``, it should be a pair of module elements on
    which to perform the same operation(s) as on ``f`` and ``g``. In this
    case both results are returned.
Examples
========
>>> from sympy.polys.distributedmodules import sdm_spoly
>>> from sympy.polys import QQ, lex
>>> f = [((2, 1, 1), QQ(1)), ((1, 0, 1), QQ(1))]
>>> g = [((2, 3, 0), QQ(1))]
>>> h = [((1, 2, 3), QQ(1))]
>>> sdm_spoly(f, h, lex, QQ)
[]
>>> sdm_spoly(f, g, lex, QQ)
[((1, 2, 1), 1)]
"""
if not f or not g:
return sdm_zero()
LM1 = sdm_LM(f)
LM2 = sdm_LM(g)
if LM1[0] != LM2[0]:
return sdm_zero()
LM1 = LM1[1:]
LM2 = LM2[1:]
lcm = monomial_lcm(LM1, LM2)
m1 = monomial_div(lcm, LM1)
m2 = monomial_div(lcm, LM2)
c = K.quo(-sdm_LC(f, K), sdm_LC(g, K))
r1 = sdm_add(sdm_mul_term(f, (m1, K.one), O, K),
sdm_mul_term(g, (m2, c), O, K), O, K)
if phantom is None:
return r1
r2 = sdm_add(sdm_mul_term(phantom[0], (m1, K.one), O, K),
sdm_mul_term(phantom[1], (m2, c), O, K), O, K)
return r1, r2
def sdm_ecart(f):
"""
Compute the ecart of ``f``.
This is defined to be the difference of the total degree of `f` and the
total degree of the leading monomial of `f` [SCA, defn 2.3.7].
Invalid if f is zero.
Examples
========
>>> from sympy.polys.distributedmodules import sdm_ecart
>>> sdm_ecart([((1, 2, 3), 1), ((1, 0, 1), 1)])
0
>>> sdm_ecart([((2, 2, 1), 1), ((1, 5, 1), 1)])
3
"""
return sdm_deg(f) - sdm_monomial_deg(sdm_LM(f))
def sdm_nf_mora(f, G, O, K, phantom=None):
r"""
Compute a weak normal form of ``f`` with respect to ``G`` and order ``O``.
The ground field is assumed to be ``K``, and monomials ordered according to
``O``.
Weak normal forms are defined in [SCA, defn 2.3.3]. They are not unique.
This function deterministically computes a weak normal form, depending on
the order of `G`.
The most important property of a weak normal form is the following: if
`R` is the ring associated with the monomial ordering (if the ordering is
global, we just have `R = K[x_1, \ldots, x_n]`, otherwise it is a certain
localization thereof), `I` any ideal of `R` and `G` a standard basis for
`I`, then for any `f \in R`, we have `f \in I` if and only if
`NF(f | G) = 0`.
This is the generalized Mora algorithm for computing weak normal forms with
respect to arbitrary monomial orders [SCA, algorithm 2.3.9].
If ``phantom`` is not ``None``, it should be a pair of "phantom" arguments
on which to perform the same computations as on ``f``, ``G``, both results
are then returned.
"""
from itertools import repeat
h = f
T = list(G)
if phantom is not None:
# "phantom" variables with suffix p
hp = phantom[0]
Tp = list(phantom[1])
phantom = True
else:
Tp = repeat([])
phantom = False
while h:
# TODO better data structure!!!
Th = [(g, sdm_ecart(g), gp) for g, gp in zip(T, Tp)
if sdm_monomial_divides(sdm_LM(g), sdm_LM(h))]
if not Th:
break
g, _, gp = min(Th, key=lambda x: x[1])
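        # reduce by a divisor of minimal ecart; if its ecart exceeds that of
        # h, first add h itself to T (Mora's trick, which ensures termination
        # for local orders as well)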
if sdm_ecart(g) > sdm_ecart(h):
T.append(h)
if phantom:
Tp.append(hp)
if phantom:
h, hp = sdm_spoly(h, g, O, K, phantom=(hp, gp))
else:
h = sdm_spoly(h, g, O, K)
if phantom:
return h, hp
return h
def sdm_nf_buchberger(f, G, O, K, phantom=None):
r"""
Compute a weak normal form of ``f`` with respect to ``G`` and order ``O``.
The ground field is assumed to be ``K``, and monomials ordered according to
``O``.
This is the standard Buchberger algorithm for computing weak normal forms with
respect to *global* monomial orders [SCA, algorithm 1.6.10].
If ``phantom`` is not ``None``, it should be a pair of "phantom" arguments
on which to perform the same computations as on ``f``, ``G``, both results
are then returned.
"""
from itertools import repeat
h = f
T = list(G)
if phantom is not None:
# "phantom" variables with suffix p
hp = phantom[0]
Tp = list(phantom[1])
phantom = True
else:
Tp = repeat([])
phantom = False
while h:
try:
g, gp = next((g, gp) for g, gp in zip(T, Tp)
if sdm_monomial_divides(sdm_LM(g), sdm_LM(h)))
except StopIteration:
break
if phantom:
h, hp = sdm_spoly(h, g, O, K, phantom=(hp, gp))
else:
h = sdm_spoly(h, g, O, K)
if phantom:
return h, hp
return h
def sdm_nf_buchberger_reduced(f, G, O, K):
r"""
Compute a reduced normal form of ``f`` with respect to ``G`` and order ``O``.
The ground field is assumed to be ``K``, and monomials ordered according to
``O``.
In contrast to weak normal forms, reduced normal forms *are* unique, but
their computation is more expensive.
This is the standard Buchberger algorithm for computing reduced normal forms
with respect to *global* monomial orders [SCA, algorithm 1.6.11].
    The ``phantom`` option is not supported, so this normal form cannot be
    used as a normal form for the "extended" Groebner algorithm.
"""
h = sdm_zero()
g = f
while g:
g = sdm_nf_buchberger(g, G, O, K)
if g:
h = sdm_add(h, [sdm_LT(g)], O, K)
g = g[1:]
return h
def sdm_groebner(G, NF, O, K, extended=False):
"""
Compute a minimal standard basis of ``G`` with respect to order ``O``.
The algorithm uses a normal form ``NF``, for example ``sdm_nf_mora``.
The ground field is assumed to be ``K``, and monomials ordered according
to ``O``.
Let `N` denote the submodule generated by elements of `G`. A standard
basis for `N` is a subset `S` of `N`, such that `in(S) = in(N)`, where for
any subset `X` of `F`, `in(X)` denotes the submodule generated by the
initial forms of elements of `X`. [SCA, defn 2.3.2]
    A standard basis is called minimal if no proper subset of it is a standard
    basis.
One may show that standard bases are always generating sets.
Minimal standard bases are not unique. This algorithm computes a
deterministic result, depending on the particular order of `G`.
If ``extended=True``, also compute the transition matrix from the initial
generators to the groebner basis. That is, return a list of coefficient
vectors, expressing the elements of the groebner basis in terms of the
elements of ``G``.
    This function implements the "sugar" strategy, see
    Giovini et al.: "One sugar cube, please", or: Selection strategies in
    the Buchberger algorithm.
"""
# The critical pair set.
# A critical pair is stored as (i, j, s, t) where (i, j) defines the pair
# (by indexing S), s is the sugar of the pair, and t is the lcm of their
# leading monomials.
P = []
# The eventual standard basis.
S = []
Sugars = []
def Ssugar(i, j):
"""Compute the sugar of the S-poly corresponding to (i, j)."""
LMi = sdm_LM(S[i])
LMj = sdm_LM(S[j])
return max(Sugars[i] - sdm_monomial_deg(LMi),
Sugars[j] - sdm_monomial_deg(LMj)) \
+ sdm_monomial_deg(sdm_monomial_lcm(LMi, LMj))
ourkey = lambda p: (p[2], O(p[3]), p[1])
def update(f, sugar, P):
"""Add f with sugar ``sugar`` to S, update P."""
if not f:
return P
k = len(S)
S.append(f)
Sugars.append(sugar)
LMf = sdm_LM(f)
def removethis(pair):
i, j, s, t = pair
if LMf[0] != t[0]:
return False
tik = sdm_monomial_lcm(LMf, sdm_LM(S[i]))
tjk = sdm_monomial_lcm(LMf, sdm_LM(S[j]))
return tik != t and tjk != t and sdm_monomial_divides(tik, t) and \
sdm_monomial_divides(tjk, t)
# apply the chain criterion
P = [p for p in P if not removethis(p)]
# new-pair set
N = [(i, k, Ssugar(i, k), sdm_monomial_lcm(LMf, sdm_LM(S[i])))
for i in range(k) if LMf[0] == sdm_LM(S[i])[0]]
# TODO apply the product criterion?
N.sort(key=ourkey)
remove = set()
for i, p in enumerate(N):
for j in range(i + 1, len(N)):
if sdm_monomial_divides(p[3], N[j][3]):
remove.add(j)
# TODO mergesort?
P.extend(reversed([p for i, p in enumerate(N) if not i in remove]))
P.sort(key=ourkey, reverse=True)
# NOTE reverse-sort, because we want to pop from the end
return P
# Figure out the number of generators in the ground ring.
try:
        # NOTE: we look for the first non-zero vector, take its first monomial;
        # the number of generators in the ring is one less than its length
        # (since the zeroth entry is for the module generators)
numgens = len(next(x[0] for x in G if x)[0]) - 1
except StopIteration:
# No non-zero elements in G ...
if extended:
return [], []
return []
# This list will store expressions of the elements of S in terms of the
# initial generators
coefficients = []
# First add all the elements of G to S
for i, f in enumerate(G):
P = update(f, sdm_deg(f), P)
if extended and f:
coefficients.append(sdm_from_dict({(i,) + (0,)*numgens: K(1)}, O))
# Now carry out the buchberger algorithm.
while P:
i, j, s, t = P.pop()
f, g = S[i], S[j]
if extended:
sp, coeff = sdm_spoly(f, g, O, K,
phantom=(coefficients[i], coefficients[j]))
h, hcoeff = NF(sp, S, O, K, phantom=(coeff, coefficients))
if h:
coefficients.append(hcoeff)
else:
h = NF(sdm_spoly(f, g, O, K), S, O, K)
P = update(h, Ssugar(i, j), P)
# Finally interreduce the standard basis.
# (TODO again, better data structures)
S = set((tuple(f), i) for i, f in enumerate(S))
for (a, ai), (b, bi) in permutations(S, 2):
A = sdm_LM(a)
B = sdm_LM(b)
if sdm_monomial_divides(A, B) and (b, bi) in S and (a, ai) in S:
S.remove((b, bi))
L = sorted(((list(f), i) for f, i in S), key=lambda p: O(sdm_LM(p[0])),
reverse=True)
res = [x[0] for x in L]
if extended:
return res, [coefficients[i] for _, i in L]
return res
|
f4454d95f6769f3ec9e4f9d209d4f35a0a792ff7682063caf6a229c7ec8817be | """ Generic Unification algorithm for expression trees with lists of children
This implementation is a direct translation of
Artificial Intelligence: A Modern Approach by Stuart Russell and Peter Norvig
Second edition, section 9.2, page 276
It is modified in the following ways:
1. We allow associative and commutative Compound expressions. This results in
combinatorial blowup.
2. We explore the tree lazily.
3. We provide generic interfaces to symbolic algebra libraries in Python.
A more traditional version can be found here
http://aima.cs.berkeley.edu/python/logic.html
"""
from __future__ import print_function, division
from sympy.utilities.iterables import kbins
class Compound(object):
""" A little class to represent an interior node in the tree
This is analogous to SymPy.Basic for non-Atoms
"""
def __init__(self, op, args):
self.op = op
self.args = args
def __eq__(self, other):
return (type(self) == type(other) and self.op == other.op and
self.args == other.args)
def __hash__(self):
return hash((type(self), self.op, self.args))
def __str__(self):
return "%s[%s]" % (str(self.op), ', '.join(map(str, self.args)))
class Variable(object):
""" A Wild token """
def __init__(self, arg):
self.arg = arg
def __eq__(self, other):
return type(self) == type(other) and self.arg == other.arg
def __hash__(self):
return hash((type(self), self.arg))
def __str__(self):
return "Variable(%s)" % str(self.arg)
class CondVariable(object):
""" A wild token that matches conditionally
arg - a wild token
valid - an additional constraining function on a match
"""
def __init__(self, arg, valid):
self.arg = arg
self.valid = valid
def __eq__(self, other):
return (type(self) == type(other) and
self.arg == other.arg and
self.valid == other.valid)
def __hash__(self):
return hash((type(self), self.arg, self.valid))
def __str__(self):
return "CondVariable(%s)" % str(self.arg)
def unify(x, y, s=None, **fns):
""" Unify two expressions
Parameters
==========
x, y - expression trees containing leaves, Compounds and Variables
s - a mapping of variables to subtrees
Returns
=======
lazy sequence of mappings {Variable: subtree}
Examples
========
>>> from sympy.unify.core import unify, Compound, Variable
>>> expr = Compound("Add", ("x", "y"))
>>> pattern = Compound("Add", ("x", Variable("a")))
>>> next(unify(expr, pattern, {}))
{Variable(a): 'y'}
"""
s = s or {}
if x == y:
yield s
elif isinstance(x, (Variable, CondVariable)):
for match in unify_var(x, y, s, **fns):
yield match
elif isinstance(y, (Variable, CondVariable)):
for match in unify_var(y, x, s, **fns):
yield match
elif isinstance(x, Compound) and isinstance(y, Compound):
is_commutative = fns.get('is_commutative', lambda x: False)
is_associative = fns.get('is_associative', lambda x: False)
for sop in unify(x.op, y.op, s, **fns):
if is_associative(x) and is_associative(y):
a, b = (x, y) if len(x.args) < len(y.args) else (y, x)
if is_commutative(x) and is_commutative(y):
combs = allcombinations(a.args, b.args, 'commutative')
else:
combs = allcombinations(a.args, b.args, 'associative')
for aaargs, bbargs in combs:
aa = [unpack(Compound(a.op, arg)) for arg in aaargs]
bb = [unpack(Compound(b.op, arg)) for arg in bbargs]
for match in unify(aa, bb, sop, **fns):
yield match
elif len(x.args) == len(y.args):
for match in unify(x.args, y.args, sop, **fns):
yield match
elif is_args(x) and is_args(y) and len(x) == len(y):
if len(x) == 0:
yield s
else:
for shead in unify(x[0], y[0], s, **fns):
for match in unify(x[1:], y[1:], shead, **fns):
yield match
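# A hedged usage sketch (illustrative only, not part of the original module):
# when both Compounds are flagged associative and commutative, unify explores
# argument regroupings and orderings, so the swapped arguments below still match.
def _commutative_unify_example():
    w = Variable('w')
    pattern = Compound('Mul', (w, 'y'))
    expr = Compound('Mul', ('y', 'x'))
    is_mul = lambda e: e.op == 'Mul'
    # expected: a single substitution mapping w -> 'x'
    return list(unify(expr, pattern,
                      is_commutative=is_mul, is_associative=is_mul))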
def unify_var(var, x, s, **fns):
if var in s:
for match in unify(s[var], x, s, **fns):
yield match
elif occur_check(var, x):
pass
elif isinstance(var, CondVariable) and var.valid(x):
yield assoc(s, var, x)
elif isinstance(var, Variable):
yield assoc(s, var, x)
def occur_check(var, x):
""" var occurs in subtree owned by x? """
if var == x:
return True
elif isinstance(x, Compound):
return occur_check(var, x.args)
elif is_args(x):
if any(occur_check(var, xi) for xi in x): return True
return False
def assoc(d, key, val):
""" Return copy of d with key associated to val """
d = d.copy()
d[key] = val
return d
def is_args(x):
""" Is x a traditional iterable? """
return type(x) in (tuple, list, set)
def unpack(x):
if isinstance(x, Compound) and len(x.args) == 1:
return x.args[0]
else:
return x
def allcombinations(A, B, ordered):
"""
Restructure A and B to have the same number of elements
ordered must be either 'commutative' or 'associative'
A and B can be rearranged so that the larger of the two lists is
reorganized into smaller sublists.
Examples
========
>>> from sympy.unify.core import allcombinations
>>> for x in allcombinations((1, 2, 3), (5, 6), 'associative'): print(x)
(((1,), (2, 3)), ((5,), (6,)))
(((1, 2), (3,)), ((5,), (6,)))
>>> for x in allcombinations((1, 2, 3), (5, 6), 'commutative'): print(x)
(((1,), (2, 3)), ((5,), (6,)))
(((1, 2), (3,)), ((5,), (6,)))
(((1,), (3, 2)), ((5,), (6,)))
(((1, 3), (2,)), ((5,), (6,)))
(((2,), (1, 3)), ((5,), (6,)))
(((2, 1), (3,)), ((5,), (6,)))
(((2,), (3, 1)), ((5,), (6,)))
(((2, 3), (1,)), ((5,), (6,)))
(((3,), (1, 2)), ((5,), (6,)))
(((3, 1), (2,)), ((5,), (6,)))
(((3,), (2, 1)), ((5,), (6,)))
(((3, 2), (1,)), ((5,), (6,)))
"""
if ordered == "commutative":
ordered = 11
if ordered == "associative":
ordered = None
sm, bg = (A, B) if len(A) < len(B) else (B, A)
for part in kbins(list(range(len(bg))), len(sm), ordered=ordered):
if bg == B:
yield tuple((a,) for a in A), partition(B, part)
else:
yield partition(A, part), tuple((b,) for b in B)
def partition(it, part):
""" Partition a tuple/list into pieces defined by indices
Examples
========
>>> from sympy.unify.core import partition
>>> partition((10, 20, 30, 40), [[0, 1, 2], [3]])
((10, 20, 30), (40,))
"""
return type(it)([index(it, ind) for ind in part])
def index(it, ind):
""" Fancy indexing into an indexable iterable (tuple, list)
Examples
========
>>> from sympy.unify.core import index
>>> index([10, 20, 30], (1, 2, 0))
[20, 30, 10]
"""
return type(it)([it[i] for i in ind])
|
0f8a1382ff9daaac01a6bcfd22950d9654e19d9211a5b87fdc187b6576c7c889 | """py.test hacks to support XFAIL/XPASS"""
from __future__ import print_function, division
import sys
import functools
import os
import contextlib
import warnings
from sympy.core.compatibility import get_function_name
from sympy.utilities.exceptions import SymPyDeprecationWarning
ON_TRAVIS = os.getenv('TRAVIS_BUILD_NUMBER', None)
try:
import pytest
USE_PYTEST = getattr(sys, '_running_pytest', False)
except ImportError:
USE_PYTEST = False
if USE_PYTEST:
raises = pytest.raises
warns = pytest.warns
skip = pytest.skip
XFAIL = pytest.mark.xfail
SKIP = pytest.mark.skip
slow = pytest.mark.slow
nocache_fail = pytest.mark.nocache_fail
else:
# Not using pytest so define the things that would have been imported from
# there.
def raises(expectedException, code=None):
"""
Tests that ``code`` raises the exception ``expectedException``.
``code`` may be a callable, such as a lambda expression or function
name.
If ``code`` is not given or None, ``raises`` will return a context
manager for use in ``with`` statements; the code to execute then
comes from the scope of the ``with``.
``raises()`` does nothing if the callable raises the expected exception,
otherwise it raises an AssertionError.
Examples
========
>>> from sympy.testing.pytest import raises
>>> raises(ZeroDivisionError, lambda: 1/0)
>>> raises(ZeroDivisionError, lambda: 1/2)
Traceback (most recent call last):
...
Failed: DID NOT RAISE
>>> with raises(ZeroDivisionError):
... n = 1/0
>>> with raises(ZeroDivisionError):
... n = 1/2
Traceback (most recent call last):
...
Failed: DID NOT RAISE
Note that you cannot test multiple statements via
``with raises``:
>>> with raises(ZeroDivisionError):
... n = 1/0 # will execute and raise, aborting the ``with``
... n = 9999/0 # never executed
This is just what ``with`` is supposed to do: abort the
contained statement sequence at the first exception and let
the context manager deal with the exception.
To test multiple statements, you'll need a separate ``with``
for each:
>>> with raises(ZeroDivisionError):
... n = 1/0 # will execute and raise
>>> with raises(ZeroDivisionError):
... n = 9999/0 # will also execute and raise
"""
if code is None:
return RaisesContext(expectedException)
elif callable(code):
try:
code()
except expectedException:
return
raise Failed("DID NOT RAISE")
elif isinstance(code, str):
raise TypeError(
'\'raises(xxx, "code")\' has been phased out; '
'change \'raises(xxx, "expression")\' '
'to \'raises(xxx, lambda: expression)\', '
'\'raises(xxx, "statement")\' '
'to \'with raises(xxx): statement\'')
else:
raise TypeError(
'raises() expects a callable for the 2nd argument.')
class RaisesContext(object):
def __init__(self, expectedException):
self.expectedException = expectedException
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
raise Failed("DID NOT RAISE")
return issubclass(exc_type, self.expectedException)
class XFail(Exception):
pass
class XPass(Exception):
pass
class Skipped(Exception):
pass
class Failed(Exception):
pass
def XFAIL(func):
def wrapper():
try:
func()
except Exception as e:
message = str(e)
if message != "Timeout":
raise XFail(get_function_name(func))
else:
raise Skipped("Timeout")
raise XPass(get_function_name(func))
wrapper = functools.update_wrapper(wrapper, func)
return wrapper
def skip(str):
raise Skipped(str)
def SKIP(reason):
"""Similar to ``skip()``, but this is a decorator. """
def wrapper(func):
def func_wrapper():
raise Skipped(reason)
func_wrapper = functools.update_wrapper(func_wrapper, func)
return func_wrapper
return wrapper
def slow(func):
func._slow = True
def func_wrapper():
func()
func_wrapper = functools.update_wrapper(func_wrapper, func)
func_wrapper.__wrapped__ = func
return func_wrapper
def nocache_fail(func):
"Dummy decorator for marking tests that fail when cache is disabled"
return func
@contextlib.contextmanager
def warns(warningcls, **kwargs):
'''Like raises but tests that warnings are emitted.
>>> from sympy.testing.pytest import warns
>>> import warnings
>>> with warns(UserWarning):
... warnings.warn('deprecated', UserWarning)
>>> with warns(UserWarning):
... pass
Traceback (most recent call last):
...
Failed: DID NOT WARN. No warnings of type UserWarning\
was emitted. The list of emitted warnings is: [].
'''
match = kwargs.pop('match', '')
if kwargs:
raise TypeError('Invalid keyword arguments: %s' % kwargs)
# Absorbs all warnings in warnrec
with warnings.catch_warnings(record=True) as warnrec:
# Hide all warnings but make sure that our warning is emitted
warnings.simplefilter("ignore")
warnings.filterwarnings("always", match, warningcls)
# Now run the test
yield
# Raise if expected warning not found
if not any(issubclass(w.category, warningcls) for w in warnrec):
msg = ('Failed: DID NOT WARN.'
' No warnings of type %s was emitted.'
' The list of emitted warnings is: %s.'
) % (warningcls, [w.message for w in warnrec])
raise Failed(msg)
@contextlib.contextmanager
def warns_deprecated_sympy():
'''Shorthand for ``warns(SymPyDeprecationWarning)``
This is the recommended way to test that ``SymPyDeprecationWarning`` is
emitted for deprecated features in SymPy. To test for other warnings use
``warns``. To suppress warnings without asserting that they are emitted
use ``ignore_warnings``.
>>> from sympy.testing.pytest import warns_deprecated_sympy
>>> from sympy.utilities.exceptions import SymPyDeprecationWarning
>>> import warnings
>>> with warns_deprecated_sympy():
... SymPyDeprecationWarning("Don't use", feature="old thing",
... deprecated_since_version="1.0", issue=123).warn()
>>> with warns_deprecated_sympy():
... pass
Traceback (most recent call last):
...
Failed: DID NOT WARN. No warnings of type \
SymPyDeprecationWarning was emitted. The list of emitted warnings is: [].
'''
with warns(SymPyDeprecationWarning):
yield
@contextlib.contextmanager
def ignore_warnings(warningcls):
'''Context manager to suppress warnings during tests.
This function is useful for suppressing warnings during tests. The warns
function should be used to assert that a warning is raised. The
    ignore_warnings function is useful in situations when the warning is not
guaranteed to be raised (e.g. on importing a module) or if the warning
comes from third-party code.
When the warning is coming (reliably) from SymPy the warns function should
be preferred to ignore_warnings.
>>> from sympy.testing.pytest import ignore_warnings
>>> import warnings
Here's a warning:
>>> with warnings.catch_warnings(): # reset warnings in doctest
... warnings.simplefilter('error')
... warnings.warn('deprecated', UserWarning)
Traceback (most recent call last):
...
UserWarning: deprecated
Let's suppress it with ignore_warnings:
>>> with warnings.catch_warnings(): # reset warnings in doctest
... warnings.simplefilter('error')
... with ignore_warnings(UserWarning):
... warnings.warn('deprecated', UserWarning)
(No warning emitted)
'''
# Absorbs all warnings in warnrec
with warnings.catch_warnings(record=True) as warnrec:
# Make sure our warning doesn't get filtered
warnings.simplefilter("always", warningcls)
# Now run the test
yield
# Reissue any warnings that we aren't testing for
for w in warnrec:
if not issubclass(w.category, warningcls):
warnings.warn_explicit(w.message, w.category, w.filename, w.lineno)
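# A hedged usage sketch (illustrative only, not part of the original module):
# the optional ``match`` argument of warns() restricts which messages count,
# as a regular expression matched against the warning message (this holds for
# both the pytest-backed and the fallback implementation above).
def _warns_match_example():
    with warns(UserWarning, match='deprecated'):
        warnings.warn('deprecated feature', UserWarning)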
|
62174cf3b29fb1c8bcdf8d0798aad7566038cb4cff55e50de5041bc003af11b8 | """This module contains code for running the tests in SymPy.
"""
from .runtests import test, doctest
__all__ = [
'test', 'doctest',
]
|
d0ad15c75ae624414877aaffb37162454d7bf186ff03abede8d78bd07733bc98 | """benchmarking through py.test"""
from __future__ import print_function, division
import py
from py.__.test.item import Item
from py.__.test.terminal.terminal import TerminalSession
from math import ceil as _ceil, floor as _floor, log10
import timeit
from inspect import getsource
from sympy.core.compatibility import exec_
# from IPython.Magic.magic_timeit
units = ["s", "ms", "us", "ns"]
scaling = [1, 1e3, 1e6, 1e9]
unitn = dict((s, i) for i, s in enumerate(units))
precision = 3
# like py.test Directory but scans for 'bench_<smth>.py'
class Directory(py.test.collect.Directory):
def filefilter(self, path):
b = path.purebasename
ext = path.ext
return b.startswith('bench_') and ext == '.py'
# like py.test Module but scans for 'bench_<smth>' and 'timeit_<smth>'
class Module(py.test.collect.Module):
def funcnamefilter(self, name):
return name.startswith('bench_') or name.startswith('timeit_')
# Function level benchmarking driver
class Timer(timeit.Timer):
def __init__(self, stmt, setup='pass', timer=timeit.default_timer, globals=globals()):
# copy of timeit.Timer.__init__
# similarity index 95%
self.timer = timer
stmt = timeit.reindent(stmt, 8)
setup = timeit.reindent(setup, 4)
src = timeit.template % {'stmt': stmt, 'setup': setup}
self.src = src # Save for traceback display
code = compile(src, timeit.dummy_src_name, "exec")
ns = {}
#exec code in globals(), ns -- original timeit code
exec_(code, globals, ns) # -- we use caller-provided globals instead
self.inner = ns["inner"]
class Function(py.__.test.item.Function):
def __init__(self, *args, **kw):
super(Function, self).__init__(*args, **kw)
self.benchtime = None
self.benchtitle = None
def execute(self, target, *args):
# get func source without first 'def func(...):' line
src = getsource(target)
src = '\n'.join( src.splitlines()[1:] )
# extract benchmark title
if target.func_doc is not None:
self.benchtitle = target.func_doc
else:
self.benchtitle = src.splitlines()[0].strip()
# XXX we ignore args
timer = Timer(src, globals=target.func_globals)
if self.name.startswith('timeit_'):
# from IPython.Magic.magic_timeit
repeat = 3
number = 1
for i in range(1, 10):
t = timer.timeit(number)
if t >= 0.2:
number *= (0.2 / t)
number = int(_ceil(number))
break
if t <= 0.02:
# we are not close enough to that 0.2s
number *= 10
else:
                    # since we are very close to being > 0.2s we'd better adjust number
# so that timing time is not too high
number *= (0.2 / t)
number = int(_ceil(number))
break
self.benchtime = min(timer.repeat(repeat, number)) / number
# 'bench_<smth>'
else:
self.benchtime = timer.timeit(1)
class BenchSession(TerminalSession):
def header(self, colitems):
super(BenchSession, self).header(colitems)
def footer(self, colitems):
super(BenchSession, self).footer(colitems)
self.out.write('\n')
self.print_bench_results()
def print_bench_results(self):
self.out.write('==============================\n')
self.out.write(' *** BENCHMARKING RESULTS *** \n')
self.out.write('==============================\n')
self.out.write('\n')
# benchname, time, benchtitle
results = []
for item, outcome in self._memo:
if isinstance(item, Item):
best = item.benchtime
if best is None:
# skipped or failed benchmarks
tstr = '---'
else:
# from IPython.Magic.magic_timeit
if best > 0.0:
order = min(-int(_floor(log10(best)) // 3), 3)
else:
order = 3
tstr = "%.*g %s" % (
precision, best * scaling[order], units[order])
results.append( [item.name, tstr, item.benchtitle] )
# dot/unit align second column
# FIXME simpler? this is crappy -- shame on me...
wm = [0]*len(units)
we = [0]*len(units)
for s in results:
tstr = s[1]
n, u = tstr.split()
# unit n
un = unitn[u]
try:
m, e = n.split('.')
except ValueError:
m, e = n, ''
wm[un] = max(len(m), wm[un])
we[un] = max(len(e), we[un])
for s in results:
tstr = s[1]
n, u = tstr.split()
un = unitn[u]
try:
m, e = n.split('.')
except ValueError:
m, e = n, ''
m = m.rjust(wm[un])
e = e.ljust(we[un])
if e.strip():
n = '.'.join((m, e))
else:
n = ' '.join((m, e))
# let's put the number into the right place
txt = ''
for i in range(len(units)):
if i == un:
txt += n
else:
txt += ' '*(wm[i] + we[i] + 1)
s[1] = '%s %s' % (txt, u)
# align all columns besides the last one
for i in range(2):
w = max(len(s[i]) for s in results)
for s in results:
s[i] = s[i].ljust(w)
# show results
for s in results:
self.out.write('%s | %s | %s\n' % tuple(s))
def main(args=None):
# hook our Directory/Module/Function as defaults
from py.__.test import defaultconftest
defaultconftest.Directory = Directory
defaultconftest.Module = Module
defaultconftest.Function = Function
# hook BenchSession as py.test session
config = py.test.config
config._getsessionclass = lambda: BenchSession
py.test.cmdline.main(args)
|
c8a6b48fbb99200698d6ef96a135b8c04b30b45d970efd0ec02ad71834e13310 | import re
import fnmatch
# XXX Python 2 unicode import test.
# May remove after deprecating python 2.7.
message_unicode_A = \
"File contains a unicode character : %s, line %s. " \
"But with no encoding header. " \
"See https://www.python.org/dev/peps/pep-0263/ " \
"and add '# coding=utf-8'"
message_unicode_B = \
"File contains a unicode character : %s, line %s. " \
"But not in the whitelist. " \
"Add the file to the whitelist in " + __file__
message_unicode_C = \
"File contains a unicode character : %s, line %s. " \
"And is in the whitelist, but without the encoding header. " \
"See https://www.python.org/dev/peps/pep-0263/ " \
"and add '# coding=utf-8'."
message_unicode_D = \
"File does not contain a unicode character : %s." \
"but is in the whitelist. " \
"Remove the file from the whitelist in " + __file__
message_unicode_E = \
"File does not contain a unicode character : %s." \
"but contains the header '# coding=utf-8' or equivalent." \
"Remove the header."
encoding_header_re = re.compile(
r'^[ \t\f]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)')
# Whitelist pattern for files which can have unicode.
unicode_whitelist = [
# Author names can include non-ASCII characters
r'*/bin/authors_update.py',
# These files have functions and test functions for unicode input and
# output.
r'*/sympy/testing/tests/test_code_quality.py',
r'*/sympy/physics/vector/tests/test_printing.py',
r'*/physics/quantum/tests/test_printing.py',
r'*/sympy/vector/tests/test_printing.py',
r'*/sympy/parsing/tests/test_sympy_parser.py',
r'*/sympy/printing/pretty/tests/test_pretty.py',
r'*/sympy/printing/tests/test_preview.py',
r'*/liealgebras/type_g.py',
r'*/liealgebras/weyl_group.py',
r'*/liealgebras/tests/test_type_G.py',
# wigner.py and polarization.py have unicode doctests. These probably
# don't need to be there but some of the examples that are there are
# pretty ugly without use_unicode (matrices need to be wrapped across
# multiple lines etc)
r'*/sympy/physics/wigner.py',
r'*/sympy/physics/optics/polarization.py',
]
unicode_strict_whitelist = [
r'*/sympy/parsing/latex/_antlr/__init__.py',
]
def test_this_file_encoding(
fname, test_file,
unicode_whitelist=unicode_whitelist,
unicode_strict_whitelist=unicode_strict_whitelist):
"""Test helper function for python 2 importability test
This test checks whether the file has
# coding=utf-8
or
# -*- coding: utf-8 -*-
line if there is a unicode character in the code
    The test may have to operate in a filewise manner, so it has been moved
    to a separate process.
May remove after deprecating python 2.7.
"""
has_coding_utf8 = False
has_unicode = False
is_in_whitelist = False
is_in_strict_whitelist = False
for patt in unicode_whitelist:
if fnmatch.fnmatch(fname, patt):
is_in_whitelist = True
break
for patt in unicode_strict_whitelist:
if fnmatch.fnmatch(fname, patt):
is_in_strict_whitelist = True
is_in_whitelist = True
break
if is_in_whitelist:
for idx, line in enumerate(test_file):
if idx in (0, 1):
match = encoding_header_re.match(line)
if match and match.group(1).lower() == 'utf-8':
has_coding_utf8 = True
try:
line.encode(encoding='ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
has_unicode = True
if has_coding_utf8 is False:
assert False, \
message_unicode_C % (fname, idx + 1)
if not has_unicode and not is_in_strict_whitelist:
assert False, message_unicode_D % fname
else:
for idx, line in enumerate(test_file):
if idx in (0, 1):
match = encoding_header_re.match(line)
if match and match.group(1).lower() == 'utf-8':
has_coding_utf8 = True
try:
line.encode(encoding='ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
has_unicode = True
if has_coding_utf8:
assert False, \
message_unicode_B % (fname, idx + 1)
else:
assert False, \
message_unicode_A % (fname, idx + 1)
if not has_unicode and has_coding_utf8:
assert False, \
message_unicode_E % fname
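# A hedged usage sketch (illustrative only, not part of the original module):
# ``test_file`` is any iterable of source lines, so an open file object works.
# The check raises an AssertionError with one of the messages defined above.
def _check_one_file(path):
    with open(path) as fobj:
        test_this_file_encoding(path, fobj)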
|
a8a311e8300fa4d626ee555330d7b150b0a9ce92dfe24ec6efe5086719988a69 | """
This is our testing framework.
Goals:
* it should be compatible with py.test and operate very similarly
(or identically)
* doesn't require any external dependencies
* preferably all the functionality should be in this file only
* no magic, just import the test file and execute the test functions, that's it
* portable
"""
from __future__ import print_function, division
import os
import sys
import platform
import inspect
import traceback
import pdb
import re
import linecache
import time
from fnmatch import fnmatch
from timeit import default_timer as clock
import doctest as pdoctest # avoid clashing with our doctest() function
from doctest import DocTestFinder, DocTestRunner
import random
import subprocess
import signal
import stat
import tempfile
from sympy.core.cache import clear_cache
from sympy.core.compatibility import (exec_, PY3, unwrap,
unicode)
from sympy.utilities.misc import find_executable
from sympy.external import import_module
from sympy.utilities.exceptions import SymPyDeprecationWarning
IS_WINDOWS = (os.name == 'nt')
ON_TRAVIS = os.getenv('TRAVIS_BUILD_NUMBER', None)
# empirically generated list of the proportion of time spent running
# an even split of tests. This should periodically be regenerated.
# A list of [.6, .1, .3] would mean that if the tests are evenly split
# into '1/3', '2/3', '3/3', the first split would take 60% of the time,
# the second 10% and the third 30%. These lists are normalized to sum
# to 1, so [60, 10, 30] has the same behavior as [6, 1, 3] or [.6, .1, .3].
#
# This list can be generated with the code:
# from time import time
# import sympy
# import os
# os.environ["TRAVIS_BUILD_NUMBER"] = '2' # Mock travis to get more correct densities
# delays, num_splits = [], 30
# for i in range(1, num_splits + 1):
# tic = time()
# sympy.test(split='{}/{}'.format(i, num_splits), time_balance=False) # Add slow=True for slow tests
# delays.append(time() - tic)
# tot = sum(delays)
# print([round(x / tot, 4) for x in delays])
SPLIT_DENSITY = [0.0185, 0.0047, 0.0155, 0.02, 0.0311, 0.0098, 0.0045, 0.0102, 0.0127, 0.0532, 0.0171, 0.097, 0.0906, 0.0007, 0.0086, 0.0013, 0.0143, 0.0068, 0.0252, 0.0128, 0.0043, 0.0043, 0.0118, 0.016, 0.0073, 0.0476, 0.0042, 0.0102, 0.012, 0.002, 0.0019, 0.0409, 0.054, 0.0237, 0.1236, 0.0973, 0.0032, 0.0047, 0.0081, 0.0685]
SPLIT_DENSITY_SLOW = [0.0086, 0.0004, 0.0568, 0.0003, 0.0032, 0.0005, 0.0004, 0.0013, 0.0016, 0.0648, 0.0198, 0.1285, 0.098, 0.0005, 0.0064, 0.0003, 0.0004, 0.0026, 0.0007, 0.0051, 0.0089, 0.0024, 0.0033, 0.0057, 0.0005, 0.0003, 0.001, 0.0045, 0.0091, 0.0006, 0.0005, 0.0321, 0.0059, 0.1105, 0.216, 0.1489, 0.0004, 0.0003, 0.0006, 0.0483]
class Skipped(Exception):
pass
class TimeOutError(Exception):
pass
class DependencyError(Exception):
pass
# add more flags ??
future_flags = division.compiler_flag
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning of
every non-blank line in ``s``, and return the result.
If the string ``s`` is Unicode, it is encoded using the stdout
encoding and the ``backslashreplace`` error handler.
"""
# After a 2to3 run the below code is bogus, so wrap it with a version check
if not PY3:
if isinstance(s, unicode):
s = s.encode(pdoctest._encoding, 'backslashreplace')
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
pdoctest._indent = _indent # type: ignore
# override reporter to maintain compatibility with Windows and Python 3
def _report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
s = self._checker.output_difference(example, got, self.optionflags)
s = s.encode('raw_unicode_escape').decode('utf8', 'ignore')
out(self._failure_header(test, example) + s)
if PY3 and IS_WINDOWS:
DocTestRunner.report_failure = _report_failure # type: ignore
def convert_to_native_paths(lst):
"""
Converts a list of '/' separated paths into a list of
native (os.sep separated) paths and converts to lowercase
if the system is case insensitive.
"""
newlst = []
for i, rv in enumerate(lst):
rv = os.path.join(*rv.split("/"))
# on windows the slash after the colon is dropped
if sys.platform == "win32":
pos = rv.find(':')
if pos != -1:
if rv[pos + 1] != '\\':
rv = rv[:pos + 1] + '\\' + rv[pos + 1:]
newlst.append(os.path.normcase(rv))
return newlst
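# A hedged sketch (illustrative only, not part of the original module): the
# forward-slash form becomes a native path, e.g. "sympy/core/tests" turns into
# "sympy\\core\\tests" on Windows, where it is also lower-cased by normcase.
def _native_paths_example():
    return convert_to_native_paths(["sympy/core/tests/test_basic.py"])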
def get_sympy_dir():
"""
    Returns the root sympy directory, normalized with ``os.path.normcase`` so
    that path comparisons work on case-insensitive file systems.
"""
this_file = os.path.abspath(__file__)
sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..")
sympy_dir = os.path.normpath(sympy_dir)
return os.path.normcase(sympy_dir)
def setup_pprint():
from sympy import pprint_use_unicode, init_printing
import sympy.interactive.printing as interactive_printing
# force pprint to be in ascii mode in doctests
use_unicode_prev = pprint_use_unicode(False)
# hook our nice, hash-stable strprinter
init_printing(pretty_print=False)
# Prevent init_printing() in doctests from affecting other doctests
interactive_printing.NO_GLOBAL = True
return use_unicode_prev
def run_in_subprocess_with_hash_randomization(
function, function_args=(),
function_kwargs=None, command=sys.executable,
module='sympy.testing.runtests', force=False):
"""
Run a function in a Python subprocess with hash randomization enabled.
If hash randomization is not supported by the version of Python given, it
returns False. Otherwise, it returns the exit value of the command. The
function is passed to sys.exit(), so the return value of the function will
be the return value.
The environment variable PYTHONHASHSEED is used to seed Python's hash
randomization. If it is set, this function will return False, because
starting a new subprocess is unnecessary in that case. If it is not set,
one is set at random, and the tests are run. Note that if this
environment variable is set when Python starts, hash randomization is
automatically enabled. To force a subprocess to be created even if
PYTHONHASHSEED is set, pass ``force=True``. This flag will not force a
subprocess in Python versions that do not support hash randomization (see
below), because those versions of Python do not support the ``-R`` flag.
``function`` should be a string name of a function that is importable from
the module ``module``, like "_test". The default for ``module`` is
"sympy.testing.runtests". ``function_args`` and ``function_kwargs``
should be a repr-able tuple and dict, respectively. The default Python
command is sys.executable, which is the currently running Python command.
This function is necessary because the seed for hash randomization must be
set by the environment variable before Python starts. Hence, in order to
use a predetermined seed for tests, we must start Python in a separate
subprocess.
Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
3.1.5, and 3.2.3, and is enabled by default in all Python versions after
and including 3.3.0.
Examples
========
>>> from sympy.testing.runtests import (
... run_in_subprocess_with_hash_randomization)
>>> # run the core tests in verbose mode
>>> run_in_subprocess_with_hash_randomization("_test",
... function_args=("core",),
... function_kwargs={'verbose': True}) # doctest: +SKIP
# Will return 0 if sys.executable supports hash randomization and tests
# pass, 1 if they fail, and False if it does not support hash
# randomization.
"""
cwd = get_sympy_dir()
# Note, we must return False everywhere, not None, as subprocess.call will
# sometimes return None.
# First check if the Python version supports hash randomization
# If it doesn't have this support, it won't recognize the -R flag
p = subprocess.Popen([command, "-RV"], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, cwd=cwd)
p.communicate()
if p.returncode != 0:
return False
hash_seed = os.getenv("PYTHONHASHSEED")
if not hash_seed:
os.environ["PYTHONHASHSEED"] = str(random.randrange(2**32))
else:
if not force:
return False
function_kwargs = function_kwargs or {}
# Now run the command
commandstring = ("import sys; from %s import %s;sys.exit(%s(*%s, **%s))" %
(module, function, function, repr(function_args),
repr(function_kwargs)))
try:
p = subprocess.Popen([command, "-R", "-c", commandstring], cwd=cwd)
p.communicate()
except KeyboardInterrupt:
p.wait()
finally:
# Put the environment variable back, so that it reads correctly for
# the current Python process.
if hash_seed is None:
del os.environ["PYTHONHASHSEED"]
else:
os.environ["PYTHONHASHSEED"] = hash_seed
return p.returncode
def run_all_tests(test_args=(), test_kwargs=None,
doctest_args=(), doctest_kwargs=None,
examples_args=(), examples_kwargs=None):
"""
Run all tests.
Right now, this runs the regular tests (bin/test), the doctests
(bin/doctest), the examples (examples/all.py), and the sage tests (see
sympy/external/tests/test_sage.py).
This is what ``setup.py test`` uses.
You can pass arguments and keyword arguments to the test functions that
support them (for now, test, doctest, and the examples). See the
docstrings of those functions for a description of the available options.
For example, to run the solvers tests with colors turned off:
>>> from sympy.testing.runtests import run_all_tests
>>> run_all_tests(test_args=("solvers",),
... test_kwargs={"colors:False"}) # doctest: +SKIP
"""
cwd = get_sympy_dir()
tests_successful = True
test_kwargs = test_kwargs or {}
doctest_kwargs = doctest_kwargs or {}
examples_kwargs = examples_kwargs or {'quiet': True}
try:
# Regular tests
if not test(*test_args, **test_kwargs):
# some regular test fails, so set the tests_successful
# flag to false and continue running the doctests
tests_successful = False
# Doctests
print()
if not doctest(*doctest_args, **doctest_kwargs):
tests_successful = False
# Examples
print()
sys.path.append("examples") # examples/all.py
from all import run_examples # type: ignore
if not run_examples(*examples_args, **examples_kwargs):
tests_successful = False
# Sage tests
if sys.platform != "win32" and not PY3 and os.path.exists("bin/test"):
# run Sage tests; Sage currently doesn't support Windows or Python 3
# Only run Sage tests if 'bin/test' is present (it is missing from
# our release because everything in the 'bin' directory gets
# installed).
dev_null = open(os.devnull, 'w')
if subprocess.call("sage -v", shell=True, stdout=dev_null,
stderr=dev_null) == 0:
if subprocess.call("sage -python bin/test "
"sympy/external/tests/test_sage.py",
shell=True, cwd=cwd) != 0:
tests_successful = False
if tests_successful:
return
else:
# Return nonzero exit code
sys.exit(1)
except KeyboardInterrupt:
print()
print("DO *NOT* COMMIT!")
sys.exit(1)
def test(*paths, **kwargs):
"""
Run tests in the specified test_*.py files.
Tests in a particular test_*.py file are run if any of the given strings
in ``paths`` matches a part of the test file's path. If ``paths=[]``,
tests in all test_*.py files are run.
Notes:
- If sort=False, tests are run in random order (not default).
- Paths can be entered in native system format or in unix,
forward-slash format.
- Files that are on the blacklist can be tested by providing
their path; they are only excluded if no paths are given.
**Explanation of test results**
====== ===============================================================
Output Meaning
====== ===============================================================
. passed
F failed
X XPassed (expected to fail but passed)
f XFAILed (expected to fail and indeed failed)
s skipped
w slow
T timeout (e.g., when ``--timeout`` is used)
K KeyboardInterrupt (when running the slow tests with ``--slow``,
you can interrupt one of them without killing the test runner)
====== ===============================================================
Colors have no additional meaning and are used just to facilitate
interpreting the output.
Examples
========
>>> import sympy
Run all tests:
>>> sympy.test() # doctest: +SKIP
Run one file:
>>> sympy.test("sympy/core/tests/test_basic.py") # doctest: +SKIP
>>> sympy.test("_basic") # doctest: +SKIP
Run all tests in sympy/functions/ and some particular file:
>>> sympy.test("sympy/core/tests/test_basic.py",
... "sympy/functions") # doctest: +SKIP
Run all tests in sympy/core and sympy/utilities:
>>> sympy.test("/core", "/util") # doctest: +SKIP
Run specific test from a file:
>>> sympy.test("sympy/core/tests/test_basic.py",
... kw="test_equality") # doctest: +SKIP
Run specific test from any file:
>>> sympy.test(kw="subs") # doctest: +SKIP
Run the tests with verbose mode on:
>>> sympy.test(verbose=True) # doctest: +SKIP
Don't sort the test output:
>>> sympy.test(sort=False) # doctest: +SKIP
Turn on post-mortem pdb:
>>> sympy.test(pdb=True) # doctest: +SKIP
Turn off colors:
>>> sympy.test(colors=False) # doctest: +SKIP
Force colors, even when the output is not to a terminal (this is useful,
e.g., if you are piping to ``less -r`` and you still want colors)
    >>> sympy.test(force_colors=True) # doctest: +SKIP
The traceback verboseness can be set to "short" or "no" (default is
"short")
>>> sympy.test(tb='no') # doctest: +SKIP
The ``split`` option can be passed to split the test run into parts. The
split currently only splits the test files, though this may change in the
future. ``split`` should be a string of the form 'a/b', which will run
part ``a`` of ``b``. For instance, to run the first half of the test suite:
>>> sympy.test(split='1/2') # doctest: +SKIP
The ``time_balance`` option can be passed in conjunction with ``split``.
If ``time_balance=True`` (the default for ``sympy.test``), sympy will attempt
to split the tests such that each split takes equal time. This heuristic
for balancing is based on pre-recorded test data.
>>> sympy.test(split='1/2', time_balance=True) # doctest: +SKIP
You can disable running the tests in a separate subprocess using
``subprocess=False``. This is done to support seeding hash randomization,
which is enabled by default in the Python versions where it is supported.
If subprocess=False, hash randomization is enabled/disabled according to
whether it has been enabled or not in the calling Python process.
However, even if it is enabled, the seed cannot be printed unless it is
called from a new Python process.
Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
3.1.5, and 3.2.3, and is enabled by default in all Python versions after
and including 3.3.0.
If hash randomization is not supported ``subprocess=False`` is used
automatically.
>>> sympy.test(subprocess=False) # doctest: +SKIP
To set the hash randomization seed, set the environment variable
``PYTHONHASHSEED`` before running the tests. This can be done from within
Python using
>>> import os
>>> os.environ['PYTHONHASHSEED'] = '42' # doctest: +SKIP
Or from the command line using
$ PYTHONHASHSEED=42 ./bin/test
If the seed is not set, a random seed will be chosen.
Note that to reproduce the same hash values, you must use both the same seed
as well as the same architecture (32-bit vs. 64-bit).
"""
subprocess = kwargs.pop("subprocess", True)
rerun = kwargs.pop("rerun", 0)
# count up from 0, do not print 0
print_counter = lambda i : (print("rerun %d" % (rerun-i))
if rerun-i else None)
if subprocess:
# loop backwards so last i is 0
for i in range(rerun, -1, -1):
print_counter(i)
ret = run_in_subprocess_with_hash_randomization("_test",
function_args=paths, function_kwargs=kwargs)
if ret is False:
break
val = not bool(ret)
# exit on the first failure or if done
if not val or i == 0:
return val
# rerun even if hash randomization is not supported
for i in range(rerun, -1, -1):
print_counter(i)
val = not bool(_test(*paths, **kwargs))
if not val or i == 0:
return val
def _test(*paths, **kwargs):
"""
Internal function that actually runs the tests.
All keyword arguments from ``test()`` are passed to this function except for
``subprocess``.
Returns 0 if tests passed and 1 if they failed. See the docstring of
``test()`` for more information.
"""
verbose = kwargs.get("verbose", False)
tb = kwargs.get("tb", "short")
kw = kwargs.get("kw", None) or ()
# ensure that kw is a tuple
if isinstance(kw, str):
kw = (kw, )
post_mortem = kwargs.get("pdb", False)
colors = kwargs.get("colors", True)
force_colors = kwargs.get("force_colors", False)
sort = kwargs.get("sort", True)
seed = kwargs.get("seed", None)
if seed is None:
seed = random.randrange(100000000)
timeout = kwargs.get("timeout", False)
fail_on_timeout = kwargs.get("fail_on_timeout", False)
if ON_TRAVIS and timeout is False:
# Travis times out if no activity is seen for 10 minutes.
timeout = 595
fail_on_timeout = True
slow = kwargs.get("slow", False)
enhance_asserts = kwargs.get("enhance_asserts", False)
split = kwargs.get('split', None)
time_balance = kwargs.get('time_balance', True)
blacklist = kwargs.get('blacklist', ['sympy/integrals/rubi/rubi_tests/tests'])
if ON_TRAVIS:
# pyglet does not work on Travis
blacklist.extend(['sympy/plotting/pygletplot/tests'])
blacklist = convert_to_native_paths(blacklist)
fast_threshold = kwargs.get('fast_threshold', None)
slow_threshold = kwargs.get('slow_threshold', None)
r = PyTestReporter(verbose=verbose, tb=tb, colors=colors,
force_colors=force_colors, split=split)
t = SymPyTests(r, kw, post_mortem, seed,
fast_threshold=fast_threshold,
slow_threshold=slow_threshold)
# Show deprecation warnings
import warnings
warnings.simplefilter("error", SymPyDeprecationWarning)
warnings.filterwarnings('error', '.*', DeprecationWarning, module='sympy.*')
test_files = t.get_test_files('sympy')
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
paths = convert_to_native_paths(paths)
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
density = None
if time_balance:
if slow:
density = SPLIT_DENSITY_SLOW
else:
density = SPLIT_DENSITY
if split:
matched = split_list(matched, split, density=density)
t._testfiles.extend(matched)
return int(not t.test(sort=sort, timeout=timeout, slow=slow,
enhance_asserts=enhance_asserts, fail_on_timeout=fail_on_timeout))
def doctest(*paths, **kwargs):
r"""
Runs doctests in all \*.py files in the sympy directory which match
any of the given strings in ``paths`` or all tests if paths=[].
Notes:
- Paths can be entered in native system format or in unix,
forward-slash format.
- Files that are on the blacklist can be tested by providing
their path; they are only excluded if no paths are given.
Examples
========
>>> import sympy
Run all tests:
>>> sympy.doctest() # doctest: +SKIP
Run one file:
>>> sympy.doctest("sympy/core/basic.py") # doctest: +SKIP
>>> sympy.doctest("polynomial.rst") # doctest: +SKIP
Run all tests in sympy/functions/ and some particular file:
>>> sympy.doctest("/functions", "basic.py") # doctest: +SKIP
Run any file having polynomial in its name, doc/src/modules/polynomial.rst,
sympy/functions/special/polynomials.py, and sympy/polys/polynomial.py:
>>> sympy.doctest("polynomial") # doctest: +SKIP
The ``split`` option can be passed to split the test run into parts. The
split currently only splits the test files, though this may change in the
future. ``split`` should be a string of the form 'a/b', which will run
part ``a`` of ``b``. Note that the regular doctests and the Sphinx
doctests are split independently. For instance, to run the first half of
the test suite:
>>> sympy.doctest(split='1/2') # doctest: +SKIP
The ``subprocess`` and ``verbose`` options are the same as with the function
``test()``. See the docstring of that function for more information.
"""
subprocess = kwargs.pop("subprocess", True)
rerun = kwargs.pop("rerun", 0)
# count up from 0, do not print 0
print_counter = lambda i : (print("rerun %d" % (rerun-i))
if rerun-i else None)
if subprocess:
# loop backwards so last i is 0
for i in range(rerun, -1, -1):
print_counter(i)
ret = run_in_subprocess_with_hash_randomization("_doctest",
function_args=paths, function_kwargs=kwargs)
if ret is False:
break
val = not bool(ret)
# exit on the first failure or if done
if not val or i == 0:
return val
# rerun even if hash randomization is not supported
for i in range(rerun, -1, -1):
print_counter(i)
val = not bool(_doctest(*paths, **kwargs))
if not val or i == 0:
return val
def _get_doctest_blacklist():
'''Get the default blacklist for the doctests'''
blacklist = []
blacklist.extend([
"doc/src/modules/plotting.rst", # generates live plots
"doc/src/modules/physics/mechanics/autolev_parser.rst",
"sympy/galgebra.py", # no longer part of SymPy
"sympy/this.py", # prints text
"sympy/physics/gaussopt.py", # raises deprecation warning
"sympy/matrices/densearith.py", # raises deprecation warning
"sympy/matrices/densesolve.py", # raises deprecation warning
"sympy/matrices/densetools.py", # raises deprecation warning
"sympy/parsing/autolev/_antlr/autolevlexer.py", # generated code
"sympy/parsing/autolev/_antlr/autolevparser.py", # generated code
"sympy/parsing/autolev/_antlr/autolevlistener.py", # generated code
"sympy/parsing/latex/_antlr/latexlexer.py", # generated code
"sympy/parsing/latex/_antlr/latexparser.py", # generated code
"sympy/integrals/rubi/rubi.py",
"sympy/plotting/pygletplot/__init__.py", # crashes on some systems
"sympy/plotting/pygletplot/plot.py", # crashes on some systems
])
# autolev parser tests
num = 12
for i in range (1, num+1):
blacklist.append("sympy/parsing/autolev/test-examples/ruletest" + str(i) + ".py")
blacklist.extend(["sympy/parsing/autolev/test-examples/pydy-example-repo/mass_spring_damper.py",
"sympy/parsing/autolev/test-examples/pydy-example-repo/chaos_pendulum.py",
"sympy/parsing/autolev/test-examples/pydy-example-repo/double_pendulum.py",
"sympy/parsing/autolev/test-examples/pydy-example-repo/non_min_pendulum.py"])
if import_module('numpy') is None:
blacklist.extend([
"sympy/plotting/experimental_lambdify.py",
"sympy/plotting/plot_implicit.py",
"examples/advanced/autowrap_integrators.py",
"examples/advanced/autowrap_ufuncify.py",
"examples/intermediate/sample.py",
"examples/intermediate/mplot2d.py",
"examples/intermediate/mplot3d.py",
"doc/src/modules/numeric-computation.rst"
])
else:
if import_module('matplotlib') is None:
blacklist.extend([
"examples/intermediate/mplot2d.py",
"examples/intermediate/mplot3d.py"
])
else:
# Use a non-windowed backend, so that the tests work on Travis
import matplotlib
matplotlib.use('Agg')
if ON_TRAVIS or import_module('pyglet') is None:
blacklist.extend(["sympy/plotting/pygletplot"])
if import_module('theano') is None:
blacklist.extend([
"sympy/printing/theanocode.py",
"doc/src/modules/numeric-computation.rst",
])
if import_module('antlr4') is None:
blacklist.extend([
"sympy/parsing/autolev/__init__.py",
"sympy/parsing/latex/_parse_latex_antlr.py",
])
if import_module('lfortran') is None:
#throws ImportError when lfortran not installed
blacklist.extend([
"sympy/parsing/sym_expr.py",
])
# disabled because of doctest failures in asmeurer's bot
blacklist.extend([
"sympy/utilities/autowrap.py",
"examples/advanced/autowrap_integrators.py",
"examples/advanced/autowrap_ufuncify.py"
])
# blacklist these modules until issue 4840 is resolved
blacklist.extend([
"sympy/conftest.py", # Python 2.7 issues
"sympy/testing/benchmarking.py",
])
# These are deprecated stubs to be removed:
blacklist.extend([
"sympy/utilities/benchmarking.py",
"sympy/utilities/tmpfiles.py",
"sympy/utilities/pytest.py",
"sympy/utilities/runtests.py",
"sympy/utilities/quality_unicode.py",
"sympy/utilities/randtest.py",
])
blacklist = convert_to_native_paths(blacklist)
return blacklist
def _doctest(*paths, **kwargs):
"""
Internal function that actually runs the doctests.
All keyword arguments from ``doctest()`` are passed to this function
except for ``subprocess``.
Returns 0 if tests passed and 1 if they failed. See the docstrings of
``doctest()`` and ``test()`` for more information.
"""
from sympy import pprint_use_unicode
normal = kwargs.get("normal", False)
verbose = kwargs.get("verbose", False)
colors = kwargs.get("colors", True)
force_colors = kwargs.get("force_colors", False)
blacklist = kwargs.get("blacklist", [])
split = kwargs.get('split', None)
blacklist.extend(_get_doctest_blacklist())
# Use a non-windowed backend, so that the tests work on Travis
if import_module('matplotlib') is not None:
import matplotlib
matplotlib.use('Agg')
# Disable warnings for external modules
import sympy.external
sympy.external.importtools.WARN_OLD_VERSION = False
sympy.external.importtools.WARN_NOT_INSTALLED = False
# Disable showing up of plots
from sympy.plotting.plot import unset_show
unset_show()
# Show deprecation warnings
import warnings
warnings.simplefilter("error", SymPyDeprecationWarning)
warnings.filterwarnings('error', '.*', DeprecationWarning, module='sympy.*')
r = PyTestReporter(verbose, split=split, colors=colors,\
force_colors=force_colors)
t = SymPyDocTests(r, normal)
test_files = t.get_test_files('sympy')
test_files.extend(t.get_test_files('examples', init_only=False))
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
# take only what was requested...but not blacklisted items
# and allow for partial match anywhere or fnmatch of name
paths = convert_to_native_paths(paths)
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if split:
matched = split_list(matched, split)
t._testfiles.extend(matched)
# run the tests and record the result for this *py portion of the tests
if t._testfiles:
failed = not t.test()
else:
failed = False
# N.B.
# --------------------------------------------------------------------
# Here we test *.rst files at or below doc/src. Code from these must
# be self supporting in terms of imports since there is no importing
# of necessary modules by doctest.testfile. If you try to pass *.py
# files through this they might fail because they will lack the needed
# imports and smarter parsing that can be done with source code.
#
test_files = t.get_test_files('doc/src', '*.rst', init_only=False)
test_files.sort()
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
# Take only what was requested as long as it's not on the blacklist.
# Paths were already made native in *py tests so don't repeat here.
# There's no chance of having a *py file slip through since we
# only have *rst files in test_files.
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if split:
matched = split_list(matched, split)
first_report = True
for rst_file in matched:
if not os.path.isfile(rst_file):
continue
old_displayhook = sys.displayhook
try:
use_unicode_prev = setup_pprint()
out = sympytestfile(
rst_file, module_relative=False, encoding='utf-8',
optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE |
pdoctest.IGNORE_EXCEPTION_DETAIL)
finally:
# make sure we return to the original displayhook in case some
# doctest has changed that
sys.displayhook = old_displayhook
# The NO_GLOBAL flag overrides the no_global flag to init_printing
# if True
import sympy.interactive.printing as interactive_printing
interactive_printing.NO_GLOBAL = False
pprint_use_unicode(use_unicode_prev)
rstfailed, tested = out
if tested:
failed = rstfailed or failed
if first_report:
first_report = False
msg = 'rst doctests start'
if not t._testfiles:
r.start(msg=msg)
else:
r.write_center(msg)
print()
# use as the id, everything past the first 'sympy'
file_id = rst_file[rst_file.find('sympy') + len('sympy') + 1:]
print(file_id, end=" ")
            # get at least the name out so it is known who is being tested
wid = r.terminal_width - len(file_id) - 1 # update width
test_file = '[%s]' % (tested)
report = '[%s]' % (rstfailed or 'OK')
print(''.join(
[test_file, ' '*(wid - len(test_file) - len(report)), report])
)
# the doctests for *py will have printed this message already if there was
# a failure, so now only print it if there was intervening reporting by
# testing the *rst as evidenced by first_report no longer being True.
if not first_report and failed:
print()
print("DO *NOT* COMMIT!")
return int(failed)
sp = re.compile(r'([0-9]+)/([1-9][0-9]*)')
def split_list(l, split, density=None):
"""
Splits a list into part a of b
split should be a string of the form 'a/b'. For instance, '1/3' would give
the split one of three.
If the length of the list is not divisible by the number of splits, the
last split will have more items.
    `density` may be specified as a list. If specified,
    tests will be balanced so that each split has as equal an amount of mass
    as possible according to `density`.
>>> from sympy.testing.runtests import split_list
>>> a = list(range(10))
>>> split_list(a, '1/3')
[0, 1, 2]
>>> split_list(a, '2/3')
[3, 4, 5]
>>> split_list(a, '3/3')
[6, 7, 8, 9]
"""
m = sp.match(split)
if not m:
raise ValueError("split must be a string of the form a/b where a and b are ints")
i, t = map(int, m.groups())
if not density:
return l[(i - 1)*len(l)//t : i*len(l)//t]
# normalize density
tot = sum(density)
density = [x / tot for x in density]
def density_inv(x):
"""Interpolate the inverse to the cumulative
distribution function given by density"""
if x <= 0:
return 0
if x >= sum(density):
return 1
# find the first time the cumulative sum surpasses x
# and linearly interpolate
cumm = 0
for i, d in enumerate(density):
cumm += d
if cumm >= x:
break
frac = (d - (cumm - x)) / d
return (i + frac) / len(density)
lower_frac = density_inv((i - 1) / t)
higher_frac = density_inv(i / t)
return l[int(lower_frac*len(l)) : int(higher_frac*len(l))]
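# A hedged sketch (illustrative only, not part of the original module): with a
# ``density`` list the splits are balanced by estimated runtime rather than by
# item count, so one expensive item can end up in a split of its own.
def _split_list_density_example():
    items = list(range(4))
    weights = [1, 1, 1, 3]   # the last item is assumed three times as costly
    # expected: ([0, 1, 2], [3])
    return (split_list(items, '1/2', density=weights),
            split_list(items, '2/2', density=weights))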
from collections import namedtuple
SymPyTestResults = namedtuple('SymPyTestResults', 'failed attempted')
def sympytestfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False,
parser=pdoctest.DocTestParser(), encoding=None):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg ``module_relative`` specifies how filenames
should be interpreted:
- If ``module_relative`` is True (the default), then ``filename``
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
``package`` argument is specified, then it is relative to that
package. To ensure os-independence, ``filename`` should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If ``module_relative`` is False, then ``filename`` specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg ``name`` gives the name of the test; by default
use the file's basename.
Optional keyword argument ``package`` is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify ``package`` if ``module_relative`` is False.
Optional keyword arg ``globs`` gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg ``extraglobs`` gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg ``verbose`` prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg ``report`` prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg ``optionflags`` or's together module constants,
and defaults to 0. Possible values (see the docs for details):
- DONT_ACCEPT_TRUE_FOR_1
- DONT_ACCEPT_BLANKLINE
- NORMALIZE_WHITESPACE
- ELLIPSIS
- SKIP
- IGNORE_EXCEPTION_DETAIL
- REPORT_UDIFF
- REPORT_CDIFF
- REPORT_NDIFF
- REPORT_ONLY_FIRST_FAILURE
Optional keyword arg ``raise_on_error`` raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg ``parser`` specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Optional keyword arg ``encoding`` specifies an encoding that should
be used to convert the file to unicode.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
if not PY3:
text, filename = pdoctest._load_testfile(
filename, package, module_relative)
if encoding is not None:
text = text.decode(encoding)
else:
text, filename = pdoctest._load_testfile(
filename, package, module_relative, encoding)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if '__name__' not in globs:
globs['__name__'] = '__main__'
if raise_on_error:
runner = pdoctest.DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = SymPyDocTestRunner(verbose=verbose, optionflags=optionflags)
runner._checker = SymPyOutputChecker()
# Read the file, convert it to a test, and run it.
test = parser.get_doctest(text, globs, name, filename, 0)
runner.run(test, compileflags=future_flags)
if report:
runner.summarize()
if pdoctest.master is None:
pdoctest.master = runner
else:
pdoctest.master.merge(runner)
return SymPyTestResults(runner.failures, runner.tries)
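# A hedged sketch (illustrative only, not part of the original module): running
# the doctests of a single *.rst file with the same option flags that
# _doctest() uses for the files under doc/src.
def _sympytestfile_example(rst_path):
    return sympytestfile(rst_path, module_relative=False, encoding='utf-8',
                         optionflags=pdoctest.ELLIPSIS |
                                     pdoctest.NORMALIZE_WHITESPACE |
                                     pdoctest.IGNORE_EXCEPTION_DETAIL)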
class SymPyTests(object):
def __init__(self, reporter, kw="", post_mortem=False,
seed=None, fast_threshold=None, slow_threshold=None):
self._post_mortem = post_mortem
self._kw = kw
self._count = 0
self._root_dir = get_sympy_dir()
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._testfiles = []
self._seed = seed if seed is not None else random.random()
# Defaults in seconds, from human / UX design limits
# http://www.nngroup.com/articles/response-times-3-important-limits/
#
# These defaults are *NOT* set in stone as we are measuring different
# things, so others feel free to come up with a better yardstick :)
if fast_threshold:
self._fast_threshold = float(fast_threshold)
else:
self._fast_threshold = 8
if slow_threshold:
self._slow_threshold = float(slow_threshold)
else:
self._slow_threshold = 10
def test(self, sort=False, timeout=False, slow=False,
enhance_asserts=False, fail_on_timeout=False):
"""
Runs the tests returning True if all tests pass, otherwise False.
If sort=False run tests in random order.
"""
if sort:
self._testfiles.sort()
elif slow:
pass
else:
random.seed(self._seed)
random.shuffle(self._testfiles)
self._reporter.start(self._seed)
for f in self._testfiles:
try:
self.test_file(f, sort, timeout, slow,
enhance_asserts, fail_on_timeout)
except KeyboardInterrupt:
print(" interrupted by user")
self._reporter.finish()
raise
return self._reporter.finish()
def _enhance_asserts(self, source):
from ast import (NodeTransformer, Compare, Name, Store, Load, Tuple,
Assign, BinOp, Str, Mod, Assert, parse, fix_missing_locations)
ops = {"Eq": '==', "NotEq": '!=', "Lt": '<', "LtE": '<=',
"Gt": '>', "GtE": '>=', "Is": 'is', "IsNot": 'is not',
"In": 'in', "NotIn": 'not in'}
class Transform(NodeTransformer):
def visit_Assert(self, stmt):
if isinstance(stmt.test, Compare):
compare = stmt.test
values = [compare.left] + compare.comparators
names = [ "_%s" % i for i, _ in enumerate(values) ]
names_store = [ Name(n, Store()) for n in names ]
names_load = [ Name(n, Load()) for n in names ]
target = Tuple(names_store, Store())
value = Tuple(values, Load())
assign = Assign([target], value)
new_compare = Compare(names_load[0], compare.ops, names_load[1:])
msg_format = "\n%s " + "\n%s ".join([ ops[op.__class__.__name__] for op in compare.ops ]) + "\n%s"
msg = BinOp(Str(msg_format), Mod(), Tuple(names_load, Load()))
test = Assert(new_compare, msg, lineno=stmt.lineno, col_offset=stmt.col_offset)
return [assign, test]
else:
return stmt
tree = parse(source)
new_tree = Transform().visit(tree)
return fix_missing_locations(new_tree)
def test_file(self, filename, sort=True, timeout=False, slow=False,
enhance_asserts=False, fail_on_timeout=False):
reporter = self._reporter
funcs = []
try:
gl = {'__file__': filename}
try:
if PY3:
open_file = lambda: open(filename, encoding="utf8")
else:
open_file = lambda: open(filename)
with open_file() as f:
source = f.read()
if self._kw:
for l in source.splitlines():
if l.lstrip().startswith('def '):
if any(l.find(k) != -1 for k in self._kw):
break
else:
return
if enhance_asserts:
try:
source = self._enhance_asserts(source)
except ImportError:
pass
code = compile(source, filename, "exec", flags=0, dont_inherit=True)
exec_(code, gl)
except (SystemExit, KeyboardInterrupt):
raise
except ImportError:
reporter.import_error(filename, sys.exc_info())
return
except Exception:
reporter.test_exception(sys.exc_info())
clear_cache()
self._count += 1
random.seed(self._seed)
disabled = gl.get("disabled", False)
if not disabled:
# we need to filter only those functions that begin with 'test_'
# We have to be careful about decorated functions. As long as
# the decorator uses functools.wraps, we can detect it.
funcs = []
for f in gl:
if (f.startswith("test_") and (inspect.isfunction(gl[f])
or inspect.ismethod(gl[f]))):
func = gl[f]
# Handle multiple decorators
while hasattr(func, '__wrapped__'):
func = func.__wrapped__
if inspect.getsourcefile(func) == filename:
funcs.append(gl[f])
if slow:
funcs = [f for f in funcs if getattr(f, '_slow', False)]
# Sorting of XFAILed functions isn't fixed yet :-(
funcs.sort(key=lambda x: inspect.getsourcelines(x)[1])
i = 0
while i < len(funcs):
if inspect.isgeneratorfunction(funcs[i]):
# some tests can be generators, that return the actual
# test functions. We unpack it below:
f = funcs.pop(i)
for fg in f():
func = fg[0]
args = fg[1:]
                            # bind the current func/args as defaults so each
                            # generated test keeps its own values
                            fgw = lambda func=func, args=args: func(*args)
funcs.insert(i, fgw)
i += 1
else:
i += 1
# drop functions that are not selected with the keyword expression:
funcs = [x for x in funcs if self.matches(x)]
if not funcs:
return
except Exception:
reporter.entering_filename(filename, len(funcs))
raise
reporter.entering_filename(filename, len(funcs))
if not sort:
random.shuffle(funcs)
for f in funcs:
start = time.time()
reporter.entering_test(f)
try:
if getattr(f, '_slow', False) and not slow:
raise Skipped("Slow")
if timeout:
self._timeout(f, timeout, fail_on_timeout)
else:
random.seed(self._seed)
f()
except KeyboardInterrupt:
if getattr(f, '_slow', False):
reporter.test_skip("KeyboardInterrupt")
else:
raise
except Exception:
if timeout:
signal.alarm(0) # Disable the alarm. It could not be handled before.
t, v, tr = sys.exc_info()
if t is AssertionError:
reporter.test_fail((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
elif t.__name__ == "Skipped":
reporter.test_skip(v)
elif t.__name__ == "XFail":
reporter.test_xfail()
elif t.__name__ == "XPass":
reporter.test_xpass(v)
else:
reporter.test_exception((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
else:
reporter.test_pass()
taken = time.time() - start
if taken > self._slow_threshold:
reporter.slow_test_functions.append((f.__name__, taken))
if getattr(f, '_slow', False) and slow:
if taken < self._fast_threshold:
reporter.fast_test_functions.append((f.__name__, taken))
reporter.leaving_filename()
def _timeout(self, function, timeout, fail_on_timeout):
def callback(x, y):
signal.alarm(0)
if fail_on_timeout:
raise TimeOutError("Timed out after %d seconds" % timeout)
else:
raise Skipped("Timeout")
signal.signal(signal.SIGALRM, callback)
signal.alarm(timeout) # Set an alarm with a given timeout
function()
signal.alarm(0) # Disable the alarm
def matches(self, x):
"""
Does the keyword expression self._kw match "x"? Returns True/False.
Always returns True if self._kw is "".
"""
if not self._kw:
return True
for kw in self._kw:
if x.__name__.find(kw) != -1:
return True
return False
def get_test_files(self, dir, pat='test_*.py'):
"""
Returns the list of test_*.py (default) files at or below directory
``dir`` relative to the sympy home directory.
"""
dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
g = []
for path, folders, files in os.walk(dir):
g.extend([os.path.join(path, f) for f in files if fnmatch(f, pat)])
return sorted([os.path.normcase(gi) for gi in g])
class SymPyDocTests(object):
def __init__(self, reporter, normal):
self._count = 0
self._root_dir = get_sympy_dir()
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._normal = normal
self._testfiles = []
def test(self):
"""
Runs the tests and returns True if all tests pass, otherwise False.
"""
self._reporter.start()
for f in self._testfiles:
try:
self.test_file(f)
except KeyboardInterrupt:
print(" interrupted by user")
self._reporter.finish()
raise
return self._reporter.finish()
def test_file(self, filename):
clear_cache()
from sympy.core.compatibility import StringIO
import sympy.interactive.printing as interactive_printing
from sympy import pprint_use_unicode
rel_name = filename[len(self._root_dir) + 1:]
dirname, file = os.path.split(filename)
module = rel_name.replace(os.sep, '.')[:-3]
if rel_name.startswith("examples"):
            # Examples files do not have __init__.py files,
            # so we have to temporarily extend sys.path to import them
sys.path.insert(0, dirname)
module = file[:-3] # remove ".py"
try:
module = pdoctest._normalize_module(module)
tests = SymPyDocTestFinder().find(module)
except (SystemExit, KeyboardInterrupt):
raise
except ImportError:
self._reporter.import_error(filename, sys.exc_info())
return
finally:
if rel_name.startswith("examples"):
del sys.path[0]
tests = [test for test in tests if len(test.examples) > 0]
# By default tests are sorted by alphabetical order by function name.
# We sort by line number so one can edit the file sequentially from
# bottom to top. However, if there are decorated functions, their line
# numbers will be too large and for now one must just search for these
# by text and function name.
tests.sort(key=lambda x: -x.lineno)
if not tests:
return
self._reporter.entering_filename(filename, len(tests))
for test in tests:
assert len(test.examples) != 0
if self._reporter._verbose:
self._reporter.write("\n{} ".format(test.name))
# check if there are external dependencies which need to be met
if '_doctest_depends_on' in test.globs:
try:
self._check_dependencies(**test.globs['_doctest_depends_on'])
except DependencyError as e:
self._reporter.test_skip(v=str(e))
continue
runner = SymPyDocTestRunner(optionflags=pdoctest.ELLIPSIS |
pdoctest.NORMALIZE_WHITESPACE |
pdoctest.IGNORE_EXCEPTION_DETAIL)
runner._checker = SymPyOutputChecker()
old = sys.stdout
new = StringIO()
sys.stdout = new
# If the testing is normal, the doctests get importing magic to
            # provide the global namespace. If not normal (the default) then
            # they must run on their own; all imports must be explicit within
# a function's docstring. Once imported that import will be
# available to the rest of the tests in a given function's
# docstring (unless clear_globs=True below).
if not self._normal:
test.globs = {}
# if this is uncommented then all the test would get is what
# comes by default with a "from sympy import *"
#exec('from sympy import *') in test.globs
test.globs['print_function'] = print_function
old_displayhook = sys.displayhook
use_unicode_prev = setup_pprint()
try:
f, t = runner.run(test, compileflags=future_flags,
out=new.write, clear_globs=False)
except KeyboardInterrupt:
raise
finally:
sys.stdout = old
if f > 0:
self._reporter.doctest_fail(test.name, new.getvalue())
else:
self._reporter.test_pass()
sys.displayhook = old_displayhook
interactive_printing.NO_GLOBAL = False
pprint_use_unicode(use_unicode_prev)
self._reporter.leaving_filename()
def get_test_files(self, dir, pat='*.py', init_only=True):
r"""
Returns the list of \*.py files (default) from which docstrings
will be tested which are at or below directory ``dir``. By default,
only those that have an __init__.py in their parent directory
and do not start with ``test_`` will be included.
"""
def importable(x):
"""
Checks if given pathname x is an importable module by checking for
__init__.py file.
Returns True/False.
Currently we only test if the __init__.py file exists in the
directory with the file "x" (in theory we should also test all the
parent dirs).
"""
init_py = os.path.join(os.path.dirname(x), "__init__.py")
return os.path.exists(init_py)
dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
g = []
for path, folders, files in os.walk(dir):
g.extend([os.path.join(path, f) for f in files
if not f.startswith('test_') and fnmatch(f, pat)])
if init_only:
# skip files that are not importable (i.e. missing __init__.py)
g = [x for x in g if importable(x)]
return [os.path.normcase(gi) for gi in g]
def _check_dependencies(self,
executables=(),
modules=(),
disable_viewers=(),
python_version=(2,)):
"""
Checks if the dependencies for the test are installed.
        Raises ``DependencyError`` if at least one dependency is not installed.
"""
for executable in executables:
if not find_executable(executable):
raise DependencyError("Could not find %s" % executable)
for module in modules:
if module == 'matplotlib':
matplotlib = import_module(
'matplotlib',
import_kwargs={'fromlist':
['pyplot', 'cm', 'collections']},
min_module_version='1.0.0', catch=(RuntimeError,))
if matplotlib is None:
raise DependencyError("Could not import matplotlib")
else:
if not import_module(module):
raise DependencyError("Could not import %s" % module)
if disable_viewers:
tempdir = tempfile.mkdtemp()
os.environ['PATH'] = '%s:%s' % (tempdir, os.environ['PATH'])
vw = ('#!/usr/bin/env {}\n'
'import sys\n'
'if len(sys.argv) <= 1:\n'
' exit("wrong number of args")\n').format(
'python3' if PY3 else 'python')
for viewer in disable_viewers:
with open(os.path.join(tempdir, viewer), 'w') as fh:
fh.write(vw)
# make the file executable
os.chmod(os.path.join(tempdir, viewer),
stat.S_IREAD | stat.S_IWRITE | stat.S_IXUSR)
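            # The stub scripts written above shadow the real viewers because
            # ``tempdir`` was prepended to PATH, so doctests that would
            # normally launch an external viewer do nothing and exit
            # immediately instead of opening a window.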
if python_version:
if sys.version_info < python_version:
raise DependencyError("Requires Python >= " + '.'.join(map(str, python_version)))
if 'pyglet' in modules:
# monkey-patch pyglet s.t. it does not open a window during
# doctesting
import pyglet
class DummyWindow(object):
def __init__(self, *args, **kwargs):
self.has_exit = True
self.width = 600
self.height = 400
def set_vsync(self, x):
pass
def switch_to(self):
pass
def push_handlers(self, x):
pass
def close(self):
pass
pyglet.window.Window = DummyWindow
class SymPyDocTestFinder(DocTestFinder):
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
    Modified from doctest's version to look harder for code that
    appears to come from a different module. For example, the @vectorize
decorator makes it look like functions come from multidimensional.py
even though their code exists elsewhere.
"""
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to ``tests``.
"""
if self._verbose:
print('Finding tests in %s' % name)
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Make sure we don't run doctests for classes outside of sympy, such
# as in numpy or scipy.
if inspect.isclass(obj):
if obj.__module__.split('.')[0] != 'sympy':
return
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
if not self._recurse:
return
# Look for tests in a module's contained objects.
if inspect.ismodule(obj):
for rawname, val in obj.__dict__.items():
# Recurse to functions & classes.
if inspect.isfunction(val) or inspect.isclass(val):
# Make sure we don't run doctests functions or classes
# from different modules
if val.__module__ != module.__name__:
continue
assert self._from_module(module, val), \
"%s is not in module %s (rawname %s)" % (val, module, rawname)
try:
valname = '%s.%s' % (name, rawname)
self._find(tests, val, valname, module,
source_lines, globs, seen)
except KeyboardInterrupt:
raise
# Look for tests in a module's __test__ dictionary.
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, str):
raise ValueError("SymPyDocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, str)):
raise ValueError("SymPyDocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj):
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).__func__
# Recurse to methods, properties, and nested classes.
if ((inspect.isfunction(unwrap(val)) or
inspect.isclass(val) or
isinstance(val, property)) and
self._from_module(module, val)):
# Make sure we don't run doctests functions or classes
# from different modules
if isinstance(val, property):
if hasattr(val.fget, '__module__'):
if val.fget.__module__ != module.__name__:
continue
else:
if val.__module__ != module.__name__:
continue
assert self._from_module(module, val), \
"%s is not in module %s (valname %s)" % (
val, module, valname)
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
lineno = None
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, str):
# obj is a string in the case for objects in the polys package.
# Note that source_lines is a binary string (compiled polys
# modules), which can't be handled by _find_lineno so determine
# the line number here.
docstring = obj
matches = re.findall(r"line \d+", name)
assert len(matches) == 1, \
"string '%s' does not contain lineno " % name
            # NOTE: this is not the exact line number but it's better than no
# lineno ;)
lineno = int(matches[0][5:])
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, str):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# check that properties have a docstring because _find_lineno
# assumes it
if isinstance(obj, property):
if obj.fget.__doc__ is None:
return None
# Find the docstring's location in the file.
if lineno is None:
obj = unwrap(obj)
# handling of properties is not implemented in _find_lineno so do
# it here
if hasattr(obj, 'func_closure') and obj.func_closure is not None:
tobj = obj.func_closure[0].cell_contents
elif isinstance(obj, property):
tobj = obj.fget
else:
tobj = obj
lineno = self._find_lineno(tobj, source_lines)
if lineno is None:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
globs['_doctest_depends_on'] = getattr(obj, '_doctest_depends_on', {})
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
class SymPyDocTestRunner(DocTestRunner):
"""
A class used to run DocTest test cases, and accumulate statistics.
The ``run`` method is used to process a single DocTest case. It
returns a tuple ``(f, t)``, where ``t`` is the number of test cases
tried, and ``f`` is the number of test cases that failed.
Modified from the doctest version to not reset the sys.displayhook (see
issue 5140).
See the docstring of the original DocTestRunner for more information.
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in ``test``, and display the results using the
writer function ``out``.
The examples are run in the namespace ``test.globs``. If
``clear_globs`` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use ``clear_globs=False``.
``compileflags`` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to ``globs``.
The output of each example is checked using
``SymPyDocTestRunner.check_output``, and the results are
formatted by the ``SymPyDocTestRunner.report_*`` methods.
"""
self.test = test
if compileflags is None:
compileflags = pdoctest._extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = pdoctest._OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = pdoctest.linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
try:
test.globs['print_function'] = print_function
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
# We have to override the name mangled methods.
monkeypatched_methods = [
'patched_linecache_getlines',
'run',
'record_outcome'
]
for method in monkeypatched_methods:
oldname = '_DocTestRunner__' + method
newname = '_SymPyDocTestRunner__' + method
setattr(SymPyDocTestRunner, newname, getattr(DocTestRunner, oldname))
class SymPyOutputChecker(pdoctest.OutputChecker):
"""
    Compared to the OutputChecker from the stdlib, our OutputChecker class
    supports numerical comparison of floats occurring in the output of
    doctest examples.
"""
def __init__(self):
# NOTE OutputChecker is an old-style class with no __init__ method,
# so we can't call the base class version of __init__ here
got_floats = r'(\d+\.\d*|\.\d+)'
# floats in the 'want' string may contain ellipses
want_floats = got_floats + r'(\.{3})?'
front_sep = r'\s|\+|\-|\*|,'
back_sep = front_sep + r'|j|e'
fbeg = r'^%s(?=%s|$)' % (got_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, got_floats, back_sep)
self.num_got_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
fbeg = r'^%s(?=%s|$)' % (want_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, want_floats, back_sep)
self.num_want_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
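        # As an illustration, in a 'want' string like ``x + 3.14159... - 2.5j``
        # the regexes above pick out ``3.14159...`` (the trailing ellipsis is
        # only allowed in the 'want' pattern) and ``2.5``; the separator
        # characters in the lookbehind/lookahead ensure that only stand-alone
        # floats are matched, not digits embedded in larger tokens.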
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `TestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# TODO parse integers as well ?
# Parse floats and compare them. If some of the parsed floats contain
# ellipses, skip the comparison.
matches = self.num_got_rgx.finditer(got)
numbers_got = [match.group(1) for match in matches] # list of strs
matches = self.num_want_rgx.finditer(want)
numbers_want = [match.group(1) for match in matches] # list of strs
if len(numbers_got) != len(numbers_want):
return False
if len(numbers_got) > 0:
nw_ = []
for ng, nw in zip(numbers_got, numbers_want):
if '...' in nw:
nw_.append(ng)
continue
else:
nw_.append(nw)
if abs(float(ng)-float(nw)) > 1e-5:
return False
got = self.num_got_rgx.sub(r'%s', got)
got = got % tuple(nw_)
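            # At this point every parsed float in ``got`` has been replaced by
            # the corresponding text from ``want`` (they agreed within the
            # 1e-5 tolerance), so the literal/whitespace/ellipsis checks below
            # can still succeed even though the numbers were not identical.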
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub(r'(?m)^%s\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub(r'(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & pdoctest.NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & pdoctest.ELLIPSIS:
if pdoctest._ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
class Reporter(object):
"""
Parent class for all reporters.
"""
pass
class PyTestReporter(Reporter):
"""
    Py.test-like reporter. Should produce output identical to py.test.
"""
def __init__(self, verbose=False, tb="short", colors=True,
force_colors=False, split=None):
self._verbose = verbose
self._tb_style = tb
self._colors = colors
self._force_colors = force_colors
self._xfailed = 0
self._xpassed = []
self._failed = []
self._failed_doctest = []
self._passed = 0
self._skipped = 0
self._exceptions = []
self._terminal_width = None
self._default_width = 80
self._split = split
self._active_file = ''
self._active_f = None
# TODO: Should these be protected?
self.slow_test_functions = []
self.fast_test_functions = []
# this tracks the x-position of the cursor (useful for positioning
# things on the screen), without the need for any readline library:
self._write_pos = 0
self._line_wrap = False
def root_dir(self, dir):
self._root_dir = dir
@property
def terminal_width(self):
if self._terminal_width is not None:
return self._terminal_width
def findout_terminal_width():
if sys.platform == "win32":
# Windows support is based on:
#
# http://code.activestate.com/recipes/
# 440694-determine-size-of-console-window-on-windows/
from ctypes import windll, create_string_buffer
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
import struct
(_, _, _, _, _, left, _, right, _, _, _) = \
struct.unpack("hhhhHhhhhhh", csbi.raw)
return right - left
else:
return self._default_width
if hasattr(sys.stdout, 'isatty') and not sys.stdout.isatty():
return self._default_width # leave PIPEs alone
try:
process = subprocess.Popen(['stty', '-a'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout = process.stdout.read()
if PY3:
stdout = stdout.decode("utf-8")
except (OSError, IOError):
pass
else:
# We support the following output formats from stty:
#
# 1) Linux -> columns 80
# 2) OS X -> 80 columns
# 3) Solaris -> columns = 80
re_linux = r"columns\s+(?P<columns>\d+);"
re_osx = r"(?P<columns>\d+)\s*columns;"
re_solaris = r"columns\s+=\s+(?P<columns>\d+);"
for regex in (re_linux, re_osx, re_solaris):
match = re.search(regex, stdout)
if match is not None:
columns = match.group('columns')
                        try:
                            width = int(columns)
                        except ValueError:
                            pass
                        else:
                            if width != 0:
                                return width
return self._default_width
width = findout_terminal_width()
self._terminal_width = width
return width
def write(self, text, color="", align="left", width=None,
force_colors=False):
"""
        Prints text on the screen.
It uses sys.stdout.write(), so no readline library is necessary.
Parameters
==========
color : choose from the colors below, "" means default color
align : "left"/"right", "left" is a normal print, "right" is aligned on
the right-hand side of the screen, filled with spaces if
necessary
width : the screen width
"""
color_templates = (
("Black", "0;30"),
("Red", "0;31"),
("Green", "0;32"),
("Brown", "0;33"),
("Blue", "0;34"),
("Purple", "0;35"),
("Cyan", "0;36"),
("LightGray", "0;37"),
("DarkGray", "1;30"),
("LightRed", "1;31"),
("LightGreen", "1;32"),
("Yellow", "1;33"),
("LightBlue", "1;34"),
("LightPurple", "1;35"),
("LightCyan", "1;36"),
("White", "1;37"),
)
colors = {}
for name, value in color_templates:
colors[name] = value
c_normal = '\033[0m'
c_color = '\033[%sm'
if width is None:
width = self.terminal_width
if align == "right":
if self._write_pos + len(text) > width:
# we don't fit on the current line, create a new line
self.write("\n")
self.write(" "*(width - self._write_pos - len(text)))
if not self._force_colors and hasattr(sys.stdout, 'isatty') and not \
sys.stdout.isatty():
# the stdout is not a terminal, this for example happens if the
# output is piped to less, e.g. "bin/test | less". In this case,
# the terminal control sequences would be printed verbatim, so
# don't use any colors.
color = ""
elif sys.platform == "win32":
# Windows consoles don't support ANSI escape sequences
color = ""
elif not self._colors:
color = ""
if self._line_wrap:
if text[0] != "\n":
sys.stdout.write("\n")
# Avoid UnicodeEncodeError when printing out test failures
if PY3 and IS_WINDOWS:
text = text.encode('raw_unicode_escape').decode('utf8', 'ignore')
elif PY3 and not sys.stdout.encoding.lower().startswith('utf'):
text = text.encode(sys.stdout.encoding, 'backslashreplace'
).decode(sys.stdout.encoding)
if color == "":
sys.stdout.write(text)
else:
sys.stdout.write("%s%s%s" %
(c_color % colors[color], text, c_normal))
sys.stdout.flush()
l = text.rfind("\n")
if l == -1:
self._write_pos += len(text)
else:
self._write_pos = len(text) - l - 1
self._line_wrap = self._write_pos >= width
self._write_pos %= width
def write_center(self, text, delim="="):
width = self.terminal_width
if text != "":
text = " %s " % text
idx = (width - len(text)) // 2
t = delim*idx + text + delim*(width - idx - len(text))
self.write(t + "\n")
def write_exception(self, e, val, tb):
# remove the first item, as that is always runtests.py
tb = tb.tb_next
t = traceback.format_exception(e, val, tb)
self.write("".join(t))
def start(self, seed=None, msg="test process starts"):
self.write_center(msg)
executable = sys.executable
v = tuple(sys.version_info)
python_version = "%s.%s.%s-%s-%s" % v
implementation = platform.python_implementation()
if implementation == 'PyPy':
implementation += " %s.%s.%s-%s-%s" % sys.pypy_version_info
self.write("executable: %s (%s) [%s]\n" %
(executable, python_version, implementation))
from sympy.utilities.misc import ARCH
self.write("architecture: %s\n" % ARCH)
from sympy.core.cache import USE_CACHE
self.write("cache: %s\n" % USE_CACHE)
from sympy.core.compatibility import GROUND_TYPES, HAS_GMPY
version = ''
if GROUND_TYPES =='gmpy':
if HAS_GMPY == 1:
import gmpy
elif HAS_GMPY == 2:
import gmpy2 as gmpy
version = gmpy.version()
self.write("ground types: %s %s\n" % (GROUND_TYPES, version))
numpy = import_module('numpy')
self.write("numpy: %s\n" % (None if not numpy else numpy.__version__))
if seed is not None:
self.write("random seed: %d\n" % seed)
from sympy.utilities.misc import HASH_RANDOMIZATION
self.write("hash randomization: ")
hash_seed = os.getenv("PYTHONHASHSEED") or '0'
if HASH_RANDOMIZATION and (hash_seed == "random" or int(hash_seed)):
self.write("on (PYTHONHASHSEED=%s)\n" % hash_seed)
else:
self.write("off\n")
if self._split:
self.write("split: %s\n" % self._split)
self.write('\n')
self._t_start = clock()
def finish(self):
self._t_end = clock()
self.write("\n")
global text, linelen
text = "tests finished: %d passed, " % self._passed
linelen = len(text)
        def add_text(mytext):
            """Break new text if too long."""
            global text, linelen
if linelen + len(mytext) > self.terminal_width:
text += '\n'
linelen = 0
text += mytext
linelen += len(mytext)
if len(self._failed) > 0:
add_text("%d failed, " % len(self._failed))
if len(self._failed_doctest) > 0:
add_text("%d failed, " % len(self._failed_doctest))
if self._skipped > 0:
add_text("%d skipped, " % self._skipped)
if self._xfailed > 0:
add_text("%d expected to fail, " % self._xfailed)
if len(self._xpassed) > 0:
add_text("%d expected to fail but passed, " % len(self._xpassed))
if len(self._exceptions) > 0:
add_text("%d exceptions, " % len(self._exceptions))
add_text("in %.2f seconds" % (self._t_end - self._t_start))
if self.slow_test_functions:
self.write_center('slowest tests', '_')
sorted_slow = sorted(self.slow_test_functions, key=lambda r: r[1])
for slow_func_name, taken in sorted_slow:
print('%s - Took %.3f seconds' % (slow_func_name, taken))
if self.fast_test_functions:
self.write_center('unexpectedly fast tests', '_')
sorted_fast = sorted(self.fast_test_functions,
key=lambda r: r[1])
for fast_func_name, taken in sorted_fast:
print('%s - Took %.3f seconds' % (fast_func_name, taken))
if len(self._xpassed) > 0:
self.write_center("xpassed tests", "_")
for e in self._xpassed:
self.write("%s: %s\n" % (e[0], e[1]))
self.write("\n")
if self._tb_style != "no" and len(self._exceptions) > 0:
for e in self._exceptions:
filename, f, (t, val, tb) = e
self.write_center("", "_")
if f is None:
s = "%s" % filename
else:
s = "%s:%s" % (filename, f.__name__)
self.write_center(s, "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed) > 0:
for e in self._failed:
filename, f, (t, val, tb) = e
self.write_center("", "_")
self.write_center("%s:%s" % (filename, f.__name__), "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed_doctest) > 0:
for e in self._failed_doctest:
filename, msg = e
self.write_center("", "_")
self.write_center("%s" % filename, "_")
self.write(msg)
self.write("\n")
self.write_center(text)
ok = len(self._failed) == 0 and len(self._exceptions) == 0 and \
len(self._failed_doctest) == 0
if not ok:
self.write("DO *NOT* COMMIT!\n")
return ok
def entering_filename(self, filename, n):
rel_name = filename[len(self._root_dir) + 1:]
self._active_file = rel_name
self._active_file_error = False
self.write(rel_name)
self.write("[%d] " % n)
def leaving_filename(self):
self.write(" ")
if self._active_file_error:
self.write("[FAIL]", "Red", align="right")
else:
self.write("[OK]", "Green", align="right")
self.write("\n")
if self._verbose:
self.write("\n")
def entering_test(self, f):
self._active_f = f
if self._verbose:
self.write("\n" + f.__name__ + " ")
def test_xfail(self):
self._xfailed += 1
self.write("f", "Green")
def test_xpass(self, v):
message = str(v)
self._xpassed.append((self._active_file, message))
self.write("X", "Green")
def test_fail(self, exc_info):
self._failed.append((self._active_file, self._active_f, exc_info))
self.write("F", "Red")
self._active_file_error = True
def doctest_fail(self, name, error_msg):
# the first line contains "******", remove it:
error_msg = "\n".join(error_msg.split("\n")[1:])
self._failed_doctest.append((name, error_msg))
self.write("F", "Red")
self._active_file_error = True
def test_pass(self, char="."):
self._passed += 1
if self._verbose:
self.write("ok", "Green")
else:
self.write(char, "Green")
def test_skip(self, v=None):
char = "s"
self._skipped += 1
if v is not None:
message = str(v)
if message == "KeyboardInterrupt":
char = "K"
elif message == "Timeout":
char = "T"
elif message == "Slow":
char = "w"
if self._verbose:
if v is not None:
self.write(message + ' ', "Blue")
else:
self.write(" - ", "Blue")
self.write(char, "Blue")
def test_exception(self, exc_info):
self._exceptions.append((self._active_file, self._active_f, exc_info))
if exc_info[0] is TimeOutError:
self.write("T", "Red")
else:
self.write("E", "Red")
self._active_file_error = True
def import_error(self, filename, exc_info):
self._exceptions.append((filename, None, exc_info))
rel_name = filename[len(self._root_dir) + 1:]
self.write(rel_name)
self.write("[?] Failed to import", "Red")
self.write(" ")
self.write("[FAIL]", "Red", align="right")
self.write("\n")
|
dfb9a8f205625b804a7832f539809737202d1543a7cac2b9391bf8399a9aaa79 | """ Helpers for randomized testing """
from __future__ import print_function, division
from random import uniform, Random, randrange, randint
from sympy.core.compatibility import is_sequence, as_int
from sympy.core.containers import Tuple
from sympy.core.numbers import comp, I
from sympy.core.symbol import Symbol
from sympy.simplify.simplify import nsimplify
def random_complex_number(a=2, b=-1, c=3, d=1, rational=False, tolerance=None):
"""
Return a random complex number.
    To reduce the chance of hitting branch cuts or anything, we guarantee
b <= Im z <= d, a <= Re z <= c
When rational is True, a rational approximation to a random number
is obtained within specified tolerance, if any.
"""
A, B = uniform(a, c), uniform(b, d)
if not rational:
return A + I*B
return (nsimplify(A, rational=True, tolerance=tolerance) +
I*nsimplify(B, rational=True, tolerance=tolerance))
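# Illustrative usage: with the default bounds a=2, b=-1, c=3, d=1 the value
# z returned by random_complex_number() satisfies 2 <= re(z) <= 3 and
# -1 <= im(z) <= 1; with rational=True both parts are rationalized via
# nsimplify within the given tolerance.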
def verify_numerically(f, g, z=None, tol=1.0e-6, a=2, b=-1, c=3, d=1):
"""
Test numerically that f and g agree when evaluated in the argument z.
If z is None, all symbols will be tested. This routine does not test
whether there are Floats present with precision higher than 15 digits
so if there are, your results may not be what you expect due to round-
off errors.
Examples
========
>>> from sympy import sin, cos
>>> from sympy.abc import x
>>> from sympy.testing.randtest import verify_numerically as tn
>>> tn(sin(x)**2 + cos(x)**2, 1, x)
True
"""
f, g, z = Tuple(f, g, z)
z = [z] if isinstance(z, Symbol) else (f.free_symbols | g.free_symbols)
reps = list(zip(z, [random_complex_number(a, b, c, d) for _ in z]))
z1 = f.subs(reps).n()
z2 = g.subs(reps).n()
return comp(z1, z2, tol)
def test_derivative_numerically(f, z, tol=1.0e-6, a=2, b=-1, c=3, d=1):
"""
Test numerically that the symbolically computed derivative of f
with respect to z is correct.
This routine does not test whether there are Floats present with
precision higher than 15 digits so if there are, your results may
not be what you expect due to round-off errors.
Examples
========
>>> from sympy import sin
>>> from sympy.abc import x
>>> from sympy.testing.randtest import test_derivative_numerically as td
>>> td(sin(x), x)
True
"""
from sympy.core.function import Derivative
z0 = random_complex_number(a, b, c, d)
f1 = f.diff(z).subs(z, z0)
f2 = Derivative(f, z).doit_numerically(z0)
return comp(f1.n(), f2.n(), tol)
def _randrange(seed=None):
"""Return a randrange generator. ``seed`` can be
o None - return randomly seeded generator
o int - return a generator seeded with the int
o list - the values to be returned will be taken from the list
in the order given; the provided list is not modified.
Examples
========
>>> from sympy.testing.randtest import _randrange
>>> rr = _randrange()
>>> rr(1000) # doctest: +SKIP
999
>>> rr = _randrange(3)
>>> rr(1000) # doctest: +SKIP
238
>>> rr = _randrange([0, 5, 1, 3, 4])
>>> rr(3), rr(3)
(0, 1)
"""
if seed is None:
return randrange
elif isinstance(seed, int):
return Random(seed).randrange
elif is_sequence(seed):
seed = list(seed) # make a copy
seed.reverse()
def give(a, b=None, seq=seed):
if b is None:
a, b = 0, a
a, b = as_int(a), as_int(b)
w = b - a
if w < 1:
raise ValueError('_randrange got empty range')
try:
x = seq.pop()
except IndexError:
raise ValueError('_randrange sequence was too short')
if a <= x < b:
return x
else:
return give(a, b, seq)
return give
else:
raise ValueError('_randrange got an unexpected seed')
def _randint(seed=None):
"""Return a randint generator. ``seed`` can be
o None - return randomly seeded generator
o int - return a generator seeded with the int
o list - the values to be returned will be taken from the list
in the order given; the provided list is not modified.
Examples
========
>>> from sympy.testing.randtest import _randint
>>> ri = _randint()
>>> ri(1, 1000) # doctest: +SKIP
999
>>> ri = _randint(3)
>>> ri(1, 1000) # doctest: +SKIP
238
>>> ri = _randint([0, 5, 1, 2, 4])
>>> ri(1, 3), ri(1, 3)
(1, 2)
"""
if seed is None:
return randint
elif isinstance(seed, int):
return Random(seed).randint
elif is_sequence(seed):
seed = list(seed) # make a copy
seed.reverse()
def give(a, b, seq=seed):
a, b = as_int(a), as_int(b)
w = b - a
if w < 0:
raise ValueError('_randint got empty range')
try:
x = seq.pop()
except IndexError:
raise ValueError('_randint sequence was too short')
if a <= x <= b:
return x
else:
return give(a, b, seq)
return give
else:
raise ValueError('_randint got an unexpected seed')
|
2984af4e928e137bf14b09e1e3d7f5811199c50e9089ce0f5f9bfb5aab19c7da | from sympy.core.basic import Basic
from sympy import (sympify, eye, sin, cos, rot_axis1, rot_axis2,
rot_axis3, ImmutableMatrix as Matrix, Symbol)
from sympy.core.cache import cacheit
import sympy.vector
class Orienter(Basic):
"""
Super-class for all orienter classes.
"""
def rotation_matrix(self):
"""
The rotation matrix corresponding to this orienter
instance.
"""
return self._parent_orient
class AxisOrienter(Orienter):
"""
Class to denote an axis orienter.
"""
def __new__(cls, angle, axis):
if not isinstance(axis, sympy.vector.Vector):
raise TypeError("axis should be a Vector")
angle = sympify(angle)
obj = super(AxisOrienter, cls).__new__(cls, angle,
axis)
obj._angle = angle
obj._axis = axis
return obj
def __init__(self, angle, axis):
"""
Axis rotation is a rotation about an arbitrary axis by
some angle. The angle is supplied as a SymPy expr scalar, and
the axis is supplied as a Vector.
Parameters
==========
angle : Expr
The angle by which the new system is to be rotated
axis : Vector
The axis around which the rotation has to be performed
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import symbols
>>> q1 = symbols('q1')
>>> N = CoordSys3D('N')
>>> from sympy.vector import AxisOrienter
>>> orienter = AxisOrienter(q1, N.i + 2 * N.j)
>>> B = N.orient_new('B', (orienter, ))
"""
# Dummy initializer for docstrings
pass
@cacheit
def rotation_matrix(self, system):
"""
The rotation matrix corresponding to this orienter
instance.
Parameters
==========
system : CoordSys3D
The coordinate system wrt which the rotation matrix
is to be computed
"""
axis = sympy.vector.express(self.axis, system).normalize()
axis = axis.to_matrix(system)
theta = self.angle
parent_orient = ((eye(3) - axis * axis.T) * cos(theta) +
Matrix([[0, -axis[2], axis[1]],
[axis[2], 0, -axis[0]],
[-axis[1], axis[0], 0]]) * sin(theta) +
axis * axis.T)
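        # The expression above is the standard axis-angle (Rodrigues)
        # rotation matrix cos(theta)*(I - a*a.T) + sin(theta)*[a]_x + a*a.T
        # for the unit vector ``axis``; the transpose below converts it to
        # the parent-orientation convention used by the other orienters.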
parent_orient = parent_orient.T
return parent_orient
@property
def angle(self):
return self._angle
@property
def axis(self):
return self._axis
class ThreeAngleOrienter(Orienter):
"""
Super-class for Body and Space orienters.
"""
def __new__(cls, angle1, angle2, angle3, rot_order):
if isinstance(rot_order, Symbol):
rot_order = rot_order.name
approved_orders = ('123', '231', '312', '132', '213',
'321', '121', '131', '212', '232',
'313', '323', '')
original_rot_order = rot_order
rot_order = str(rot_order).upper()
        if len(rot_order) != 3:
raise TypeError('rot_order should be a str of length 3')
rot_order = [i.replace('X', '1') for i in rot_order]
rot_order = [i.replace('Y', '2') for i in rot_order]
rot_order = [i.replace('Z', '3') for i in rot_order]
rot_order = ''.join(rot_order)
if rot_order not in approved_orders:
            raise TypeError('Invalid rot_order parameter')
a1 = int(rot_order[0])
a2 = int(rot_order[1])
a3 = int(rot_order[2])
angle1 = sympify(angle1)
angle2 = sympify(angle2)
angle3 = sympify(angle3)
if cls._in_order:
parent_orient = (_rot(a1, angle1) *
_rot(a2, angle2) *
_rot(a3, angle3))
else:
parent_orient = (_rot(a3, angle3) *
_rot(a2, angle2) *
_rot(a1, angle1))
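        # BodyOrienter sets _in_order=True, so the simple rotations are
        # composed as _rot(a1)*_rot(a2)*_rot(a3); SpaceOrienter sets
        # _in_order=False and composes them in the reverse order, which is
        # the only difference between the two subclasses.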
parent_orient = parent_orient.T
obj = super(ThreeAngleOrienter, cls).__new__(
cls, angle1, angle2, angle3, Symbol(rot_order))
obj._angle1 = angle1
obj._angle2 = angle2
obj._angle3 = angle3
obj._rot_order = original_rot_order
obj._parent_orient = parent_orient
return obj
@property
def angle1(self):
return self._angle1
@property
def angle2(self):
return self._angle2
@property
def angle3(self):
return self._angle3
@property
def rot_order(self):
return self._rot_order
class BodyOrienter(ThreeAngleOrienter):
"""
Class to denote a body-orienter.
"""
_in_order = True
def __new__(cls, angle1, angle2, angle3, rot_order):
obj = ThreeAngleOrienter.__new__(cls, angle1, angle2, angle3,
rot_order)
return obj
def __init__(self, angle1, angle2, angle3, rot_order):
"""
Body orientation takes this coordinate system through three
successive simple rotations.
Body fixed rotations include both Euler Angles and
Tait-Bryan Angles, see https://en.wikipedia.org/wiki/Euler_angles.
Parameters
==========
angle1, angle2, angle3 : Expr
Three successive angles to rotate the coordinate system by
rotation_order : string
String defining the order of axes for rotation
Examples
========
>>> from sympy.vector import CoordSys3D, BodyOrienter
>>> from sympy import symbols
>>> q1, q2, q3 = symbols('q1 q2 q3')
>>> N = CoordSys3D('N')
A 'Body' fixed rotation is described by three angles and
three body-fixed rotation axes. To orient a coordinate system D
with respect to N, each sequential rotation is always about
the orthogonal unit vectors fixed to D. For example, a '123'
rotation will specify rotations about N.i, then D.j, then
        D.k. (Initially, D.i is the same as N.i)
Therefore,
>>> body_orienter = BodyOrienter(q1, q2, q3, '123')
>>> D = N.orient_new('D', (body_orienter, ))
        is the same as
>>> from sympy.vector import AxisOrienter
>>> axis_orienter1 = AxisOrienter(q1, N.i)
>>> D = N.orient_new('D', (axis_orienter1, ))
>>> axis_orienter2 = AxisOrienter(q2, D.j)
>>> D = D.orient_new('D', (axis_orienter2, ))
>>> axis_orienter3 = AxisOrienter(q3, D.k)
>>> D = D.orient_new('D', (axis_orienter3, ))
Acceptable rotation orders are of length 3, expressed in XYZ or
        123, and cannot have a rotation about an axis twice in a row.
>>> body_orienter1 = BodyOrienter(q1, q2, q3, '123')
>>> body_orienter2 = BodyOrienter(q1, q2, 0, 'ZXZ')
>>> body_orienter3 = BodyOrienter(0, 0, 0, 'XYX')
"""
# Dummy initializer for docstrings
pass
class SpaceOrienter(ThreeAngleOrienter):
"""
Class to denote a space-orienter.
"""
_in_order = False
def __new__(cls, angle1, angle2, angle3, rot_order):
obj = ThreeAngleOrienter.__new__(cls, angle1, angle2, angle3,
rot_order)
return obj
def __init__(self, angle1, angle2, angle3, rot_order):
"""
Space rotation is similar to Body rotation, but the rotations
are applied in the opposite order.
Parameters
==========
angle1, angle2, angle3 : Expr
Three successive angles to rotate the coordinate system by
rotation_order : string
String defining the order of axes for rotation
See Also
========
BodyOrienter : Orienter to orient systems wrt Euler angles.
Examples
========
>>> from sympy.vector import CoordSys3D, SpaceOrienter
>>> from sympy import symbols
>>> q1, q2, q3 = symbols('q1 q2 q3')
>>> N = CoordSys3D('N')
To orient a coordinate system D with respect to N, each
sequential rotation is always about N's orthogonal unit vectors.
For example, a '123' rotation will specify rotations about
N.i, then N.j, then N.k.
Therefore,
>>> space_orienter = SpaceOrienter(q1, q2, q3, '312')
>>> D = N.orient_new('D', (space_orienter, ))
        is the same as
>>> from sympy.vector import AxisOrienter
>>> axis_orienter1 = AxisOrienter(q1, N.i)
>>> B = N.orient_new('B', (axis_orienter1, ))
>>> axis_orienter2 = AxisOrienter(q2, N.j)
>>> C = B.orient_new('C', (axis_orienter2, ))
>>> axis_orienter3 = AxisOrienter(q3, N.k)
        >>> D = C.orient_new('D', (axis_orienter3, ))
"""
# Dummy initializer for docstrings
pass
class QuaternionOrienter(Orienter):
"""
Class to denote a quaternion-orienter.
"""
def __new__(cls, q0, q1, q2, q3):
q0 = sympify(q0)
q1 = sympify(q1)
q2 = sympify(q2)
q3 = sympify(q3)
parent_orient = (Matrix([[q0 ** 2 + q1 ** 2 - q2 ** 2 -
q3 ** 2,
2 * (q1 * q2 - q0 * q3),
2 * (q0 * q2 + q1 * q3)],
[2 * (q1 * q2 + q0 * q3),
q0 ** 2 - q1 ** 2 +
q2 ** 2 - q3 ** 2,
2 * (q2 * q3 - q0 * q1)],
[2 * (q1 * q3 - q0 * q2),
2 * (q0 * q1 + q2 * q3),
q0 ** 2 - q1 ** 2 -
q2 ** 2 + q3 ** 2]]))
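        # For a unit quaternion (q0**2 + q1**2 + q2**2 + q3**2 == 1) the
        # matrix above is the standard quaternion-to-rotation-matrix formula
        # (in one common sign convention); as with the other orienters it is
        # transposed below to match the parent-orientation convention.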
parent_orient = parent_orient.T
obj = super(QuaternionOrienter, cls).__new__(cls, q0, q1, q2, q3)
obj._q0 = q0
obj._q1 = q1
obj._q2 = q2
obj._q3 = q3
obj._parent_orient = parent_orient
return obj
    def __init__(self, q0, q1, q2, q3):
"""
Quaternion orientation orients the new CoordSys3D with
Quaternions, defined as a finite rotation about lambda, a unit
vector, by some amount theta.
This orientation is described by four parameters:
q0 = cos(theta/2)
q1 = lambda_x sin(theta/2)
q2 = lambda_y sin(theta/2)
q3 = lambda_z sin(theta/2)
Quaternion does not take in a rotation order.
Parameters
==========
q0, q1, q2, q3 : Expr
The quaternions to rotate the coordinate system by
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import symbols
>>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
>>> N = CoordSys3D('N')
>>> from sympy.vector import QuaternionOrienter
>>> q_orienter = QuaternionOrienter(q0, q1, q2, q3)
>>> B = N.orient_new('B', (q_orienter, ))
"""
# Dummy initializer for docstrings
pass
@property
def q0(self):
return self._q0
@property
def q1(self):
return self._q1
@property
def q2(self):
return self._q2
@property
def q3(self):
return self._q3
def _rot(axis, angle):
"""DCM for simple axis 1, 2 or 3 rotations. """
if axis == 1:
return Matrix(rot_axis1(angle).T)
elif axis == 2:
return Matrix(rot_axis2(angle).T)
elif axis == 3:
return Matrix(rot_axis3(angle).T)
|
eed7238de4947fd5d9e6e34e1b4c53945f61bcbd8f4971a46f5c56dfcb3a2c68 | from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.core.basic import Basic
from sympy.core.compatibility import Callable
from sympy.core.cache import cacheit
from sympy.core import S, Dummy, Lambda
from sympy import symbols, MatrixBase, ImmutableDenseMatrix
from sympy.solvers import solve
from sympy.vector.scalar import BaseScalar
from sympy import eye, trigsimp, ImmutableMatrix as Matrix, Symbol, sin, cos,\
sqrt, diff, Tuple, acos, atan2, simplify
import sympy.vector
from sympy.vector.orienters import (Orienter, AxisOrienter, BodyOrienter,
SpaceOrienter, QuaternionOrienter)
def CoordSysCartesian(*args, **kwargs):
SymPyDeprecationWarning(
feature="CoordSysCartesian",
useinstead="CoordSys3D",
issue=12865,
deprecated_since_version="1.1"
).warn()
return CoordSys3D(*args, **kwargs)
class CoordSys3D(Basic):
"""
Represents a coordinate system in 3-D space.
"""
def __new__(cls, name, transformation=None, parent=None, location=None,
rotation_matrix=None, vector_names=None, variable_names=None):
"""
The orientation/location parameters are necessary if this system
is being defined at a certain orientation or location wrt another.
Parameters
==========
name : str
The name of the new CoordSys3D instance.
transformation : Lambda, Tuple, str
Transformation defined by transformation equations or chosen
from predefined ones.
location : Vector
The position vector of the new system's origin wrt the parent
instance.
rotation_matrix : SymPy ImmutableMatrix
The rotation matrix of the new coordinate system with respect
to the parent. In other words, the output of
new_system.rotation_matrix(parent).
parent : CoordSys3D
The coordinate system wrt which the orientation/location
(or both) is being defined.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
"""
name = str(name)
Vector = sympy.vector.Vector
Point = sympy.vector.Point
if not isinstance(name, str):
raise TypeError("name should be a string")
if transformation is not None:
if (location is not None) or (rotation_matrix is not None):
raise ValueError("specify either `transformation` or "
"`location`/`rotation_matrix`")
if isinstance(transformation, (Tuple, tuple, list)):
if isinstance(transformation[0], MatrixBase):
rotation_matrix = transformation[0]
location = transformation[1]
else:
transformation = Lambda(transformation[0],
transformation[1])
elif isinstance(transformation, Callable):
x1, x2, x3 = symbols('x1 x2 x3', cls=Dummy)
transformation = Lambda((x1, x2, x3),
transformation(x1, x2, x3))
elif isinstance(transformation, str):
transformation = Symbol(transformation)
elif isinstance(transformation, (Symbol, Lambda)):
pass
else:
raise TypeError("transformation: "
"wrong type {0}".format(type(transformation)))
# If orientation information has been provided, store
# the rotation matrix accordingly
if rotation_matrix is None:
rotation_matrix = ImmutableDenseMatrix(eye(3))
else:
if not isinstance(rotation_matrix, MatrixBase):
raise TypeError("rotation_matrix should be an Immutable" +
"Matrix instance")
rotation_matrix = rotation_matrix.as_immutable()
# If location information is not given, adjust the default
# location as Vector.zero
if parent is not None:
if not isinstance(parent, CoordSys3D):
raise TypeError("parent should be a " +
"CoordSys3D/None")
if location is None:
location = Vector.zero
else:
if not isinstance(location, Vector):
raise TypeError("location should be a Vector")
# Check that location does not contain base
# scalars
for x in location.free_symbols:
if isinstance(x, BaseScalar):
raise ValueError("location should not contain" +
" BaseScalars")
origin = parent.origin.locate_new(name + '.origin',
location)
else:
location = Vector.zero
origin = Point(name + '.origin')
if transformation is None:
transformation = Tuple(rotation_matrix, location)
if isinstance(transformation, Tuple):
lambda_transformation = CoordSys3D._compose_rotation_and_translation(
transformation[0],
transformation[1],
parent
)
r, l = transformation
l = l._projections
lambda_lame = CoordSys3D._get_lame_coeff('cartesian')
lambda_inverse = lambda x, y, z: r.inv()*Matrix(
[x-l[0], y-l[1], z-l[2]])
elif isinstance(transformation, Symbol):
trname = transformation.name
lambda_transformation = CoordSys3D._get_transformation_lambdas(trname)
if parent is not None:
if parent.lame_coefficients() != (S.One, S.One, S.One):
raise ValueError('Parent for pre-defined coordinate '
'system should be Cartesian.')
lambda_lame = CoordSys3D._get_lame_coeff(trname)
lambda_inverse = CoordSys3D._set_inv_trans_equations(trname)
elif isinstance(transformation, Lambda):
if not CoordSys3D._check_orthogonality(transformation):
raise ValueError("The transformation equation does not "
"create orthogonal coordinate system")
lambda_transformation = transformation
lambda_lame = CoordSys3D._calculate_lame_coeff(lambda_transformation)
lambda_inverse = None
else:
lambda_transformation = lambda x, y, z: transformation(x, y, z)
lambda_lame = CoordSys3D._get_lame_coeff(transformation)
lambda_inverse = None
if variable_names is None:
if isinstance(transformation, Lambda):
variable_names = ["x1", "x2", "x3"]
elif isinstance(transformation, Symbol):
if transformation.name == 'spherical':
variable_names = ["r", "theta", "phi"]
elif transformation.name == 'cylindrical':
variable_names = ["r", "theta", "z"]
else:
variable_names = ["x", "y", "z"]
else:
variable_names = ["x", "y", "z"]
if vector_names is None:
vector_names = ["i", "j", "k"]
# All systems that are defined as 'roots' are unequal, unless
# they have the same name.
# Systems defined at same orientation/position wrt the same
# 'parent' are equal, irrespective of the name.
# This is true even if the same orientation is provided via
# different methods like Axis/Body/Space/Quaternion.
# However, coincident systems may be seen as unequal if
# positioned/oriented wrt different parents, even though
# they may actually be 'coincident' wrt the root system.
if parent is not None:
obj = super(CoordSys3D, cls).__new__(
cls, Symbol(name), transformation, parent)
else:
obj = super(CoordSys3D, cls).__new__(
cls, Symbol(name), transformation)
obj._name = name
# Initialize the base vectors
_check_strings('vector_names', vector_names)
vector_names = list(vector_names)
latex_vects = [(r'\mathbf{\hat{%s}_{%s}}' % (x, name)) for
x in vector_names]
pretty_vects = ['%s_%s' % (x, name) for x in vector_names]
obj._vector_names = vector_names
v1 = BaseVector(0, obj, pretty_vects[0], latex_vects[0])
v2 = BaseVector(1, obj, pretty_vects[1], latex_vects[1])
v3 = BaseVector(2, obj, pretty_vects[2], latex_vects[2])
obj._base_vectors = (v1, v2, v3)
# Initialize the base scalars
        _check_strings('variable_names', variable_names)
variable_names = list(variable_names)
latex_scalars = [(r"\mathbf{{%s}_{%s}}" % (x, name)) for
x in variable_names]
pretty_scalars = ['%s_%s' % (x, name) for x in variable_names]
obj._variable_names = variable_names
obj._vector_names = vector_names
x1 = BaseScalar(0, obj, pretty_scalars[0], latex_scalars[0])
x2 = BaseScalar(1, obj, pretty_scalars[1], latex_scalars[1])
x3 = BaseScalar(2, obj, pretty_scalars[2], latex_scalars[2])
obj._base_scalars = (x1, x2, x3)
obj._transformation = transformation
obj._transformation_lambda = lambda_transformation
obj._lame_coefficients = lambda_lame(x1, x2, x3)
obj._transformation_from_parent_lambda = lambda_inverse
setattr(obj, variable_names[0], x1)
setattr(obj, variable_names[1], x2)
setattr(obj, variable_names[2], x3)
setattr(obj, vector_names[0], v1)
setattr(obj, vector_names[1], v2)
setattr(obj, vector_names[2], v3)
# Assign params
obj._parent = parent
if obj._parent is not None:
obj._root = obj._parent._root
else:
obj._root = obj
obj._parent_rotation_matrix = rotation_matrix
obj._origin = origin
# Return the instance
return obj
def __str__(self, printer=None):
return self._name
__repr__ = __str__
_sympystr = __str__
def __iter__(self):
return iter(self.base_vectors())
@staticmethod
def _check_orthogonality(equations):
"""
        Helper method for _connect_to_cartesian. It checks whether a
        set of transformation equations creates an orthogonal curvilinear
        coordinate system.
Parameters
==========
equations : Lambda
Lambda of transformation equations
"""
x1, x2, x3 = symbols("x1, x2, x3", cls=Dummy)
equations = equations(x1, x2, x3)
v1 = Matrix([diff(equations[0], x1),
diff(equations[1], x1), diff(equations[2], x1)])
v2 = Matrix([diff(equations[0], x2),
diff(equations[1], x2), diff(equations[2], x2)])
v3 = Matrix([diff(equations[0], x3),
diff(equations[1], x3), diff(equations[2], x3)])
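        # v1, v2, v3 are the tangent vectors d(x, y, z)/dx_i of the
        # transformation.  The first check below is meant to reject
        # degenerate (zero) tangent vectors, and the system is accepted as
        # orthogonal only if all pairwise dot products simplify to zero.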
if any(simplify(i[0] + i[1] + i[2]) == 0 for i in (v1, v2, v3)):
return False
else:
if simplify(v1.dot(v2)) == 0 and simplify(v2.dot(v3)) == 0 \
and simplify(v3.dot(v1)) == 0:
return True
else:
return False
@staticmethod
def _set_inv_trans_equations(curv_coord_name):
"""
Store information about inverse transformation equations for
pre-defined coordinate systems.
Parameters
==========
curv_coord_name : str
Name of coordinate system
"""
if curv_coord_name == 'cartesian':
return lambda x, y, z: (x, y, z)
if curv_coord_name == 'spherical':
return lambda x, y, z: (
sqrt(x**2 + y**2 + z**2),
acos(z/sqrt(x**2 + y**2 + z**2)),
atan2(y, x)
)
if curv_coord_name == 'cylindrical':
return lambda x, y, z: (
sqrt(x**2 + y**2),
atan2(y, x),
z
)
        raise ValueError('Wrong set of parameters. '
                         'Type of coordinate system is not defined')
def _calculate_inv_trans_equations(self):
"""
Helper method for set_coordinate_type. It calculates inverse
        transformation equations for the given transformation equations.
"""
        x1, x2, x3 = symbols("x1, x2, x3", cls=Dummy, real=True)
x, y, z = symbols("x, y, z", cls=Dummy)
equations = self._transformation(x1, x2, x3)
solved = solve([equations[0] - x,
equations[1] - y,
equations[2] - z], (x1, x2, x3), dict=True)[0]
solved = solved[x1], solved[x2], solved[x3]
self._transformation_from_parent_lambda = \
lambda x1, x2, x3: tuple(i.subs(list(zip((x, y, z), (x1, x2, x3)))) for i in solved)
@staticmethod
def _get_lame_coeff(curv_coord_name):
"""
Store information about Lame coefficients for pre-defined
coordinate systems.
Parameters
==========
curv_coord_name : str
Name of coordinate system
"""
if isinstance(curv_coord_name, str):
if curv_coord_name == 'cartesian':
return lambda x, y, z: (S.One, S.One, S.One)
if curv_coord_name == 'spherical':
return lambda r, theta, phi: (S.One, r, r*sin(theta))
if curv_coord_name == 'cylindrical':
return lambda r, theta, h: (S.One, r, S.One)
raise ValueError('Wrong set of parameters.'
' Type of coordinate system is not defined')
return CoordSys3D._calculate_lame_coefficients(curv_coord_name)
@staticmethod
def _calculate_lame_coeff(equations):
"""
It calculates Lame coefficients
        for the given transformation equations.
Parameters
==========
equations : Lambda
Lambda of transformation equations.
"""
return lambda x1, x2, x3: (
sqrt(diff(equations(x1, x2, x3)[0], x1)**2 +
diff(equations(x1, x2, x3)[1], x1)**2 +
diff(equations(x1, x2, x3)[2], x1)**2),
sqrt(diff(equations(x1, x2, x3)[0], x2)**2 +
diff(equations(x1, x2, x3)[1], x2)**2 +
diff(equations(x1, x2, x3)[2], x2)**2),
sqrt(diff(equations(x1, x2, x3)[0], x3)**2 +
diff(equations(x1, x2, x3)[1], x3)**2 +
diff(equations(x1, x2, x3)[2], x3)**2)
)
def _inverse_rotation_matrix(self):
"""
Returns inverse rotation matrix.
"""
return simplify(self._parent_rotation_matrix**-1)
@staticmethod
def _get_transformation_lambdas(curv_coord_name):
"""
Store information about transformation equations for pre-defined
coordinate systems.
Parameters
==========
curv_coord_name : str
Name of coordinate system
"""
if isinstance(curv_coord_name, str):
if curv_coord_name == 'cartesian':
return lambda x, y, z: (x, y, z)
if curv_coord_name == 'spherical':
return lambda r, theta, phi: (
r*sin(theta)*cos(phi),
r*sin(theta)*sin(phi),
r*cos(theta)
)
if curv_coord_name == 'cylindrical':
return lambda r, theta, h: (
r*cos(theta),
r*sin(theta),
h
)
raise ValueError('Wrong set of parameters.'
' Type of coordinate system is not defined')
@classmethod
def _rotation_trans_equations(cls, matrix, equations):
"""
Returns the transformation equations obtained from rotation matrix.
Parameters
==========
matrix : Matrix
Rotation matrix
equations : tuple
Transformation equations
"""
return tuple(matrix * Matrix(equations))
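# Illustrative sketch (standalone, assumed inputs): applying a rotation
# about the z-axis to the coordinate tuple (x, y, z):
#
#     from sympy import Matrix, symbols, sin, cos
#     q, x, y, z = symbols('q x y z')
#     Rz = Matrix([[cos(q), -sin(q), 0],
#                  [sin(q),  cos(q), 0],
#                  [0,       0,      1]])
#     CoordSys3D._rotation_trans_equations(Rz, (x, y, z))
#     # expected: (x*cos(q) - y*sin(q), x*sin(q) + y*cos(q), z)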
@property
def origin(self):
return self._origin
@property
def delop(self):
SymPyDeprecationWarning(
feature="coord_system.delop has been replaced.",
useinstead="Use the Del() class",
deprecated_since_version="1.1",
issue=12866,
).warn()
from sympy.vector.deloperator import Del
return Del()
def base_vectors(self):
return self._base_vectors
def base_scalars(self):
return self._base_scalars
def lame_coefficients(self):
return self._lame_coefficients
def transformation_to_parent(self):
return self._transformation_lambda(*self.base_scalars())
def transformation_from_parent(self):
if self._parent is None:
raise ValueError("no parent coordinate system, use "
"`transformation_from_parent_function()`")
return self._transformation_from_parent_lambda(
*self._parent.base_scalars())
def transformation_from_parent_function(self):
return self._transformation_from_parent_lambda
def rotation_matrix(self, other):
"""
Returns the direction cosine matrix (DCM), also known as the
'rotation matrix' of this coordinate system with respect to
another system.
If v_a is a vector defined in system 'A' (in matrix format)
and v_b is the same vector defined in system 'B', then
v_a = A.rotation_matrix(B) * v_b.
A SymPy Matrix is returned.
Parameters
==========
other : CoordSys3D
The system with respect to which the DCM is generated.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import symbols
>>> q1 = symbols('q1')
>>> N = CoordSys3D('N')
>>> A = N.orient_new_axis('A', q1, N.i)
>>> N.rotation_matrix(A)
Matrix([
[1, 0, 0],
[0, cos(q1), -sin(q1)],
[0, sin(q1), cos(q1)]])
"""
from sympy.vector.functions import _path
if not isinstance(other, CoordSys3D):
raise TypeError(str(other) +
" is not a CoordSys3D")
# Handle special cases
if other == self:
return eye(3)
elif other == self._parent:
return self._parent_rotation_matrix
elif other._parent == self:
return other._parent_rotation_matrix.T
# Else, use tree to calculate position
rootindex, path = _path(self, other)
result = eye(3)
i = -1
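# path[:rootindex] climbs from self up to (but excluding) the first
# common ancestor, each step contributing its parent rotation matrix;
# path[rootindex] is the common ancestor itself and is skipped by the
# `i += 2` below; the remaining entries descend towards `other` and
# contribute transposed (inverse) rotation matrices.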
for i in range(rootindex):
result *= path[i]._parent_rotation_matrix
i += 2
while i < len(path):
result *= path[i]._parent_rotation_matrix.T
i += 1
return result
@cacheit
def position_wrt(self, other):
"""
Returns the position vector of the origin of this coordinate
system with respect to another Point/CoordSys3D.
Parameters
==========
other : Point/CoordSys3D
If other is a Point, the position of this system's origin
wrt it is returned. If it is an instance of CoordSys3D,
the position wrt its origin is returned.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> N = CoordSys3D('N')
>>> N1 = N.locate_new('N1', 10 * N.i)
>>> N.position_wrt(N1)
(-10)*N.i
"""
return self.origin.position_wrt(other)
def scalar_map(self, other):
"""
Returns a dictionary which expresses the coordinate variables
(base scalars) of this frame in terms of the variables of
the other frame.
Parameters
==========
other : CoordSys3D
The other system to map the variables to.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import Symbol
>>> A = CoordSys3D('A')
>>> q = Symbol('q')
>>> B = A.orient_new_axis('B', q, A.k)
>>> A.scalar_map(B)
{A.x: B.x*cos(q) - B.y*sin(q), A.y: B.x*sin(q) + B.y*cos(q), A.z: B.z}
"""
relocated_scalars = []
origin_coords = tuple(self.position_wrt(other).to_matrix(other))
for i, x in enumerate(other.base_scalars()):
relocated_scalars.append(x - origin_coords[i])
vars_matrix = (self.rotation_matrix(other) *
Matrix(relocated_scalars))
mapping = {}
for i, x in enumerate(self.base_scalars()):
mapping[x] = trigsimp(vars_matrix[i])
return mapping
def locate_new(self, name, position, vector_names=None,
variable_names=None):
"""
Returns a CoordSys3D with its origin located at the given
position wrt this coordinate system's origin.
Parameters
==========
name : str
The name of the new CoordSys3D instance.
position : Vector
The position vector of the new system's origin wrt this
one.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> A = CoordSys3D('A')
>>> B = A.locate_new('B', 10 * A.i)
>>> B.origin.position_wrt(A.origin)
10*A.i
"""
if variable_names is None:
variable_names = self._variable_names
if vector_names is None:
vector_names = self._vector_names
return CoordSys3D(name, location=position,
vector_names=vector_names,
variable_names=variable_names,
parent=self)
def orient_new(self, name, orienters, location=None,
vector_names=None, variable_names=None):
"""
Creates a new CoordSys3D oriented in the user-specified way
with respect to this system.
Please refer to the documentation of the orienter classes
for more information about the orientation procedure.
Parameters
==========
name : str
The name of the new CoordSys3D instance.
orienters : iterable/Orienter
An Orienter or an iterable of Orienters for orienting the
new coordinate system.
If an Orienter is provided, it is applied to get the new
system.
If an iterable is provided, the orienters will be applied
in the order in which they appear in the iterable.
location : Vector(optional)
The location of the new coordinate system's origin wrt this
system's origin. If not specified, the origins are taken to
be coincident.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import symbols
>>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
>>> N = CoordSys3D('N')
Using an AxisOrienter
>>> from sympy.vector import AxisOrienter
>>> axis_orienter = AxisOrienter(q1, N.i + 2 * N.j)
>>> A = N.orient_new('A', (axis_orienter, ))
Using a BodyOrienter
>>> from sympy.vector import BodyOrienter
>>> body_orienter = BodyOrienter(q1, q2, q3, '123')
>>> B = N.orient_new('B', (body_orienter, ))
Using a SpaceOrienter
>>> from sympy.vector import SpaceOrienter
>>> space_orienter = SpaceOrienter(q1, q2, q3, '312')
>>> C = N.orient_new('C', (space_orienter, ))
Using a QuaternionOrienter
>>> from sympy.vector import QuaternionOrienter
>>> q_orienter = QuaternionOrienter(q0, q1, q2, q3)
>>> D = N.orient_new('D', (q_orienter, ))
"""
if variable_names is None:
variable_names = self._variable_names
if vector_names is None:
vector_names = self._vector_names
if isinstance(orienters, Orienter):
if isinstance(orienters, AxisOrienter):
final_matrix = orienters.rotation_matrix(self)
else:
final_matrix = orienters.rotation_matrix()
# TODO: trigsimp is needed here so that the matrix becomes
# canonical (scalar_map also calls trigsimp; without this, you can
# end up with the same CoordinateSystem that compares differently
# due to a differently formatted matrix). However, this is
# probably not so good for performance.
final_matrix = trigsimp(final_matrix)
else:
final_matrix = Matrix(eye(3))
for orienter in orienters:
if isinstance(orienter, AxisOrienter):
final_matrix *= orienter.rotation_matrix(self)
else:
final_matrix *= orienter.rotation_matrix()
return CoordSys3D(name, rotation_matrix=final_matrix,
vector_names=vector_names,
variable_names=variable_names,
location=location,
parent=self)
def orient_new_axis(self, name, angle, axis, location=None,
vector_names=None, variable_names=None):
"""
Axis rotation is a rotation about an arbitrary axis by
some angle. The angle is supplied as a SymPy expr scalar, and
the axis is supplied as a Vector.
Parameters
==========
name : string
The name of the new coordinate system
angle : Expr
The angle by which the new system is to be rotated
axis : Vector
The axis around which the rotation has to be performed
location : Vector(optional)
The location of the new coordinate system's origin wrt this
system's origin. If not specified, the origins are taken to
be coincident.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import symbols
>>> q1 = symbols('q1')
>>> N = CoordSys3D('N')
>>> B = N.orient_new_axis('B', q1, N.i + 2 * N.j)
"""
if variable_names is None:
variable_names = self._variable_names
if vector_names is None:
vector_names = self._vector_names
orienter = AxisOrienter(angle, axis)
return self.orient_new(name, orienter,
location=location,
vector_names=vector_names,
variable_names=variable_names)
def orient_new_body(self, name, angle1, angle2, angle3,
rotation_order, location=None,
vector_names=None, variable_names=None):
"""
Body orientation takes this coordinate system through three
successive simple rotations.
Body fixed rotations include both Euler Angles and
Tait-Bryan Angles, see https://en.wikipedia.org/wiki/Euler_angles.
Parameters
==========
name : string
The name of the new coordinate system
angle1, angle2, angle3 : Expr
Three successive angles to rotate the coordinate system by
rotation_order : string
String defining the order of axes for rotation
location : Vector(optional)
The location of the new coordinate system's origin wrt this
system's origin. If not specified, the origins are taken to
be coincident.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import symbols
>>> q1, q2, q3 = symbols('q1 q2 q3')
>>> N = CoordSys3D('N')
A 'Body' fixed rotation is described by three angles and
three body-fixed rotation axes. To orient a coordinate system D
with respect to N, each sequential rotation is always about
the orthogonal unit vectors fixed to D. For example, a '123'
rotation will specify rotations about N.i, then D.j, then
D.k. (Initially, D.i is the same as N.i.)
Therefore,
>>> D = N.orient_new_body('D', q1, q2, q3, '123')
is the same as
>>> D = N.orient_new_axis('D', q1, N.i)
>>> D = D.orient_new_axis('D', q2, D.j)
>>> D = D.orient_new_axis('D', q3, D.k)
Acceptable rotation orders are of length 3, expressed in XYZ or
123, and cannot have a rotation about an axis twice in a row.
>>> B = N.orient_new_body('B', q1, q2, q3, '123')
>>> B = N.orient_new_body('B', q1, q2, 0, 'ZXZ')
>>> B = N.orient_new_body('B', 0, 0, 0, 'XYX')
"""
orienter = BodyOrienter(angle1, angle2, angle3, rotation_order)
return self.orient_new(name, orienter,
location=location,
vector_names=vector_names,
variable_names=variable_names)
def orient_new_space(self, name, angle1, angle2, angle3,
rotation_order, location=None,
vector_names=None, variable_names=None):
"""
Space rotation is similar to Body rotation, but the rotations
are applied in the opposite order.
Parameters
==========
name : string
The name of the new coordinate system
angle1, angle2, angle3 : Expr
Three successive angles to rotate the coordinate system by
rotation_order : string
String defining the order of axes for rotation
location : Vector(optional)
The location of the new coordinate system's origin wrt this
system's origin. If not specified, the origins are taken to
be coincident.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
See Also
========
CoordSys3D.orient_new_body : method to orient via Euler
angles
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import symbols
>>> q1, q2, q3 = symbols('q1 q2 q3')
>>> N = CoordSys3D('N')
To orient a coordinate system D with respect to N, each
sequential rotation is always about N's orthogonal unit vectors.
For example, a '123' rotation will specify rotations about
N.i, then N.j, then N.k.
Therefore,
>>> D = N.orient_new_space('D', q1, q2, q3, '312')
is the same as
>>> B = N.orient_new_axis('B', q1, N.i)
>>> C = B.orient_new_axis('C', q2, N.j)
>>> D = C.orient_new_axis('D', q3, N.k)
"""
orienter = SpaceOrienter(angle1, angle2, angle3, rotation_order)
return self.orient_new(name, orienter,
location=location,
vector_names=vector_names,
variable_names=variable_names)
def orient_new_quaternion(self, name, q0, q1, q2, q3, location=None,
vector_names=None, variable_names=None):
"""
Quaternion orientation orients the new CoordSys3D with
Quaternions, defined as a finite rotation about lambda, a unit
vector, by some amount theta.
This orientation is described by four parameters:
q0 = cos(theta/2)
q1 = lambda_x sin(theta/2)
q2 = lambda_y sin(theta/2)
q3 = lambda_z sin(theta/2)
Quaternion does not take in a rotation order.
Parameters
==========
name : string
The name of the new coordinate system
q0, q1, q2, q3 : Expr
The quaternions to rotate the coordinate system by
location : Vector(optional)
The location of the new coordinate system's origin wrt this
system's origin. If not specified, the origins are taken to
be coincident.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import symbols
>>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
>>> N = CoordSys3D('N')
>>> B = N.orient_new_quaternion('B', q0, q1, q2, q3)
"""
orienter = QuaternionOrienter(q0, q1, q2, q3)
return self.orient_new(name, orienter,
location=location,
vector_names=vector_names,
variable_names=variable_names)
def create_new(self, name, transformation, variable_names=None, vector_names=None):
"""
Returns a CoordSys3D which is connected to self by transformation.
Parameters
==========
name : str
The name of the new CoordSys3D instance.
transformation : Lambda, Tuple, str
Transformation defined by transformation equations or chosen
from predefined ones.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> a = CoordSys3D('a')
>>> b = a.create_new('b', transformation='spherical')
>>> b.transformation_to_parent()
(b.r*sin(b.theta)*cos(b.phi), b.r*sin(b.phi)*sin(b.theta), b.r*cos(b.theta))
>>> b.transformation_from_parent()
(sqrt(a.x**2 + a.y**2 + a.z**2), acos(a.z/sqrt(a.x**2 + a.y**2 + a.z**2)), atan2(a.y, a.x))
"""
return CoordSys3D(name, parent=self, transformation=transformation,
variable_names=variable_names, vector_names=vector_names)
def __init__(self, name, location=None, rotation_matrix=None,
parent=None, vector_names=None, variable_names=None,
latex_vects=None, pretty_vects=None, latex_scalars=None,
pretty_scalars=None, transformation=None):
# Dummy initializer for setting docstring
pass
__init__.__doc__ = __new__.__doc__
@staticmethod
def _compose_rotation_and_translation(rot, translation, parent):
r = lambda x, y, z: CoordSys3D._rotation_trans_equations(rot, (x, y, z))
if parent is None:
return r
dx, dy, dz = [translation.dot(i) for i in parent.base_vectors()]
t = lambda x, y, z: (
x + dx,
y + dy,
z + dz,
)
return lambda x, y, z: t(*r(x, y, z))
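# Illustrative sketch (assumed usage, not public API): with an identity
# rotation and a translation of 2 along N.i, the composed map simply
# shifts the first coordinate:
#
#     from sympy import eye, symbols
#     x, y, z = symbols('x y z')
#     N = CoordSys3D('N')
#     f = CoordSys3D._compose_rotation_and_translation(eye(3), 2*N.i, N)
#     f(x, y, z)  # expected: (x + 2, y, z)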
def _check_strings(arg_name, arg):
errorstr = arg_name + " must be an iterable of 3 string-types"
if len(arg) != 3:
raise ValueError(errorstr)
for s in arg:
if not isinstance(s, str):
raise TypeError(errorstr)
# Delayed import to avoid cyclic import problems:
from sympy.vector.vector import BaseVector
from sympy.vector.coordsysrect import CoordSys3D, CoordSysCartesian
from sympy.vector.vector import (Vector, VectorAdd, VectorMul,
BaseVector, VectorZero, Cross, Dot, cross, dot)
from sympy.vector.dyadic import (Dyadic, DyadicAdd, DyadicMul,
BaseDyadic, DyadicZero)
from sympy.vector.scalar import BaseScalar
from sympy.vector.deloperator import Del
from sympy.vector.functions import (express, matrix_to_vector,
laplacian, is_conservative,
is_solenoidal, scalar_potential,
directional_derivative,
scalar_potential_difference)
from sympy.vector.point import Point
from sympy.vector.orienters import (AxisOrienter, BodyOrienter,
SpaceOrienter, QuaternionOrienter)
from sympy.vector.operators import Gradient, Divergence, Curl, Laplacian, gradient, curl, divergence
__all__ = [
'Vector', 'VectorAdd', 'VectorMul', 'BaseVector', 'VectorZero', 'Cross',
'Dot', 'cross', 'dot',
'Dyadic', 'DyadicAdd', 'DyadicMul', 'BaseDyadic', 'DyadicZero',
'BaseScalar',
'Del',
'CoordSys3D', 'CoordSysCartesian',
'express', 'matrix_to_vector', 'laplacian', 'is_conservative',
'is_solenoidal', 'scalar_potential', 'directional_derivative',
'scalar_potential_difference',
'Point',
'AxisOrienter', 'BodyOrienter', 'SpaceOrienter', 'QuaternionOrienter',
'Gradient', 'Divergence', 'Curl', 'Laplacian', 'gradient', 'curl',
'divergence',
]
|
7811f27b20cc6dad313cde47533f5f0f528da1171859e935126be705a0128a36 | from typing import Any, Dict
from sympy.simplify import simplify as simp, trigsimp as tsimp
from sympy.core.decorators import call_highest_priority, _sympifyit
from sympy.core.assumptions import StdFactKB
from sympy import factor as fctr, diff as df, Integral
from sympy.core import S, Add, Mul
from sympy.core.expr import Expr
class BasisDependent(Expr):
"""
Super class containing functionality common to vectors and
dyadics.
Named so because the representation of these quantities in
sympy.vector is dependent on the basis they are expressed in.
"""
@call_highest_priority('__radd__')
def __add__(self, other):
return self._add_func(self, other)
@call_highest_priority('__add__')
def __radd__(self, other):
return self._add_func(other, self)
@call_highest_priority('__rsub__')
def __sub__(self, other):
return self._add_func(self, -other)
@call_highest_priority('__sub__')
def __rsub__(self, other):
return self._add_func(other, -self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rmul__')
def __mul__(self, other):
return self._mul_func(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__mul__')
def __rmul__(self, other):
return self._mul_func(other, self)
def __neg__(self):
return self._mul_func(S.NegativeOne, self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rdiv__')
def __div__(self, other):
return self._div_helper(other)
@call_highest_priority('__div__')
def __rdiv__(self, other):
raise TypeError("Invalid divisor for division")
__truediv__ = __div__
__rtruediv__ = __rdiv__
def evalf(self, n=15, subs=None, maxn=100, chop=False, strict=False, quad=None, verbose=False):
"""
Implements the SymPy evalf routine for this quantity.
evalf's documentation
=====================
"""
options = {'subs':subs, 'maxn':maxn, 'chop':chop, 'strict':strict,
'quad':quad, 'verbose':verbose}
vec = self.zero
for k, v in self.components.items():
vec += v.evalf(n, **options) * k
return vec
evalf.__doc__ += Expr.evalf.__doc__ # type: ignore
n = evalf
def simplify(self, **kwargs):
"""
Implements the SymPy simplify routine for this quantity.
simplify's documentation
========================
"""
simp_components = [simp(v, **kwargs) * k for
k, v in self.components.items()]
return self._add_func(*simp_components)
simplify.__doc__ += simp.__doc__ # type: ignore
def trigsimp(self, **opts):
"""
Implements the SymPy trigsimp routine, for this quantity.
trigsimp's documentation
========================
"""
trig_components = [tsimp(v, **opts) * k for
k, v in self.components.items()]
return self._add_func(*trig_components)
trigsimp.__doc__ += tsimp.__doc__ # type: ignore
def _eval_simplify(self, **kwargs):
return self.simplify(**kwargs)
def _eval_trigsimp(self, **opts):
return self.trigsimp(**opts)
def _eval_derivative(self, wrt):
return self.diff(wrt)
def _eval_Integral(self, *symbols, **assumptions):
integral_components = [Integral(v, *symbols, **assumptions) * k
for k, v in self.components.items()]
return self._add_func(*integral_components)
def as_numer_denom(self):
"""
Returns the expression as a tuple wrt the following
transformation -
expression -> a/b -> a, b
"""
return self, S.One
def factor(self, *args, **kwargs):
"""
Implements the SymPy factor routine, on the scalar parts
of a basis-dependent expression.
factor's documentation
========================
"""
fctr_components = [fctr(v, *args, **kwargs) * k for
k, v in self.components.items()]
return self._add_func(*fctr_components)
factor.__doc__ += fctr.__doc__ # type: ignore
def as_coeff_Mul(self, rational=False):
"""Efficiently extract the coefficient of a product. """
return (S.One, self)
def as_coeff_add(self, *deps):
"""Efficiently extract the coefficient of a summation. """
l = [x * self.components[x] for x in self.components]
return 0, tuple(l)
def diff(self, *args, **kwargs):
"""
Implements the SymPy diff routine, for vectors.
diff's documentation
========================
"""
for x in args:
if isinstance(x, BasisDependent):
raise TypeError("Invalid arg for differentiation")
diff_components = [df(v, *args, **kwargs) * k for
k, v in self.components.items()]
return self._add_func(*diff_components)
diff.__doc__ += df.__doc__ # type: ignore
def doit(self, **hints):
"""Calls .doit() on each term in the Dyadic"""
doit_components = [self.components[x].doit(**hints) * x
for x in self.components]
return self._add_func(*doit_components)
class BasisDependentAdd(BasisDependent, Add):
"""
Denotes sum of basis dependent quantities such that they cannot
be expressed as base or Mul instances.
"""
def __new__(cls, *args, **options):
components = {}
# Check each arg and simultaneously learn the components
for i, arg in enumerate(args):
if not isinstance(arg, cls._expr_type):
if isinstance(arg, Mul):
arg = cls._mul_func(*(arg.args))
elif isinstance(arg, Add):
arg = cls._add_func(*(arg.args))
else:
raise TypeError(str(arg) +
" cannot be interpreted correctly")
# If argument is zero, ignore
if arg == cls.zero:
continue
# Else, update components accordingly
if hasattr(arg, "components"):
for x in arg.components:
components[x] = components.get(x, 0) + arg.components[x]
temp = list(components.keys())
for x in temp:
if components[x] == 0:
del components[x]
# Handle case of zero vector
if len(components) == 0:
return cls.zero
# Build object
newargs = [x * components[x] for x in components]
obj = super(BasisDependentAdd, cls).__new__(cls,
*newargs, **options)
if isinstance(obj, Mul):
return cls._mul_func(*obj.args)
assumptions = {'commutative': True}
obj._assumptions = StdFactKB(assumptions)
obj._components = components
obj._sys = (list(components.keys()))[0]._sys
return obj
class BasisDependentMul(BasisDependent, Mul):
"""
Denotes the product of a basis-dependent quantity with a scalar.
"""
def __new__(cls, *args, **options):
from sympy.vector import Cross, Dot, Curl, Gradient
count = 0
measure_number = S.One
zeroflag = False
extra_args = []
# Determine the component and check arguments
# Also keep a count to ensure two vectors aren't
# being multiplied
for arg in args:
if isinstance(arg, cls._zero_func):
count += 1
zeroflag = True
elif arg == S.Zero:
zeroflag = True
elif isinstance(arg, (cls._base_func, cls._mul_func)):
count += 1
expr = arg._base_instance
measure_number *= arg._measure_number
elif isinstance(arg, cls._add_func):
count += 1
expr = arg
elif isinstance(arg, (Cross, Dot, Curl, Gradient)):
extra_args.append(arg)
else:
measure_number *= arg
# Make sure incompatible types weren't multiplied
if count > 1:
raise ValueError("Invalid multiplication")
elif count == 0:
return Mul(*args, **options)
# Handle zero vector case
if zeroflag:
return cls.zero
# If one of the args was a VectorAdd, return an
# appropriate VectorAdd instance
if isinstance(expr, cls._add_func):
newargs = [cls._mul_func(measure_number, x) for
x in expr.args]
return cls._add_func(*newargs)
obj = super(BasisDependentMul, cls).__new__(cls, measure_number,
expr._base_instance,
*extra_args,
**options)
if isinstance(obj, Add):
return cls._add_func(*obj.args)
obj._base_instance = expr._base_instance
obj._measure_number = measure_number
assumptions = {'commutative': True}
obj._assumptions = StdFactKB(assumptions)
obj._components = {expr._base_instance: measure_number}
obj._sys = expr._base_instance._sys
return obj
def __str__(self, printer=None):
measure_str = self._measure_number.__str__()
if ('(' in measure_str or '-' in measure_str or
'+' in measure_str):
measure_str = '(' + measure_str + ')'
return measure_str + '*' + self._base_instance.__str__(printer)
__repr__ = __str__
_sympystr = __str__
class BasisDependentZero(BasisDependent):
"""
Class to denote a zero basis dependent instance.
"""
# XXX: Can't type the keys as BaseVector because of cyclic import
# problems.
components = {} # type: Dict[Any, Expr]
def __new__(cls):
obj = super(BasisDependentZero, cls).__new__(cls)
# Pre-compute a specific hash value for the zero vector
# Use the same one always
obj._hash = tuple([S.Zero, cls]).__hash__()
return obj
def __hash__(self):
return self._hash
@call_highest_priority('__req__')
def __eq__(self, other):
return isinstance(other, self._zero_func)
__req__ = __eq__
@call_highest_priority('__radd__')
def __add__(self, other):
if isinstance(other, self._expr_type):
return other
else:
raise TypeError("Invalid argument types for addition")
@call_highest_priority('__add__')
def __radd__(self, other):
if isinstance(other, self._expr_type):
return other
else:
raise TypeError("Invalid argument types for addition")
@call_highest_priority('__rsub__')
def __sub__(self, other):
if isinstance(other, self._expr_type):
return -other
else:
raise TypeError("Invalid argument types for subtraction")
@call_highest_priority('__sub__')
def __rsub__(self, other):
if isinstance(other, self._expr_type):
return other
else:
raise TypeError("Invalid argument types for subtraction")
def __neg__(self):
return self
def normalize(self):
"""
Returns the normalized version of this vector.
"""
return self
def __str__(self, printer=None):
return '0'
__repr__ = __str__
_sympystr = __str__
from typing import Type
from sympy.core.assumptions import StdFactKB
from sympy.core import S, Pow, sympify
from sympy.core.expr import AtomicExpr, Expr
from sympy.core.compatibility import default_sort_key
from sympy import sqrt, ImmutableMatrix as Matrix, Add
from sympy.vector.coordsysrect import CoordSys3D
from sympy.vector.basisdependent import (BasisDependent, BasisDependentAdd,
BasisDependentMul, BasisDependentZero)
from sympy.vector.dyadic import BaseDyadic, Dyadic, DyadicAdd
class Vector(BasisDependent):
"""
Super class for all Vector classes.
Ideally, neither this class nor any of its subclasses should be
instantiated by the user.
"""
is_Vector = True
_op_priority = 12.0
_expr_type = None # type: Type[Vector]
_mul_func = None # type: Type[Vector]
_add_func = None # type: Type[Vector]
_zero_func = None # type: Type[Vector]
_base_func = None # type: Type[Vector]
zero = None # type: VectorZero
@property
def components(self):
"""
Returns the components of this vector in the form of a
Python dictionary mapping BaseVector instances to the
corresponding measure numbers.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> C = CoordSys3D('C')
>>> v = 3*C.i + 4*C.j + 5*C.k
>>> v.components
{C.i: 3, C.j: 4, C.k: 5}
"""
# The '_components' attribute is defined according to the
# subclass of Vector the instance belongs to.
return self._components
def magnitude(self):
"""
Returns the magnitude of this vector.
"""
return sqrt(self & self)
def normalize(self):
"""
Returns the normalized version of this vector.
"""
return self / self.magnitude()
def dot(self, other):
"""
Returns the dot product of this Vector, either with another
Vector, or a Dyadic, or a Del operator.
If 'other' is a Vector, returns the dot product scalar (Sympy
expression).
If 'other' is a Dyadic, the dot product is returned as a Vector.
If 'other' is an instance of Del, returns the directional
derivative operator as a Python function. If this function is
applied to a scalar expression, it returns the directional
derivative of the scalar field wrt this Vector.
Parameters
==========
other: Vector/Dyadic/Del
The Vector or Dyadic we are dotting with, or a Del operator .
Examples
========
>>> from sympy.vector import CoordSys3D, Del
>>> C = CoordSys3D('C')
>>> delop = Del()
>>> C.i.dot(C.j)
0
>>> C.i & C.i
1
>>> v = 3*C.i + 4*C.j + 5*C.k
>>> v.dot(C.k)
5
>>> (C.i & delop)(C.x*C.y*C.z)
C.y*C.z
>>> d = C.i.outer(C.i)
>>> C.i.dot(d)
C.i
"""
# Check special cases
if isinstance(other, Dyadic):
if isinstance(self, VectorZero):
return Vector.zero
outvec = Vector.zero
for k, v in other.components.items():
vect_dot = k.args[0].dot(self)
outvec += vect_dot * v * k.args[1]
return outvec
from sympy.vector.deloperator import Del
if not isinstance(other, Vector) and not isinstance(other, Del):
raise TypeError(str(other) + " is not a vector, dyadic or " +
"del operator")
# Check if the other is a del operator
if isinstance(other, Del):
def directional_derivative(field):
from sympy.vector.functions import directional_derivative
return directional_derivative(field, self)
return directional_derivative
return dot(self, other)
def __and__(self, other):
return self.dot(other)
__and__.__doc__ = dot.__doc__
def cross(self, other):
"""
Returns the cross product of this Vector with another Vector or
Dyadic instance.
The cross product is a Vector, if 'other' is a Vector. If 'other'
is a Dyadic, this returns a Dyadic instance.
Parameters
==========
other: Vector/Dyadic
The Vector or Dyadic we are crossing with.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> C = CoordSys3D('C')
>>> C.i.cross(C.j)
C.k
>>> C.i ^ C.i
0
>>> v = 3*C.i + 4*C.j + 5*C.k
>>> v ^ C.i
5*C.j + (-4)*C.k
>>> d = C.i.outer(C.i)
>>> C.j.cross(d)
(-1)*(C.k|C.i)
"""
# Check special cases
if isinstance(other, Dyadic):
if isinstance(self, VectorZero):
return Dyadic.zero
outdyad = Dyadic.zero
for k, v in other.components.items():
cross_product = self.cross(k.args[0])
outer = cross_product.outer(k.args[1])
outdyad += v * outer
return outdyad
return cross(self, other)
def __xor__(self, other):
return self.cross(other)
__xor__.__doc__ = cross.__doc__
def outer(self, other):
"""
Returns the outer product of this vector with another, in the
form of a Dyadic instance.
Parameters
==========
other : Vector
The Vector with respect to which the outer product is to
be computed.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> N = CoordSys3D('N')
>>> N.i.outer(N.j)
(N.i|N.j)
"""
# Handle the special cases
if not isinstance(other, Vector):
raise TypeError("Invalid operand for outer product")
elif (isinstance(self, VectorZero) or
isinstance(other, VectorZero)):
return Dyadic.zero
# Iterate over components of both the vectors to generate
# the required Dyadic instance
args = []
for k1, v1 in self.components.items():
for k2, v2 in other.components.items():
args.append((v1 * v2) * BaseDyadic(k1, k2))
return DyadicAdd(*args)
def projection(self, other, scalar=False):
"""
Returns the vector or scalar projection of the 'other' on 'self'.
Examples
========
>>> from sympy.vector.coordsysrect import CoordSys3D
>>> from sympy.vector.vector import Vector, BaseVector
>>> C = CoordSys3D('C')
>>> i, j, k = C.base_vectors()
>>> v1 = i + j + k
>>> v2 = 3*i + 4*j
>>> v1.projection(v2)
7/3*C.i + 7/3*C.j + 7/3*C.k
>>> v1.projection(v2, scalar=True)
7/3
"""
if self.equals(Vector.zero):
return S.Zero if scalar else Vector.zero
if scalar:
return self.dot(other) / self.dot(self)
else:
return self.dot(other) / self.dot(self) * self
@property
def _projections(self):
"""
Returns the components of this vector, including components
whose values are zero.
Examples
========
>>> from sympy.vector import CoordSys3D, Vector
>>> C = CoordSys3D('C')
>>> v1 = 3*C.i + 4*C.j + 5*C.k
>>> v1._projections
(3, 4, 5)
>>> v2 = C.x*C.y*C.z*C.i
>>> v2._projections
(C.x*C.y*C.z, 0, 0)
>>> v3 = Vector.zero
>>> v3._projections
(0, 0, 0)
"""
from sympy.vector.operators import _get_coord_sys_from_expr
if isinstance(self, VectorZero):
return (S.Zero, S.Zero, S.Zero)
base_vec = next(iter(_get_coord_sys_from_expr(self))).base_vectors()
return tuple([self.dot(i) for i in base_vec])
def __or__(self, other):
return self.outer(other)
__or__.__doc__ = outer.__doc__
def to_matrix(self, system):
"""
Returns the matrix form of this vector with respect to the
specified coordinate system.
Parameters
==========
system : CoordSys3D
The system wrt which the matrix form is to be computed
Examples
========
>>> from sympy.vector import CoordSys3D
>>> C = CoordSys3D('C')
>>> from sympy.abc import a, b, c
>>> v = a*C.i + b*C.j + c*C.k
>>> v.to_matrix(C)
Matrix([
[a],
[b],
[c]])
"""
return Matrix([self.dot(unit_vec) for unit_vec in
system.base_vectors()])
def separate(self):
"""
The constituents of this vector in different coordinate systems,
as per its definition.
Returns a dict mapping each CoordSys3D to the corresponding
constituent Vector.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> R1 = CoordSys3D('R1')
>>> R2 = CoordSys3D('R2')
>>> v = R1.i + R2.i
>>> v.separate() == {R1: R1.i, R2: R2.i}
True
"""
parts = {}
for vect, measure in self.components.items():
parts[vect.system] = (parts.get(vect.system, Vector.zero) +
vect * measure)
return parts
def _div_helper(one, other):
""" Helper for division involving vectors. """
if isinstance(one, Vector) and isinstance(other, Vector):
raise TypeError("Cannot divide two vectors")
elif isinstance(one, Vector):
if other == S.Zero:
raise ValueError("Cannot divide a vector by zero")
return VectorMul(one, Pow(other, S.NegativeOne))
else:
raise TypeError("Invalid division involving a vector")
class BaseVector(Vector, AtomicExpr):
"""
Class to denote a base vector.
Unicode pretty forms in Python 2 should use the prefix ``u``.
"""
def __new__(cls, index, system, pretty_str=None, latex_str=None):
if pretty_str is None:
pretty_str = "x{0}".format(index)
if latex_str is None:
latex_str = "x_{0}".format(index)
pretty_str = str(pretty_str)
latex_str = str(latex_str)
# Verify arguments
if index not in range(0, 3):
raise ValueError("index must be 0, 1 or 2")
if not isinstance(system, CoordSys3D):
raise TypeError("system should be a CoordSys3D")
name = system._vector_names[index]
# Initialize an object
obj = super(BaseVector, cls).__new__(cls, S(index), system)
# Assign important attributes
obj._base_instance = obj
obj._components = {obj: S.One}
obj._measure_number = S.One
obj._name = system._name + '.' + name
obj._pretty_form = u'' + pretty_str
obj._latex_form = latex_str
obj._system = system
# The _id is used for printing purposes
obj._id = (index, system)
assumptions = {'commutative': True}
obj._assumptions = StdFactKB(assumptions)
# This attr is used for re-expression to one of the systems
# involved in the definition of the Vector. Applies to
# VectorMul and VectorAdd too.
obj._sys = system
return obj
@property
def system(self):
return self._system
def __str__(self, printer=None):
return self._name
@property
def free_symbols(self):
return {self}
__repr__ = __str__
_sympystr = __str__
class VectorAdd(BasisDependentAdd, Vector):
"""
Class to denote sum of Vector instances.
"""
def __new__(cls, *args, **options):
obj = BasisDependentAdd.__new__(cls, *args, **options)
return obj
def __str__(self, printer=None):
ret_str = ''
items = list(self.separate().items())
items.sort(key=lambda x: x[0].__str__())
for system, vect in items:
base_vects = system.base_vectors()
for x in base_vects:
if x in vect.components:
temp_vect = self.components[x] * x
ret_str += temp_vect.__str__(printer) + " + "
return ret_str[:-3]
__repr__ = __str__
_sympystr = __str__
class VectorMul(BasisDependentMul, Vector):
"""
Class to denote products of scalars and BaseVectors.
"""
def __new__(cls, *args, **options):
obj = BasisDependentMul.__new__(cls, *args, **options)
return obj
@property
def base_vector(self):
""" The BaseVector involved in the product. """
return self._base_instance
@property
def measure_number(self):
""" The scalar expression involved in the definition of
this VectorMul.
"""
return self._measure_number
class VectorZero(BasisDependentZero, Vector):
"""
Class to denote a zero vector
"""
_op_priority = 12.1
_pretty_form = u'0'
_latex_form = r'\mathbf{\hat{0}}'
def __new__(cls):
obj = BasisDependentZero.__new__(cls)
return obj
class Cross(Vector):
"""
Represents unevaluated Cross product.
Examples
========
>>> from sympy.vector import CoordSys3D, Cross
>>> R = CoordSys3D('R')
>>> v1 = R.i + R.j + R.k
>>> v2 = R.x * R.i + R.y * R.j + R.z * R.k
>>> Cross(v1, v2)
Cross(R.i + R.j + R.k, R.x*R.i + R.y*R.j + R.z*R.k)
>>> Cross(v1, v2).doit()
(-R.y + R.z)*R.i + (R.x - R.z)*R.j + (-R.x + R.y)*R.k
"""
def __new__(cls, expr1, expr2):
expr1 = sympify(expr1)
expr2 = sympify(expr2)
if default_sort_key(expr1) > default_sort_key(expr2):
return -Cross(expr2, expr1)
obj = Expr.__new__(cls, expr1, expr2)
obj._expr1 = expr1
obj._expr2 = expr2
return obj
def doit(self, **kwargs):
return cross(self._expr1, self._expr2)
class Dot(Expr):
"""
Represents unevaluated Dot product.
Examples
========
>>> from sympy.vector import CoordSys3D, Dot
>>> from sympy import symbols
>>> R = CoordSys3D('R')
>>> a, b, c = symbols('a b c')
>>> v1 = R.i + R.j + R.k
>>> v2 = a * R.i + b * R.j + c * R.k
>>> Dot(v1, v2)
Dot(R.i + R.j + R.k, a*R.i + b*R.j + c*R.k)
>>> Dot(v1, v2).doit()
a + b + c
"""
def __new__(cls, expr1, expr2):
expr1 = sympify(expr1)
expr2 = sympify(expr2)
expr1, expr2 = sorted([expr1, expr2], key=default_sort_key)
obj = Expr.__new__(cls, expr1, expr2)
obj._expr1 = expr1
obj._expr2 = expr2
return obj
def doit(self, **kwargs):
return dot(self._expr1, self._expr2)
def cross(vect1, vect2):
"""
Returns cross product of two vectors.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy.vector.vector import cross
>>> R = CoordSys3D('R')
>>> v1 = R.i + R.j + R.k
>>> v2 = R.x * R.i + R.y * R.j + R.z * R.k
>>> cross(v1, v2)
(-R.y + R.z)*R.i + (R.x - R.z)*R.j + (-R.x + R.y)*R.k
"""
if isinstance(vect1, Add):
return VectorAdd.fromiter(cross(i, vect2) for i in vect1.args)
if isinstance(vect2, Add):
return VectorAdd.fromiter(cross(vect1, i) for i in vect2.args)
if isinstance(vect1, BaseVector) and isinstance(vect2, BaseVector):
if vect1._sys == vect2._sys:
n1 = vect1.args[0]
n2 = vect2.args[0]
if n1 == n2:
return Vector.zero
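# The remaining index picks the resulting base vector; the sign follows
# the cyclic order i -> j -> k, e.g. j x i gives -k because
# (1 + 1) % 3 == 2, which is not the index of i.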
n3 = ({0,1,2}.difference({n1, n2})).pop()
sign = 1 if ((n1 + 1) % 3 == n2) else -1
return sign*vect1._sys.base_vectors()[n3]
from .functions import express
try:
v = express(vect1, vect2._sys)
except ValueError:
return Cross(vect1, vect2)
else:
return cross(v, vect2)
if isinstance(vect1, VectorZero) or isinstance(vect2, VectorZero):
return Vector.zero
if isinstance(vect1, VectorMul):
v1, m1 = next(iter(vect1.components.items()))
return m1*cross(v1, vect2)
if isinstance(vect2, VectorMul):
v2, m2 = next(iter(vect2.components.items()))
return m2*cross(vect1, v2)
return Cross(vect1, vect2)
def dot(vect1, vect2):
"""
Returns dot product of two vectors.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy.vector.vector import dot
>>> R = CoordSys3D('R')
>>> v1 = R.i + R.j + R.k
>>> v2 = R.x * R.i + R.y * R.j + R.z * R.k
>>> dot(v1, v2)
R.x + R.y + R.z
"""
if isinstance(vect1, Add):
return Add.fromiter(dot(i, vect2) for i in vect1.args)
if isinstance(vect2, Add):
return Add.fromiter(dot(vect1, i) for i in vect2.args)
if isinstance(vect1, BaseVector) and isinstance(vect2, BaseVector):
if vect1._sys == vect2._sys:
return S.One if vect1 == vect2 else S.Zero
from .functions import express
try:
v = express(vect2, vect1._sys)
except ValueError:
return Dot(vect1, vect2)
else:
return dot(vect1, v)
if isinstance(vect1, VectorZero) or isinstance(vect2, VectorZero):
return S.Zero
if isinstance(vect1, VectorMul):
v1, m1 = next(iter(vect1.components.items()))
return m1*dot(v1, vect2)
if isinstance(vect2, VectorMul):
v2, m2 = next(iter(vect2.components.items()))
return m2*dot(vect1, v2)
return Dot(vect1, vect2)
Vector._expr_type = Vector
Vector._mul_func = VectorMul
Vector._add_func = VectorAdd
Vector._zero_func = VectorZero
Vector._base_func = BaseVector
Vector.zero = VectorZero()
from sympy.core.basic import Basic
from sympy.vector.vector import Vector
from sympy.vector.coordsysrect import CoordSys3D
from sympy.vector.functions import _path
from sympy import Symbol
from sympy.core.cache import cacheit
class Point(Basic):
"""
Represents a point in 3-D space.
"""
def __new__(cls, name, position=Vector.zero, parent_point=None):
name = str(name)
# Check the args first
if not isinstance(position, Vector):
raise TypeError(
"position should be an instance of Vector, not %s" % type(
position))
if (not isinstance(parent_point, Point) and
parent_point is not None):
raise TypeError(
"parent_point should be an instance of Point, not %s" % type(
parent_point))
# Super class construction
if parent_point is None:
obj = super(Point, cls).__new__(cls, Symbol(name), position)
else:
obj = super(Point, cls).__new__(cls, Symbol(name),
position, parent_point)
# Decide the object parameters
obj._name = name
obj._pos = position
if parent_point is None:
obj._parent = None
obj._root = obj
else:
obj._parent = parent_point
obj._root = parent_point._root
# Return object
return obj
@cacheit
def position_wrt(self, other):
"""
Returns the position vector of this Point with respect to
another Point/CoordSys3D.
Parameters
==========
other : Point/CoordSys3D
If other is a Point, the position of this Point wrt it is
returned. If it is an instance of CoordSys3D, the position
wrt its origin is returned.
Examples
========
>>> from sympy.vector import Point, CoordSys3D
>>> N = CoordSys3D('N')
>>> p1 = N.origin.locate_new('p1', 10 * N.i)
>>> N.origin.position_wrt(p1)
(-10)*N.i
"""
if (not isinstance(other, Point) and
not isinstance(other, CoordSys3D)):
raise TypeError(str(other) +
"is not a Point or CoordSys3D")
if isinstance(other, CoordSys3D):
other = other.origin
# Handle special cases
if other == self:
return Vector.zero
elif other == self._parent:
return self._pos
elif other._parent == self:
return -1 * other._pos
# Else, use point tree to calculate position
rootindex, path = _path(self, other)
result = Vector.zero
i = -1
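# path[:rootindex] climbs from self up to (but excluding) the first
# common ancestor, adding each intermediate position vector; the
# ancestor itself is skipped by the `i += 2` below; the remaining
# entries descend towards `other`, and their positions are subtracted.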
for i in range(rootindex):
result += path[i]._pos
i += 2
while i < len(path):
result -= path[i]._pos
i += 1
return result
def locate_new(self, name, position):
"""
Returns a new Point located at the given position wrt this
Point.
Thus, the position vector of the new Point wrt this one will
be equal to the given 'position' parameter.
Parameters
==========
name : str
Name of the new point
position : Vector
The position vector of the new Point wrt this one
Examples
========
>>> from sympy.vector import Point, CoordSys3D
>>> N = CoordSys3D('N')
>>> p1 = N.origin.locate_new('p1', 10 * N.i)
>>> p1.position_wrt(N.origin)
10*N.i
"""
return Point(name, position, self)
def express_coordinates(self, coordinate_system):
"""
Returns the Cartesian/rectangular coordinates of this point
wrt the origin of the given CoordSys3D instance.
Parameters
==========
coordinate_system : CoordSys3D
The coordinate system to express the coordinates of this
Point in.
Examples
========
>>> from sympy.vector import Point, CoordSys3D
>>> N = CoordSys3D('N')
>>> p1 = N.origin.locate_new('p1', 10 * N.i)
>>> p2 = p1.locate_new('p2', 5 * N.j)
>>> p2.express_coordinates(N)
(10, 5, 0)
"""
# Determine the position vector
pos_vect = self.position_wrt(coordinate_system.origin)
# Express it in the given coordinate system
return tuple(pos_vect.to_matrix(coordinate_system))
def __str__(self, printer=None):
return self._name
__repr__ = __str__
_sympystr = __str__
from sympy.core import AtomicExpr, Symbol, S
from sympy.core.sympify import _sympify
from sympy.printing.pretty.stringpict import prettyForm
from sympy.printing.precedence import PRECEDENCE
class BaseScalar(AtomicExpr):
"""
A coordinate symbol/base scalar.
Ideally, users should not instantiate this class.
Unicode pretty forms in Python 2 should use the `u` prefix.
"""
def __new__(cls, index, system, pretty_str=None, latex_str=None):
from sympy.vector.coordsysrect import CoordSys3D
if pretty_str is None:
pretty_str = "x{0}".format(index)
elif isinstance(pretty_str, Symbol):
pretty_str = pretty_str.name
if latex_str is None:
latex_str = "x_{0}".format(index)
elif isinstance(latex_str, Symbol):
latex_str = latex_str.name
index = _sympify(index)
system = _sympify(system)
obj = super(BaseScalar, cls).__new__(cls, index, system)
if not isinstance(system, CoordSys3D):
raise TypeError("system should be a CoordSys3D")
if index not in range(0, 3):
raise ValueError("Invalid index specified.")
# The _id is used for equating purposes, and for hashing
obj._id = (index, system)
obj._name = obj.name = system._name + '.' + system._variable_names[index]
obj._pretty_form = u'' + pretty_str
obj._latex_form = latex_str
obj._system = system
return obj
is_commutative = True
is_symbol = True
@property
def free_symbols(self):
return {self}
_diff_wrt = True
def _eval_derivative(self, s):
if self == s:
return S.One
return S.Zero
def _latex(self, printer=None):
return self._latex_form
def _pretty(self, printer=None):
return prettyForm(self._pretty_form)
precedence = PRECEDENCE['Atom']
@property
def system(self):
return self._system
def __str__(self, printer=None):
return self._name
__repr__ = __str__
_sympystr = __str__
from typing import Type
from sympy.vector.basisdependent import (BasisDependent, BasisDependentAdd,
BasisDependentMul, BasisDependentZero)
from sympy.core import S, Pow
from sympy.core.expr import AtomicExpr
from sympy import ImmutableMatrix as Matrix
import sympy.vector
class Dyadic(BasisDependent):
"""
Super class for all Dyadic-classes.
References
==========
.. [1] https://en.wikipedia.org/wiki/Dyadic_tensor
.. [2] Kane, T., Levinson, D. Dynamics Theory and Applications. 1985
McGraw-Hill
"""
_op_priority = 13.0
_expr_type = None # type: Type[Dyadic]
_mul_func = None # type: Type[Dyadic]
_add_func = None # type: Type[Dyadic]
_zero_func = None # type: Type[Dyadic]
_base_func = None # type: Type[Dyadic]
zero = None # type: DyadicZero
@property
def components(self):
"""
Returns the components of this dyadic in the form of a
Python dictionary mapping BaseDyadic instances to the
corresponding measure numbers.
"""
# The '_components' attribute is defined according to the
# subclass of Dyadic the instance belongs to.
return self._components
def dot(self, other):
"""
Returns the dot product (also called inner product) of this
Dyadic with another Dyadic or Vector.
If 'other' is a Dyadic, this returns a Dyadic. Else, it returns
a Vector (unless an error is encountered).
Parameters
==========
other : Dyadic/Vector
The other Dyadic or Vector to take the inner product with
Examples
========
>>> from sympy.vector import CoordSys3D
>>> N = CoordSys3D('N')
>>> D1 = N.i.outer(N.j)
>>> D2 = N.j.outer(N.j)
>>> D1.dot(D2)
(N.i|N.j)
>>> D1.dot(N.j)
N.i
"""
Vector = sympy.vector.Vector
if isinstance(other, BasisDependentZero):
return Vector.zero
elif isinstance(other, Vector):
outvec = Vector.zero
for k, v in self.components.items():
vect_dot = k.args[1].dot(other)
outvec += vect_dot * v * k.args[0]
return outvec
elif isinstance(other, Dyadic):
outdyad = Dyadic.zero
for k1, v1 in self.components.items():
for k2, v2 in other.components.items():
vect_dot = k1.args[1].dot(k2.args[0])
outer_product = k1.args[0].outer(k2.args[1])
outdyad += vect_dot * v1 * v2 * outer_product
return outdyad
else:
raise TypeError("Inner product is not defined for " +
str(type(other)) + " and Dyadics.")
def __and__(self, other):
return self.dot(other)
__and__.__doc__ = dot.__doc__
def cross(self, other):
"""
Returns the cross product between this Dyadic and a Vector, as a
Dyadic instance.
Parameters
==========
other : Vector
The Vector that we are crossing this Dyadic with
Examples
========
>>> from sympy.vector import CoordSys3D
>>> N = CoordSys3D('N')
>>> d = N.i.outer(N.i)
>>> d.cross(N.j)
(N.i|N.k)
"""
Vector = sympy.vector.Vector
if other == Vector.zero:
return Dyadic.zero
elif isinstance(other, Vector):
outdyad = Dyadic.zero
for k, v in self.components.items():
cross_product = k.args[1].cross(other)
outer = k.args[0].outer(cross_product)
outdyad += v * outer
return outdyad
else:
raise TypeError(str(type(other)) + " not supported for " +
"cross with dyadics")
def __xor__(self, other):
return self.cross(other)
__xor__.__doc__ = cross.__doc__
def to_matrix(self, system, second_system=None):
"""
Returns the matrix form of the dyadic with respect to one or two
coordinate systems.
Parameters
==========
system : CoordSys3D
The coordinate system that the rows and columns of the matrix
correspond to. If a second system is provided, this
only corresponds to the rows of the matrix.
second_system : CoordSys3D, optional, default=None
The coordinate system that the columns of the matrix correspond
to.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> N = CoordSys3D('N')
>>> v = N.i + 2*N.j
>>> d = v.outer(N.i)
>>> d.to_matrix(N)
Matrix([
[1, 0, 0],
[2, 0, 0],
[0, 0, 0]])
>>> from sympy import Symbol
>>> q = Symbol('q')
>>> P = N.orient_new_axis('P', q, N.k)
>>> d.to_matrix(N, P)
Matrix([
[ cos(q), -sin(q), 0],
[2*cos(q), -2*sin(q), 0],
[ 0, 0, 0]])
"""
if second_system is None:
second_system = system
return Matrix([i.dot(self).dot(j) for i in system for j in
second_system]).reshape(3, 3)
def _div_helper(one, other):
""" Helper for division involving dyadics """
if isinstance(one, Dyadic) and isinstance(other, Dyadic):
raise TypeError("Cannot divide two dyadics")
elif isinstance(one, Dyadic):
return DyadicMul(one, Pow(other, S.NegativeOne))
else:
raise TypeError("Cannot divide by a dyadic")
class BaseDyadic(Dyadic, AtomicExpr):
"""
Class to denote a base dyadic tensor component.
"""
def __new__(cls, vector1, vector2):
Vector = sympy.vector.Vector
BaseVector = sympy.vector.BaseVector
VectorZero = sympy.vector.VectorZero
# Verify arguments
if not isinstance(vector1, (BaseVector, VectorZero)) or \
not isinstance(vector2, (BaseVector, VectorZero)):
raise TypeError("BaseDyadic cannot be composed of non-base " +
"vectors")
# Handle special case of zero vector
elif vector1 == Vector.zero or vector2 == Vector.zero:
return Dyadic.zero
# Initialize instance
obj = super(BaseDyadic, cls).__new__(cls, vector1, vector2)
obj._base_instance = obj
obj._measure_number = 1
obj._components = {obj: S.One}
obj._sys = vector1._sys
obj._pretty_form = (u'(' + vector1._pretty_form + '|' +
vector2._pretty_form + ')')
obj._latex_form = ('(' + vector1._latex_form + "{|}" +
vector2._latex_form + ')')
return obj
def __str__(self, printer=None):
return "(" + str(self.args[0]) + "|" + str(self.args[1]) + ")"
_sympystr = __str__
_sympyrepr = _sympystr
class DyadicMul(BasisDependentMul, Dyadic):
""" Products of scalars and BaseDyadics """
def __new__(cls, *args, **options):
obj = BasisDependentMul.__new__(cls, *args, **options)
return obj
@property
def base_dyadic(self):
""" The BaseDyadic involved in the product. """
return self._base_instance
@property
def measure_number(self):
""" The scalar expression involved in the definition of
this DyadicMul.
"""
return self._measure_number
class DyadicAdd(BasisDependentAdd, Dyadic):
""" Class to hold dyadic sums """
def __new__(cls, *args, **options):
obj = BasisDependentAdd.__new__(cls, *args, **options)
return obj
def __str__(self, printer=None):
ret_str = ''
items = list(self.components.items())
items.sort(key=lambda x: x[0].__str__())
for k, v in items:
temp_dyad = k * v
ret_str += temp_dyad.__str__(printer) + " + "
return ret_str[:-3]
__repr__ = __str__
_sympystr = __str__
class DyadicZero(BasisDependentZero, Dyadic):
"""
Class to denote a zero dyadic
"""
_op_priority = 13.1
_pretty_form = u'(0|0)'
_latex_form = r'(\mathbf{\hat{0}}|\mathbf{\hat{0}})'
def __new__(cls):
obj = BasisDependentZero.__new__(cls)
return obj
Dyadic._expr_type = Dyadic
Dyadic._mul_func = DyadicMul
Dyadic._add_func = DyadicAdd
Dyadic._zero_func = DyadicZero
Dyadic._base_func = BaseDyadic
Dyadic.zero = DyadicZero()
"""Parabolic geometrical entity.
Contains
* Parabola
"""
from __future__ import division, print_function
from sympy.core import S
from sympy.core.compatibility import ordered
from sympy.core.symbol import _symbol
from sympy import symbols, simplify, solve # type:ignore
from sympy.geometry.entity import GeometryEntity, GeometrySet
from sympy.geometry.point import Point, Point2D
from sympy.geometry.line import Line, Line2D, Ray2D, Segment2D, LinearEntity3D
from sympy.geometry.ellipse import Ellipse
from sympy.functions import sign
class Parabola(GeometrySet):
"""A parabolic GeometryEntity.
A parabola is declared with a point, called the 'focus', and
a line, called the 'directrix'.
Only vertical or horizontal parabolas are currently supported.
Parameters
==========
focus : Point
Default value is Point(0, 0)
directrix : Line
Attributes
==========
focus
directrix
axis of symmetry
focal length
p parameter
vertex
eccentricity
Raises
======
ValueError
When `focus` is not a two dimensional point.
When `focus` is a point of directrix.
NotImplementedError
When `directrix` is neither horizontal nor vertical.
Examples
========
>>> from sympy import Parabola, Point, Line
>>> p1 = Parabola(Point(0, 0), Line(Point(5, 8), Point(7,8)))
>>> p1.focus
Point2D(0, 0)
>>> p1.directrix
Line2D(Point2D(5, 8), Point2D(7, 8))
"""
def __new__(cls, focus=None, directrix=None, **kwargs):
if focus:
focus = Point(focus, dim=2)
else:
focus = Point(0, 0)
directrix = Line(directrix)
if (directrix.slope != 0 and directrix.slope != S.Infinity):
raise NotImplementedError('The directrix must be a horizontal'
' or vertical line')
if directrix.contains(focus):
raise ValueError('The focus must not be a point of directrix')
return GeometryEntity.__new__(cls, focus, directrix, **kwargs)
@property
def ambient_dimension(self):
"""Returns the ambient dimension of parabola.
Returns
=======
ambient_dimension : integer
Examples
========
>>> from sympy import Parabola, Point, Line
>>> f1 = Point(0, 0)
>>> p1 = Parabola(f1, Line(Point(5, 8), Point(7, 8)))
>>> p1.ambient_dimension
2
"""
return S(2)
@property
def axis_of_symmetry(self):
"""The axis of symmetry of the parabola.
Returns
=======
axis_of_symmetry : Line
See Also
========
sympy.geometry.line.Line
Examples
========
>>> from sympy import Parabola, Point, Line
>>> p1 = Parabola(Point(0, 0), Line(Point(5, 8), Point(7, 8)))
>>> p1.axis_of_symmetry
Line2D(Point2D(0, 0), Point2D(0, 1))
"""
return self.directrix.perpendicular_line(self.focus)
@property
def directrix(self):
"""The directrix of the parabola.
Returns
=======
directrix : Line
See Also
========
sympy.geometry.line.Line
Examples
========
>>> from sympy import Parabola, Point, Line
>>> l1 = Line(Point(5, 8), Point(7, 8))
>>> p1 = Parabola(Point(0, 0), l1)
>>> p1.directrix
Line2D(Point2D(5, 8), Point2D(7, 8))
"""
return self.args[1]
@property
def eccentricity(self):
"""The eccentricity of the parabola.
Returns
=======
eccentricity : number
A parabola may also be characterized as a conic section with an
eccentricity of 1. As a consequence of this, all parabolas are
similar, meaning that while they can be different sizes,
they are all the same shape.
See Also
========
https://en.wikipedia.org/wiki/Parabola
Examples
========
>>> from sympy import Parabola, Point, Line
>>> p1 = Parabola(Point(0, 0), Line(Point(5, 8), Point(7, 8)))
>>> p1.eccentricity
1
Notes
=====
The eccentricity for every Parabola is 1 by definition.
"""
return S.One
def equation(self, x='x', y='y'):
"""The equation of the parabola.
Parameters
==========
x : str, optional
Label for the x-axis. Default value is 'x'.
y : str, optional
Label for the y-axis. Default value is 'y'.
Returns
=======
equation : sympy expression
Examples
========
>>> from sympy import Parabola, Point, Line
>>> p1 = Parabola(Point(0, 0), Line(Point(5, 8), Point(7, 8)))
>>> p1.equation()
-x**2 - 16*y + 64
>>> p1.equation('f')
-f**2 - 16*y + 64
>>> p1.equation(y='z')
-x**2 - 16*z + 64
"""
x = _symbol(x, real=True)
y = _symbol(y, real=True)
if (self.axis_of_symmetry.slope == 0):
t1 = 4 * (self.p_parameter) * (x - self.vertex.x)
t2 = (y - self.vertex.y)**2
else:
t1 = 4 * (self.p_parameter) * (y - self.vertex.y)
t2 = (x - self.vertex.x)**2
return t1 - t2
@property
def focal_length(self):
"""The focal length of the parabola.
Returns
=======
focal_length : number or symbolic expression
Notes
=====
The distance between the vertex and the focus
(or the vertex and directrix), measured along the axis
of symmetry, is the "focal length".
See Also
========
https://en.wikipedia.org/wiki/Parabola
Examples
========
>>> from sympy import Parabola, Point, Line
>>> p1 = Parabola(Point(0, 0), Line(Point(5, 8), Point(7, 8)))
>>> p1.focal_length
4
"""
distance = self.directrix.distance(self.focus)
focal_length = distance/2
return focal_length
@property
def focus(self):
"""The focus of the parabola.
Returns
=======
focus : Point
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Parabola, Point, Line
>>> f1 = Point(0, 0)
>>> p1 = Parabola(f1, Line(Point(5, 8), Point(7, 8)))
>>> p1.focus
Point2D(0, 0)
"""
return self.args[0]
def intersection(self, o):
"""The intersection of the parabola and another geometrical entity `o`.
Parameters
==========
o : GeometryEntity, LinearEntity
Returns
=======
intersection : list of GeometryEntity objects
Examples
========
>>> from sympy import Parabola, Point, Ellipse, Line, Segment
>>> p1 = Point(0,0)
>>> l1 = Line(Point(1, -2), Point(-1,-2))
>>> parabola1 = Parabola(p1, l1)
>>> parabola1.intersection(Ellipse(Point(0, 0), 2, 5))
[Point2D(-2, 0), Point2D(2, 0)]
>>> parabola1.intersection(Line(Point(-7, 3), Point(12, 3)))
[Point2D(-4, 3), Point2D(4, 3)]
>>> parabola1.intersection(Segment((-12, -65), (14, -68)))
[]
"""
x, y = symbols('x y', real=True)
parabola_eq = self.equation()
if isinstance(o, Parabola):
if o in self:
return [o]
else:
return list(ordered([Point(i) for i in solve([parabola_eq, o.equation()], [x, y])]))
elif isinstance(o, Point2D):
if simplify(parabola_eq.subs([(x, o._args[0]), (y, o._args[1])])) == 0:
return [o]
else:
return []
elif isinstance(o, (Segment2D, Ray2D)):
result = solve([parabola_eq, Line2D(o.points[0], o.points[1]).equation()], [x, y])
return list(ordered([Point2D(i) for i in result if i in o]))
elif isinstance(o, (Line2D, Ellipse)):
return list(ordered([Point2D(i) for i in solve([parabola_eq, o.equation()], [x, y])]))
elif isinstance(o, LinearEntity3D):
raise TypeError('Entity must be two dimensional, not three dimensional')
else:
raise TypeError('Wrong type of argument was put')
@property
def p_parameter(self):
"""P is a parameter of parabola.
Returns
=======
p : number or symbolic expression
Notes
=====
The absolute value of p is the focal length. The sign on p tells
which way the parabola faces. Vertical parabolas that open up
and horizontal parabolas that open right give a positive value for p.
Vertical parabolas that open down and horizontal parabolas that open
left give a negative value for p.
See Also
========
http://www.sparknotes.com/math/precalc/conicsections/section2.rhtml
Examples
========
>>> from sympy import Parabola, Point, Line
>>> p1 = Parabola(Point(0, 0), Line(Point(5, 8), Point(7, 8)))
>>> p1.p_parameter
-4
"""
if self.axis_of_symmetry.slope == 0:
x = self.directrix.coefficients[2]
p = sign(self.focus.args[0] + x)
else:
y = self.directrix.coefficients[2]
p = sign(self.focus.args[1] + y)
return p * self.focal_length
@property
def vertex(self):
"""The vertex of the parabola.
Returns
=======
vertex : Point
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Parabola, Point, Line
>>> p1 = Parabola(Point(0, 0), Line(Point(5, 8), Point(7, 8)))
>>> p1.vertex
Point2D(0, 4)
"""
focus = self.focus
if (self.axis_of_symmetry.slope == 0):
vertex = Point(focus.args[0] - self.p_parameter, focus.args[1])
else:
vertex = Point(focus.args[0], focus.args[1] - self.p_parameter)
return vertex
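# A minimal illustrative sketch (assumes only the Parabola API defined above):
# the sign of p_parameter follows the opening direction, and the vertex sits a
# focal length away from the focus toward the directrix.
def _parabola_opening_sketch():
    # directrix below the focus -> opens up -> p > 0
    opens_up = Parabola(Point(0, 0), Line(Point(5, -8), Point(7, -8)))
    # directrix above the focus -> opens down -> p < 0
    opens_down = Parabola(Point(0, 0), Line(Point(5, 8), Point(7, 8)))
    assert (opens_up.p_parameter, opens_down.p_parameter) == (4, -4)
    assert opens_up.vertex == Point(0, -4)  # halfway between focus and directrix
    return opens_up, opens_down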
|
90b75c09fcd2422b8dc49e04cf8ae1c92603022389b3b8a63d1eb59b9c4a3f2c | """Curves in 2-dimensional Euclidean space.
Contains
========
Curve
"""
from __future__ import division, print_function
from sympy import sqrt
from sympy.core import sympify, diff
from sympy.core.compatibility import is_sequence
from sympy.core.containers import Tuple
from sympy.core.symbol import _symbol
from sympy.geometry.entity import GeometryEntity, GeometrySet
from sympy.geometry.point import Point
from sympy.integrals import integrate
class Curve(GeometrySet):
"""A curve in space.
A curve is defined by parametric functions for the coordinates, a
parameter and the lower and upper bounds for the parameter value.
Parameters
==========
function : list of functions
limits : 3-tuple
Function parameter and lower and upper bounds.
Attributes
==========
functions
parameter
limits
Raises
======
ValueError
When `functions` are specified incorrectly.
When `limits` are specified incorrectly.
See Also
========
sympy.core.function.Function
sympy.polys.polyfuncs.interpolate
Examples
========
>>> from sympy import sin, cos, Symbol, interpolate
>>> from sympy.abc import t, a
>>> from sympy.geometry import Curve
>>> C = Curve((sin(t), cos(t)), (t, 0, 2))
>>> C.functions
(sin(t), cos(t))
>>> C.limits
(t, 0, 2)
>>> C.parameter
t
>>> C = Curve((t, interpolate([1, 4, 9, 16], t)), (t, 0, 1)); C
Curve((t, t**2), (t, 0, 1))
>>> C.subs(t, 4)
Point2D(4, 16)
>>> C.arbitrary_point(a)
Point2D(a, a**2)
"""
def __new__(cls, function, limits):
fun = sympify(function)
if not is_sequence(fun) or len(fun) != 2:
raise ValueError("Function argument should be (x(t), y(t)) "
"but got %s" % str(function))
if not is_sequence(limits) or len(limits) != 3:
raise ValueError("Limit argument should be (t, tmin, tmax) "
"but got %s" % str(limits))
return GeometryEntity.__new__(cls, Tuple(*fun), Tuple(*limits))
def __call__(self, f):
return self.subs(self.parameter, f)
def _eval_subs(self, old, new):
if old == self.parameter:
return Point(*[f.subs(old, new) for f in self.functions])
def arbitrary_point(self, parameter='t'):
"""
A parameterized point on the curve.
Parameters
==========
parameter : str or Symbol, optional
Default value is 't'.
The Curve's own parameter is used if None or self.parameter is given;
otherwise the provided symbol is used.
Returns
=======
arbitrary_point : Point
Raises
======
ValueError
When `parameter` already appears in the functions.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Symbol
>>> from sympy.abc import s
>>> from sympy.geometry import Curve
>>> C = Curve([2*s, s**2], (s, 0, 2))
>>> C.arbitrary_point()
Point2D(2*t, t**2)
>>> C.arbitrary_point(C.parameter)
Point2D(2*s, s**2)
>>> C.arbitrary_point(None)
Point2D(2*s, s**2)
>>> C.arbitrary_point(Symbol('a'))
Point2D(2*a, a**2)
"""
if parameter is None:
return Point(*self.functions)
tnew = _symbol(parameter, self.parameter, real=True)
t = self.parameter
if (tnew.name != t.name and
tnew.name in (f.name for f in self.free_symbols)):
raise ValueError('Symbol %s already appears in object '
'and cannot be used as a parameter.' % tnew.name)
return Point(*[w.subs(t, tnew) for w in self.functions])
@property
def free_symbols(self):
"""
Return a set of symbols other than the bound symbols used to
parametrically define the Curve.
Examples
========
>>> from sympy.abc import t, a
>>> from sympy.geometry import Curve
>>> Curve((t, t**2), (t, 0, 2)).free_symbols
set()
>>> Curve((t, t**2), (t, a, 2)).free_symbols
{a}
"""
free = set()
for a in self.functions + self.limits[1:]:
free |= a.free_symbols
free = free.difference({self.parameter})
return free
@property
def ambient_dimension(self):
return len(self.args[0])
@property
def functions(self):
"""The functions specifying the curve.
Returns
=======
functions : list of parameterized coordinate functions.
See Also
========
parameter
Examples
========
>>> from sympy.abc import t
>>> from sympy.geometry import Curve
>>> C = Curve((t, t**2), (t, 0, 2))
>>> C.functions
(t, t**2)
"""
return self.args[0]
@property
def limits(self):
"""The limits for the curve.
Returns
=======
limits : tuple
Contains parameter and lower and upper limits.
See Also
========
plot_interval
Examples
========
>>> from sympy.abc import t
>>> from sympy.geometry import Curve
>>> C = Curve([t, t**3], (t, -2, 2))
>>> C.limits
(t, -2, 2)
"""
return self.args[1]
@property
def parameter(self):
"""The curve function variable.
Returns
=======
parameter : SymPy symbol
See Also
========
functions
Examples
========
>>> from sympy.abc import t
>>> from sympy.geometry import Curve
>>> C = Curve([t, t**2], (t, 0, 2))
>>> C.parameter
t
"""
return self.args[1][0]
@property
def length(self):
"""The curve length.
Examples
========
>>> from sympy.geometry.curve import Curve
>>> from sympy import cos, sin
>>> from sympy.abc import t
>>> Curve((t, t), (t, 0, 1)).length
sqrt(2)
"""
integrand = sqrt(sum(diff(func, self.limits[0])**2 for func in self.functions))
return integrate(integrand, self.limits)
def plot_interval(self, parameter='t'):
"""The plot interval for the default geometric plot of the curve.
Parameters
==========
parameter : str or Symbol, optional
Default value is 't';
otherwise the provided symbol is used.
Returns
=======
plot_interval : list (plot interval)
[parameter, lower_bound, upper_bound]
See Also
========
limits : Returns limits of the parameter interval
Examples
========
>>> from sympy import Curve, sin
>>> from sympy.abc import x, t, s
>>> Curve((x, sin(x)), (x, 1, 2)).plot_interval()
[t, 1, 2]
>>> Curve((x, sin(x)), (x, 1, 2)).plot_interval(s)
[s, 1, 2]
"""
t = _symbol(parameter, self.parameter, real=True)
return [t] + list(self.limits[1:])
def rotate(self, angle=0, pt=None):
"""Rotate ``angle`` radians counterclockwise about Point ``pt``.
The default pt is the origin, Point(0, 0).
Examples
========
>>> from sympy.geometry.curve import Curve
>>> from sympy.abc import x
>>> from sympy import pi
>>> Curve((x, x), (x, 0, 1)).rotate(pi/2)
Curve((-x, x), (x, 0, 1))
"""
from sympy.matrices import Matrix, rot_axis3
if pt:
pt = -Point(pt, dim=2)
else:
pt = Point(0,0)
rv = self.translate(*pt.args)
f = list(rv.functions)
f.append(0)
f = Matrix(1, 3, f)
f *= rot_axis3(angle)
rv = self.func(f[0, :2].tolist()[0], self.limits)
if pt is not None:
pt = -pt
return rv.translate(*pt.args)
return rv
def scale(self, x=1, y=1, pt=None):
"""Override GeometryEntity.scale since Curve is not made up of Points.
Examples
========
>>> from sympy.geometry.curve import Curve
>>> from sympy import pi
>>> from sympy.abc import x
>>> Curve((x, x), (x, 0, 1)).scale(2)
Curve((2*x, x), (x, 0, 1))
"""
if pt:
pt = Point(pt, dim=2)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
fx, fy = self.functions
return self.func((fx*x, fy*y), self.limits)
def translate(self, x=0, y=0):
"""Translate the Curve by (x, y).
Examples
========
>>> from sympy.geometry.curve import Curve
>>> from sympy import pi
>>> from sympy.abc import x
>>> Curve((x, x), (x, 0, 1)).translate(1, 2)
Curve((x + 1, x + 2), (x, 0, 1))
"""
fx, fy = self.functions
return self.func((fx + x, fy + y), self.limits)
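# A minimal illustrative sketch (assumes only the Curve API defined above):
# length integrates sqrt(x'(t)**2 + y'(t)**2) over the limits, so the straight
# segment (3*t, 4*t) for t in [0, 1] comes out with length sqrt(3**2 + 4**2).
def _curve_length_sketch():
    from sympy import Symbol
    t = Symbol('t', real=True)
    segment = Curve((3*t, 4*t), (t, 0, 1))
    assert segment.length == 5
    return segment.length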
|
701a55a3df8b677ab45a244e91fdd17f242bb5e6e05c34da8d6e47c83ba920fa | """Geometrical Planes.
Contains
========
Plane
"""
from __future__ import division, print_function
from sympy import simplify # type:ignore
from sympy.core import Dummy, Rational, S, Symbol
from sympy.core.symbol import _symbol
from sympy.core.compatibility import is_sequence
from sympy.functions.elementary.trigonometric import cos, sin, acos, asin, sqrt
from sympy.matrices import Matrix
from sympy.polys.polytools import cancel
from sympy.solvers import solve, linsolve
from sympy.utilities.iterables import uniq
from sympy.utilities.misc import filldedent, func_name, Undecidable
from .entity import GeometryEntity
from .point import Point, Point3D
from .line import Line, Ray, Segment, Line3D, LinearEntity3D, Ray3D, Segment3D
class Plane(GeometryEntity):
"""
A plane is a flat, two-dimensional surface. A plane is the two-dimensional
analogue of a point (zero-dimensions), a line (one-dimension) and a solid
(three-dimensions). A plane can generally be constructed by two types of
inputs. They are three non-collinear points and a point and the plane's
normal vector.
Attributes
==========
p1
normal_vector
Examples
========
>>> from sympy import Plane, Point3D
>>> from sympy.abc import x
>>> Plane(Point3D(1, 1, 1), Point3D(2, 3, 4), Point3D(2, 2, 2))
Plane(Point3D(1, 1, 1), (-1, 2, -1))
>>> Plane((1, 1, 1), (2, 3, 4), (2, 2, 2))
Plane(Point3D(1, 1, 1), (-1, 2, -1))
>>> Plane(Point3D(1, 1, 1), normal_vector=(1,4,7))
Plane(Point3D(1, 1, 1), (1, 4, 7))
"""
def __new__(cls, p1, a=None, b=None, **kwargs):
p1 = Point3D(p1, dim=3)
if a and b:
p2 = Point(a, dim=3)
p3 = Point(b, dim=3)
if Point3D.are_collinear(p1, p2, p3):
raise ValueError('Enter three non-collinear points')
a = p1.direction_ratio(p2)
b = p1.direction_ratio(p3)
normal_vector = tuple(Matrix(a).cross(Matrix(b)))
else:
a = kwargs.pop('normal_vector', a)
if is_sequence(a) and len(a) == 3:
normal_vector = Point3D(a).args
else:
raise ValueError(filldedent('''
Either provide 3 3D points or a point with a
normal vector expressed as a sequence of length 3'''))
if all(coord.is_zero for coord in normal_vector):
raise ValueError('Normal vector cannot be zero vector')
return GeometryEntity.__new__(cls, p1, normal_vector, **kwargs)
def __contains__(self, o):
from sympy.geometry.line import LinearEntity, LinearEntity3D
x, y, z = map(Dummy, 'xyz')
k = self.equation(x, y, z)
if isinstance(o, (LinearEntity, LinearEntity3D)):
t = Dummy()
d = Point3D(o.arbitrary_point(t))
e = k.subs([(x, d.x), (y, d.y), (z, d.z)])
return e.equals(0)
try:
o = Point(o, dim=3, strict=True)
d = k.xreplace(dict(zip((x, y, z), o.args)))
return d.equals(0)
except TypeError:
return False
def angle_between(self, o):
"""Angle between the plane and other geometric entity.
Parameters
==========
LinearEntity3D, Plane.
Returns
=======
angle : angle in radians
Notes
=====
This method accepts only 3D entities as its parameter. To calculate
the angle between a 2D entity and a plane, first convert the 2D entity
to 3D by projecting it onto a desired plane and then proceed to
calculate the angle.
Examples
========
>>> from sympy import Point3D, Line3D, Plane
>>> a = Plane(Point3D(1, 2, 2), normal_vector=(1, 2, 3))
>>> b = Line3D(Point3D(1, 3, 4), Point3D(2, 2, 2))
>>> a.angle_between(b)
-asin(sqrt(21)/6)
"""
from sympy.geometry.line import LinearEntity3D
if isinstance(o, LinearEntity3D):
a = Matrix(self.normal_vector)
b = Matrix(o.direction_ratio)
c = a.dot(b)
d = sqrt(sum([i**2 for i in self.normal_vector]))
e = sqrt(sum([i**2 for i in o.direction_ratio]))
return asin(c/(d*e))
if isinstance(o, Plane):
a = Matrix(self.normal_vector)
b = Matrix(o.normal_vector)
c = a.dot(b)
d = sqrt(sum([i**2 for i in self.normal_vector]))
e = sqrt(sum([i**2 for i in o.normal_vector]))
return acos(c/(d*e))
def arbitrary_point(self, u=None, v=None):
""" Returns an arbitrary point on the Plane. If given two
parameters, the point ranges over the entire plane. If given 1
or no parameters, returns a point with one parameter which,
when varying from 0 to 2*pi, moves the point in a circle of
radius 1 about p1 of the Plane.
Examples
========
>>> from sympy.geometry import Plane, Ray
>>> from sympy.abc import u, v, t, r
>>> p = Plane((1, 1, 1), normal_vector=(1, 0, 0))
>>> p.arbitrary_point(u, v)
Point3D(1, u + 1, v + 1)
>>> p.arbitrary_point(t)
Point3D(1, cos(t) + 1, sin(t) + 1)
While arbitrary values of u and v can move the point anywhere in
the plane, the single-parameter point can be used to construct a
ray whose arbitrary point can be located at angle t and radius
r from p.p1:
>>> Ray(p.p1, _).arbitrary_point(r)
Point3D(1, r*cos(t) + 1, r*sin(t) + 1)
Returns
=======
Point3D
"""
circle = v is None
if circle:
u = _symbol(u or 't', real=True)
else:
u = _symbol(u or 'u', real=True)
v = _symbol(v or 'v', real=True)
x, y, z = self.normal_vector
a, b, c = self.p1.args
# x1, y1, z1 is a nonzero vector parallel to the plane
if x.is_zero and y.is_zero:
x1, y1, z1 = S.One, S.Zero, S.Zero
else:
x1, y1, z1 = -y, x, S.Zero
# x2, y2, z2 is also parallel to the plane, and orthogonal to x1, y1, z1
x2, y2, z2 = tuple(Matrix((x, y, z)).cross(Matrix((x1, y1, z1))))
if circle:
x1, y1, z1 = (w/sqrt(x1**2 + y1**2 + z1**2) for w in (x1, y1, z1))
x2, y2, z2 = (w/sqrt(x2**2 + y2**2 + z2**2) for w in (x2, y2, z2))
p = Point3D(a + x1*cos(u) + x2*sin(u), \
b + y1*cos(u) + y2*sin(u), \
c + z1*cos(u) + z2*sin(u))
else:
p = Point3D(a + x1*u + x2*v, b + y1*u + y2*v, c + z1*u + z2*v)
return p
@staticmethod
def are_concurrent(*planes):
"""Is a sequence of Planes concurrent?
Two or more Planes are concurrent if their intersections
are a common line.
Parameters
==========
planes: list
Returns
=======
Boolean
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(5, 0, 0), normal_vector=(1, -1, 1))
>>> b = Plane(Point3D(0, -2, 0), normal_vector=(3, 1, 1))
>>> c = Plane(Point3D(0, -1, 0), normal_vector=(5, -1, 9))
>>> Plane.are_concurrent(a, b)
True
>>> Plane.are_concurrent(a, b, c)
False
"""
planes = list(uniq(planes))
for i in planes:
if not isinstance(i, Plane):
raise ValueError('All objects should be Planes but got %s' % i.func)
if len(planes) < 2:
return False
planes = list(planes)
first = planes.pop(0)
sol = first.intersection(planes[0])
if sol == []:
return False
else:
line = sol[0]
for i in planes[1:]:
l = first.intersection(i)
if not l or not l[0] in line:
return False
return True
def distance(self, o):
"""Distance between the plane and another geometric entity.
Parameters
==========
Point3D, LinearEntity3D, Plane.
Returns
=======
distance
Notes
=====
This method accepts only 3D entities as its parameter. To calculate
the distance between a 2D entity and a plane, first convert the 2D
entity to 3D by projecting it onto a desired plane and then proceed to
calculate the distance.
Examples
========
>>> from sympy import Point, Point3D, Line, Line3D, Plane
>>> a = Plane(Point3D(1, 1, 1), normal_vector=(1, 1, 1))
>>> b = Point3D(1, 2, 3)
>>> a.distance(b)
sqrt(3)
>>> c = Line3D(Point3D(2, 3, 1), Point3D(1, 2, 2))
>>> a.distance(c)
0
"""
if self.intersection(o) != []:
return S.Zero
if isinstance(o, (Segment3D, Ray3D)):
a, b = o.p1, o.p2
pi, = self.intersection(Line3D(a, b))
if pi in o:
return self.distance(pi)
elif a in Segment3D(pi, b):
return self.distance(a)
else:
assert isinstance(o, Segment3D)
return self.distance(b)
# following code handles `Point3D`, `LinearEntity3D`, `Plane`
a = o if isinstance(o, Point3D) else o.p1
n = Point3D(self.normal_vector).unit
d = (a - self.p1).dot(n)
return abs(d)
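# Illustrative continuation of the docstring example above (a hedged sketch
# using the same plane ``a``): a Segment3D that stops short of the plane takes
# the Segment3D/Ray3D branch and measures from its nearest endpoint, e.g.
#     >>> from sympy import Segment3D
#     >>> a.distance(Segment3D(Point3D(0, 0, 0), Point3D(-1, -1, -1)))
#     sqrt(3)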
def equals(self, o):
"""
Returns True if self and o are the same mathematical entities.
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(1, 2, 3), normal_vector=(1, 1, 1))
>>> b = Plane(Point3D(1, 2, 3), normal_vector=(2, 2, 2))
>>> c = Plane(Point3D(1, 2, 3), normal_vector=(-1, 4, 6))
>>> a.equals(a)
True
>>> a.equals(b)
True
>>> a.equals(c)
False
"""
if isinstance(o, Plane):
a = self.equation()
b = o.equation()
return simplify(a / b).is_constant()
else:
return False
def equation(self, x=None, y=None, z=None):
"""The equation of the Plane.
Examples
========
>>> from sympy import Point3D, Plane
>>> a = Plane(Point3D(1, 1, 2), Point3D(2, 4, 7), Point3D(3, 5, 1))
>>> a.equation()
-23*x + 11*y - 2*z + 16
>>> a = Plane(Point3D(1, 4, 2), normal_vector=(6, 6, 6))
>>> a.equation()
6*x + 6*y + 6*z - 42
"""
x, y, z = [i if i else Symbol(j, real=True) for i, j in zip((x, y, z), 'xyz')]
a = Point3D(x, y, z)
b = self.p1.direction_ratio(a)
c = self.normal_vector
return (sum(i*j for i, j in zip(b, c)))
def intersection(self, o):
""" The intersection with other geometrical entity.
Parameters
==========
Point, Point3D, LinearEntity, LinearEntity3D, Plane
Returns
=======
List
Examples
========
>>> from sympy import Point, Point3D, Line, Line3D, Plane
>>> a = Plane(Point3D(1, 2, 3), normal_vector=(1, 1, 1))
>>> b = Point3D(1, 2, 3)
>>> a.intersection(b)
[Point3D(1, 2, 3)]
>>> c = Line3D(Point3D(1, 4, 7), Point3D(2, 2, 2))
>>> a.intersection(c)
[Point3D(2, 2, 2)]
>>> d = Plane(Point3D(6, 0, 0), normal_vector=(2, -5, 3))
>>> e = Plane(Point3D(2, 0, 0), normal_vector=(3, 4, -3))
>>> d.intersection(e)
[Line3D(Point3D(78/23, -24/23, 0), Point3D(147/23, 321/23, 23))]
"""
from sympy.geometry.line import LinearEntity, LinearEntity3D
if not isinstance(o, GeometryEntity):
o = Point(o, dim=3)
if isinstance(o, Point):
if o in self:
return [o]
else:
return []
if isinstance(o, (LinearEntity, LinearEntity3D)):
# recast to 3D
p1, p2 = o.p1, o.p2
if isinstance(o, Segment):
o = Segment3D(p1, p2)
elif isinstance(o, Ray):
o = Ray3D(p1, p2)
elif isinstance(o, Line):
o = Line3D(p1, p2)
else:
raise ValueError('unhandled linear entity: %s' % o.func)
if o in self:
return [o]
else:
t = Dummy() # unnamed else it may clash with a symbol in o
a = Point3D(o.arbitrary_point(t))
p1, n = self.p1, Point3D(self.normal_vector)
# TODO: Replace solve with solveset, when this line is tested
c = solve((a - p1).dot(n), t)
if not c:
return []
else:
c = [i for i in c if i.is_real is not False]
if len(c) > 1:
c = [i for i in c if i.is_real]
if len(c) != 1:
raise Undecidable("not sure which point is real")
p = a.subs(t, c[0])
if p not in o:
return [] # e.g. a segment might not intersect a plane
return [p]
if isinstance(o, Plane):
if self.equals(o):
return [self]
if self.is_parallel(o):
return []
else:
x, y, z = map(Dummy, 'xyz')
a, b = Matrix([self.normal_vector]), Matrix([o.normal_vector])
c = list(a.cross(b))
d = self.equation(x, y, z)
e = o.equation(x, y, z)
result = list(linsolve([d, e], x, y, z))[0]
for i in (x, y, z): result = result.subs(i, 0)
return [Line3D(Point3D(result), direction_ratio=c)]
def is_coplanar(self, o):
""" Returns True if `o` is coplanar with self, else False.
Examples
========
>>> from sympy import Plane, Point3D
>>> o = (0, 0, 0)
>>> p = Plane(o, (1, 1, 1))
>>> p2 = Plane(o, (2, 2, 2))
>>> p == p2
False
>>> p.is_coplanar(p2)
True
"""
if isinstance(o, Plane):
x, y, z = map(Dummy, 'xyz')
return not cancel(self.equation(x, y, z)/o.equation(x, y, z)).has(x, y, z)
if isinstance(o, Point3D):
return o in self
elif isinstance(o, LinearEntity3D):
return all(i in self for i in o.points)
elif isinstance(o, GeometryEntity): # XXX should only be handling 2D objects now
return all(i == 0 for i in self.normal_vector[:2])
def is_parallel(self, l):
"""Is the given geometric entity parallel to the plane?
Parameters
==========
LinearEntity3D or Plane
Returns
=======
Boolean
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(1,4,6), normal_vector=(2, 4, 6))
>>> b = Plane(Point3D(3,1,3), normal_vector=(4, 8, 12))
>>> a.is_parallel(b)
True
"""
from sympy.geometry.line import LinearEntity3D
if isinstance(l, LinearEntity3D):
a = l.direction_ratio
b = self.normal_vector
c = sum([i*j for i, j in zip(a, b)])
if c == 0:
return True
else:
return False
elif isinstance(l, Plane):
a = Matrix(l.normal_vector)
b = Matrix(self.normal_vector)
if a.cross(b).is_zero_matrix:
return True
else:
return False
def is_perpendicular(self, l):
"""is the given geometric entity perpendicualar to the given plane?
Parameters
==========
LinearEntity3D or Plane
Returns
=======
Boolean
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(1,4,6), normal_vector=(2, 4, 6))
>>> b = Plane(Point3D(2, 2, 2), normal_vector=(-1, 2, -1))
>>> a.is_perpendicular(b)
True
"""
from sympy.geometry.line import LinearEntity3D
if isinstance(l, LinearEntity3D):
a = Matrix(l.direction_ratio)
b = Matrix(self.normal_vector)
if a.cross(b).is_zero_matrix:
return True
else:
return False
elif isinstance(l, Plane):
a = Matrix(l.normal_vector)
b = Matrix(self.normal_vector)
if a.dot(b) == 0:
return True
else:
return False
else:
return False
@property
def normal_vector(self):
"""Normal vector of the given plane.
Examples
========
>>> from sympy import Point3D, Plane
>>> a = Plane(Point3D(1, 1, 1), Point3D(2, 3, 4), Point3D(2, 2, 2))
>>> a.normal_vector
(-1, 2, -1)
>>> a = Plane(Point3D(1, 1, 1), normal_vector=(1, 4, 7))
>>> a.normal_vector
(1, 4, 7)
"""
return self.args[1]
@property
def p1(self):
"""The only defining point of the plane. Others can be obtained from the
arbitrary_point method.
See Also
========
sympy.geometry.point.Point3D
Examples
========
>>> from sympy import Point3D, Plane
>>> a = Plane(Point3D(1, 1, 1), Point3D(2, 3, 4), Point3D(2, 2, 2))
>>> a.p1
Point3D(1, 1, 1)
"""
return self.args[0]
def parallel_plane(self, pt):
"""
Plane parallel to the given plane and passing through the point pt.
Parameters
==========
pt: Point3D
Returns
=======
Plane
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(1, 4, 6), normal_vector=(2, 4, 6))
>>> a.parallel_plane(Point3D(2, 3, 5))
Plane(Point3D(2, 3, 5), (2, 4, 6))
"""
a = self.normal_vector
return Plane(pt, normal_vector=a)
def perpendicular_line(self, pt):
"""A line perpendicular to the given plane.
Parameters
==========
pt: Point3D
Returns
=======
Line3D
Examples
========
>>> from sympy import Plane, Point3D, Line3D
>>> a = Plane(Point3D(1,4,6), normal_vector=(2, 4, 6))
>>> a.perpendicular_line(Point3D(9, 8, 7))
Line3D(Point3D(9, 8, 7), Point3D(11, 12, 13))
"""
a = self.normal_vector
return Line3D(pt, direction_ratio=a)
def perpendicular_plane(self, *pts):
"""
Return a perpendicular plane passing through the given points. If the
direction ratio between the points is the same as the Plane's normal
vector then, to select from the infinite number of possible planes,
a third point will be chosen on the z-axis (or the y-axis
if the normal vector is already parallel to the z-axis). If less than
two points are given they will be supplied as follows: if no point is
given then pt1 will be self.p1; if a second point is not given it will
be a point through pt1 on a line parallel to the z-axis (if the normal
is not already the z-axis, otherwise on the line parallel to the
y-axis).
Parameters
==========
pts: 0, 1 or 2 Point3D
Returns
=======
Plane
Examples
========
>>> from sympy import Plane, Point3D, Line3D
>>> a, b = Point3D(0, 0, 0), Point3D(0, 1, 0)
>>> Z = (0, 0, 1)
>>> p = Plane(a, normal_vector=Z)
>>> p.perpendicular_plane(a, b)
Plane(Point3D(0, 0, 0), (1, 0, 0))
"""
if len(pts) > 2:
raise ValueError('No more than 2 pts should be provided.')
pts = list(pts)
if len(pts) == 0:
pts.append(self.p1)
if len(pts) == 1:
x, y, z = self.normal_vector
if x == y == 0:
dir = (0, 1, 0)
else:
dir = (0, 0, 1)
pts.append(pts[0] + Point3D(*dir))
p1, p2 = [Point(i, dim=3) for i in pts]
l = Line3D(p1, p2)
n = Line3D(p1, direction_ratio=self.normal_vector)
if l in n: # XXX should an error be raised instead?
# there are infinitely many perpendicular planes;
x, y, z = self.normal_vector
if x == y == 0:
# the z axis is the normal so pick a pt on the y-axis
p3 = Point3D(0, 1, 0) # case 1
else:
# else pick a pt on the z axis
p3 = Point3D(0, 0, 1) # case 2
# in case that point is already given, move it a bit
if p3 in l:
p3 *= 2 # case 3
else:
p3 = p1 + Point3D(*self.normal_vector) # case 4
return Plane(p1, p2, p3)
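# Illustrative continuation of the docstring example above (a hedged sketch
# using ``p``, ``a`` and ``Z`` from it): when the two given points line up with
# the normal, a third point on the y-axis (case 1 above) pins down one of the
# infinitely many perpendicular planes, e.g.
#     >>> p.perpendicular_plane(a, a + Point3D(*Z))
#     Plane(Point3D(0, 0, 0), (-1, 0, 0))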
def projection_line(self, line):
"""Project the given line onto the plane through the normal plane
containing the line.
Parameters
==========
LinearEntity or LinearEntity3D
Returns
=======
Point3D, Line3D, Ray3D or Segment3D
Notes
=====
For the interaction between 2D and 3D lines (segments, rays), you should
convert the 2D line to 3D by using this method. For example, to find the
intersection between a 2D and a 3D line, convert the 2D line to a 3D line
by projecting it onto a required plane and then proceed to find the
intersection between those lines.
Examples
========
>>> from sympy import Plane, Line, Line3D, Point, Point3D
>>> a = Plane(Point3D(1, 1, 1), normal_vector=(1, 1, 1))
>>> b = Line(Point3D(1, 1), Point3D(2, 2))
>>> a.projection_line(b)
Line3D(Point3D(4/3, 4/3, 1/3), Point3D(5/3, 5/3, -1/3))
>>> c = Line3D(Point3D(1, 1, 1), Point3D(2, 2, 2))
>>> a.projection_line(c)
Point3D(1, 1, 1)
"""
from sympy.geometry.line import LinearEntity, LinearEntity3D
if not isinstance(line, (LinearEntity, LinearEntity3D)):
raise NotImplementedError('Enter a linear entity only')
a, b = self.projection(line.p1), self.projection(line.p2)
if a == b:
# projection does not imply intersection so for
# this case (line parallel to plane's normal) we
# return the projection point
return a
if isinstance(line, (Line, Line3D)):
return Line3D(a, b)
if isinstance(line, (Ray, Ray3D)):
return Ray3D(a, b)
if isinstance(line, (Segment, Segment3D)):
return Segment3D(a, b)
def projection(self, pt):
"""Project the given point onto the plane along the plane normal.
Parameters
==========
Point or Point3D
Returns
=======
Point3D
Examples
========
>>> from sympy import Plane, Point, Point3D
>>> A = Plane(Point3D(1, 1, 2), normal_vector=(1, 1, 1))
The projection is along the normal vector direction, not the z
axis, so (1, 1) does not project to (1, 1, 2) on the plane A:
>>> b = Point3D(1, 1)
>>> A.projection(b)
Point3D(5/3, 5/3, 2/3)
>>> _ in A
True
But the point (1, 1, 2) projects to (1, 1) on the XY-plane:
>>> XY = Plane((0, 0, 0), (0, 0, 1))
>>> XY.projection((1, 1, 2))
Point3D(1, 1, 0)
"""
rv = Point(pt, dim=3)
if rv in self:
return rv
return self.intersection(Line3D(rv, rv + Point3D(self.normal_vector)))[0]
def random_point(self, seed=None):
""" Returns a random point on the Plane.
Returns
=======
Point3D
Examples
========
>>> from sympy import Plane
>>> p = Plane((1, 0, 0), normal_vector=(0, 1, 0))
>>> r = p.random_point(seed=42) # seed value is optional
>>> r.n(3)
Point3D(2.29, 0, -1.35)
The random point can be moved to lie on the circle of radius
1 centered on p1:
>>> c = p.p1 + (r - p.p1).unit
>>> c.distance(p.p1).equals(1)
True
"""
import random
if seed is not None:
rng = random.Random(seed)
else:
rng = random
u, v = Dummy('u'), Dummy('v')
params = {
u: 2*Rational(rng.gauss(0, 1)) - 1,
v: 2*Rational(rng.gauss(0, 1)) - 1}
return self.arbitrary_point(u, v).subs(params)
def parameter_value(self, other, u, v=None):
"""Return the parameter(s) corresponding to the given point.
Examples
========
>>> from sympy import Plane, Point, pi
>>> from sympy.abc import t, u, v
>>> p = Plane((2, 0, 0), (0, 0, 1), (0, 1, 0))
By default, the parameter value returned defines a point
that is a distance of 1 from the Plane's p1 value and
in line with the given point:
>>> on_circle = p.arbitrary_point(t).subs(t, pi/4)
>>> on_circle.distance(p.p1)
1
>>> p.parameter_value(on_circle, t)
{t: pi/4}
Moving the point twice as far from p1 does not change
the parameter value:
>>> off_circle = p.p1 + (on_circle - p.p1)*2
>>> off_circle.distance(p.p1)
2
>>> p.parameter_value(off_circle, t)
{t: pi/4}
If the 2-value parameter is desired, supply the two
parameter symbols and a replacement dictionary will
be returned:
>>> p.parameter_value(on_circle, u, v)
{u: sqrt(10)/10, v: sqrt(10)/30}
>>> p.parameter_value(off_circle, u, v)
{u: sqrt(10)/5, v: sqrt(10)/15}
"""
from sympy.geometry.point import Point
from sympy.solvers.solvers import solve
if not isinstance(other, GeometryEntity):
other = Point(other, dim=self.ambient_dimension)
if not isinstance(other, Point):
raise ValueError("other must be a point")
if other == self.p1:
return other
if isinstance(u, Symbol) and v is None:
delta = self.arbitrary_point(u) - self.p1
eq = delta - (other - self.p1).unit
sol = solve(eq, u, dict=True)
elif isinstance(u, Symbol) and isinstance(v, Symbol):
pt = self.arbitrary_point(u, v)
sol = solve(pt - other, (u, v), dict=True)
else:
raise ValueError('expecting 1 or 2 symbols')
if not sol:
raise ValueError("Given point is not on %s" % func_name(self))
return sol[0] # {t: tval} or {u: uval, v: vval}
@property
def ambient_dimension(self):
return self.p1.ambient_dimension
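# A minimal illustrative sketch (assumes only the Plane API defined above):
# the single-parameter form of arbitrary_point traces the unit circle about
# p1, so any such point lies in the plane at distance 1 from p1.
def _plane_arbitrary_point_sketch():
    from sympy import Symbol, pi
    t = Symbol('t', real=True)
    p = Plane(Point3D(1, 1, 1), normal_vector=(1, 0, 0))
    q = p.arbitrary_point(t).subs(t, pi/3)
    assert q in p                   # the point lies in the plane
    assert q.distance(p.p1) == 1    # and on the unit circle about p1
    return q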
|
ec924c1edcce7f7b35e3ea718eb6aba7bd5c30b7c061d87097e58c480637bd1e | """The definition of the base geometrical entity with attributes common to
all derived geometrical entities.
Contains
========
GeometryEntity
GeometrySet
Notes
=====
A GeometryEntity is any object that has special geometric properties.
A GeometrySet is a superclass of any GeometryEntity that can also
be viewed as a sympy.sets.Set. In particular, points are the only
GeometryEntity not considered a Set.
Rn is a GeometrySet representing n-dimensional Euclidean space. R2 and
R3 are currently the only ambient spaces implemented.
"""
from __future__ import division, print_function
from sympy.core.basic import Basic
from sympy.core.compatibility import is_sequence
from sympy.core.containers import Tuple
from sympy.core.sympify import sympify
from sympy.functions import cos, sin
from sympy.matrices import eye
from sympy.multipledispatch import dispatch
from sympy.sets import Set
from sympy.sets.handlers.intersection import intersection_sets
from sympy.sets.handlers.union import union_sets
from sympy.utilities.misc import func_name
# How entities are ordered; used by __cmp__ in GeometryEntity
ordering_of_classes = [
"Point2D",
"Point3D",
"Point",
"Segment2D",
"Ray2D",
"Line2D",
"Segment3D",
"Line3D",
"Ray3D",
"Segment",
"Ray",
"Line",
"Plane",
"Triangle",
"RegularPolygon",
"Polygon",
"Circle",
"Ellipse",
"Curve",
"Parabola"
]
class GeometryEntity(Basic):
"""The base class for all geometrical entities.
This class doesn't represent any particular geometric entity; it only
provides the implementation of some methods common to all subclasses.
"""
def __cmp__(self, other):
"""Comparison of two GeometryEntities."""
n1 = self.__class__.__name__
n2 = other.__class__.__name__
c = (n1 > n2) - (n1 < n2)
if not c:
return 0
i1 = -1
for cls in self.__class__.__mro__:
try:
i1 = ordering_of_classes.index(cls.__name__)
break
except ValueError:
i1 = -1
if i1 == -1:
return c
i2 = -1
for cls in other.__class__.__mro__:
try:
i2 = ordering_of_classes.index(cls.__name__)
break
except ValueError:
i2 = -1
if i2 == -1:
return c
return (i1 > i2) - (i1 < i2)
def __contains__(self, other):
"""Subclasses should implement this method for anything more complex than equality."""
if type(self) == type(other):
return self == other
raise NotImplementedError()
def __getnewargs__(self):
"""Returns a tuple that will be passed to __new__ on unpickling."""
return tuple(self.args)
def __ne__(self, o):
"""Test inequality of two geometrical entities."""
return not self == o
def __new__(cls, *args, **kwargs):
# Points are sequences, but they should not
# be converted to Tuples, so use this detection function instead.
def is_seq_and_not_point(a):
# we cannot use isinstance(a, Point) since we cannot import Point
if hasattr(a, 'is_Point') and a.is_Point:
return False
return is_sequence(a)
args = [Tuple(*a) if is_seq_and_not_point(a) else sympify(a) for a in args]
return Basic.__new__(cls, *args)
def __radd__(self, a):
"""Implementation of reverse add method."""
return a.__add__(self)
def __rdiv__(self, a):
"""Implementation of reverse division method."""
return a.__div__(self)
def __repr__(self):
"""String representation of a GeometryEntity that can be evaluated
by sympy."""
return type(self).__name__ + repr(self.args)
def __rmul__(self, a):
"""Implementation of reverse multiplication method."""
return a.__mul__(self)
def __rsub__(self, a):
"""Implementation of reverse subtraction method."""
return a.__sub__(self)
def __str__(self):
"""String representation of a GeometryEntity."""
from sympy.printing import sstr
return type(self).__name__ + sstr(self.args)
def _eval_subs(self, old, new):
from sympy.geometry.point import Point, Point3D
if is_sequence(old) or is_sequence(new):
if isinstance(self, Point3D):
old = Point3D(old)
new = Point3D(new)
else:
old = Point(old)
new = Point(new)
return self._subs(old, new)
def _repr_svg_(self):
"""SVG representation of a GeometryEntity suitable for IPython"""
from sympy.core.evalf import N
try:
bounds = self.bounds
except (NotImplementedError, TypeError):
# if we have no SVG representation, return None so IPython
# will fall back to the next representation
return None
svg_top = '''<svg xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink"
width="{1}" height="{2}" viewBox="{0}"
preserveAspectRatio="xMinYMin meet">
<defs>
<marker id="markerCircle" markerWidth="8" markerHeight="8"
refx="5" refy="5" markerUnits="strokeWidth">
<circle cx="5" cy="5" r="1.5" style="stroke: none; fill:#000000;"/>
</marker>
<marker id="markerArrow" markerWidth="13" markerHeight="13" refx="2" refy="4"
orient="auto" markerUnits="strokeWidth">
<path d="M2,2 L2,6 L6,4" style="fill: #000000;" />
</marker>
<marker id="markerReverseArrow" markerWidth="13" markerHeight="13" refx="6" refy="4"
orient="auto" markerUnits="strokeWidth">
<path d="M6,2 L6,6 L2,4" style="fill: #000000;" />
</marker>
</defs>'''
# Establish SVG canvas that will fit all the data + small space
xmin, ymin, xmax, ymax = map(N, bounds)
if xmin == xmax and ymin == ymax:
# This is a point; buffer using an arbitrary size
xmin, ymin, xmax, ymax = xmin - .5, ymin -.5, xmax + .5, ymax + .5
else:
# Expand bounds by a fraction of the data ranges
expand = 0.1 # or 10%; this keeps arrowheads in view (R plots use 4%)
widest_part = max([xmax - xmin, ymax - ymin])
expand_amount = widest_part * expand
xmin -= expand_amount
ymin -= expand_amount
xmax += expand_amount
ymax += expand_amount
dx = xmax - xmin
dy = ymax - ymin
width = min([max([100., dx]), 300])
height = min([max([100., dy]), 300])
scale_factor = 1. if max(width, height) == 0 else max(dx, dy) / max(width, height)
try:
svg = self._svg(scale_factor)
except (NotImplementedError, TypeError):
# if we have no SVG representation, return None so IPython
# will fall back to the next representation
return None
view_box = "{0} {1} {2} {3}".format(xmin, ymin, dx, dy)
transform = "matrix(1,0,0,-1,0,{0})".format(ymax + ymin)
svg_top = svg_top.format(view_box, width, height)
return svg_top + (
'<g transform="{0}">{1}</g></svg>'
).format(transform, svg)
def _svg(self, scale_factor=1., fill_color="#66cc99"):
"""Returns SVG path element for the GeometryEntity.
Parameters
==========
scale_factor : float
Multiplication factor for the SVG stroke-width. Default is 1.
fill_color : str, optional
Hex string for fill color. Default is "#66cc99".
"""
raise NotImplementedError()
def _sympy_(self):
return self
@property
def ambient_dimension(self):
"""What is the dimension of the space that the object is contained in?"""
raise NotImplementedError()
@property
def bounds(self):
"""Return a tuple (xmin, ymin, xmax, ymax) representing the bounding
rectangle for the geometric figure.
"""
raise NotImplementedError()
def encloses(self, o):
"""
Return True if o is inside (not on or outside) the boundaries of self.
The object will be decomposed into Points and individual Entities need
only define an encloses_point method for their class.
See Also
========
sympy.geometry.ellipse.Ellipse.encloses_point
sympy.geometry.polygon.Polygon.encloses_point
Examples
========
>>> from sympy import RegularPolygon, Point, Polygon
>>> t = Polygon(*RegularPolygon(Point(0, 0), 1, 3).vertices)
>>> t2 = Polygon(*RegularPolygon(Point(0, 0), 2, 3).vertices)
>>> t2.encloses(t)
True
>>> t.encloses(t2)
False
"""
from sympy.geometry.point import Point
from sympy.geometry.line import Segment, Ray, Line
from sympy.geometry.ellipse import Ellipse
from sympy.geometry.polygon import Polygon, RegularPolygon
if isinstance(o, Point):
return self.encloses_point(o)
elif isinstance(o, Segment):
return all(self.encloses_point(x) for x in o.points)
elif isinstance(o, Ray) or isinstance(o, Line):
return False
elif isinstance(o, Ellipse):
return self.encloses_point(o.center) and \
self.encloses_point(
Point(o.center.x + o.hradius, o.center.y)) and \
not self.intersection(o)
elif isinstance(o, Polygon):
if isinstance(o, RegularPolygon):
if not self.encloses_point(o.center):
return False
return all(self.encloses_point(v) for v in o.vertices)
raise NotImplementedError()
def equals(self, o):
return self == o
def intersection(self, o):
"""
Returns a list of all of the intersections of self with o.
Notes
=====
An entity is not required to implement this method.
If two different types of entities can intersect, the item with
higher index in ordering_of_classes should implement
intersections with anything having a lower index.
See Also
========
sympy.geometry.util.intersection
"""
raise NotImplementedError()
def is_similar(self, other):
"""Is this geometrical entity similar to another geometrical entity?
Two entities are similar if a uniform scaling (enlarging or
shrinking) of one of the entities will allow one to obtain the other.
Notes
=====
This method is not intended to be used directly but rather
through the `are_similar` function found in util.py.
An entity is not required to implement this method.
If two different types of entities can be similar, it is only
required that one of them be able to determine this.
See Also
========
scale
"""
raise NotImplementedError()
def reflect(self, line):
"""
Reflects an object across a line.
Parameters
==========
line: Line
Examples
========
>>> from sympy import pi, sqrt, Line, RegularPolygon
>>> l = Line((0, pi), slope=sqrt(2))
>>> pent = RegularPolygon((1, 2), 1, 5)
>>> rpent = pent.reflect(l)
>>> rpent
RegularPolygon(Point2D(-2*sqrt(2)*pi/3 - 1/3 + 4*sqrt(2)/3, 2/3 + 2*sqrt(2)/3 + 2*pi/3), -1, 5, -atan(2*sqrt(2)) + 3*pi/5)
>>> from sympy import pi, Line, Circle, Point
>>> l = Line((0, pi), slope=1)
>>> circ = Circle(Point(0, 0), 5)
>>> rcirc = circ.reflect(l)
>>> rcirc
Circle(Point2D(-pi, pi), -5)
"""
from sympy import atan, Point, Dummy, oo
g = self
l = line
o = Point(0, 0)
if l.slope.is_zero:
y = l.args[0].y
if not y: # x-axis
return g.scale(y=-1)
reps = [(p, p.translate(y=2*(y - p.y))) for p in g.atoms(Point)]
elif l.slope is oo:
x = l.args[0].x
if not x: # y-axis
return g.scale(x=-1)
reps = [(p, p.translate(x=2*(x - p.x))) for p in g.atoms(Point)]
else:
if not hasattr(g, 'reflect') and not all(
isinstance(arg, Point) for arg in g.args):
raise NotImplementedError(
'reflect undefined or non-Point args in %s' % g)
a = atan(l.slope)
c = l.coefficients
d = -c[-1]/c[1] # y-intercept
# apply the transform to a single point
x, y = Dummy(), Dummy()
xf = Point(x, y)
xf = xf.translate(y=-d).rotate(-a, o).scale(y=-1
).rotate(a, o).translate(y=d)
# replace every point using that transform
reps = [(p, xf.xreplace({x: p.x, y: p.y})) for p in g.atoms(Point)]
return g.xreplace(dict(reps))
def rotate(self, angle, pt=None):
"""Rotate ``angle`` radians counterclockwise about Point ``pt``.
The default pt is the origin, Point(0, 0)
See Also
========
scale, translate
Examples
========
>>> from sympy import Point, RegularPolygon, Polygon, pi
>>> t = Polygon(*RegularPolygon(Point(0, 0), 1, 3).vertices)
>>> t # vertex on x axis
Triangle(Point2D(1, 0), Point2D(-1/2, sqrt(3)/2), Point2D(-1/2, -sqrt(3)/2))
>>> t.rotate(pi/2) # vertex on y axis now
Triangle(Point2D(0, 1), Point2D(-sqrt(3)/2, -1/2), Point2D(sqrt(3)/2, -1/2))
"""
newargs = []
for a in self.args:
if isinstance(a, GeometryEntity):
newargs.append(a.rotate(angle, pt))
else:
newargs.append(a)
return type(self)(*newargs)
def scale(self, x=1, y=1, pt=None):
"""Scale the object by multiplying the x,y-coordinates by x and y.
If pt is given, the scaling is done relative to that point; the
object is shifted by -pt, scaled, and shifted by pt.
See Also
========
rotate, translate
Examples
========
>>> from sympy import RegularPolygon, Point, Polygon
>>> t = Polygon(*RegularPolygon(Point(0, 0), 1, 3).vertices)
>>> t
Triangle(Point2D(1, 0), Point2D(-1/2, sqrt(3)/2), Point2D(-1/2, -sqrt(3)/2))
>>> t.scale(2)
Triangle(Point2D(2, 0), Point2D(-1, sqrt(3)/2), Point2D(-1, -sqrt(3)/2))
>>> t.scale(2, 2)
Triangle(Point2D(2, 0), Point2D(-1, sqrt(3)), Point2D(-1, -sqrt(3)))
"""
from sympy.geometry.point import Point
if pt:
pt = Point(pt, dim=2)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
return type(self)(*[a.scale(x, y) for a in self.args]) # if this fails, override this class
def translate(self, x=0, y=0):
"""Shift the object by adding to the x,y-coordinates the values x and y.
See Also
========
rotate, scale
Examples
========
>>> from sympy import RegularPolygon, Point, Polygon
>>> t = Polygon(*RegularPolygon(Point(0, 0), 1, 3).vertices)
>>> t
Triangle(Point2D(1, 0), Point2D(-1/2, sqrt(3)/2), Point2D(-1/2, -sqrt(3)/2))
>>> t.translate(2)
Triangle(Point2D(3, 0), Point2D(3/2, sqrt(3)/2), Point2D(3/2, -sqrt(3)/2))
>>> t.translate(2, 2)
Triangle(Point2D(3, 2), Point2D(3/2, sqrt(3)/2 + 2), Point2D(3/2, 2 - sqrt(3)/2))
"""
newargs = []
for a in self.args:
if isinstance(a, GeometryEntity):
newargs.append(a.translate(x, y))
else:
newargs.append(a)
return self.func(*newargs)
def parameter_value(self, other, t):
"""Return the parameter corresponding to the given point.
Evaluating an arbitrary point of the entity at this parameter
value will return the given point.
Examples
========
>>> from sympy import Line, Point
>>> from sympy.abc import t
>>> a = Point(0, 0)
>>> b = Point(2, 2)
>>> Line(a, b).parameter_value((1, 1), t)
{t: 1/2}
>>> Line(a, b).arbitrary_point(t).subs(_)
Point2D(1, 1)
"""
from sympy.geometry.point import Point
from sympy.core.symbol import Dummy
from sympy.solvers.solvers import solve
if not isinstance(other, GeometryEntity):
other = Point(other, dim=self.ambient_dimension)
if not isinstance(other, Point):
raise ValueError("other must be a point")
T = Dummy('t', real=True)
sol = solve(self.arbitrary_point(T) - other, T, dict=True)
if not sol:
raise ValueError("Given point is not on %s" % func_name(self))
return {t: sol[0][T]}
class GeometrySet(GeometryEntity, Set):
"""Parent class of all GeometryEntity that are also Sets
(compatible with sympy.sets)
"""
def _contains(self, other):
"""sympy.sets uses the _contains method, so include it for compatibility."""
if isinstance(other, Set) and other.is_FiniteSet:
return all(self.__contains__(i) for i in other)
return self.__contains__(other)
@dispatch(GeometrySet, Set) # type:ignore # noqa:F811
def union_sets(self, o): # noqa:F811
""" Returns the union of self and o
for use with sympy.sets.Set, if possible. """
from sympy.sets import Union, FiniteSet
# if it's a FiniteSet, merge any points
# we contain and return a union with the rest
if o.is_FiniteSet:
other_points = [p for p in o if not self._contains(p)]
if len(other_points) == len(o):
return None
return Union(self, FiniteSet(*other_points))
if self._contains(o):
return self
return None
@dispatch(GeometrySet, Set) # type: ignore # noqa:F811
def intersection_sets(self, o): # noqa:F811
""" Returns a sympy.sets.Set of intersection objects,
if possible. """
from sympy.sets import FiniteSet, Union
from sympy.geometry import Point
try:
# if o is a FiniteSet, find the intersection directly
# to avoid infinite recursion
if o.is_FiniteSet:
inter = FiniteSet(*(p for p in o if self.contains(p)))
else:
inter = self.intersection(o)
except NotImplementedError:
# sympy.sets.Set.reduce expects None if an object
# doesn't know how to simplify
return None
# put the points in a FiniteSet
points = FiniteSet(*[p for p in inter if isinstance(p, Point)])
non_points = [p for p in inter if not isinstance(p, Point)]
return Union(*(non_points + [points]))
def translate(x, y):
"""Return the matrix to translate a 2-D point by x and y."""
rv = eye(3)
rv[2, 0] = x
rv[2, 1] = y
return rv
def scale(x, y, pt=None):
"""Return the matrix to multiply a 2-D point's coordinates by x and y.
If pt is given, the scaling is done relative to that point."""
rv = eye(3)
rv[0, 0] = x
rv[1, 1] = y
if pt:
from sympy.geometry.point import Point
pt = Point(pt, dim=2)
tr1 = translate(*(-pt).args)
tr2 = translate(*pt.args)
return tr1*rv*tr2
return rv
def rotate(th):
"""Return the matrix to rotate a 2-D point about the origin by ``angle``.
The angle is measured in radians. To Point a point about a point other
then the origin, translate the Point, do the rotation, and
translate it back:
>>> from sympy.geometry.entity import rotate, translate
>>> from sympy import Point, pi
>>> rot_about_11 = translate(-1, -1)*rotate(pi/2)*translate(1, 1)
>>> Point(1, 1).transform(rot_about_11)
Point2D(1, 1)
>>> Point(0, 0).transform(rot_about_11)
Point2D(2, 0)
"""
s = sin(th)
rv = eye(3)*cos(th)
rv[0, 1] = s
rv[1, 0] = -s
rv[2, 2] = 1
return rv
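# A minimal illustrative sketch (assumes only the helper matrices above and
# Point.transform): scaling about a point composes
# translate(-pt) * scale * translate(pt), as described in the scale docstring.
def _scale_about_point_sketch():
    from sympy import Point
    m = scale(2, 2, pt=(1, 1))
    assert Point(1, 1).transform(m) == Point(1, 1)  # the anchor point is fixed
    assert Point(2, 1).transform(m) == Point(3, 1)  # offsets from (1, 1) double
    return m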
|
c9edf1826d14602d286c3afaf1b771ded2c2452b773307693c4197e6e46f9f36 | """Utility functions for geometrical entities.
Contains
========
intersection
convex_hull
closest_points
farthest_points
are_coplanar
are_similar
"""
from __future__ import division, print_function
from sympy import Function, Symbol, solve, sqrt
from sympy.core.compatibility import (
is_sequence, ordered)
from sympy.core.containers import OrderedSet
from .point import Point, Point2D
def find(x, equation):
"""
Checks whether the parameter 'x' is present in 'equation' or not.
If it is present then it returns the passed parameter 'x' as a free
symbol; otherwise it raises a ValueError.
"""
free = equation.free_symbols
xs = [i for i in free if (i.name if isinstance(x, str) else i) == x]
if not xs:
raise ValueError('could not find %s' % x)
if len(xs) != 1:
raise ValueError('ambiguous %s' % x)
return xs[0]
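# A minimal illustrative sketch (assumes only find as defined above): the
# parameter may be given by name or as a Symbol, and a missing symbol raises
# ValueError.
def _find_sketch():
    from sympy.abc import x, y
    eq = x**2 + y
    assert find('x', eq) == x
    assert find(y, eq) == y
    try:
        find('z', eq)
    except ValueError:
        pass  # 'z' does not occur in eq
    return eq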
def _ordered_points(p):
"""Return the tuple of points sorted numerically according to args"""
return tuple(sorted(p, key=lambda x: x.args))
def are_coplanar(*e):
""" Returns True if the given entities are coplanar otherwise False
Parameters
==========
e: entities to be checked for being coplanar
Returns
=======
Boolean
Examples
========
>>> from sympy import Point3D, Line3D
>>> from sympy.geometry.util import are_coplanar
>>> a = Line3D(Point3D(5, 0, 0), Point3D(1, -1, 1))
>>> b = Line3D(Point3D(0, -2, 0), Point3D(3, 1, 1))
>>> c = Line3D(Point3D(0, -1, 0), Point3D(5, -1, 9))
>>> are_coplanar(a, b, c)
False
"""
from sympy.geometry.line import LinearEntity3D
from sympy.geometry.entity import GeometryEntity
from sympy.geometry.point import Point3D
from sympy.geometry.plane import Plane
# XXX update tests for coverage
e = set(e)
# first work with a Plane if present
for i in list(e):
if isinstance(i, Plane):
e.remove(i)
return all(p.is_coplanar(i) for p in e)
if all(isinstance(i, Point3D) for i in e):
if len(e) < 3:
return False
# remove pts that are collinear with 2 pts
a, b = e.pop(), e.pop()
for i in list(e):
if Point3D.are_collinear(a, b, i):
e.remove(i)
if not e:
return False
else:
# define a plane
p = Plane(a, b, e.pop())
for i in e:
if i not in p:
return False
return True
else:
pt3d = []
for i in e:
if isinstance(i, Point3D):
pt3d.append(i)
elif isinstance(i, LinearEntity3D):
pt3d.extend(i.args)
elif isinstance(i, GeometryEntity): # XXX we should have a GeometryEntity3D class so we can tell the difference between 2D and 3D -- here we just want to deal with 2D objects; if new 3D objects are encountered that we didn't handle above, an error should be raised
# all 2D objects have some Point that defines them; so convert those points to 3D pts by making z=0
for p in i.args:
if isinstance(p, Point):
pt3d.append(Point3D(*(p.args + (0,))))
return are_coplanar(*pt3d)
def are_similar(e1, e2):
"""Are two geometrical entities similar.
Can one geometrical entity be uniformly scaled to the other?
Parameters
==========
e1 : GeometryEntity
e2 : GeometryEntity
Returns
=======
are_similar : boolean
Raises
======
GeometryError
When `e1` and `e2` cannot be compared.
Notes
=====
If the two objects are equal then they are similar.
See Also
========
sympy.geometry.entity.GeometryEntity.is_similar
Examples
========
>>> from sympy import Point, Circle, Triangle, are_similar
>>> c1, c2 = Circle(Point(0, 0), 4), Circle(Point(1, 4), 3)
>>> t1 = Triangle(Point(0, 0), Point(1, 0), Point(0, 1))
>>> t2 = Triangle(Point(0, 0), Point(2, 0), Point(0, 2))
>>> t3 = Triangle(Point(0, 0), Point(3, 0), Point(0, 1))
>>> are_similar(t1, t2)
True
>>> are_similar(t1, t3)
False
"""
from .exceptions import GeometryError
if e1 == e2:
return True
is_similar1 = getattr(e1, 'is_similar', None)
if is_similar1:
return is_similar1(e2)
is_similar2 = getattr(e2, 'is_similar', None)
if is_similar2:
return is_similar2(e1)
n1 = e1.__class__.__name__
n2 = e2.__class__.__name__
raise GeometryError(
"Cannot test similarity between %s and %s" % (n1, n2))
def centroid(*args):
"""Find the centroid (center of mass) of the collection containing only Points,
Segments or Polygons. The centroid is the weighted average of the individual
centroids, where the weights are the lengths (of segments) or areas (of polygons).
Overlapping regions will add to the weight of that region.
If there are no objects (or a mixture of objects) then None is returned.
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Segment,
sympy.geometry.polygon.Polygon
Examples
========
>>> from sympy import Point, Segment, Polygon
>>> from sympy.geometry.util import centroid
>>> p = Polygon((0, 0), (10, 0), (10, 10))
>>> q = p.translate(0, 20)
>>> p.centroid, q.centroid
(Point2D(20/3, 10/3), Point2D(20/3, 70/3))
>>> centroid(p, q)
Point2D(20/3, 40/3)
>>> p, q = Segment((0, 0), (2, 0)), Segment((0, 0), (2, 2))
>>> centroid(p, q)
Point2D(1, 2 - sqrt(2))
>>> centroid(Point(0, 0), Point(2, 0))
Point2D(1, 0)
Stacking 3 polygons on top of each other effectively triples the
weight of that polygon:
>>> p = Polygon((0, 0), (1, 0), (1, 1), (0, 1))
>>> q = Polygon((1, 0), (3, 0), (3, 1), (1, 1))
>>> centroid(p, q)
Point2D(3/2, 1/2)
>>> centroid(p, p, p, q) # centroid x-coord shifts left
Point2D(11/10, 1/2)
Stacking the squares vertically above and below p has the same
effect:
>>> centroid(p, p.translate(0, 1), p.translate(0, -1), q)
Point2D(11/10, 1/2)
"""
from sympy.geometry import Polygon, Segment, Point
if args:
if all(isinstance(g, Point) for g in args):
c = Point(0, 0)
for g in args:
c += g
den = len(args)
elif all(isinstance(g, Segment) for g in args):
c = Point(0, 0)
L = 0
for g in args:
l = g.length
c += g.midpoint*l
L += l
den = L
elif all(isinstance(g, Polygon) for g in args):
c = Point(0, 0)
A = 0
for g in args:
a = g.area
c += g.centroid*a
A += a
den = A
c /= den
return c.func(*[i.simplify() for i in c.args])
def closest_points(*args):
"""Return the subset of points from a set of points that were
the closest to each other in the 2D plane.
Parameters
==========
args : a collection of Points on 2D plane.
Notes
=====
This can only be performed on a set of points whose coordinates can
be ordered on the number line. If there are no ties then a single
pair of Points will be in the set.
References
==========
[1] http://www.cs.mcgill.ca/~cs251/ClosestPair/ClosestPairPS.html
[2] Sweep line algorithm
https://en.wikipedia.org/wiki/Sweep_line_algorithm
Examples
========
>>> from sympy.geometry import closest_points, Point2D, Triangle
>>> Triangle(sss=(3, 4, 5)).args
(Point2D(0, 0), Point2D(3, 0), Point2D(3, 4))
>>> closest_points(*_)
{(Point2D(0, 0), Point2D(3, 0))}
"""
from collections import deque
from math import sqrt as _sqrt
from sympy.functions.elementary.miscellaneous import sqrt
p = [Point2D(i) for i in set(args)]
if len(p) < 2:
raise ValueError('At least 2 distinct points must be given.')
try:
p.sort(key=lambda x: x.args)
except TypeError:
raise ValueError("The points could not be sorted.")
if any(not i.is_Rational for j in p for i in j.args):
def hypot(x, y):
arg = x*x + y*y
if arg.is_Rational:
return _sqrt(arg)
return sqrt(arg)
else:
from math import hypot
rv = [(0, 1)]
best_dist = hypot(p[1].x - p[0].x, p[1].y - p[0].y)
i = 2
left = 0
box = deque([0, 1])
while i < len(p):
while left < i and p[i][0] - p[left][0] > best_dist:
box.popleft()
left += 1
for j in box:
d = hypot(p[i].x - p[j].x, p[i].y - p[j].y)
if d < best_dist:
rv = [(j, i)]
elif d == best_dist:
rv.append((j, i))
else:
continue
best_dist = d
box.append(i)
i += 1
return {tuple([p[i] for i in pair]) for pair in rv}
def convex_hull(*args, **kwargs):
"""The convex hull surrounding the Points contained in the list of entities.
Parameters
==========
args : a collection of Points, Segments and/or Polygons
Returns
=======
convex_hull : Polygon if ``polygon`` is True, else a tuple `(U, L)` where ``U`` and ``L`` are the upper and lower hulls, respectively.
Notes
=====
This can only be performed on a set of points whose coordinates can
be ordered on the number line.
References
==========
[1] https://en.wikipedia.org/wiki/Graham_scan
[2] Andrew's Monotone Chain Algorithm
(A.M. Andrew,
"Another Efficient Algorithm for Convex Hulls in Two Dimensions", 1979)
http://geomalgorithms.com/a10-_hull-1.html
See Also
========
sympy.geometry.point.Point, sympy.geometry.polygon.Polygon
Examples
========
>>> from sympy.geometry import Point, convex_hull
>>> points = [(1, 1), (1, 2), (3, 1), (-5, 2), (15, 4)]
>>> convex_hull(*points)
Polygon(Point2D(-5, 2), Point2D(1, 1), Point2D(3, 1), Point2D(15, 4))
>>> convex_hull(*points, **dict(polygon=False))
([Point2D(-5, 2), Point2D(15, 4)],
[Point2D(-5, 2), Point2D(1, 1), Point2D(3, 1), Point2D(15, 4)])
"""
from .entity import GeometryEntity
from .point import Point
from .line import Segment
from .polygon import Polygon
polygon = kwargs.get('polygon', True)
p = OrderedSet()
for e in args:
if not isinstance(e, GeometryEntity):
try:
e = Point(e)
except NotImplementedError:
raise ValueError('%s is not a GeometryEntity and cannot be made into Point' % str(e))
if isinstance(e, Point):
p.add(e)
elif isinstance(e, Segment):
p.update(e.points)
elif isinstance(e, Polygon):
p.update(e.vertices)
else:
raise NotImplementedError(
'Convex hull for %s not implemented.' % type(e))
# make sure all our points are of the same dimension
if any(len(x) != 2 for x in p):
raise ValueError('Can only compute the convex hull in two dimensions')
p = list(p)
if len(p) == 1:
return p[0] if polygon else (p[0], None)
elif len(p) == 2:
s = Segment(p[0], p[1])
return s if polygon else (s, None)
def _orientation(p, q, r):
'''Return positive if p-q-r are clockwise, neg if ccw, zero if
collinear.'''
return (q.y - p.y)*(r.x - p.x) - (q.x - p.x)*(r.y - p.y)
# scan to find upper and lower convex hulls of a set of 2d points.
U = []
L = []
try:
p.sort(key=lambda x: x.args)
except TypeError:
raise ValueError("The points could not be sorted.")
for p_i in p:
while len(U) > 1 and _orientation(U[-2], U[-1], p_i) <= 0:
U.pop()
while len(L) > 1 and _orientation(L[-2], L[-1], p_i) >= 0:
L.pop()
U.append(p_i)
L.append(p_i)
U.reverse()
convexHull = tuple(L + U[1:-1])
if len(convexHull) == 2:
s = Segment(convexHull[0], convexHull[1])
return s if polygon else (s, None)
if polygon:
return Polygon(*convexHull)
else:
U.reverse()
return (U, L)
def farthest_points(*args):
"""Return the subset of points from a set of points that were
the furthest apart from each other in the 2D plane.
Parameters
==========
args : a collection of Points on 2D plane.
Notes
=====
This can only be performed on a set of points whose coordinates can
be ordered on the number line. If there are no ties then a single
pair of Points will be in the set.
References
==========
[1] http://code.activestate.com/recipes/117225-convex-hull-and-diameter-of-2d-point-sets/
[2] Rotating Callipers Technique
https://en.wikipedia.org/wiki/Rotating_calipers
Examples
========
>>> from sympy.geometry import farthest_points, Point2D, Triangle
>>> Triangle(sss=(3, 4, 5)).args
(Point2D(0, 0), Point2D(3, 0), Point2D(3, 4))
>>> farthest_points(*_)
{(Point2D(0, 0), Point2D(3, 4))}
"""
    from math import sqrt as _sqrt
    # the symbolic sqrt is needed for non-Rational coordinates, mirroring the
    # local import in closest_points above
    from sympy.functions.elementary.miscellaneous import sqrt
def rotatingCalipers(Points):
U, L = convex_hull(*Points, **dict(polygon=False))
if L is None:
if isinstance(U, Point):
raise ValueError('At least two distinct points must be given.')
yield U.args
else:
i = 0
j = len(L) - 1
while i < len(U) - 1 or j > 0:
yield U[i], L[j]
# if all the way through one side of hull, advance the other side
if i == len(U) - 1:
j -= 1
elif j == 0:
i += 1
# still points left on both lists, compare slopes of next hull edges
# being careful to avoid divide-by-zero in slope calculation
elif (U[i+1].y - U[i].y) * (L[j].x - L[j-1].x) > \
(L[j].y - L[j-1].y) * (U[i+1].x - U[i].x):
i += 1
else:
j -= 1
p = [Point2D(i) for i in set(args)]
if any(not i.is_Rational for j in p for i in j.args):
def hypot(x, y):
arg = x*x + y*y
if arg.is_Rational:
return _sqrt(arg)
return sqrt(arg)
else:
from math import hypot
rv = []
diam = 0
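    # among the antipodal pairs produced by the rotating calipers, keep the
    # pair(s) with the largest separation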
for pair in rotatingCalipers(args):
h, q = _ordered_points(pair)
d = hypot(h.x - q.x, h.y - q.y)
if d > diam:
rv = [(h, q)]
elif d == diam:
rv.append((h, q))
else:
continue
diam = d
return set(rv)
def idiff(eq, y, x, n=1):
"""Return ``dy/dx`` assuming that ``eq == 0``.
Parameters
==========
y : the dependent variable or a list of dependent variables (with y first)
x : the variable that the derivative is being taken with respect to
n : the order of the derivative (default is 1)
Examples
========
>>> from sympy.abc import x, y, a
>>> from sympy.geometry.util import idiff
>>> circ = x**2 + y**2 - 4
>>> idiff(circ, y, x)
-x/y
>>> idiff(circ, y, x, 2).simplify()
-(x**2 + y**2)/y**3
Here, ``a`` is assumed to be independent of ``x``:
>>> idiff(x + a + y, y, x)
-1
Now the x-dependence of ``a`` is made explicit by listing ``a`` after
``y`` in a list.
>>> idiff(x + a + y, [y, a], x)
-Derivative(a, x) - 1
See Also
========
sympy.core.function.Derivative: represents unevaluated derivatives
sympy.core.function.diff: explicitly differentiates wrt symbols
"""
if is_sequence(y):
dep = set(y)
y = y[0]
elif isinstance(y, Symbol):
dep = {y}
elif isinstance(y, Function):
pass
else:
raise ValueError("expecting x-dependent symbol(s) or function(s) but got: %s" % y)
f = {s: Function(s.name)(x) for s in eq.free_symbols
if s != x and s in dep}
if isinstance(y, Symbol):
dydx = Function(y.name)(x).diff(x)
else:
dydx = y.diff(x)
eq = eq.subs(f)
derivs = {}
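    # differentiate the equation implicitly n times, solving for the current
    # highest-order derivative at each step and substituting the previously
    # found lower-order derivatives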
for i in range(n):
yp = solve(eq.diff(x), dydx)[0].subs(derivs)
if i == n - 1:
return yp.subs([(v, k) for k, v in f.items()])
derivs[dydx] = yp
eq = dydx - yp
dydx = dydx.diff(x)
def intersection(*entities, **kwargs):
"""The intersection of a collection of GeometryEntity instances.
Parameters
==========
entities : sequence of GeometryEntity
pairwise (keyword argument) : Can be either True or False
Returns
=======
intersection : list of GeometryEntity
Raises
======
NotImplementedError
When unable to calculate intersection.
Notes
=====
The intersection of any geometrical entity with itself should return
a list with one item: the entity in question.
An intersection requires two or more entities. If only a single
entity is given then the function will return an empty list.
It is possible for `intersection` to miss intersections that one
    knows exist because the required quantities were not fully
simplified internally.
Reals should be converted to Rationals, e.g. Rational(str(real_num))
or else failures due to floating point issues may result.
Case 1: When the keyword argument 'pairwise' is False (default value):
In this case, the function returns a list of intersections common to
all entities.
Case 2: When the keyword argument 'pairwise' is True:
    In this case, the function returns a list of intersections that occur
between any pair of entities.
See Also
========
sympy.geometry.entity.GeometryEntity.intersection
Examples
========
>>> from sympy.geometry import Ray, Circle, intersection
>>> c = Circle((0, 1), 1)
>>> intersection(c, c.center)
[]
>>> right = Ray((0, 0), (1, 0))
>>> up = Ray((0, 0), (0, 1))
>>> intersection(c, right, up)
[Point2D(0, 0)]
>>> intersection(c, right, up, pairwise=True)
[Point2D(0, 0), Point2D(0, 2)]
>>> left = Ray((1, 0), (0, 0))
>>> intersection(right, left)
[Segment2D(Point2D(0, 0), Point2D(1, 0))]
"""
from .entity import GeometryEntity
from .point import Point
pairwise = kwargs.pop('pairwise', False)
if len(entities) <= 1:
return []
# entities may be an immutable tuple
entities = list(entities)
for i, e in enumerate(entities):
if not isinstance(e, GeometryEntity):
entities[i] = Point(e)
if not pairwise:
# find the intersection common to all objects
res = entities[0].intersection(entities[1])
for entity in entities[2:]:
newres = []
for x in res:
newres.extend(x.intersection(entity))
res = newres
return res
# find all pairwise intersections
ans = []
for j in range(0, len(entities)):
for k in range(j + 1, len(entities)):
ans.extend(intersection(entities[j], entities[k]))
return list(ordered(set(ans)))
|
f5ab37347b84e89758b50c4b829c98107ab0bf9ff3a60fdf83add035604d349e | from __future__ import division, print_function
from sympy.core import Expr, S, Symbol, oo, pi, sympify
from sympy.core.compatibility import as_int, ordered
from sympy.core.symbol import _symbol, Dummy, symbols
from sympy.functions.elementary.complexes import sign
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.elementary.trigonometric import cos, sin, tan
from sympy.geometry.exceptions import GeometryError
from sympy.logic import And
from sympy.matrices import Matrix
from sympy.simplify import simplify
from sympy.utilities import default_sort_key
from sympy.utilities.iterables import has_dups, has_variety, uniq, rotate_left, least_rotation
from sympy.utilities.misc import func_name
from .entity import GeometryEntity, GeometrySet
from .point import Point
from .ellipse import Circle
from .line import Line, Segment, Ray
import warnings
class Polygon(GeometrySet):
"""A two-dimensional polygon.
A simple polygon in space. Can be constructed from a sequence of points
or from a center, radius, number of sides and rotation angle.
Parameters
==========
vertices : sequence of Points
Attributes
==========
area
angles
perimeter
vertices
centroid
sides
Raises
======
GeometryError
If all parameters are not Points.
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Segment, Triangle
Notes
=====
Polygons are treated as closed paths rather than 2D areas so
    some calculations can be negative or positive (e.g., area)
based on the orientation of the points.
Any consecutive identical points are reduced to a single point
and any points collinear and between two points will be removed
unless they are needed to define an explicit intersection (see examples).
A Triangle, Segment or Point will be returned when there are 3 or
fewer points provided.
Examples
========
>>> from sympy import Point, Polygon, pi
>>> p1, p2, p3, p4, p5 = [(0, 0), (1, 0), (5, 1), (0, 1), (3, 0)]
>>> Polygon(p1, p2, p3, p4)
Polygon(Point2D(0, 0), Point2D(1, 0), Point2D(5, 1), Point2D(0, 1))
>>> Polygon(p1, p2)
Segment2D(Point2D(0, 0), Point2D(1, 0))
>>> Polygon(p1, p2, p5)
Segment2D(Point2D(0, 0), Point2D(3, 0))
The area of a polygon is calculated as positive when vertices are
traversed in a ccw direction. When the sides of a polygon cross the
area will have positive and negative contributions. The following
defines a Z shape where the bottom right connects back to the top
left.
>>> Polygon((0, 2), (2, 2), (0, 0), (2, 0)).area
0
    When the keyword `n` is used to define the number of sides of the
Polygon then a RegularPolygon is created and the other arguments are
interpreted as center, radius and rotation. The unrotated RegularPolygon
will always have a vertex at Point(r, 0) where `r` is the radius of the
circle that circumscribes the RegularPolygon. Its method `spin` can be
used to increment that angle.
>>> p = Polygon((0,0), 1, n=3)
>>> p
RegularPolygon(Point2D(0, 0), 1, 3, 0)
>>> p.vertices[0]
Point2D(1, 0)
>>> p.args[0]
Point2D(0, 0)
>>> p.spin(pi/2)
>>> p.vertices[0]
Point2D(0, 1)
"""
def __new__(cls, *args, **kwargs):
if kwargs.get('n', 0):
n = kwargs.pop('n')
args = list(args)
# return a virtual polygon with n sides
if len(args) == 2: # center, radius
args.append(n)
elif len(args) == 3: # center, radius, rotation
args.insert(2, n)
return RegularPolygon(*args, **kwargs)
vertices = [Point(a, dim=2, **kwargs) for a in args]
# remove consecutive duplicates
nodup = []
for p in vertices:
if nodup and p == nodup[-1]:
continue
nodup.append(p)
if len(nodup) > 1 and nodup[-1] == nodup[0]:
nodup.pop() # last point was same as first
# remove collinear points
i = -3
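        # starting at -3 lets the wrap-around triples that span the closing
        # edge (last and first vertices) be checked as well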
while i < len(nodup) - 3 and len(nodup) > 2:
a, b, c = nodup[i], nodup[i + 1], nodup[i + 2]
if Point.is_collinear(a, b, c):
nodup.pop(i + 1)
if a == c:
nodup.pop(i)
else:
i += 1
vertices = list(nodup)
if len(vertices) > 3:
return GeometryEntity.__new__(cls, *vertices, **kwargs)
elif len(vertices) == 3:
return Triangle(*vertices, **kwargs)
elif len(vertices) == 2:
return Segment(*vertices, **kwargs)
else:
return Point(*vertices, **kwargs)
@property
def area(self):
"""
The area of the polygon.
Notes
=====
The area calculation can be positive or negative based on the
orientation of the points. If any side of the polygon crosses
any other side, there will be areas having opposite signs.
See Also
========
sympy.geometry.ellipse.Ellipse.area
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.area
3
In the Z shaped polygon (with the lower right connecting back
to the upper left) the areas cancel out:
>>> Z = Polygon((0, 1), (1, 1), (0, 0), (1, 0))
>>> Z.area
0
In the M shaped polygon, areas do not cancel because no side
crosses any other (though there is a point of contact).
>>> M = Polygon((0, 0), (0, 1), (2, 0), (3, 1), (3, 0))
>>> M.area
-3/2
"""
area = 0
args = self.args
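        # shoelace formula: the signed area is half the sum of the cross
        # products of consecutive vertices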
for i in range(len(args)):
x1, y1 = args[i - 1].args
x2, y2 = args[i].args
area += x1*y2 - x2*y1
return simplify(area) / 2
@staticmethod
def _isright(a, b, c):
"""Return True/False for cw/ccw orientation.
Examples
========
>>> from sympy import Point, Polygon
>>> a, b, c = [Point(i) for i in [(0, 0), (1, 1), (1, 0)]]
>>> Polygon._isright(a, b, c)
True
>>> Polygon._isright(a, c, b)
False
"""
ba = b - a
ca = c - a
t_area = simplify(ba.x*ca.y - ca.x*ba.y)
res = t_area.is_nonpositive
if res is None:
raise ValueError("Can't determine orientation")
return res
@property
def angles(self):
"""The internal angle at each vertex.
Returns
=======
angles : dict
A dictionary where each key is a vertex and each value is the
internal angle at that vertex. The vertices are represented as
Points.
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.LinearEntity.angle_between
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.angles[p1]
pi/2
>>> poly.angles[p2]
acos(-4*sqrt(17)/17)
"""
# Determine orientation of points
args = self.vertices
cw = self._isright(args[-1], args[0], args[1])
ret = {}
for i in range(len(args)):
a, b, c = args[i - 2], args[i - 1], args[i]
ang = Ray(b, a).angle_between(Ray(b, c))
if cw ^ self._isright(a, b, c):
ret[b] = 2*S.Pi - ang
else:
ret[b] = ang
return ret
@property
def ambient_dimension(self):
return self.vertices[0].ambient_dimension
@property
def perimeter(self):
"""The perimeter of the polygon.
Returns
=======
perimeter : number or Basic instance
See Also
========
sympy.geometry.line.Segment.length
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.perimeter
sqrt(17) + 7
"""
p = 0
args = self.vertices
for i in range(len(args)):
p += args[i - 1].distance(args[i])
return simplify(p)
@property
def vertices(self):
"""The vertices of the polygon.
Returns
=======
vertices : list of Points
Notes
=====
When iterating over the vertices, it is more efficient to index self
rather than to request the vertices and index them. Only use the
vertices when you want to process all of them at once. This is even
more important with RegularPolygons that calculate each vertex.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.vertices
[Point2D(0, 0), Point2D(1, 0), Point2D(5, 1), Point2D(0, 1)]
>>> poly.vertices[0]
Point2D(0, 0)
"""
return list(self.args)
@property
def centroid(self):
"""The centroid of the polygon.
Returns
=======
centroid : Point
See Also
========
sympy.geometry.point.Point, sympy.geometry.util.centroid
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.centroid
Point2D(31/18, 11/18)
"""
A = 1/(6*self.area)
cx, cy = 0, 0
args = self.args
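        # standard centroid formula for a simple polygon: each edge contributes
        # (x1 + x2) and (y1 + y2) weighted by the cross product x1*y2 - x2*y1,
        # and the sums are scaled by 1/(6*area)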
for i in range(len(args)):
x1, y1 = args[i - 1].args
x2, y2 = args[i].args
v = x1*y2 - x2*y1
cx += v*(x1 + x2)
cy += v*(y1 + y2)
return Point(simplify(A*cx), simplify(A*cy))
def second_moment_of_area(self, point=None):
"""Returns the second moment and product moment of area of a two dimensional polygon.
Parameters
==========
point : Point, two-tuple of sympifyable objects, or None(default=None)
point is the point about which second moment of area is to be found.
If "point=None" it will be calculated about the axis passing through the
centroid of the polygon.
Returns
=======
I_xx, I_yy, I_xy : number or sympy expression
I_xx, I_yy are second moment of area of a two dimensional polygon.
I_xy is product moment of area of a two dimensional polygon.
Examples
========
>>> from sympy import Point, Polygon, symbols
>>> a, b = symbols('a, b')
>>> p1, p2, p3, p4, p5 = [(0, 0), (a, 0), (a, b), (0, b), (a/3, b/3)]
>>> rectangle = Polygon(p1, p2, p3, p4)
>>> rectangle.second_moment_of_area()
(a*b**3/12, a**3*b/12, 0)
>>> rectangle.second_moment_of_area(p5)
(a*b**3/9, a**3*b/9, a**2*b**2/36)
References
==========
https://en.wikipedia.org/wiki/Second_moment_of_area
"""
I_xx, I_yy, I_xy = 0, 0, 0
args = self.vertices
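        # standard polygon integrals: each edge contributes terms weighted by
        # the cross product x1*y2 - x2*y1; the totals are scaled by 1/12
        # (I_xx, I_yy) and 1/24 (I_xy) below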
for i in range(len(args)):
x1, y1 = args[i-1].args
x2, y2 = args[i].args
v = x1*y2 - x2*y1
I_xx += (y1**2 + y1*y2 + y2**2)*v
I_yy += (x1**2 + x1*x2 + x2**2)*v
I_xy += (x1*y2 + 2*x1*y1 + 2*x2*y2 + x2*y1)*v
A = self.area
c_x = self.centroid[0]
c_y = self.centroid[1]
# parallel axis theorem
I_xx_c = (I_xx/12) - (A*(c_y**2))
I_yy_c = (I_yy/12) - (A*(c_x**2))
I_xy_c = (I_xy/24) - (A*(c_x*c_y))
if point is None:
return I_xx_c, I_yy_c, I_xy_c
I_xx = (I_xx_c + A*((point[1]-c_y)**2))
I_yy = (I_yy_c + A*((point[0]-c_x)**2))
I_xy = (I_xy_c + A*((point[0]-c_x)*(point[1]-c_y)))
return I_xx, I_yy, I_xy
def first_moment_of_area(self, point=None):
"""
Returns the first moment of area of a two-dimensional polygon with
respect to a certain point of interest.
First moment of area is a measure of the distribution of the area
of a polygon in relation to an axis. The first moment of area of
the entire polygon about its own centroid is always zero. Therefore,
here it is calculated for an area, above or below a certain point
of interest, that makes up a smaller portion of the polygon. This
area is bounded by the point of interest and the extreme end
(top or bottom) of the polygon. The first moment for this area is
        then determined about the centroidal axis of the initial polygon.
References
==========
https://skyciv.com/docs/tutorials/section-tutorials/calculating-the-statical-or-first-moment-of-area-of-beam-sections/?cc=BMD
https://mechanicalc.com/reference/cross-sections
Parameters
==========
point: Point, two-tuple of sympifyable objects, or None (default=None)
point is the point above or below which the area of interest lies
If ``point=None`` then the centroid acts as the point of interest.
Returns
=======
Q_x, Q_y: number or sympy expressions
Q_x is the first moment of area about the x-axis
Q_y is the first moment of area about the y-axis
            A negative sign indicates that the first moment of area is
determined for a section below (or left of) the centroidal axis
Examples
========
>>> from sympy import Point, Polygon
>>> a, b = 50, 10
>>> p1, p2, p3, p4 = [(0, b), (0, 0), (a, 0), (a, b)]
>>> p = Polygon(p1, p2, p3, p4)
>>> p.first_moment_of_area()
(625, 3125)
>>> p.first_moment_of_area(point=Point(30, 7))
(525, 3000)
"""
if point:
xc, yc = self.centroid
else:
point = self.centroid
xc, yc = point
h_line = Line(point, slope=0)
v_line = Line(point, slope=S.Infinity)
h_poly = self.cut_section(h_line)
v_poly = self.cut_section(v_line)
x_min, y_min, x_max, y_max = self.bounds
poly_1 = h_poly[0] if h_poly[0].area <= h_poly[1].area else h_poly[1]
poly_2 = v_poly[0] if v_poly[0].area <= v_poly[1].area else v_poly[1]
Q_x = (poly_1.centroid.y - yc)*poly_1.area
Q_y = (poly_2.centroid.x - xc)*poly_2.area
return Q_x, Q_y
def polar_second_moment_of_area(self):
"""Returns the polar modulus of a two-dimensional polygon
It is a constituent of the second moment of area, linked through
the perpendicular axis theorem. While the planar second moment of
area describes an object's resistance to deflection (bending) when
subjected to a force applied to a plane parallel to the central
axis, the polar second moment of area describes an object's
resistance to deflection when subjected to a moment applied in a
plane perpendicular to the object's central axis (i.e. parallel to
the cross-section)
References
==========
https://en.wikipedia.org/wiki/Polar_moment_of_inertia
Examples
========
>>> from sympy import Polygon, symbols
>>> a, b = symbols('a, b')
>>> rectangle = Polygon((0, 0), (a, 0), (a, b), (0, b))
>>> rectangle.polar_second_moment_of_area()
a**3*b/12 + a*b**3/12
"""
second_moment = self.second_moment_of_area()
return second_moment[0] + second_moment[1]
def section_modulus(self, point=None):
"""Returns a tuple with the section modulus of a two-dimensional
polygon.
Section modulus is a geometric property of a polygon defined as the
ratio of second moment of area to the distance of the extreme end of
the polygon from the centroidal axis.
References
==========
https://en.wikipedia.org/wiki/Section_modulus
Parameters
==========
point : Point, two-tuple of sympifyable objects, or None(default=None)
point is the point at which section modulus is to be found.
If "point=None" it will be calculated for the point farthest from the
centroidal axis of the polygon.
Returns
=======
S_x, S_y: numbers or SymPy expressions
S_x is the section modulus with respect to the x-axis
S_y is the section modulus with respect to the y-axis
            A negative sign indicates that the section modulus is
determined for a point below the centroidal axis
Examples
========
>>> from sympy import symbols, Polygon, Point
>>> a, b = symbols('a, b', positive=True)
>>> rectangle = Polygon((0, 0), (a, 0), (a, b), (0, b))
>>> rectangle.section_modulus()
(a*b**2/6, a**2*b/6)
>>> rectangle.section_modulus(Point(a/4, b/4))
(-a*b**2/3, -a**2*b/3)
"""
x_c, y_c = self.centroid
if point is None:
# taking x and y as maximum distances from centroid
x_min, y_min, x_max, y_max = self.bounds
y = max(y_c - y_min, y_max - y_c)
x = max(x_c - x_min, x_max - x_c)
else:
# taking x and y as distances of the given point from the centroid
y = point.y - y_c
x = point.x - x_c
second_moment= self.second_moment_of_area()
S_x = second_moment[0]/y
S_y = second_moment[1]/x
return S_x, S_y
@property
def sides(self):
"""The directed line segments that form the sides of the polygon.
Returns
=======
sides : list of sides
Each side is a directed Segment.
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Segment
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.sides
[Segment2D(Point2D(0, 0), Point2D(1, 0)),
Segment2D(Point2D(1, 0), Point2D(5, 1)),
Segment2D(Point2D(5, 1), Point2D(0, 1)), Segment2D(Point2D(0, 1), Point2D(0, 0))]
"""
res = []
args = self.vertices
for i in range(-len(args), 0):
res.append(Segment(args[i], args[i + 1]))
return res
@property
def bounds(self):
"""Return a tuple (xmin, ymin, xmax, ymax) representing the bounding
rectangle for the geometric figure.
"""
verts = self.vertices
xs = [p.x for p in verts]
ys = [p.y for p in verts]
return (min(xs), min(ys), max(xs), max(ys))
def is_convex(self):
"""Is the polygon convex?
A polygon is convex if all its interior angles are less than 180
degrees and there are no intersections between sides.
Returns
=======
is_convex : boolean
True if this polygon is convex, False otherwise.
See Also
========
sympy.geometry.util.convex_hull
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.is_convex()
True
"""
# Determine orientation of points
args = self.vertices
cw = self._isright(args[-2], args[-1], args[0])
for i in range(1, len(args)):
if cw ^ self._isright(args[i - 2], args[i - 1], args[i]):
return False
# check for intersecting sides
sides = self.sides
for i, si in enumerate(sides):
pts = si.args
# exclude the sides connected to si
for j in range(1 if i == len(sides) - 1 else 0, i - 1):
sj = sides[j]
if sj.p1 not in pts and sj.p2 not in pts:
hit = si.intersection(sj)
if hit:
return False
return True
def encloses_point(self, p):
"""
Return True if p is enclosed by (is inside of) self.
Notes
=====
Being on the border of self is considered False.
Parameters
==========
p : Point
Returns
=======
encloses_point : True, False or None
See Also
========
sympy.geometry.point.Point, sympy.geometry.ellipse.Ellipse.encloses_point
Examples
========
>>> from sympy import Polygon, Point
>>> from sympy.abc import t
>>> p = Polygon((0, 0), (4, 0), (4, 4))
>>> p.encloses_point(Point(2, 1))
True
>>> p.encloses_point(Point(2, 2))
False
>>> p.encloses_point(Point(5, 5))
False
References
==========
[1] http://paulbourke.net/geometry/polygonmesh/#insidepoly
"""
p = Point(p, dim=2)
if p in self.vertices or any(p in s for s in self.sides):
return False
# move to p, checking that the result is numeric
lit = []
for v in self.vertices:
lit.append(v - p) # the difference is simplified
if lit[-1].free_symbols:
return None
poly = Polygon(*lit)
# polygon closure is assumed in the following test but Polygon removes duplicate pts so
# the last point has to be added so all sides are computed. Using Polygon.sides is
# not good since Segments are unordered.
args = poly.args
indices = list(range(-len(args), 1))
if poly.is_convex():
orientation = None
for i in indices:
a = args[i]
b = args[i + 1]
test = ((-a.y)*(b.x - a.x) - (-a.x)*(b.y - a.y)).is_negative
if orientation is None:
orientation = test
elif test is not orientation:
return False
return True
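        # non-convex case: cast a ray along the non-negative x-axis from the
        # origin (the polygon was translated so that p is at the origin) and
        # count edge crossings; an odd count means p is enclosed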
hit_odd = False
p1x, p1y = args[0].args
for i in indices[1:]:
p2x, p2y = args[i].args
if 0 > min(p1y, p2y):
if 0 <= max(p1y, p2y):
if 0 <= max(p1x, p2x):
if p1y != p2y:
xinters = (-p1y)*(p2x - p1x)/(p2y - p1y) + p1x
if p1x == p2x or 0 <= xinters:
hit_odd = not hit_odd
p1x, p1y = p2x, p2y
return hit_odd
def arbitrary_point(self, parameter='t'):
"""A parameterized point on the polygon.
The parameter, varying from 0 to 1, assigns points to the position on
the perimeter that is that fraction of the total perimeter. So the
point evaluated at t=1/2 would return the point from the first vertex
that is 1/2 way around the polygon.
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
arbitrary_point : Point
Raises
======
ValueError
When `parameter` already appears in the Polygon's definition.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Polygon, S, Symbol
>>> t = Symbol('t', real=True)
>>> tri = Polygon((0, 0), (1, 0), (1, 1))
>>> p = tri.arbitrary_point('t')
>>> perimeter = tri.perimeter
>>> s1, s2 = [s.length for s in tri.sides[:2]]
>>> p.subs(t, (s1 + s2/2)/perimeter)
Point2D(1, 1/2)
"""
t = _symbol(parameter, real=True)
if t.name in (f.name for f in self.free_symbols):
raise ValueError('Symbol %s already appears in object and cannot be used as a parameter.' % t.name)
sides = []
perimeter = self.perimeter
perim_fraction_start = 0
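        # build a Piecewise expression: each side covers the sub-interval of t
        # equal to its fraction of the total perimeter and is reparametrized
        # onto that interval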
for s in self.sides:
side_perim_fraction = s.length/perimeter
perim_fraction_end = perim_fraction_start + side_perim_fraction
pt = s.arbitrary_point(parameter).subs(
t, (t - perim_fraction_start)/side_perim_fraction)
sides.append(
(pt, (And(perim_fraction_start <= t, t < perim_fraction_end))))
perim_fraction_start = perim_fraction_end
return Piecewise(*sides)
def parameter_value(self, other, t):
from sympy.solvers.solvers import solve
        if not isinstance(other, GeometryEntity):
            other = Point(other, dim=self.ambient_dimension)
        if not isinstance(other, Point):
            raise ValueError("other must be a point")
if other.free_symbols:
raise NotImplementedError('non-numeric coordinates')
unknown = False
T = Dummy('t', real=True)
p = self.arbitrary_point(T)
for pt, cond in p.args:
sol = solve(pt - other, T, dict=True)
if not sol:
continue
value = sol[0][T]
if simplify(cond.subs(T, value)) == True:
return {t: value}
unknown = True
if unknown:
raise ValueError("Given point may not be on %s" % func_name(self))
raise ValueError("Given point is not on %s" % func_name(self))
def plot_interval(self, parameter='t'):
"""The plot interval for the default geometric plot of the polygon.
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
plot_interval : list (plot interval)
[parameter, lower_bound, upper_bound]
Examples
========
>>> from sympy import Polygon
>>> p = Polygon((0, 0), (1, 0), (1, 1))
>>> p.plot_interval()
[t, 0, 1]
"""
t = Symbol(parameter, real=True)
return [t, 0, 1]
def intersection(self, o):
"""The intersection of polygon and geometry entity.
The intersection may be empty and can contain individual Points and
complete Line Segments.
Parameters
==========
other: GeometryEntity
Returns
=======
intersection : list
The list of Segments and Points
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Segment
Examples
========
>>> from sympy import Point, Polygon, Line
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly1 = Polygon(p1, p2, p3, p4)
>>> p5, p6, p7 = map(Point, [(3, 2), (1, -1), (0, 2)])
>>> poly2 = Polygon(p5, p6, p7)
>>> poly1.intersection(poly2)
[Point2D(1/3, 1), Point2D(2/3, 0), Point2D(9/5, 1/5), Point2D(7/3, 1)]
>>> poly1.intersection(Line(p1, p2))
[Segment2D(Point2D(0, 0), Point2D(1, 0))]
>>> poly1.intersection(p1)
[Point2D(0, 0)]
"""
intersection_result = []
k = o.sides if isinstance(o, Polygon) else [o]
for side in self.sides:
for side1 in k:
intersection_result.extend(side.intersection(side1))
intersection_result = list(uniq(intersection_result))
points = [entity for entity in intersection_result if isinstance(entity, Point)]
segments = [entity for entity in intersection_result if isinstance(entity, Segment)]
if points and segments:
points_in_segments = list(uniq([point for point in points for segment in segments if point in segment]))
if points_in_segments:
for i in points_in_segments:
points.remove(i)
return list(ordered(segments + points))
else:
return list(ordered(intersection_result))
def cut_section(self, line):
"""
Returns a tuple of two polygon segments that lie above and below
the intersecting line respectively.
Parameters
==========
line: Line object of geometry module
line which cuts the Polygon. The part of the Polygon that lies
above and below this line is returned.
Returns
=======
upper_polygon, lower_polygon: Polygon objects or None
upper_polygon is the polygon that lies above the given line.
lower_polygon is the polygon that lies below the given line.
            upper_polygon and lower_polygon are ``None`` when no polygon
exists above the line or below the line.
Raises
======
ValueError: When the line does not intersect the polygon
References
==========
https://github.com/sympy/sympy/wiki/A-method-to-return-a-cut-section-of-any-polygon-geometry
Examples
========
>>> from sympy import Point, Symbol, Polygon, Line
>>> a, b = 20, 10
>>> p1, p2, p3, p4 = [(0, b), (0, 0), (a, 0), (a, b)]
>>> rectangle = Polygon(p1, p2, p3, p4)
>>> t = rectangle.cut_section(Line((0, 5), slope=0))
>>> t
(Polygon(Point2D(0, 10), Point2D(0, 5), Point2D(20, 5), Point2D(20, 10)),
Polygon(Point2D(0, 5), Point2D(0, 0), Point2D(20, 0), Point2D(20, 5)))
>>> upper_segment, lower_segment = t
>>> upper_segment.area
100
>>> upper_segment.centroid
Point2D(10, 15/2)
>>> lower_segment.centroid
Point2D(10, 5/2)
"""
intersection_points = self.intersection(line)
if not intersection_points:
raise ValueError("This line does not intersect the polygon")
points = list(self.vertices)
points.append(points[0])
x, y = symbols('x, y', real=True, cls=Dummy)
eq = line.equation(x, y)
        # considering the equation of the line to be `ax + by + c`
a = eq.coeff(x)
b = eq.coeff(y)
upper_vertices = []
lower_vertices = []
# prev is true when previous point is above the line
prev = True
prev_point = None
for point in points:
# when coefficient of y is 0, right side of the line is
# considered
compare = eq.subs({x: point.x, y: point.y})/b if b \
else eq.subs(x, point.x)/a
# if point lies above line
if compare > 0:
if not prev:
# if previous point lies below the line, the intersection
                    # point of the polygon edge and the line has to be included
edge = Line(point, prev_point)
new_point = edge.intersection(line)
upper_vertices.append(new_point[0])
lower_vertices.append(new_point[0])
upper_vertices.append(point)
prev = True
else:
if prev and prev_point:
edge = Line(point, prev_point)
new_point = edge.intersection(line)
upper_vertices.append(new_point[0])
lower_vertices.append(new_point[0])
lower_vertices.append(point)
prev = False
prev_point = point
upper_polygon, lower_polygon = None, None
if upper_vertices and isinstance(Polygon(*upper_vertices), Polygon):
upper_polygon = Polygon(*upper_vertices)
if lower_vertices and isinstance(Polygon(*lower_vertices), Polygon):
lower_polygon = Polygon(*lower_vertices)
return upper_polygon, lower_polygon
def distance(self, o):
"""
Returns the shortest distance between self and o.
If o is a point, then self does not need to be convex.
If o is another polygon self and o must be convex.
Examples
========
>>> from sympy import Point, Polygon, RegularPolygon
>>> p1, p2 = map(Point, [(0, 0), (7, 5)])
>>> poly = Polygon(*RegularPolygon(p1, 1, 3).vertices)
>>> poly.distance(p2)
sqrt(61)
"""
if isinstance(o, Point):
dist = oo
for side in self.sides:
current = side.distance(o)
if current == 0:
return S.Zero
elif current < dist:
dist = current
return dist
elif isinstance(o, Polygon) and self.is_convex() and o.is_convex():
return self._do_poly_distance(o)
raise NotImplementedError()
def _do_poly_distance(self, e2):
"""
Calculates the least distance between the exteriors of two
convex polygons e1 and e2. Does not check for the convexity
of the polygons as this is checked by Polygon.distance.
Notes
=====
- Prints a warning if the two polygons possibly intersect as the return
          value will not be valid in such a case. For a more thorough test of
intersection use intersection().
See Also
========
sympy.geometry.point.Point.distance
Examples
========
>>> from sympy.geometry import Point, Polygon
>>> square = Polygon(Point(0, 0), Point(0, 1), Point(1, 1), Point(1, 0))
>>> triangle = Polygon(Point(1, 2), Point(2, 2), Point(2, 1))
>>> square._do_poly_distance(triangle)
sqrt(2)/2
Description of method used
==========================
Method:
[1] http://cgm.cs.mcgill.ca/~orm/mind2p.html
Uses rotating calipers:
[2] https://en.wikipedia.org/wiki/Rotating_calipers
and antipodal points:
[3] https://en.wikipedia.org/wiki/Antipodal_point
"""
e1 = self
'''Tests for a possible intersection between the polygons and outputs a warning'''
e1_center = e1.centroid
e2_center = e2.centroid
e1_max_radius = S.Zero
e2_max_radius = S.Zero
for vertex in e1.vertices:
r = Point.distance(e1_center, vertex)
if e1_max_radius < r:
e1_max_radius = r
for vertex in e2.vertices:
r = Point.distance(e2_center, vertex)
if e2_max_radius < r:
e2_max_radius = r
center_dist = Point.distance(e1_center, e2_center)
if center_dist <= e1_max_radius + e2_max_radius:
warnings.warn("Polygons may intersect producing erroneous output")
'''
Find the upper rightmost vertex of e1 and the lowest leftmost vertex of e2
'''
e1_ymax = Point(0, -oo)
e2_ymin = Point(0, oo)
for vertex in e1.vertices:
if vertex.y > e1_ymax.y or (vertex.y == e1_ymax.y and vertex.x > e1_ymax.x):
e1_ymax = vertex
for vertex in e2.vertices:
if vertex.y < e2_ymin.y or (vertex.y == e2_ymin.y and vertex.x < e2_ymin.x):
e2_ymin = vertex
min_dist = Point.distance(e1_ymax, e2_ymin)
'''
Produce a dictionary with vertices of e1 as the keys and, for each vertex, the points
to which the vertex is connected as its value. The same is then done for e2.
'''
e1_connections = {}
e2_connections = {}
for side in e1.sides:
if side.p1 in e1_connections:
e1_connections[side.p1].append(side.p2)
else:
e1_connections[side.p1] = [side.p2]
if side.p2 in e1_connections:
e1_connections[side.p2].append(side.p1)
else:
e1_connections[side.p2] = [side.p1]
for side in e2.sides:
if side.p1 in e2_connections:
e2_connections[side.p1].append(side.p2)
else:
e2_connections[side.p1] = [side.p2]
if side.p2 in e2_connections:
e2_connections[side.p2].append(side.p1)
else:
e2_connections[side.p2] = [side.p1]
e1_current = e1_ymax
e2_current = e2_ymin
support_line = Line(Point(S.Zero, S.Zero), Point(S.One, S.Zero))
'''
Determine which point in e1 and e2 will be selected after e2_ymin and e1_ymax,
this information combined with the above produced dictionaries determines the
path that will be taken around the polygons
'''
point1 = e1_connections[e1_ymax][0]
point2 = e1_connections[e1_ymax][1]
angle1 = support_line.angle_between(Line(e1_ymax, point1))
angle2 = support_line.angle_between(Line(e1_ymax, point2))
if angle1 < angle2:
e1_next = point1
elif angle2 < angle1:
e1_next = point2
elif Point.distance(e1_ymax, point1) > Point.distance(e1_ymax, point2):
e1_next = point2
else:
e1_next = point1
point1 = e2_connections[e2_ymin][0]
point2 = e2_connections[e2_ymin][1]
angle1 = support_line.angle_between(Line(e2_ymin, point1))
angle2 = support_line.angle_between(Line(e2_ymin, point2))
if angle1 > angle2:
e2_next = point1
elif angle2 > angle1:
e2_next = point2
elif Point.distance(e2_ymin, point1) > Point.distance(e2_ymin, point2):
e2_next = point2
else:
e2_next = point1
'''
Loop which determines the distance between anti-podal pairs and updates the
minimum distance accordingly. It repeats until it reaches the starting position.
'''
while True:
e1_angle = support_line.angle_between(Line(e1_current, e1_next))
e2_angle = pi - support_line.angle_between(Line(
e2_current, e2_next))
if (e1_angle < e2_angle) is True:
support_line = Line(e1_current, e1_next)
e1_segment = Segment(e1_current, e1_next)
min_dist_current = e1_segment.distance(e2_current)
if min_dist_current.evalf() < min_dist.evalf():
min_dist = min_dist_current
if e1_connections[e1_next][0] != e1_current:
e1_current = e1_next
e1_next = e1_connections[e1_next][0]
else:
e1_current = e1_next
e1_next = e1_connections[e1_next][1]
elif (e1_angle > e2_angle) is True:
support_line = Line(e2_next, e2_current)
e2_segment = Segment(e2_current, e2_next)
min_dist_current = e2_segment.distance(e1_current)
if min_dist_current.evalf() < min_dist.evalf():
min_dist = min_dist_current
if e2_connections[e2_next][0] != e2_current:
e2_current = e2_next
e2_next = e2_connections[e2_next][0]
else:
e2_current = e2_next
e2_next = e2_connections[e2_next][1]
else:
support_line = Line(e1_current, e1_next)
e1_segment = Segment(e1_current, e1_next)
e2_segment = Segment(e2_current, e2_next)
min1 = e1_segment.distance(e2_next)
min2 = e2_segment.distance(e1_next)
min_dist_current = min(min1, min2)
if min_dist_current.evalf() < min_dist.evalf():
min_dist = min_dist_current
if e1_connections[e1_next][0] != e1_current:
e1_current = e1_next
e1_next = e1_connections[e1_next][0]
else:
e1_current = e1_next
e1_next = e1_connections[e1_next][1]
if e2_connections[e2_next][0] != e2_current:
e2_current = e2_next
e2_next = e2_connections[e2_next][0]
else:
e2_current = e2_next
e2_next = e2_connections[e2_next][1]
if e1_current == e1_ymax and e2_current == e2_ymin:
break
return min_dist
def _svg(self, scale_factor=1., fill_color="#66cc99"):
"""Returns SVG path element for the Polygon.
Parameters
==========
scale_factor : float
Multiplication factor for the SVG stroke-width. Default is 1.
fill_color : str, optional
Hex string for fill color. Default is "#66cc99".
"""
from sympy.core.evalf import N
verts = map(N, self.vertices)
coords = ["{0},{1}".format(p.x, p.y) for p in verts]
path = "M {0} L {1} z".format(coords[0], " L ".join(coords[1:]))
return (
'<path fill-rule="evenodd" fill="{2}" stroke="#555555" '
'stroke-width="{0}" opacity="0.6" d="{1}" />'
).format(2. * scale_factor, path, fill_color)
def _hashable_content(self):
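        # Polygons compare equal regardless of which vertex is listed first or
        # of the direction of traversal, so hash a canonical form: relabel the
        # vertices, take the lexicographically least rotation of the label
        # sequence in both directions, and map the smaller one back to Points.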
D = {}
def ref_list(point_list):
kee = {}
for i, p in enumerate(ordered(set(point_list))):
kee[p] = i
D[i] = p
return [kee[p] for p in point_list]
S1 = ref_list(self.args)
r_nor = rotate_left(S1, least_rotation(S1))
S2 = ref_list(list(reversed(self.args)))
r_rev = rotate_left(S2, least_rotation(S2))
if r_nor < r_rev:
r = r_nor
else:
r = r_rev
canonical_args = [ D[order] for order in r ]
return tuple(canonical_args)
def __contains__(self, o):
"""
        Return True if o is contained within the boundary lines of self.
Parameters
==========
other : GeometryEntity
Returns
=======
contained in : bool
The points (and sides, if applicable) are contained in self.
See Also
========
sympy.geometry.entity.GeometryEntity.encloses
Examples
========
>>> from sympy import Line, Segment, Point
>>> p = Point(0, 0)
>>> q = Point(1, 1)
>>> s = Segment(p, q*2)
>>> l = Line(p, q)
>>> p in q
False
>>> p in s
True
>>> q*3 in s
False
>>> s in l
True
"""
if isinstance(o, Polygon):
return self == o
elif isinstance(o, Segment):
return any(o in s for s in self.sides)
elif isinstance(o, Point):
if o in self.vertices:
return True
for side in self.sides:
if o in side:
return True
return False
def bisectors(p, prec=None):
"""Returns angle bisectors of a polygon. If prec is given
then approximate the point defining the ray to that precision.
The distance between the points defining the bisector ray is 1.
Examples
========
>>> from sympy import Polygon, Point
>>> p = Polygon(Point(0, 0), Point(2, 0), Point(1, 1), Point(0, 3))
>>> p.bisectors(2)
{Point2D(0, 0): Ray2D(Point2D(0, 0), Point2D(0.71, 0.71)),
Point2D(0, 3): Ray2D(Point2D(0, 3), Point2D(0.23, 2.0)),
Point2D(1, 1): Ray2D(Point2D(1, 1), Point2D(0.19, 0.42)),
Point2D(2, 0): Ray2D(Point2D(2, 0), Point2D(1.1, 0.38))}
"""
b = {}
pts = list(p.args)
pts.append(pts[0]) # close it
cw = Polygon._isright(*pts[:3])
if cw:
pts = list(reversed(pts))
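        # for each vertex, rotate the ray toward the next vertex by half the
        # interior angle and normalize its direction to unit length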
for v, a in p.angles.items():
i = pts.index(v)
p1, p2 = Point._normalize_dimension(pts[i], pts[i + 1])
ray = Ray(p1, p2).rotate(a/2, v)
dir = ray.direction
ray = Ray(ray.p1, ray.p1 + dir/dir.distance((0, 0)))
if prec is not None:
ray = Ray(ray.p1, ray.p2.n(prec))
b[v] = ray
return b
class RegularPolygon(Polygon):
"""
A regular polygon.
Such a polygon has all internal angles equal and all sides the same length.
Parameters
==========
center : Point
radius : number or Basic instance
The distance from the center to a vertex
n : int
The number of sides
Attributes
==========
vertices
center
radius
rotation
apothem
interior_angle
exterior_angle
circumcircle
incircle
angles
Raises
======
GeometryError
If the `center` is not a Point, or the `radius` is not a number or Basic
instance, or the number of sides, `n`, is less than three.
Notes
=====
    A RegularPolygon can be instantiated with Polygon using the kwarg ``n``.
Regular polygons are instantiated with a center, radius, number of sides
and a rotation angle. Whereas the arguments of a Polygon are vertices, the
vertices of the RegularPolygon must be obtained with the vertices method.
See Also
========
sympy.geometry.point.Point, Polygon
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> r = RegularPolygon(Point(0, 0), 5, 3)
>>> r
RegularPolygon(Point2D(0, 0), 5, 3, 0)
>>> r.vertices[0]
Point2D(5, 0)
"""
__slots__ = ('_n', '_center', '_radius', '_rot')
def __new__(self, c, r, n, rot=0, **kwargs):
r, n, rot = map(sympify, (r, n, rot))
c = Point(c, dim=2, **kwargs)
if not isinstance(r, Expr):
raise GeometryError("r must be an Expr object, not %s" % r)
if n.is_Number:
as_int(n) # let an error raise if necessary
if n < 3:
raise GeometryError("n must be a >= 3, not %s" % n)
obj = GeometryEntity.__new__(self, c, r, n, **kwargs)
obj._n = n
obj._center = c
obj._radius = r
obj._rot = rot % (2*S.Pi/n) if rot.is_number else rot
return obj
@property
def args(self):
"""
Returns the center point, the radius,
the number of sides, and the orientation angle.
Examples
========
>>> from sympy import RegularPolygon, Point
>>> r = RegularPolygon(Point(0, 0), 5, 3)
>>> r.args
(Point2D(0, 0), 5, 3, 0)
"""
return self._center, self._radius, self._n, self._rot
def __str__(self):
return 'RegularPolygon(%s, %s, %s, %s)' % tuple(self.args)
def __repr__(self):
return 'RegularPolygon(%s, %s, %s, %s)' % tuple(self.args)
@property
def area(self):
"""Returns the area.
Examples
========
>>> from sympy.geometry import RegularPolygon
>>> square = RegularPolygon((0, 0), 1, 4)
>>> square.area
2
>>> _ == square.length**2
True
"""
c, r, n, rot = self.args
return sign(r)*n*self.length**2/(4*tan(pi/n))
@property
def length(self):
"""Returns the length of the sides.
The half-length of the side and the apothem form two legs
of a right triangle whose hypotenuse is the radius of the
regular polygon.
Examples
========
>>> from sympy.geometry import RegularPolygon
>>> from sympy import sqrt
>>> s = square_in_unit_circle = RegularPolygon((0, 0), 1, 4)
>>> s.length
sqrt(2)
>>> sqrt((_/2)**2 + s.apothem**2) == s.radius
True
"""
return self.radius*2*sin(pi/self._n)
@property
def center(self):
"""The center of the RegularPolygon
This is also the center of the circumscribing circle.
Returns
=======
center : Point
See Also
========
sympy.geometry.point.Point, sympy.geometry.ellipse.Ellipse.center
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 5, 4)
>>> rp.center
Point2D(0, 0)
"""
return self._center
centroid = center
@property
def circumcenter(self):
"""
Alias for center.
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 5, 4)
>>> rp.circumcenter
Point2D(0, 0)
"""
return self.center
@property
def radius(self):
"""Radius of the RegularPolygon
This is also the radius of the circumscribing circle.
Returns
=======
radius : number or instance of Basic
See Also
========
sympy.geometry.line.Segment.length, sympy.geometry.ellipse.Circle.radius
Examples
========
>>> from sympy import Symbol
>>> from sympy.geometry import RegularPolygon, Point
>>> radius = Symbol('r')
>>> rp = RegularPolygon(Point(0, 0), radius, 4)
>>> rp.radius
r
"""
return self._radius
@property
def circumradius(self):
"""
Alias for radius.
Examples
========
>>> from sympy import Symbol
>>> from sympy.geometry import RegularPolygon, Point
>>> radius = Symbol('r')
>>> rp = RegularPolygon(Point(0, 0), radius, 4)
>>> rp.circumradius
r
"""
return self.radius
@property
def rotation(self):
"""CCW angle by which the RegularPolygon is rotated
Returns
=======
rotation : number or instance of Basic
Examples
========
>>> from sympy import pi
>>> from sympy.abc import a
>>> from sympy.geometry import RegularPolygon, Point
>>> RegularPolygon(Point(0, 0), 3, 4, pi/4).rotation
pi/4
Numerical rotation angles are made canonical:
>>> RegularPolygon(Point(0, 0), 3, 4, a).rotation
a
>>> RegularPolygon(Point(0, 0), 3, 4, pi).rotation
0
"""
return self._rot
@property
def apothem(self):
"""The inradius of the RegularPolygon.
The apothem/inradius is the radius of the inscribed circle.
Returns
=======
apothem : number or instance of Basic
See Also
========
sympy.geometry.line.Segment.length, sympy.geometry.ellipse.Circle.radius
Examples
========
>>> from sympy import Symbol
>>> from sympy.geometry import RegularPolygon, Point
>>> radius = Symbol('r')
>>> rp = RegularPolygon(Point(0, 0), radius, 4)
>>> rp.apothem
sqrt(2)*r/2
"""
return self.radius * cos(S.Pi/self._n)
@property
def inradius(self):
"""
Alias for apothem.
Examples
========
>>> from sympy import Symbol
>>> from sympy.geometry import RegularPolygon, Point
>>> radius = Symbol('r')
>>> rp = RegularPolygon(Point(0, 0), radius, 4)
>>> rp.inradius
sqrt(2)*r/2
"""
return self.apothem
@property
def interior_angle(self):
"""Measure of the interior angles.
Returns
=======
interior_angle : number
See Also
========
sympy.geometry.line.LinearEntity.angle_between
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 4, 8)
>>> rp.interior_angle
3*pi/4
"""
return (self._n - 2)*S.Pi/self._n
@property
def exterior_angle(self):
"""Measure of the exterior angles.
Returns
=======
exterior_angle : number
See Also
========
sympy.geometry.line.LinearEntity.angle_between
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 4, 8)
>>> rp.exterior_angle
pi/4
"""
return 2*S.Pi/self._n
@property
def circumcircle(self):
"""The circumcircle of the RegularPolygon.
Returns
=======
circumcircle : Circle
See Also
========
circumcenter, sympy.geometry.ellipse.Circle
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 4, 8)
>>> rp.circumcircle
Circle(Point2D(0, 0), 4)
"""
return Circle(self.center, self.radius)
@property
def incircle(self):
"""The incircle of the RegularPolygon.
Returns
=======
incircle : Circle
See Also
========
inradius, sympy.geometry.ellipse.Circle
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 4, 7)
>>> rp.incircle
Circle(Point2D(0, 0), 4*cos(pi/7))
"""
return Circle(self.center, self.apothem)
@property
def angles(self):
"""
        Returns a dictionary whose keys are the vertices of the Polygon
        and whose values are the interior angle at each vertex.
Examples
========
>>> from sympy import RegularPolygon, Point
>>> r = RegularPolygon(Point(0, 0), 5, 3)
>>> r.angles
{Point2D(-5/2, -5*sqrt(3)/2): pi/3,
Point2D(-5/2, 5*sqrt(3)/2): pi/3,
Point2D(5, 0): pi/3}
"""
ret = {}
ang = self.interior_angle
for v in self.vertices:
ret[v] = ang
return ret
def encloses_point(self, p):
"""
Return True if p is enclosed by (is inside of) self.
Notes
=====
Being on the border of self is considered False.
The general Polygon.encloses_point method is called only if
a point is not within or beyond the incircle or circumcircle,
respectively.
Parameters
==========
p : Point
Returns
=======
encloses_point : True, False or None
See Also
========
sympy.geometry.ellipse.Ellipse.encloses_point
Examples
========
>>> from sympy import RegularPolygon, S, Point, Symbol
>>> p = RegularPolygon((0, 0), 3, 4)
>>> p.encloses_point(Point(0, 0))
True
>>> r, R = p.inradius, p.circumradius
>>> p.encloses_point(Point((r + R)/2, 0))
True
>>> p.encloses_point(Point(R/2, R/2 + (R - r)/10))
False
>>> t = Symbol('t', real=True)
>>> p.encloses_point(p.arbitrary_point().subs(t, S.Half))
False
>>> p.encloses_point(Point(5, 5))
False
"""
c = self.center
d = Segment(c, p).length
if d >= self.radius:
return False
elif d < self.inradius:
return True
else:
# now enumerate the RegularPolygon like a general polygon.
return Polygon.encloses_point(self, p)
def spin(self, angle):
"""Increment *in place* the virtual Polygon's rotation by ccw angle.
See also: rotate method which moves the center.
>>> from sympy import Polygon, Point, pi
>>> r = Polygon(Point(0,0), 1, n=3)
>>> r.vertices[0]
Point2D(1, 0)
>>> r.spin(pi/6)
>>> r.vertices[0]
Point2D(sqrt(3)/2, 1/2)
See Also
========
rotation
rotate : Creates a copy of the RegularPolygon rotated about a Point
"""
self._rot += angle
def rotate(self, angle, pt=None):
"""Override GeometryEntity.rotate to first rotate the RegularPolygon
about its center.
>>> from sympy import Point, RegularPolygon, Polygon, pi
>>> t = RegularPolygon(Point(1, 0), 1, 3)
>>> t.vertices[0] # vertex on x-axis
Point2D(2, 0)
>>> t.rotate(pi/2).vertices[0] # vertex on y axis now
Point2D(0, 2)
See Also
========
rotation
spin : Rotates a RegularPolygon in place
"""
r = type(self)(*self.args) # need a copy or else changes are in-place
r._rot += angle
return GeometryEntity.rotate(r, angle, pt)
def scale(self, x=1, y=1, pt=None):
"""Override GeometryEntity.scale since it is the radius that must be
scaled (if x == y) or else a new Polygon must be returned.
>>> from sympy import RegularPolygon
Symmetric scaling returns a RegularPolygon:
>>> RegularPolygon((0, 0), 1, 4).scale(2, 2)
RegularPolygon(Point2D(0, 0), 2, 4, 0)
Asymmetric scaling returns a kite as a Polygon:
>>> RegularPolygon((0, 0), 1, 4).scale(2, 1)
Polygon(Point2D(2, 0), Point2D(0, 1), Point2D(-2, 0), Point2D(0, -1))
"""
if pt:
pt = Point(pt, dim=2)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
if x != y:
return Polygon(*self.vertices).scale(x, y)
c, r, n, rot = self.args
r *= x
return self.func(c, r, n, rot)
def reflect(self, line):
"""Override GeometryEntity.reflect since this is not made of only
points.
Examples
========
>>> from sympy import RegularPolygon, Line
>>> RegularPolygon((0, 0), 1, 4).reflect(Line((0, 1), slope=-2))
RegularPolygon(Point2D(4/5, 2/5), -1, 4, atan(4/3))
"""
c, r, n, rot = self.args
v = self.vertices[0]
d = v - c
cc = c.reflect(line)
vv = v.reflect(line)
dd = vv - cc
# calculate rotation about the new center
# which will align the vertices
l1 = Ray((0, 0), dd)
l2 = Ray((0, 0), d)
ang = l1.closing_angle(l2)
rot += ang
# change sign of radius as point traversal is reversed
return self.func(cc, -r, n, rot)
@property
def vertices(self):
"""The vertices of the RegularPolygon.
Returns
=======
vertices : list
Each vertex is a Point.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 5, 4)
>>> rp.vertices
[Point2D(5, 0), Point2D(0, 5), Point2D(-5, 0), Point2D(0, -5)]
"""
c = self._center
r = abs(self._radius)
rot = self._rot
v = 2*S.Pi/self._n
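        # the vertices are evenly spaced around the circumscribing circle,
        # starting at angle ``rot`` from the positive x-axis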
return [Point(c.x + r*cos(k*v + rot), c.y + r*sin(k*v + rot))
for k in range(self._n)]
def __eq__(self, o):
if not isinstance(o, Polygon):
return False
elif not isinstance(o, RegularPolygon):
return Polygon.__eq__(o, self)
return self.args == o.args
def __hash__(self):
return super(RegularPolygon, self).__hash__()
class Triangle(Polygon):
"""
A polygon with three vertices and three sides.
Parameters
==========
points : sequence of Points
keyword: asa, sas, or sss to specify sides/angles of the triangle
Attributes
==========
vertices
altitudes
orthocenter
circumcenter
circumradius
circumcircle
inradius
incircle
exradii
medians
medial
nine_point_circle
Raises
======
GeometryError
If the number of vertices is not equal to three, or one of the vertices
is not a Point, or a valid keyword is not given.
See Also
========
sympy.geometry.point.Point, Polygon
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> Triangle(Point(0, 0), Point(4, 0), Point(4, 3))
Triangle(Point2D(0, 0), Point2D(4, 0), Point2D(4, 3))
Keywords sss, sas, or asa can be used to give the desired
side lengths (in order) and interior angles (in degrees) that
define the triangle:
>>> Triangle(sss=(3, 4, 5))
Triangle(Point2D(0, 0), Point2D(3, 0), Point2D(3, 4))
>>> Triangle(asa=(30, 1, 30))
Triangle(Point2D(0, 0), Point2D(1, 0), Point2D(1/2, sqrt(3)/6))
>>> Triangle(sas=(1, 45, 2))
Triangle(Point2D(0, 0), Point2D(2, 0), Point2D(sqrt(2)/2, sqrt(2)/2))
"""
def __new__(cls, *args, **kwargs):
if len(args) != 3:
if 'sss' in kwargs:
return _sss(*[simplify(a) for a in kwargs['sss']])
if 'asa' in kwargs:
return _asa(*[simplify(a) for a in kwargs['asa']])
if 'sas' in kwargs:
return _sas(*[simplify(a) for a in kwargs['sas']])
msg = "Triangle instantiates with three points or a valid keyword."
raise GeometryError(msg)
vertices = [Point(a, dim=2, **kwargs) for a in args]
# remove consecutive duplicates
nodup = []
for p in vertices:
if nodup and p == nodup[-1]:
continue
nodup.append(p)
if len(nodup) > 1 and nodup[-1] == nodup[0]:
nodup.pop() # last point was same as first
# remove collinear points
i = -3
while i < len(nodup) - 3 and len(nodup) > 2:
a, b, c = sorted(
[nodup[i], nodup[i + 1], nodup[i + 2]], key=default_sort_key)
if Point.is_collinear(a, b, c):
nodup[i] = a
nodup[i + 1] = None
nodup.pop(i + 1)
i += 1
vertices = list(filter(lambda x: x is not None, nodup))
if len(vertices) == 3:
return GeometryEntity.__new__(cls, *vertices, **kwargs)
elif len(vertices) == 2:
return Segment(*vertices, **kwargs)
else:
return Point(*vertices, **kwargs)
@property
def vertices(self):
"""The triangle's vertices
Returns
=======
vertices : tuple
Each element in the tuple is a Point
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t = Triangle(Point(0, 0), Point(4, 0), Point(4, 3))
>>> t.vertices
(Point2D(0, 0), Point2D(4, 0), Point2D(4, 3))
"""
return self.args
def is_similar(t1, t2):
"""Is another triangle similar to this one.
Two triangles are similar if one can be uniformly scaled to the other.
Parameters
==========
other: Triangle
Returns
=======
is_similar : boolean
See Also
========
sympy.geometry.entity.GeometryEntity.is_similar
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(4, 3))
>>> t2 = Triangle(Point(0, 0), Point(-4, 0), Point(-4, -3))
>>> t1.is_similar(t2)
True
>>> t2 = Triangle(Point(0, 0), Point(-4, 0), Point(-4, -4))
>>> t1.is_similar(t2)
False
"""
if not isinstance(t2, Polygon):
return False
s1_1, s1_2, s1_3 = [side.length for side in t1.sides]
s2 = [side.length for side in t2.sides]
def _are_similar(u1, u2, u3, v1, v2, v3):
e1 = simplify(u1/v1)
e2 = simplify(u2/v2)
e3 = simplify(u3/v3)
return bool(e1 == e2) and bool(e2 == e3)
        # There are only 6 permutations, so write them out
return _are_similar(s1_1, s1_2, s1_3, *s2) or \
_are_similar(s1_1, s1_3, s1_2, *s2) or \
_are_similar(s1_2, s1_1, s1_3, *s2) or \
_are_similar(s1_2, s1_3, s1_1, *s2) or \
_are_similar(s1_3, s1_1, s1_2, *s2) or \
_are_similar(s1_3, s1_2, s1_1, *s2)
def is_equilateral(self):
"""Are all the sides the same length?
Returns
=======
is_equilateral : boolean
See Also
========
sympy.geometry.entity.GeometryEntity.is_similar, RegularPolygon
is_isosceles, is_right, is_scalene
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(4, 3))
>>> t1.is_equilateral()
False
>>> from sympy import sqrt
>>> t2 = Triangle(Point(0, 0), Point(10, 0), Point(5, 5*sqrt(3)))
>>> t2.is_equilateral()
True
"""
return not has_variety(s.length for s in self.sides)
def is_isosceles(self):
"""Are two or more of the sides the same length?
Returns
=======
is_isosceles : boolean
See Also
========
is_equilateral, is_right, is_scalene
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(2, 4))
>>> t1.is_isosceles()
True
"""
return has_dups(s.length for s in self.sides)
def is_scalene(self):
"""Are all the sides of the triangle of different lengths?
Returns
=======
is_scalene : boolean
See Also
========
is_equilateral, is_isosceles, is_right
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(1, 4))
>>> t1.is_scalene()
True
"""
return not has_dups(s.length for s in self.sides)
def is_right(self):
"""Is the triangle right-angled.
Returns
=======
is_right : boolean
See Also
========
sympy.geometry.line.LinearEntity.is_perpendicular
is_equilateral, is_isosceles, is_scalene
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(4, 3))
>>> t1.is_right()
True
"""
s = self.sides
return Segment.is_perpendicular(s[0], s[1]) or \
Segment.is_perpendicular(s[1], s[2]) or \
Segment.is_perpendicular(s[0], s[2])
@property
def altitudes(self):
"""The altitudes of the triangle.
An altitude of a triangle is a segment through a vertex,
perpendicular to the opposite side, with length being the
height of the vertex measured from the line containing the side.
Returns
=======
altitudes : dict
The dictionary consists of keys which are vertices and values
which are Segments.
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Segment.length
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.altitudes[p1]
Segment2D(Point2D(0, 0), Point2D(1/2, 1/2))
"""
s = self.sides
v = self.vertices
return {v[0]: s[1].perpendicular_segment(v[0]),
v[1]: s[2].perpendicular_segment(v[1]),
v[2]: s[0].perpendicular_segment(v[2])}
@property
def orthocenter(self):
"""The orthocenter of the triangle.
The orthocenter is the intersection of the altitudes of a triangle.
It may lie inside, outside or on the triangle.
Returns
=======
orthocenter : Point
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.orthocenter
Point2D(0, 0)
"""
a = self.altitudes
v = self.vertices
return Line(a[v[0]]).intersection(Line(a[v[1]]))[0]
@property
def circumcenter(self):
"""The circumcenter of the triangle
The circumcenter is the center of the circumcircle.
Returns
=======
circumcenter : Point
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.circumcenter
Point2D(1/2, 1/2)
"""
a, b, c = [x.perpendicular_bisector() for x in self.sides]
        return a.intersection(b)[0]
@property
def circumradius(self):
"""The radius of the circumcircle of the triangle.
Returns
=======
        circumradius : number or Basic instance
See Also
========
sympy.geometry.ellipse.Circle.radius
Examples
========
>>> from sympy import Symbol
>>> from sympy.geometry import Point, Triangle
>>> a = Symbol('a')
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, a)
>>> t = Triangle(p1, p2, p3)
>>> t.circumradius
sqrt(a**2/4 + 1/4)
"""
return Point.distance(self.circumcenter, self.vertices[0])
@property
def circumcircle(self):
"""The circle which passes through the three vertices of the triangle.
Returns
=======
circumcircle : Circle
See Also
========
sympy.geometry.ellipse.Circle
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.circumcircle
Circle(Point2D(1/2, 1/2), sqrt(2)/2)
"""
return Circle(self.circumcenter, self.circumradius)
def bisectors(self):
"""The angle bisectors of the triangle.
An angle bisector of a triangle is a straight line through a vertex
which cuts the corresponding angle in half.
Returns
=======
bisectors : dict
Each key is a vertex (Point) and each value is the corresponding
bisector (Segment).
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Segment
Examples
========
>>> from sympy.geometry import Point, Triangle, Segment
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> from sympy import sqrt
>>> t.bisectors()[p2] == Segment(Point(1, 0), Point(0, sqrt(2) - 1))
True
"""
# use lines containing sides so containment check during
# intersection calculation can be avoided, thus reducing
# the processing time for calculating the bisectors
s = [Line(l) for l in self.sides]
v = self.vertices
c = self.incenter
l1 = Segment(v[0], Line(v[0], c).intersection(s[1])[0])
l2 = Segment(v[1], Line(v[1], c).intersection(s[2])[0])
l3 = Segment(v[2], Line(v[2], c).intersection(s[0])[0])
return {v[0]: l1, v[1]: l2, v[2]: l3}
@property
def incenter(self):
"""The center of the incircle.
The incircle is the circle which lies inside the triangle and touches
all three sides.
Returns
=======
incenter : Point
See Also
========
incircle, sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.incenter
Point2D(1 - sqrt(2)/2, 1 - sqrt(2)/2)
"""
s = self.sides
l = Matrix([s[i].length for i in [1, 2, 0]])
p = sum(l)
v = self.vertices
x = simplify(l.dot(Matrix([vi.x for vi in v]))/p)
y = simplify(l.dot(Matrix([vi.y for vi in v]))/p)
return Point(x, y)
@property
def inradius(self):
"""The radius of the incircle.
Returns
=======
        inradius : number or Basic instance
See Also
========
incircle, sympy.geometry.ellipse.Circle.radius
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(4, 0), Point(0, 3)
>>> t = Triangle(p1, p2, p3)
>>> t.inradius
1
"""
return simplify(2 * self.area / self.perimeter)
@property
def incircle(self):
"""The incircle of the triangle.
The incircle is the circle which lies inside the triangle and touches
all three sides.
Returns
=======
incircle : Circle
See Also
========
sympy.geometry.ellipse.Circle
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(2, 0), Point(0, 2)
>>> t = Triangle(p1, p2, p3)
>>> t.incircle
Circle(Point2D(2 - sqrt(2), 2 - sqrt(2)), 2 - sqrt(2))
"""
return Circle(self.incenter, self.inradius)
@property
def exradii(self):
"""The radius of excircles of a triangle.
An excircle of the triangle is a circle lying outside the triangle,
tangent to one of its sides and tangent to the extensions of the
other two.
Returns
=======
exradii : dict
See Also
========
sympy.geometry.polygon.Triangle.inradius
Examples
========
        Each exradius is keyed to the side of the triangle which its excircle
        touches, e.g. the exradius of the excircle tangent to side 2 is:
>>> from sympy.geometry import Point, Triangle, Segment2D, Point2D
>>> p1, p2, p3 = Point(0, 0), Point(6, 0), Point(0, 2)
>>> t = Triangle(p1, p2, p3)
>>> t.exradii[t.sides[2]]
-2 + sqrt(10)
References
==========
        .. [1] http://mathworld.wolfram.com/Exradius.html
        .. [2] http://mathworld.wolfram.com/Excircles.html
"""
side = self.sides
a = side[0].length
b = side[1].length
c = side[2].length
s = (a+b+c)/2
area = self.area
exradii = {self.sides[0]: simplify(area/(s-a)),
self.sides[1]: simplify(area/(s-b)),
self.sides[2]: simplify(area/(s-c))}
return exradii
@property
def excenters(self):
"""Excenters of the triangle.
An excenter is the center of a circle that is tangent to a side of the
triangle and the extensions of the other two sides.
Returns
=======
excenters : dict
Examples
========
        The excenters are keyed to the side of the triangle to which the
        corresponding excircle is tangent, e.g. the excenter of the excircle
        touching side 0 is:
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(6, 0), Point(0, 2)
>>> t = Triangle(p1, p2, p3)
>>> t.excenters[t.sides[0]]
Point2D(12*sqrt(10), 2/3 + sqrt(10)/3)
See Also
========
sympy.geometry.polygon.Triangle.exradii
References
==========
.. [1] http://mathworld.wolfram.com/Excircles.html
"""
s = self.sides
v = self.vertices
a = s[0].length
b = s[1].length
c = s[2].length
x = [v[0].x, v[1].x, v[2].x]
y = [v[0].y, v[1].y, v[2].y]
exc_coords = {
"x1": simplify(-a*x[0]+b*x[1]+c*x[2]/(-a+b+c)),
"x2": simplify(a*x[0]-b*x[1]+c*x[2]/(a-b+c)),
"x3": simplify(a*x[0]+b*x[1]-c*x[2]/(a+b-c)),
"y1": simplify(-a*y[0]+b*y[1]+c*y[2]/(-a+b+c)),
"y2": simplify(a*y[0]-b*y[1]+c*y[2]/(a-b+c)),
"y3": simplify(a*y[0]+b*y[1]-c*y[2]/(a+b-c))
}
excenters = {
s[0]: Point(exc_coords["x1"], exc_coords["y1"]),
s[1]: Point(exc_coords["x2"], exc_coords["y2"]),
s[2]: Point(exc_coords["x3"], exc_coords["y3"])
}
return excenters
@property
def medians(self):
"""The medians of the triangle.
A median of a triangle is a straight line through a vertex and the
midpoint of the opposite side, and divides the triangle into two
equal areas.
Returns
=======
medians : dict
Each key is a vertex (Point) and each value is the median (Segment)
at that point.
See Also
========
sympy.geometry.point.Point.midpoint, sympy.geometry.line.Segment.midpoint
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.medians[p1]
Segment2D(Point2D(0, 0), Point2D(1/2, 1/2))
"""
s = self.sides
v = self.vertices
return {v[0]: Segment(v[0], s[1].midpoint),
v[1]: Segment(v[1], s[2].midpoint),
v[2]: Segment(v[2], s[0].midpoint)}
@property
def medial(self):
"""The medial triangle of the triangle.
The triangle which is formed from the midpoints of the three sides.
Returns
=======
medial : Triangle
See Also
========
sympy.geometry.line.Segment.midpoint
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.medial
Triangle(Point2D(1/2, 0), Point2D(1/2, 1/2), Point2D(0, 1/2))
"""
s = self.sides
return Triangle(s[0].midpoint, s[1].midpoint, s[2].midpoint)
@property
def nine_point_circle(self):
"""The nine-point circle of the triangle.
        The nine-point circle is the circumcircle of the medial triangle; it
        passes through the feet of the altitudes and the midpoints of the
        segments connecting the vertices to the orthocenter.
Returns
=======
nine_point_circle : Circle
See also
========
sympy.geometry.line.Segment.midpoint
sympy.geometry.polygon.Triangle.medial
sympy.geometry.polygon.Triangle.orthocenter
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.nine_point_circle
Circle(Point2D(1/4, 1/4), sqrt(2)/4)
"""
return Circle(*self.medial.vertices)
@property
def eulerline(self):
"""The Euler line of the triangle.
The line which passes through circumcenter, centroid and orthocenter.
Returns
=======
eulerline : Line (or Point for equilateral triangles in which case all
centers coincide)
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.eulerline
Line2D(Point2D(0, 0), Point2D(1/2, 1/2))
"""
if self.is_equilateral():
return self.orthocenter
return Line(self.orthocenter, self.circumcenter)
def rad(d):
"""Return the radian value for the given degrees (pi = 180 degrees)."""
return d*pi/180
def deg(r):
"""Return the degree value for the given radians (pi = 180 degrees)."""
return r/pi*180
def _slope(d):
rv = tan(rad(d))
return rv
def _asa(d1, l, d2):
"""Return triangle having side with length l on the x-axis."""
xy = Line((0, 0), slope=_slope(d1)).intersection(
Line((l, 0), slope=_slope(180 - d2)))[0]
return Triangle((0, 0), (l, 0), xy)
def _sss(l1, l2, l3):
"""Return triangle having side of length l1 on the x-axis."""
c1 = Circle((0, 0), l3)
c2 = Circle((l1, 0), l2)
inter = [a for a in c1.intersection(c2) if a.y.is_nonnegative]
if not inter:
return None
pt = inter[0]
return Triangle((0, 0), (l1, 0), pt)
def _sas(l1, d, l2):
"""Return triangle having side with length l2 on the x-axis."""
p1 = Point(0, 0)
p2 = Point(l2, 0)
p3 = Point(cos(rad(d))*l1, sin(rad(d))*l1)
return Triangle(p1, p2, p3)
"""Recurrence Operators"""
from __future__ import print_function, division
from sympy import symbols, Symbol, S
from sympy.printing import sstr
from sympy.core.sympify import sympify
def RecurrenceOperators(base, generator):
"""
Returns an Algebra of Recurrence Operators and the operator for
shifting i.e. the `Sn` operator.
The first argument needs to be the base polynomial ring for the algebra
and the second argument must be a generator which can be either a
noncommutative Symbol or a string.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy import symbols
>>> from sympy.holonomic.recurrence import RecurrenceOperators
>>> n = symbols('n', integer=True)
>>> R, Sn = RecurrenceOperators(ZZ.old_poly_ring(n), 'Sn')
"""
ring = RecurrenceOperatorAlgebra(base, generator)
return (ring, ring.shift_operator)
class RecurrenceOperatorAlgebra(object):
"""
A Recurrence Operator Algebra is a set of noncommutative polynomials
    in the intermediate `Sn` and coefficients in a base ring A. It follows the
commutation rule:
Sn * a(n) = a(n + 1) * Sn
This class represents a Recurrence Operator Algebra and serves as the parent ring
for Recurrence Operators.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy import symbols
>>> from sympy.holonomic.recurrence import RecurrenceOperators
>>> n = symbols('n', integer=True)
>>> R, Sn = RecurrenceOperators(ZZ.old_poly_ring(n), 'Sn')
>>> R
Univariate Recurrence Operator Algebra in intermediate Sn over the base ring
ZZ[n]
See Also
========
RecurrenceOperator
"""
def __init__(self, base, generator):
# the base ring for the algebra
self.base = base
# the operator representing shift i.e. `Sn`
self.shift_operator = RecurrenceOperator(
[base.zero, base.one], self)
if generator is None:
self.gen_symbol = symbols('Sn', commutative=False)
else:
if isinstance(generator, str):
self.gen_symbol = symbols(generator, commutative=False)
elif isinstance(generator, Symbol):
self.gen_symbol = generator
def __str__(self):
string = 'Univariate Recurrence Operator Algebra in intermediate '\
+ sstr(self.gen_symbol) + ' over the base ring ' + \
(self.base).__str__()
return string
__repr__ = __str__
def __eq__(self, other):
if self.base == other.base and self.gen_symbol == other.gen_symbol:
return True
else:
return False
def _add_lists(list1, list2):
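    # add two coefficient lists elementwise; the tail of the longer list is kept unchanged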
if len(list1) <= len(list2):
sol = [a + b for a, b in zip(list1, list2)] + list2[len(list1):]
else:
sol = [a + b for a, b in zip(list1, list2)] + list1[len(list2):]
return sol
class RecurrenceOperator(object):
"""
The Recurrence Operators are defined by a list of polynomials
in the base ring and the parent ring of the Operator.
Takes a list of polynomials for each power of Sn and the
parent ring which must be an instance of RecurrenceOperatorAlgebra.
A Recurrence Operator can be created easily using
the operator `Sn`. See examples below.
Examples
========
>>> from sympy.holonomic.recurrence import RecurrenceOperator, RecurrenceOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> n = symbols('n', integer=True)
>>> R, Sn = RecurrenceOperators(ZZ.old_poly_ring(n),'Sn')
>>> RecurrenceOperator([0, 1, n**2], R)
(1)Sn + (n**2)Sn**2
>>> Sn*n
(n + 1)Sn
>>> n*Sn*n + 1 - Sn**2*n
(1) + (n**2 + n)Sn + (-n - 2)Sn**2
See Also
========
    RecurrenceOperatorAlgebra
"""
_op_priority = 20
def __init__(self, list_of_poly, parent):
# the parent ring for this operator
        # must be a RecurrenceOperatorAlgebra object
self.parent = parent
# sequence of polynomials in n for each power of Sn
# represents the operator
# convert the expressions into ring elements using from_sympy
if isinstance(list_of_poly, list):
for i, j in enumerate(list_of_poly):
if isinstance(j, int):
list_of_poly[i] = self.parent.base.from_sympy(S(j))
elif not isinstance(j, self.parent.base.dtype):
list_of_poly[i] = self.parent.base.from_sympy(j)
self.listofpoly = list_of_poly
self.order = len(self.listofpoly) - 1
def __mul__(self, other):
"""
Multiplies two Operators and returns another
RecurrenceOperator instance using the commutation rule
Sn * a(n) = a(n + 1) * Sn
"""
listofself = self.listofpoly
base = self.parent.base
if not isinstance(other, RecurrenceOperator):
if not isinstance(other, self.parent.base.dtype):
listofother = [self.parent.base.from_sympy(sympify(other))]
else:
listofother = [other]
else:
listofother = other.listofpoly
# multiply a polynomial `b` with a list of polynomials
def _mul_dmp_diffop(b, listofother):
if isinstance(listofother, list):
sol = []
for i in listofother:
sol.append(i * b)
return sol
else:
return [b * listofother]
sol = _mul_dmp_diffop(listofself[0], listofother)
# compute Sn^i * b
def _mul_Sni_b(b):
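            # Sn * b(n) = b(n + 1) * Sn: shift the argument of every coefficient and
            # prepend a zero so each term moves up one power of Sn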
sol = [base.zero]
if isinstance(b, list):
for i in b:
j = base.to_sympy(i).subs(base.gens[0], base.gens[0] + S.One)
sol.append(base.from_sympy(j))
else:
j = b.subs(base.gens[0], base.gens[0] + S.One)
sol.append(base.from_sympy(j))
return sol
for i in range(1, len(listofself)):
# find Sn^i * b in ith iteration
listofother = _mul_Sni_b(listofother)
# solution = solution + listofself[i] * (Sn^i * b)
sol = _add_lists(sol, _mul_dmp_diffop(listofself[i], listofother))
return RecurrenceOperator(sol, self.parent)
def __rmul__(self, other):
if not isinstance(other, RecurrenceOperator):
if isinstance(other, int):
other = S(other)
if not isinstance(other, self.parent.base.dtype):
other = (self.parent.base).from_sympy(other)
sol = []
for j in self.listofpoly:
sol.append(other * j)
return RecurrenceOperator(sol, self.parent)
def __add__(self, other):
if isinstance(other, RecurrenceOperator):
sol = _add_lists(self.listofpoly, other.listofpoly)
return RecurrenceOperator(sol, self.parent)
else:
if isinstance(other, int):
other = S(other)
list_self = self.listofpoly
if not isinstance(other, self.parent.base.dtype):
list_other = [((self.parent).base).from_sympy(other)]
else:
list_other = [other]
sol = []
sol.append(list_self[0] + list_other[0])
sol += list_self[1:]
return RecurrenceOperator(sol, self.parent)
__radd__ = __add__
def __sub__(self, other):
return self + (-1) * other
def __rsub__(self, other):
return (-1) * self + other
def __pow__(self, n):
if n == 1:
return self
if n == 0:
return RecurrenceOperator([self.parent.base.one], self.parent)
# if self is `Sn`
if self.listofpoly == self.parent.shift_operator.listofpoly:
sol = []
for i in range(0, n):
sol.append(self.parent.base.zero)
sol.append(self.parent.base.one)
return RecurrenceOperator(sol, self.parent)
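        # the general case: compute the power by repeated squaring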
else:
if n % 2 == 1:
powreduce = self**(n - 1)
return powreduce * self
elif n % 2 == 0:
powreduce = self**(n / 2)
return powreduce * powreduce
def __str__(self):
listofpoly = self.listofpoly
print_str = ''
for i, j in enumerate(listofpoly):
if j == self.parent.base.zero:
continue
if i == 0:
print_str += '(' + sstr(j) + ')'
continue
if print_str:
print_str += ' + '
if i == 1:
print_str += '(' + sstr(j) + ')Sn'
continue
print_str += '(' + sstr(j) + ')' + 'Sn**' + sstr(i)
return print_str
__repr__ = __str__
def __eq__(self, other):
if isinstance(other, RecurrenceOperator):
if self.listofpoly == other.listofpoly and self.parent == other.parent:
return True
else:
return False
else:
if self.listofpoly[0] == other:
for i in self.listofpoly[1:]:
if i is not self.parent.base.zero:
return False
return True
else:
return False
class HolonomicSequence(object):
"""
A Holonomic Sequence is a type of sequence satisfying a linear homogeneous
    recurrence relation with polynomial coefficients. Alternatively, a sequence
is Holonomic if and only if its generating function is a Holonomic Function.
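    Examples
    ========
    A brief illustrative example, built with the shift operator ``Sn`` over
    ``ZZ[n]`` as constructed in ``RecurrenceOperators``:
    >>> from sympy.holonomic.recurrence import RecurrenceOperators, HolonomicSequence
    >>> from sympy.polys.domains import ZZ
    >>> from sympy import symbols
    >>> n = symbols('n', integer=True)
    >>> R, Sn = RecurrenceOperators(ZZ.old_poly_ring(n), 'Sn')
    >>> HolonomicSequence(Sn - 1, [1])
    HolonomicSequence((-1) + (1)Sn, n), u(0) = 1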
"""
def __init__(self, recurrence, u0=[]):
self.recurrence = recurrence
if not isinstance(u0, list):
self.u0 = [u0]
else:
self.u0 = u0
if len(self.u0) == 0:
self._have_init_cond = False
else:
self._have_init_cond = True
self.n = recurrence.parent.base.gens[0]
    def __repr__(self):
        str_sol = 'HolonomicSequence(%s, %s)' % ((self.recurrence).__repr__(), sstr(self.n))
        if not self._have_init_cond:
            return str_sol
        else:
            cond_str = ''
            for i, u in enumerate(self.u0):
                cond_str += ', u(%s) = %s' % (sstr(i), sstr(u))
            sol = str_sol + cond_str
            return sol
__str__ = __repr__
def __eq__(self, other):
if self.recurrence == other.recurrence:
if self.n == other.n:
if self._have_init_cond and other._have_init_cond:
if self.u0 == other.u0:
return True
else:
return False
else:
return True
else:
return False
else:
return False
""" Linear Solver for Holonomic Functions"""
from __future__ import print_function, division
from sympy.core import S
from sympy.matrices.common import ShapeError
from sympy.matrices.dense import MutableDenseMatrix
class NewMatrix(MutableDenseMatrix):
"""
Supports elements which can't be Sympified.
See docstrings in sympy/matrices/matrices.py
"""
@staticmethod
def _sympify(a):
return a
def row_join(self, rhs):
        # Allows you to build a matrix even if it is a null matrix
if not self:
return type(self)(rhs)
if self.rows != rhs.rows:
raise ShapeError(
"`self` and `rhs` must have the same number of rows.")
newmat = NewMatrix.zeros(self.rows, self.cols + rhs.cols)
newmat[:, :self.cols] = self
newmat[:, self.cols:] = rhs
return type(self)(newmat)
def col_join(self, bott):
        # Allows you to build a matrix even if it is a null matrix
if not self:
return type(self)(bott)
if self.cols != bott.cols:
raise ShapeError(
"`self` and `bott` must have the same number of columns.")
newmat = NewMatrix.zeros(self.rows + bott.rows, self.cols)
newmat[:self.rows, :] = self
newmat[self.rows:, :] = bott
return type(self)(newmat)
def gauss_jordan_solve(self, b, freevar=False):
from sympy.matrices import Matrix
aug = self.hstack(self.copy(), b.copy())
row, col = aug[:, :-1].shape
# solve by reduced row echelon form
A, pivots = aug.rref()
A, v = A[:, :-1], A[:, -1]
pivots = list(filter(lambda p: p < col, pivots))
rank = len(pivots)
# Bring to block form
permutation = Matrix(range(col)).T
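        # append the column indices so they get swapped along with the columns;
        # used later to undo the permutation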
A = A.vstack(A, permutation)
for i, c in enumerate(pivots):
A.col_swap(i, c)
A, permutation = A[:-1, :], A[-1, :]
# check for existence of solutions
# rank of aug Matrix should be equal to rank of coefficient matrix
if not v[rank:, 0].is_zero_matrix:
raise ValueError("Linear system has no solution")
# Get index of free symbols (free parameters)
        free_var_index = permutation[len(pivots):] # non-pivot columns are free variables
# Free parameters
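        # (every free parameter is fixed to 1, so a single particular solution is returned)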
tau = NewMatrix([S.One for k in range(col - rank)]).reshape(col - rank, 1)
# Full parametric solution
V = A[:rank, rank:]
vt = v[:rank, 0]
free_sol = tau.vstack(vt - V*tau, tau)
# Undo permutation
sol = NewMatrix.zeros(col, 1)
for k, v in enumerate(free_sol):
sol[permutation[k], 0] = v
if freevar:
return sol, tau, free_var_index
else:
return sol, tau
"""
This module implements Holonomic Functions and
various operations on them.
"""
from __future__ import print_function, division
from sympy import (Symbol, S, Dummy, Order, rf, I,
solve, limit, Float, nsimplify, gamma)
from sympy.core.compatibility import ordered
from sympy.core.numbers import NaN, Infinity, NegativeInfinity
from sympy.core.sympify import sympify
from sympy.functions.combinatorial.factorials import binomial, factorial
from sympy.functions.elementary.exponential import exp_polar, exp
from sympy.functions.special.hyper import hyper, meijerg
from sympy.integrals import meijerint
from sympy.matrices import Matrix
from sympy.polys.rings import PolyElement
from sympy.polys.fields import FracElement
from sympy.polys.domains import QQ, RR
from sympy.polys.polyclasses import DMF
from sympy.polys.polyroots import roots
from sympy.polys.polytools import Poly
from sympy.printing import sstr
from sympy.simplify.hyperexpand import hyperexpand
from .linearsolver import NewMatrix
from .recurrence import HolonomicSequence, RecurrenceOperator, RecurrenceOperators
from .holonomicerrors import (NotPowerSeriesError, NotHyperSeriesError,
SingularityError, NotHolonomicError)
def DifferentialOperators(base, generator):
r"""
This function is used to create annihilators using ``Dx``.
Returns an Algebra of Differential Operators also called Weyl Algebra
and the operator for differentiation i.e. the ``Dx`` operator.
Parameters
==========
base:
Base polynomial ring for the algebra.
The base polynomial ring is the ring of polynomials in :math:`x` that
will appear as coefficients in the operators.
generator:
Generator of the algebra which can
be either a noncommutative ``Symbol`` or a string. e.g. "Dx" or "D".
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.abc import x
>>> from sympy.holonomic.holonomic import DifferentialOperators
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
>>> R
Univariate Differential Operator Algebra in intermediate Dx over the base ring ZZ[x]
>>> Dx*x
(1) + (x)*Dx
"""
ring = DifferentialOperatorAlgebra(base, generator)
return (ring, ring.derivative_operator)
class DifferentialOperatorAlgebra(object):
r"""
An Ore Algebra is a set of noncommutative polynomials in the
intermediate ``Dx`` and coefficients in a base polynomial ring :math:`A`.
It follows the commutation rule:
.. math ::
        Dx a = \sigma(a) Dx + \delta(a)
    for :math:`a \in A`, where :math:`\sigma: A \rightarrow A` is an endomorphism
    and :math:`\delta: A \rightarrow A` is a skew-derivation, i.e.
    :math:`\delta(ab) = \delta(a) b + \sigma(a) \delta(b)`.
    If one takes sigma to be the identity map and delta to be the standard derivative,
    this becomes the algebra of Differential Operators, also called
    a Weyl Algebra, i.e. an algebra whose elements are Differential Operators.
This class represents a Weyl Algebra and serves as the parent ring for
Differential Operators.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy import symbols
>>> from sympy.holonomic.holonomic import DifferentialOperators
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
>>> R
Univariate Differential Operator Algebra in intermediate Dx over the base ring
ZZ[x]
See Also
========
DifferentialOperator
"""
def __init__(self, base, generator):
# the base polynomial ring for the algebra
self.base = base
# the operator representing differentiation i.e. `Dx`
self.derivative_operator = DifferentialOperator(
[base.zero, base.one], self)
if generator is None:
self.gen_symbol = Symbol('Dx', commutative=False)
else:
if isinstance(generator, str):
self.gen_symbol = Symbol(generator, commutative=False)
elif isinstance(generator, Symbol):
self.gen_symbol = generator
def __str__(self):
string = 'Univariate Differential Operator Algebra in intermediate '\
+ sstr(self.gen_symbol) + ' over the base ring ' + \
(self.base).__str__()
return string
__repr__ = __str__
def __eq__(self, other):
if self.base == other.base and self.gen_symbol == other.gen_symbol:
return True
else:
return False
class DifferentialOperator(object):
"""
    Differential Operators are elements of a Weyl Algebra. The Operators
are defined by a list of polynomials in the base ring and the
parent ring of the Operator i.e. the algebra it belongs to.
Takes a list of polynomials for each power of ``Dx`` and the
parent ring which must be an instance of DifferentialOperatorAlgebra.
A Differential Operator can be created easily using
the operator ``Dx``. See examples below.
Examples
========
>>> from sympy.holonomic.holonomic import DifferentialOperator, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
>>> DifferentialOperator([0, 1, x**2], R)
(1)*Dx + (x**2)*Dx**2
>>> (x*Dx*x + 1 - Dx**2)**2
(2*x**2 + 2*x + 1) + (4*x**3 + 2*x**2 - 4)*Dx + (x**4 - 6*x - 2)*Dx**2 + (-2*x**2)*Dx**3 + (1)*Dx**4
See Also
========
DifferentialOperatorAlgebra
"""
_op_priority = 20
def __init__(self, list_of_poly, parent):
"""
Parameters
==========
list_of_poly:
List of polynomials belonging to the base ring of the algebra.
parent:
Parent algebra of the operator.
"""
# the parent ring for this operator
        # must be a DifferentialOperatorAlgebra object
self.parent = parent
base = self.parent.base
self.x = base.gens[0] if isinstance(base.gens[0], Symbol) else base.gens[0][0]
# sequence of polynomials in x for each power of Dx
# the list should not have trailing zeroes
# represents the operator
# convert the expressions into ring elements using from_sympy
for i, j in enumerate(list_of_poly):
if not isinstance(j, base.dtype):
list_of_poly[i] = base.from_sympy(sympify(j))
else:
list_of_poly[i] = base.from_sympy(base.to_sympy(j))
self.listofpoly = list_of_poly
# highest power of `Dx`
self.order = len(self.listofpoly) - 1
def __mul__(self, other):
"""
Multiplies two DifferentialOperator and returns another
DifferentialOperator instance using the commutation rule
Dx*a = a*Dx + a'
"""
listofself = self.listofpoly
if not isinstance(other, DifferentialOperator):
if not isinstance(other, self.parent.base.dtype):
listofother = [self.parent.base.from_sympy(sympify(other))]
else:
listofother = [other]
else:
listofother = other.listofpoly
# multiplies a polynomial `b` with a list of polynomials
def _mul_dmp_diffop(b, listofother):
if isinstance(listofother, list):
sol = []
for i in listofother:
sol.append(i * b)
return sol
else:
return [b * listofother]
sol = _mul_dmp_diffop(listofself[0], listofother)
# compute Dx^i * b
def _mul_Dxi_b(b):
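            # Dx * b = b * Dx + b': shift the coefficient list up by one power of Dx (sol1)
            # and add the termwise derivative (sol2)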
sol1 = [self.parent.base.zero]
sol2 = []
if isinstance(b, list):
for i in b:
sol1.append(i)
sol2.append(i.diff())
else:
sol1.append(self.parent.base.from_sympy(b))
sol2.append(self.parent.base.from_sympy(b).diff())
return _add_lists(sol1, sol2)
for i in range(1, len(listofself)):
# find Dx^i * b in ith iteration
listofother = _mul_Dxi_b(listofother)
# solution = solution + listofself[i] * (Dx^i * b)
sol = _add_lists(sol, _mul_dmp_diffop(listofself[i], listofother))
return DifferentialOperator(sol, self.parent)
def __rmul__(self, other):
if not isinstance(other, DifferentialOperator):
if not isinstance(other, self.parent.base.dtype):
other = (self.parent.base).from_sympy(sympify(other))
sol = []
for j in self.listofpoly:
sol.append(other * j)
return DifferentialOperator(sol, self.parent)
def __add__(self, other):
if isinstance(other, DifferentialOperator):
sol = _add_lists(self.listofpoly, other.listofpoly)
return DifferentialOperator(sol, self.parent)
else:
list_self = self.listofpoly
if not isinstance(other, self.parent.base.dtype):
list_other = [((self.parent).base).from_sympy(sympify(other))]
else:
list_other = [other]
sol = []
sol.append(list_self[0] + list_other[0])
sol += list_self[1:]
return DifferentialOperator(sol, self.parent)
__radd__ = __add__
def __sub__(self, other):
return self + (-1) * other
def __rsub__(self, other):
return (-1) * self + other
def __neg__(self):
return -1 * self
def __div__(self, other):
return self * (S.One / other)
def __truediv__(self, other):
return self.__div__(other)
def __pow__(self, n):
if n == 1:
return self
if n == 0:
return DifferentialOperator([self.parent.base.one], self.parent)
# if self is `Dx`
if self.listofpoly == self.parent.derivative_operator.listofpoly:
sol = []
for i in range(0, n):
sol.append(self.parent.base.zero)
sol.append(self.parent.base.one)
return DifferentialOperator(sol, self.parent)
# the general case
else:
if n % 2 == 1:
powreduce = self**(n - 1)
return powreduce * self
elif n % 2 == 0:
powreduce = self**(n / 2)
return powreduce * powreduce
def __str__(self):
listofpoly = self.listofpoly
print_str = ''
for i, j in enumerate(listofpoly):
if j == self.parent.base.zero:
continue
if i == 0:
print_str += '(' + sstr(j) + ')'
continue
if print_str:
print_str += ' + '
if i == 1:
print_str += '(' + sstr(j) + ')*%s' %(self.parent.gen_symbol)
continue
print_str += '(' + sstr(j) + ')' + '*%s**' %(self.parent.gen_symbol) + sstr(i)
return print_str
__repr__ = __str__
def __eq__(self, other):
if isinstance(other, DifferentialOperator):
if self.listofpoly == other.listofpoly and self.parent == other.parent:
return True
else:
return False
else:
if self.listofpoly[0] == other:
for i in self.listofpoly[1:]:
if i is not self.parent.base.zero:
return False
return True
else:
return False
def is_singular(self, x0):
"""
Checks if the differential equation is singular at x0.
"""
base = self.parent.base
return x0 in roots(base.to_sympy(self.listofpoly[-1]), self.x)
class HolonomicFunction(object):
r"""
A Holonomic Function is a solution to a linear homogeneous ordinary
differential equation with polynomial coefficients. This differential
equation can also be represented by an annihilator i.e. a Differential
Operator ``L`` such that :math:`L.f = 0`. For uniqueness of these functions,
initial conditions can also be provided along with the annihilator.
Holonomic functions have closure properties and thus forms a ring.
Given two Holonomic Functions f and g, their sum, product,
integral and derivative is also a Holonomic Function.
For ordinary points initial condition should be a vector of values of
the derivatives i.e. :math:`[y(x_0), y'(x_0), y''(x_0) ... ]`.
For regular singular points initial conditions can also be provided in this
format:
    :math:`{s0: [C_0, C_1, ...], s1: [C^1_0, C^1_1, ...], ...}`
    where s0, s1, ... are the roots of the indicial equation and the vectors
    :math:`[C_0, C_1, ...], [C^1_0, C^1_1, ...], ...` are the corresponding initial
    terms of the associated power series. See Examples below.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols, S
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> p = HolonomicFunction(Dx - 1, x, 0, [1]) # e^x
>>> q = HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]) # sin(x)
>>> p + q # annihilator of e^x + sin(x)
HolonomicFunction((-1) + (1)*Dx + (-1)*Dx**2 + (1)*Dx**3, x, 0, [1, 2, 1])
>>> p * q # annihilator of e^x * sin(x)
HolonomicFunction((2) + (-2)*Dx + (1)*Dx**2, x, 0, [0, 1])
    An example of initial conditions for regular singular points;
    here the indicial equation has only one root, `1/2`.
>>> HolonomicFunction(-S(1)/2 + x*Dx, x, 0, {S(1)/2: [1]})
HolonomicFunction((-1/2) + (x)*Dx, x, 0, {1/2: [1]})
>>> HolonomicFunction(-S(1)/2 + x*Dx, x, 0, {S(1)/2: [1]}).to_expr()
sqrt(x)
To plot a Holonomic Function, one can use `.evalf()` for numerical
computation. Here's an example on `sin(x)**2/x` using numpy and matplotlib.
>>> import sympy.holonomic # doctest: +SKIP
>>> from sympy import var, sin # doctest: +SKIP
>>> import matplotlib.pyplot as plt # doctest: +SKIP
>>> import numpy as np # doctest: +SKIP
>>> var("x") # doctest: +SKIP
>>> r = np.linspace(1, 5, 100) # doctest: +SKIP
>>> y = sympy.holonomic.expr_to_holonomic(sin(x)**2/x, x0=1).evalf(r) # doctest: +SKIP
>>> plt.plot(r, y, label="holonomic function") # doctest: +SKIP
>>> plt.show() # doctest: +SKIP
"""
_op_priority = 20
def __init__(self, annihilator, x, x0=0, y0=None):
"""
Parameters
==========
annihilator:
Annihilator of the Holonomic Function, represented by a
`DifferentialOperator` object.
x:
Variable of the function.
x0:
The point at which initial conditions are stored.
Generally an integer.
y0:
The initial condition. The proper format for the initial condition
            is described in the class docstring. To make the function unique, the
            length of the vector `y0` should be equal to or greater than the
            order of the differential equation.
"""
# initial condition
self.y0 = y0
# the point for initial conditions, default is zero.
self.x0 = x0
# differential operator L such that L.f = 0
self.annihilator = annihilator
self.x = x
def __str__(self):
if self._have_init_cond():
str_sol = 'HolonomicFunction(%s, %s, %s, %s)' % (str(self.annihilator),\
sstr(self.x), sstr(self.x0), sstr(self.y0))
else:
str_sol = 'HolonomicFunction(%s, %s)' % (str(self.annihilator),\
sstr(self.x))
return str_sol
__repr__ = __str__
def unify(self, other):
"""
        Unifies the base polynomial rings of the two given Holonomic
Functions.
"""
R1 = self.annihilator.parent.base
R2 = other.annihilator.parent.base
dom1 = R1.dom
dom2 = R2.dom
if R1 == R2:
return (self, other)
R = (dom1.unify(dom2)).old_poly_ring(self.x)
newparent, _ = DifferentialOperators(R, str(self.annihilator.parent.gen_symbol))
sol1 = [R1.to_sympy(i) for i in self.annihilator.listofpoly]
sol2 = [R2.to_sympy(i) for i in other.annihilator.listofpoly]
sol1 = DifferentialOperator(sol1, newparent)
sol2 = DifferentialOperator(sol2, newparent)
sol1 = HolonomicFunction(sol1, self.x, self.x0, self.y0)
sol2 = HolonomicFunction(sol2, other.x, other.x0, other.y0)
return (sol1, sol2)
def is_singularics(self):
"""
        Returns True if the function has a singular initial condition
        in the dictionary format.
        Returns False if the function has an ordinary initial condition
        in the list format.
Returns None for all other cases.
"""
if isinstance(self.y0, dict):
return True
elif isinstance(self.y0, list):
return False
def _have_init_cond(self):
"""
        Checks if the function has an initial condition.
"""
return bool(self.y0)
def _singularics_to_ord(self):
"""
Converts a singular initial condition to ordinary if possible.
"""
a = list(self.y0)[0]
b = self.y0[a]
if len(self.y0) == 1 and a == int(a) and a > 0:
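            # the series x**a * (b[0] + b[1]*(x - x0) + ...) has vanishing derivatives below
            # order a, and y^(a + i)(x0) = b[i] * (a + i)! for the remaining ones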
y0 = []
a = int(a)
for i in range(a):
y0.append(S.Zero)
y0 += [j * factorial(a + i) for i, j in enumerate(b)]
return HolonomicFunction(self.annihilator, self.x, self.x0, y0)
def __add__(self, other):
# if the ground domains are different
if self.annihilator.parent.base != other.annihilator.parent.base:
a, b = self.unify(other)
return a + b
deg1 = self.annihilator.order
deg2 = other.annihilator.order
dim = max(deg1, deg2)
R = self.annihilator.parent.base
K = R.get_field()
rowsself = [self.annihilator]
rowsother = [other.annihilator]
gen = self.annihilator.parent.derivative_operator
# constructing annihilators up to order dim
for i in range(dim - deg1):
diff1 = (gen * rowsself[-1])
rowsself.append(diff1)
for i in range(dim - deg2):
diff2 = (gen * rowsother[-1])
rowsother.append(diff2)
row = rowsself + rowsother
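        # a common left multiple of the two annihilators L1 and L2 kills both
        # functions, and hence their sum; the homogeneous system below searches for
        # rational-function coefficients c_i, d_j with
        # sum(c_i * Dx^i * L1) + sum(d_j * Dx^j * L2) == 0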
# constructing the matrix of the ansatz
r = []
for expr in row:
p = []
for i in range(dim + 1):
if i >= len(expr.listofpoly):
p.append(0)
else:
p.append(K.new(expr.listofpoly[i].rep))
r.append(p)
r = NewMatrix(r).transpose()
homosys = [[S.Zero for q in range(dim + 1)]]
homosys = NewMatrix(homosys).transpose()
# solving the linear system using gauss jordan solver
solcomp = r.gauss_jordan_solve(homosys)
sol = solcomp[0]
        # if a solution is not obtained, increase the order by 1 in each
# iteration
while sol.is_zero_matrix:
dim += 1
diff1 = (gen * rowsself[-1])
rowsself.append(diff1)
diff2 = (gen * rowsother[-1])
rowsother.append(diff2)
row = rowsself + rowsother
r = []
for expr in row:
p = []
for i in range(dim + 1):
if i >= len(expr.listofpoly):
p.append(S.Zero)
else:
p.append(K.new(expr.listofpoly[i].rep))
r.append(p)
r = NewMatrix(r).transpose()
homosys = [[S.Zero for q in range(dim + 1)]]
homosys = NewMatrix(homosys).transpose()
solcomp = r.gauss_jordan_solve(homosys)
sol = solcomp[0]
# taking only the coefficients needed to multiply with `self`
        # can also be done the other way by taking the R.H.S. and multiplying with
# `other`
sol = sol[:dim + 1 - deg1]
sol1 = _normalize(sol, self.annihilator.parent)
# annihilator of the solution
sol = sol1 * (self.annihilator)
sol = _normalize(sol.listofpoly, self.annihilator.parent, negative=False)
if not (self._have_init_cond() and other._have_init_cond()):
return HolonomicFunction(sol, self.x)
# both the functions have ordinary initial conditions
if self.is_singularics() == False and other.is_singularics() == False:
# directly add the corresponding value
if self.x0 == other.x0:
                # try to extend the initial conditions
# using the annihilator
y1 = _extend_y0(self, sol.order)
y2 = _extend_y0(other, sol.order)
y0 = [a + b for a, b in zip(y1, y2)]
return HolonomicFunction(sol, self.x, self.x0, y0)
else:
                # change the initial conditions to the same point
selfat0 = self.annihilator.is_singular(0)
otherat0 = other.annihilator.is_singular(0)
if self.x0 == 0 and not selfat0 and not otherat0:
return self + other.change_ics(0)
elif other.x0 == 0 and not selfat0 and not otherat0:
return self.change_ics(0) + other
else:
selfatx0 = self.annihilator.is_singular(self.x0)
otheratx0 = other.annihilator.is_singular(self.x0)
if not selfatx0 and not otheratx0:
return self + other.change_ics(self.x0)
else:
return self.change_ics(other.x0) + other
if self.x0 != other.x0:
return HolonomicFunction(sol, self.x)
# if the functions have singular_ics
y1 = None
y2 = None
if self.is_singularics() == False and other.is_singularics() == True:
# convert the ordinary initial condition to singular.
_y0 = [j / factorial(i) for i, j in enumerate(self.y0)]
y1 = {S.Zero: _y0}
y2 = other.y0
elif self.is_singularics() == True and other.is_singularics() == False:
_y0 = [j / factorial(i) for i, j in enumerate(other.y0)]
y1 = self.y0
y2 = {S.Zero: _y0}
elif self.is_singularics() == True and other.is_singularics() == True:
y1 = self.y0
y2 = other.y0
# computing singular initial condition for the result
# taking union of the series terms of both functions
y0 = {}
for i in y1:
# add corresponding initial terms if the power
            # on `x` is the same
if i in y2:
y0[i] = [a + b for a, b in zip(y1[i], y2[i])]
else:
y0[i] = y1[i]
for i in y2:
            if i not in y1:
y0[i] = y2[i]
return HolonomicFunction(sol, self.x, self.x0, y0)
def integrate(self, limits, initcond=False):
"""
Integrates the given holonomic function.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(Dx - 1, x, 0, [1]).integrate((x, 0, x)) # e^x - 1
HolonomicFunction((-1)*Dx + (1)*Dx**2, x, 0, [0, 1])
>>> HolonomicFunction(Dx**2 + 1, x, 0, [1, 0]).integrate((x, 0, x))
HolonomicFunction((1)*Dx + (1)*Dx**3, x, 0, [0, 1, 0])
"""
# to get the annihilator, just multiply by Dx from right
D = self.annihilator.parent.derivative_operator
        # if the function has initial conditions in the series format
if self.is_singularics() == True:
r = self._singularics_to_ord()
if r:
return r.integrate(limits, initcond=initcond)
# computing singular initial condition for the function
# produced after integration.
y0 = {}
for i in self.y0:
c = self.y0[i]
c2 = []
for j in range(len(c)):
if c[j] == 0:
c2.append(S.Zero)
# if power on `x` is -1, the integration becomes log(x)
# TODO: Implement this case
elif i + j + 1 == 0:
raise NotImplementedError("logarithmic terms in the series are not supported")
else:
c2.append(c[j] / S(i + j + 1))
y0[i + 1] = c2
if hasattr(limits, "__iter__"):
raise NotImplementedError("Definite integration for singular initial conditions")
return HolonomicFunction(self.annihilator * D, self.x, self.x0, y0)
# if no initial conditions are available for the function
if not self._have_init_cond():
if initcond:
return HolonomicFunction(self.annihilator * D, self.x, self.x0, [S.Zero])
return HolonomicFunction(self.annihilator * D, self.x)
# definite integral
# initial conditions for the answer will be stored at point `a`,
        # where `a` is the lower limit of the integral
if hasattr(limits, "__iter__"):
if len(limits) == 3 and limits[0] == self.x:
x0 = self.x0
a = limits[1]
b = limits[2]
definite = True
else:
definite = False
y0 = [S.Zero]
y0 += self.y0
indefinite_integral = HolonomicFunction(self.annihilator * D, self.x, self.x0, y0)
if not definite:
return indefinite_integral
# use evalf to get the values at `a`
if x0 != a:
try:
indefinite_expr = indefinite_integral.to_expr()
except (NotHyperSeriesError, NotPowerSeriesError):
indefinite_expr = None
if indefinite_expr:
lower = indefinite_expr.subs(self.x, a)
if isinstance(lower, NaN):
lower = indefinite_expr.limit(self.x, a)
else:
lower = indefinite_integral.evalf(a)
if b == self.x:
y0[0] = y0[0] - lower
return HolonomicFunction(self.annihilator * D, self.x, x0, y0)
elif S(b).is_Number:
if indefinite_expr:
upper = indefinite_expr.subs(self.x, b)
if isinstance(upper, NaN):
upper = indefinite_expr.limit(self.x, b)
else:
upper = indefinite_integral.evalf(b)
return upper - lower
# if the upper limit is `x`, the answer will be a function
if b == self.x:
return HolonomicFunction(self.annihilator * D, self.x, a, y0)
        # if the upper limit is a Number, a numerical value will be returned
elif S(b).is_Number:
try:
s = HolonomicFunction(self.annihilator * D, self.x, a,\
y0).to_expr()
indefinite = s.subs(self.x, b)
if not isinstance(indefinite, NaN):
return indefinite
else:
return s.limit(self.x, b)
except (NotHyperSeriesError, NotPowerSeriesError):
return HolonomicFunction(self.annihilator * D, self.x, a, y0).evalf(b)
return HolonomicFunction(self.annihilator * D, self.x)
def diff(self, *args, **kwargs):
r"""
Differentiation of the given Holonomic function.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]).diff().to_expr()
cos(x)
>>> HolonomicFunction(Dx - 2, x, 0, [1]).diff().to_expr()
2*exp(2*x)
See Also
========
.integrate()
"""
kwargs.setdefault('evaluate', True)
if args:
if args[0] != self.x:
return S.Zero
elif len(args) == 2:
sol = self
for i in range(args[1]):
sol = sol.diff(args[0])
return sol
ann = self.annihilator
# if the function is constant.
if ann.listofpoly[0] == ann.parent.base.zero and ann.order == 1:
return S.Zero
# if the coefficient of y in the differential equation is zero.
# a shifting is done to compute the answer in this case.
elif ann.listofpoly[0] == ann.parent.base.zero:
sol = DifferentialOperator(ann.listofpoly[1:], ann.parent)
if self._have_init_cond():
# if ordinary initial condition
if self.is_singularics() == False:
return HolonomicFunction(sol, self.x, self.x0, self.y0[1:])
# TODO: support for singular initial condition
return HolonomicFunction(sol, self.x)
else:
return HolonomicFunction(sol, self.x)
# the general algorithm
R = ann.parent.base
K = R.get_field()
seq_dmf = [K.new(i.rep) for i in ann.listofpoly]
# -y = a1*y'/a0 + a2*y''/a0 ... + an*y^n/a0
rhs = [i / seq_dmf[0] for i in seq_dmf[1:]]
rhs.insert(0, K.zero)
# differentiate both lhs and rhs
sol = _derivate_diff_eq(rhs)
# add the term y' in lhs to rhs
sol = _add_lists(sol, [K.zero, K.one])
sol = _normalize(sol[1:], self.annihilator.parent, negative=False)
if not self._have_init_cond() or self.is_singularics() == True:
return HolonomicFunction(sol, self.x)
y0 = _extend_y0(self, sol.order + 1)[1:]
return HolonomicFunction(sol, self.x, self.x0, y0)
def __eq__(self, other):
if self.annihilator == other.annihilator:
if self.x == other.x:
if self._have_init_cond() and other._have_init_cond():
if self.x0 == other.x0 and self.y0 == other.y0:
return True
else:
return False
else:
return True
else:
return False
else:
return False
def __mul__(self, other):
ann_self = self.annihilator
if not isinstance(other, HolonomicFunction):
other = sympify(other)
if other.has(self.x):
raise NotImplementedError(" Can't multiply a HolonomicFunction and expressions/functions.")
if not self._have_init_cond():
return self
else:
y0 = _extend_y0(self, ann_self.order)
y1 = []
for j in y0:
y1.append((Poly.new(j, self.x) * other).rep)
return HolonomicFunction(ann_self, self.x, self.x0, y1)
if self.annihilator.parent.base != other.annihilator.parent.base:
a, b = self.unify(other)
return a * b
ann_other = other.annihilator
list_self = []
list_other = []
a = ann_self.order
b = ann_other.order
R = ann_self.parent.base
K = R.get_field()
for j in ann_self.listofpoly:
list_self.append(K.new(j.rep))
for j in ann_other.listofpoly:
list_other.append(K.new(j.rep))
        # used to rewrite the highest derivatives via the annihilators:
        # Dx^a(f) = self_red[0]*f + ... + self_red[a - 1]*Dx^(a - 1)(f), and similarly for g
self_red = [-list_self[i] / list_self[a] for i in range(a)]
other_red = [-list_other[i] / list_other[b] for i in range(b)]
        # coeff_mul[i][j] is the coefficient of Dx^i(f)*Dx^j(g)
coeff_mul = [[S.Zero for i in range(b + 1)] for j in range(a + 1)]
coeff_mul[0][0] = S.One
# making the ansatz
lin_sys = [[coeff_mul[i][j] for i in range(a) for j in range(b)]]
homo_sys = [[S.Zero for q in range(a * b)]]
homo_sys = NewMatrix(homo_sys).transpose()
sol = (NewMatrix(lin_sys).transpose()).gauss_jordan_solve(homo_sys)
# until a non trivial solution is found
while sol[0].is_zero_matrix:
# updating the coefficients Dx^i(f).Dx^j(g) for next degree
for i in range(a - 1, -1, -1):
for j in range(b - 1, -1, -1):
coeff_mul[i][j + 1] += coeff_mul[i][j]
coeff_mul[i + 1][j] += coeff_mul[i][j]
if isinstance(coeff_mul[i][j], K.dtype):
coeff_mul[i][j] = DMFdiff(coeff_mul[i][j])
else:
coeff_mul[i][j] = coeff_mul[i][j].diff(self.x)
# reduce the terms to lower power using annihilators of f, g
for i in range(a + 1):
if not coeff_mul[i][b].is_zero:
for j in range(b):
coeff_mul[i][j] += other_red[j] * \
coeff_mul[i][b]
coeff_mul[i][b] = S.Zero
            # only up to b - 1: the j == b entries were already reduced in the previous loop
for j in range(b):
if not coeff_mul[a][j] == 0:
for i in range(a):
coeff_mul[i][j] += self_red[i] * \
coeff_mul[a][j]
coeff_mul[a][j] = S.Zero
lin_sys.append([coeff_mul[i][j] for i in range(a)
for j in range(b)])
sol = (NewMatrix(lin_sys).transpose()).gauss_jordan_solve(homo_sys)
sol_ann = _normalize(sol[0][0:], self.annihilator.parent, negative=False)
if not (self._have_init_cond() and other._have_init_cond()):
return HolonomicFunction(sol_ann, self.x)
if self.is_singularics() == False and other.is_singularics() == False:
# if both the conditions are at same point
if self.x0 == other.x0:
# try to find more initial conditions
y0_self = _extend_y0(self, sol_ann.order)
y0_other = _extend_y0(other, sol_ann.order)
# h(x0) = f(x0) * g(x0)
y0 = [y0_self[0] * y0_other[0]]
                # Leibniz rule: Dx^i(f*g) = Sum(binomial(i, j)*Dx^j(f)*Dx^(i - j)(g), (j, 0, i))
for i in range(1, min(len(y0_self), len(y0_other))):
coeff = [[0 for i in range(i + 1)] for j in range(i + 1)]
for j in range(i + 1):
for k in range(i + 1):
if j + k == i:
coeff[j][k] = binomial(i, j)
sol = 0
for j in range(i + 1):
for k in range(i + 1):
sol += coeff[j][k]* y0_self[j] * y0_other[k]
y0.append(sol)
return HolonomicFunction(sol_ann, self.x, self.x0, y0)
            # if the initial conditions are stored at different points, move them to a common point
else:
selfat0 = self.annihilator.is_singular(0)
otherat0 = other.annihilator.is_singular(0)
if self.x0 == 0 and not selfat0 and not otherat0:
return self * other.change_ics(0)
elif other.x0 == 0 and not selfat0 and not otherat0:
return self.change_ics(0) * other
else:
selfatx0 = self.annihilator.is_singular(self.x0)
otheratx0 = other.annihilator.is_singular(self.x0)
if not selfatx0 and not otheratx0:
return self * other.change_ics(self.x0)
else:
return self.change_ics(other.x0) * other
if self.x0 != other.x0:
return HolonomicFunction(sol_ann, self.x)
# if the functions have singular_ics
y1 = None
y2 = None
if self.is_singularics() == False and other.is_singularics() == True:
_y0 = [j / factorial(i) for i, j in enumerate(self.y0)]
y1 = {S.Zero: _y0}
y2 = other.y0
elif self.is_singularics() == True and other.is_singularics() == False:
_y0 = [j / factorial(i) for i, j in enumerate(other.y0)]
y1 = self.y0
y2 = {S.Zero: _y0}
elif self.is_singularics() == True and other.is_singularics() == True:
y1 = self.y0
y2 = other.y0
y0 = {}
# multiply every possible pair of the series terms
for i in y1:
for j in y2:
k = min(len(y1[i]), len(y2[j]))
c = []
for a in range(k):
s = S.Zero
for b in range(a + 1):
s += y1[i][b] * y2[j][a - b]
c.append(s)
                if i + j not in y0:
y0[i + j] = c
else:
y0[i + j] = [a + b for a, b in zip(c, y0[i + j])]
return HolonomicFunction(sol_ann, self.x, self.x0, y0)
__rmul__ = __mul__
def __sub__(self, other):
return self + other * -1
def __rsub__(self, other):
return self * -1 + other
def __neg__(self):
return -1 * self
def __div__(self, other):
return self * (S.One / other)
def __truediv__(self, other):
return self.__div__(other)
def __pow__(self, n):
if self.annihilator.order <= 1:
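            # for a first order annihilator p0 + p1*Dx, f satisfies p1*f' = -p0*f, so
            # f**n satisfies p1*(f**n)' = -n*p0*(f**n) and is annihilated by n*p0 + p1*Dx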
ann = self.annihilator
parent = ann.parent
if self.y0 is None:
y0 = None
else:
y0 = [list(self.y0)[0] ** n]
p0 = ann.listofpoly[0]
p1 = ann.listofpoly[1]
p0 = (Poly.new(p0, self.x) * n).rep
sol = [parent.base.to_sympy(i) for i in [p0, p1]]
dd = DifferentialOperator(sol, parent)
return HolonomicFunction(dd, self.x, self.x0, y0)
if n < 0:
raise NotHolonomicError("Negative Power on a Holonomic Function")
if n == 0:
Dx = self.annihilator.parent.derivative_operator
return HolonomicFunction(Dx, self.x, S.Zero, [S.One])
if n == 1:
return self
else:
if n % 2 == 1:
powreduce = self**(n - 1)
return powreduce * self
elif n % 2 == 0:
powreduce = self**(n / 2)
return powreduce * powreduce
def degree(self):
"""
Returns the highest power of `x` in the annihilator.
"""
sol = [i.degree() for i in self.annihilator.listofpoly]
return max(sol)
def composition(self, expr, *args, **kwargs):
"""
        Returns the function obtained by composing a holonomic
        function with an algebraic function. The method can't compute
        initial conditions for the result by itself, so they can
        also be provided.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(Dx - 1, x).composition(x**2, 0, [1]) # e^(x**2)
HolonomicFunction((-2*x) + (1)*Dx, x, 0, [1])
>>> HolonomicFunction(Dx**2 + 1, x).composition(x**2 - 1, 1, [1, 0])
HolonomicFunction((4*x**3) + (-1)*Dx + (x)*Dx**2, x, 1, [1, 0])
See Also
========
from_hyper()
"""
R = self.annihilator.parent
a = self.annihilator.order
diff = expr.diff(self.x)
listofpoly = self.annihilator.listofpoly
for i, j in enumerate(listofpoly):
if isinstance(j, self.annihilator.parent.base.dtype):
listofpoly[i] = self.annihilator.parent.base.to_sympy(j)
r = listofpoly[a].subs({self.x:expr})
subs = [-listofpoly[i].subs({self.x:expr}) / r for i in range (a)]
coeffs = [S.Zero for i in range(a)] # coeffs[i] == coeff of (D^i f)(a) in D^k (f(a))
coeffs[0] = S.One
system = [coeffs]
homogeneous = Matrix([[S.Zero for i in range(a)]]).transpose()
sol = S.Zero
while True:
coeffs_next = [p.diff(self.x) for p in coeffs]
for i in range(a - 1):
coeffs_next[i + 1] += (coeffs[i] * diff)
for i in range(a):
coeffs_next[i] += (coeffs[-1] * subs[i] * diff)
coeffs = coeffs_next
# check for linear relations
system.append(coeffs)
sol, taus = (Matrix(system).transpose()
).gauss_jordan_solve(homogeneous)
if sol.is_zero_matrix is not True:
break
tau = list(taus)[0]
sol = sol.subs(tau, 1)
sol = _normalize(sol[0:], R, negative=False)
# if initial conditions are given for the resulting function
if args:
return HolonomicFunction(sol, self.x, args[0], args[1])
return HolonomicFunction(sol, self.x)
def to_sequence(self, lb=True):
r"""
        Finds the recurrence relation for the coefficients in the series expansion
of the function about :math:`x_0`, where :math:`x_0` is the point at
which the initial condition is stored.
        If the point :math:`x_0` is ordinary, a solution of the form :math:`[(R, n_0)]`
        is returned, where :math:`R` is the recurrence relation and :math:`n_0` is the
smallest ``n`` for which the recurrence holds true.
If the point :math:`x_0` is regular singular, a list of solutions in
the format :math:`(R, p, n_0)` is returned, i.e. `[(R, p, n_0), ... ]`.
        Each tuple in this list represents a recurrence relation :math:`R`
associated with a root of the indicial equation ``p``. Conditions of
a different format can also be provided in this case, see the
docstring of HolonomicFunction class.
        If it is not possible to numerically compute an initial condition,
it is returned as a symbol :math:`C_j`, denoting the coefficient of
:math:`(x - x_0)^j` in the power series about :math:`x_0`.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols, S
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(Dx - 1, x, 0, [1]).to_sequence()
[(HolonomicSequence((-1) + (n + 1)Sn, n), u(0) = 1, 0)]
>>> HolonomicFunction((1 + x)*Dx**2 + Dx, x, 0, [0, 1]).to_sequence()
[(HolonomicSequence((n**2) + (n**2 + n)Sn, n), u(0) = 0, u(1) = 1, u(2) = -1/2, 2)]
>>> HolonomicFunction(-S(1)/2 + x*Dx, x, 0, {S(1)/2: [1]}).to_sequence()
[(HolonomicSequence((n), n), u(0) = 1, 1/2, 1)]
See Also
========
HolonomicFunction.series()
References
==========
.. [1] https://hal.inria.fr/inria-00070025/document
.. [2] http://www.risc.jku.at/publications/download/risc_2244/DIPLFORM.pdf
"""
if self.x0 != 0:
return self.shift_x(self.x0).to_sequence()
# check whether a power series exists if the point is singular
if self.annihilator.is_singular(self.x0):
return self._frobenius(lb=lb)
dict1 = {}
n = Symbol('n', integer=True)
dom = self.annihilator.parent.base.dom
R, _ = RecurrenceOperators(dom.old_poly_ring(n), 'Sn')
# substituting each term of the form `x^k Dx^j` in the
# annihilator, according to the formula below:
# x^k Dx^j = Sum(rf(n + 1 - k, j) * a(n + j - k) * x^n, (n, k, oo))
# for explanation see [2].
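        # e.g. with k = j = 1: x*Dx applied to Sum(a(n)*x**n, (n, 0, oo))
        # gives Sum(n*a(n)*x**n, (n, 1, oo))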
for i, j in enumerate(self.annihilator.listofpoly):
listofdmp = j.all_coeffs()
degree = len(listofdmp) - 1
for k in range(degree + 1):
coeff = listofdmp[degree - k]
if coeff == 0:
continue
if (i - k, k) in dict1:
dict1[(i - k, k)] += (dom.to_sympy(coeff) * rf(n - k + 1, i))
else:
dict1[(i - k, k)] = (dom.to_sympy(coeff) * rf(n - k + 1, i))
sol = []
keylist = [i[0] for i in dict1]
lower = min(keylist)
upper = max(keylist)
degree = self.degree()
# the recurrence relation holds for all values of
# n greater than smallest_n, i.e. n >= smallest_n
smallest_n = lower + degree
dummys = {}
eqs = []
unknowns = []
# an appropriate shift of the recurrence
for j in range(lower, upper + 1):
if j in keylist:
temp = S.Zero
for k in dict1.keys():
if k[0] == j:
temp += dict1[k].subs(n, n - lower)
sol.append(temp)
else:
sol.append(S.Zero)
# the recurrence relation
sol = RecurrenceOperator(sol, R)
# computing the initial conditions for recurrence
order = sol.order
all_roots = roots(R.base.to_sympy(sol.listofpoly[-1]), n, filter='Z')
all_roots = all_roots.keys()
if all_roots:
max_root = max(all_roots) + 1
smallest_n = max(max_root, smallest_n)
order += smallest_n
y0 = _extend_y0(self, order)
u0 = []
        # u(n) = y^(n)(0)/factorial(n)
for i, j in enumerate(y0):
u0.append(j / factorial(i))
# if sufficient conditions can't be computed then
        # try to use the series method, i.e.
        # equate the coefficients of x**k in the equation formed by
        # substituting the series into the differential equation to zero.
if len(u0) < order:
for i in range(degree):
eq = S.Zero
for j in dict1:
if i + j[0] < 0:
dummys[i + j[0]] = S.Zero
elif i + j[0] < len(u0):
dummys[i + j[0]] = u0[i + j[0]]
elif not i + j[0] in dummys:
dummys[i + j[0]] = Symbol('C_%s' %(i + j[0]))
unknowns.append(dummys[i + j[0]])
if j[1] <= i:
eq += dict1[j].subs(n, i) * dummys[i + j[0]]
eqs.append(eq)
# solve the system of equations formed
soleqs = solve(eqs, *unknowns)
if isinstance(soleqs, dict):
for i in range(len(u0), order):
if i not in dummys:
dummys[i] = Symbol('C_%s' %i)
if dummys[i] in soleqs:
u0.append(soleqs[dummys[i]])
else:
u0.append(dummys[i])
if lb:
return [(HolonomicSequence(sol, u0), smallest_n)]
return [HolonomicSequence(sol, u0)]
for i in range(len(u0), order):
if i not in dummys:
dummys[i] = Symbol('C_%s' %i)
s = False
for j in soleqs:
if dummys[i] in j:
u0.append(j[dummys[i]])
s = True
if not s:
u0.append(dummys[i])
if lb:
return [(HolonomicSequence(sol, u0), smallest_n)]
return [HolonomicSequence(sol, u0)]
def _frobenius(self, lb=True):
# compute the roots of indicial equation
indicialroots = self._indicial()
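        # Frobenius method: for each root p of the indicial equation, look for a
        # solution of the form x**p * Sum(a(n)*x**n, (n, 0, oo)) and build a
        # recurrence for the coefficients a(n).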
reals = []
compl = []
for i in ordered(indicialroots.keys()):
if i.is_real:
reals.extend([i] * indicialroots[i])
else:
a, b = i.as_real_imag()
compl.extend([(i, a, b)] * indicialroots[i])
# sort the roots for a fixed ordering of solution
compl.sort(key=lambda x : x[1])
compl.sort(key=lambda x : x[2])
reals.sort()
        # group the roots; roots that differ by an integer are put in the same group.
grp = []
for i in reals:
intdiff = False
if len(grp) == 0:
grp.append([i])
continue
for j in grp:
if int(j[0] - i) == j[0] - i:
j.append(i)
intdiff = True
break
if not intdiff:
grp.append([i])
        # True if no two roots differ by an integer, i.e.
        # each group has only one member
independent = True if all(len(i) == 1 for i in grp) else False
allpos = all(i >= 0 for i in reals)
allint = all(int(i) == i for i in reals)
# if initial conditions are provided
# then use them.
if self.is_singularics() == True:
rootstoconsider = []
for i in ordered(self.y0.keys()):
for j in ordered(indicialroots.keys()):
if j == i:
rootstoconsider.append(i)
elif allpos and allint:
rootstoconsider = [min(reals)]
elif independent:
rootstoconsider = [i[0] for i in grp] + [j[0] for j in compl]
elif not allint:
rootstoconsider = []
for i in reals:
if not int(i) == i:
rootstoconsider.append(i)
elif not allpos:
if not self._have_init_cond() or S(self.y0[0]).is_finite == False:
rootstoconsider = [min(reals)]
else:
posroots = []
for i in reals:
if i >= 0:
posroots.append(i)
rootstoconsider = [min(posroots)]
n = Symbol('n', integer=True)
dom = self.annihilator.parent.base.dom
R, _ = RecurrenceOperators(dom.old_poly_ring(n), 'Sn')
finalsol = []
char = ord('C')
for p in rootstoconsider:
dict1 = {}
for i, j in enumerate(self.annihilator.listofpoly):
listofdmp = j.all_coeffs()
degree = len(listofdmp) - 1
for k in range(degree + 1):
coeff = listofdmp[degree - k]
if coeff == 0:
continue
if (i - k, k - i) in dict1:
dict1[(i - k, k - i)] += (dom.to_sympy(coeff) * rf(n - k + 1 + p, i))
else:
dict1[(i - k, k - i)] = (dom.to_sympy(coeff) * rf(n - k + 1 + p, i))
sol = []
keylist = [i[0] for i in dict1]
lower = min(keylist)
upper = max(keylist)
degree = max([i[1] for i in dict1])
degree2 = min([i[1] for i in dict1])
smallest_n = lower + degree
dummys = {}
eqs = []
unknowns = []
for j in range(lower, upper + 1):
if j in keylist:
temp = S.Zero
for k in dict1.keys():
if k[0] == j:
temp += dict1[k].subs(n, n - lower)
sol.append(temp)
else:
sol.append(S.Zero)
# the recurrence relation
sol = RecurrenceOperator(sol, R)
# computing the initial conditions for recurrence
order = sol.order
all_roots = roots(R.base.to_sympy(sol.listofpoly[-1]), n, filter='Z')
all_roots = all_roots.keys()
if all_roots:
max_root = max(all_roots) + 1
smallest_n = max(max_root, smallest_n)
order += smallest_n
u0 = []
if self.is_singularics() == True:
u0 = self.y0[p]
elif self.is_singularics() == False and p >= 0 and int(p) == p and len(rootstoconsider) == 1:
y0 = _extend_y0(self, order + int(p))
            # u(n) = y^(n)(0)/factorial(n)
if len(y0) > int(p):
for i in range(int(p), len(y0)):
u0.append(y0[i] / factorial(i))
if len(u0) < order:
for i in range(degree2, degree):
eq = S.Zero
for j in dict1:
if i + j[0] < 0:
dummys[i + j[0]] = S.Zero
elif i + j[0] < len(u0):
dummys[i + j[0]] = u0[i + j[0]]
elif not i + j[0] in dummys:
letter = chr(char) + '_%s' %(i + j[0])
dummys[i + j[0]] = Symbol(letter)
unknowns.append(dummys[i + j[0]])
if j[1] <= i:
eq += dict1[j].subs(n, i) * dummys[i + j[0]]
eqs.append(eq)
# solve the system of equations formed
soleqs = solve(eqs, *unknowns)
if isinstance(soleqs, dict):
for i in range(len(u0), order):
if i not in dummys:
letter = chr(char) + '_%s' %i
dummys[i] = Symbol(letter)
if dummys[i] in soleqs:
u0.append(soleqs[dummys[i]])
else:
u0.append(dummys[i])
if lb:
finalsol.append((HolonomicSequence(sol, u0), p, smallest_n))
continue
else:
finalsol.append((HolonomicSequence(sol, u0), p))
continue
for i in range(len(u0), order):
if i not in dummys:
letter = chr(char) + '_%s' %i
dummys[i] = Symbol(letter)
s = False
for j in soleqs:
if dummys[i] in j:
u0.append(j[dummys[i]])
s = True
if not s:
u0.append(dummys[i])
if lb:
finalsol.append((HolonomicSequence(sol, u0), p, smallest_n))
else:
finalsol.append((HolonomicSequence(sol, u0), p))
char += 1
return finalsol
def series(self, n=6, coefficient=False, order=True, _recur=None):
r"""
Finds the power series expansion of given holonomic function about :math:`x_0`.
        A list of series might be returned if :math:`x_0` is a regular singular point
        with multiple roots of the indicial equation.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(Dx - 1, x, 0, [1]).series() # e^x
1 + x + x**2/2 + x**3/6 + x**4/24 + x**5/120 + O(x**6)
>>> HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]).series(n=8) # sin(x)
x - x**3/6 + x**5/120 - x**7/5040 + O(x**8)
See Also
========
HolonomicFunction.to_sequence()
"""
if _recur is None:
recurrence = self.to_sequence()
else:
recurrence = _recur
if isinstance(recurrence, tuple) and len(recurrence) == 2:
recurrence = recurrence[0]
constantpower = 0
elif isinstance(recurrence, tuple) and len(recurrence) == 3:
constantpower = recurrence[1]
recurrence = recurrence[0]
elif len(recurrence) == 1 and len(recurrence[0]) == 2:
recurrence = recurrence[0][0]
constantpower = 0
elif len(recurrence) == 1 and len(recurrence[0]) == 3:
constantpower = recurrence[0][1]
recurrence = recurrence[0][0]
else:
sol = []
for i in recurrence:
sol.append(self.series(_recur=i))
return sol
n = n - int(constantpower)
l = len(recurrence.u0) - 1
k = recurrence.recurrence.order
x = self.x
x0 = self.x0
seq_dmp = recurrence.recurrence.listofpoly
R = recurrence.recurrence.parent.base
K = R.get_field()
seq = []
for i, j in enumerate(seq_dmp):
seq.append(K.new(j.rep))
sub = [-seq[i] / seq[k] for i in range(k)]
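        # sub[j](m) gives the coefficients of the unrolled recurrence:
        # u(m + k) = Sum(sub[j](m) * u(m + j), (j, 0, k - 1))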
sol = [i for i in recurrence.u0]
if l + 1 >= n:
pass
else:
# use the initial conditions to find the next term
for i in range(l + 1 - k, n - k):
coeff = S.Zero
for j in range(k):
if i + j >= 0:
coeff += DMFsubs(sub[j], i) * sol[i + j]
sol.append(coeff)
if coefficient:
return sol
ser = S.Zero
for i, j in enumerate(sol):
ser += x**(i + constantpower) * j
if order:
ser += Order(x**(n + int(constantpower)), x)
if x0 != 0:
return ser.subs(x, x - x0)
return ser
def _indicial(self):
"""
Computes roots of the Indicial equation.
"""
if self.x0 != 0:
return self.shift_x(self.x0)._indicial()
list_coeff = self.annihilator.listofpoly
R = self.annihilator.parent.base
x = self.x
s = R.zero
y = R.one
def _pole_degree(poly):
root_all = roots(R.to_sympy(poly), x, filter='Z')
if 0 in root_all.keys():
return root_all[0]
else:
return 0
degree = [j.degree() for j in list_coeff]
degree = max(degree)
inf = 10 * (max(1, degree) + max(1, self.annihilator.order))
deg = lambda q: inf if q.is_zero else _pole_degree(q)
b = deg(list_coeff[0])
for j in range(1, len(list_coeff)):
b = min(b, deg(list_coeff[j]) - j)
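        # b is the minimal shift; the indicial polynomial built below is
        # s = Sum(c(i, i + b) * x*(x - 1)*...*(x - i + 1), i), where c(i, m) is
        # the coefficient of x**m in the i-th polynomial of the annihilator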
for i, j in enumerate(list_coeff):
listofdmp = j.all_coeffs()
degree = len(listofdmp) - 1
if - i - b <= 0 and degree - i - b >= 0:
s = s + listofdmp[degree - i - b] * y
y *= x - i
return roots(R.to_sympy(s), x)
def evalf(self, points, method='RK4', h=0.05, derivatives=False):
r"""
        Finds a numerical value of a holonomic function using numerical methods
        (RK4 by default). A set of points (real or complex) must be provided
which will be the path for the numerical integration.
        The path should be given as a list :math:`[x_1, x_2, \ldots, x_n]`. The numerical
        values will be computed at each point in this order
        :math:`x_1 \rightarrow x_2 \rightarrow \cdots \rightarrow x_n`.
Returns values of the function at :math:`x_1, x_2, ... x_n` in a list.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
        A straight line on the real axis from 0 to 1:
>>> r = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
Runge-Kutta 4th order on e^x from 0.1 to 1.
Exact solution at 1 is 2.71828182845905
>>> HolonomicFunction(Dx - 1, x, 0, [1]).evalf(r)
[1.10517083333333, 1.22140257085069, 1.34985849706254, 1.49182424008069,
1.64872063859684, 1.82211796209193, 2.01375162659678, 2.22553956329232,
2.45960141378007, 2.71827974413517]
Euler's method for the same
>>> HolonomicFunction(Dx - 1, x, 0, [1]).evalf(r, method='Euler')
[1.1, 1.21, 1.331, 1.4641, 1.61051, 1.771561, 1.9487171, 2.14358881,
2.357947691, 2.5937424601]
One can also observe that the value obtained using Runge-Kutta 4th order
is much more accurate than Euler's method.
"""
from sympy.holonomic.numerical import _evalf
lp = False
# if a point `b` is given instead of a mesh
if not hasattr(points, "__iter__"):
lp = True
b = S(points)
if self.x0 == b:
return _evalf(self, [b], method=method, derivatives=derivatives)[-1]
if not b.is_Number:
raise NotImplementedError
a = self.x0
if a > b:
h = -h
n = int((b - a) / h)
points = [a + h]
for i in range(n - 1):
points.append(points[-1] + h)
for i in roots(self.annihilator.parent.base.to_sympy(self.annihilator.listofpoly[-1]), self.x):
if i == self.x0 or i in points:
raise SingularityError(self, i)
if lp:
return _evalf(self, points, method=method, derivatives=derivatives)[-1]
return _evalf(self, points, method=method, derivatives=derivatives)
def change_x(self, z):
"""
        Changes only the variable of the holonomic function; for internal
        use. For composition use HolonomicFunction.composition().
"""
dom = self.annihilator.parent.base.dom
R = dom.old_poly_ring(z)
parent, _ = DifferentialOperators(R, 'Dx')
sol = []
for j in self.annihilator.listofpoly:
sol.append(R(j.rep))
sol = DifferentialOperator(sol, parent)
return HolonomicFunction(sol, z, self.x0, self.y0)
def shift_x(self, a):
"""
Substitute `x + a` for `x`.
"""
x = self.x
listaftershift = self.annihilator.listofpoly
base = self.annihilator.parent.base
sol = [base.from_sympy(base.to_sympy(i).subs(x, x + a)) for i in listaftershift]
sol = DifferentialOperator(sol, self.annihilator.parent)
x0 = self.x0 - a
if not self._have_init_cond():
return HolonomicFunction(sol, x)
return HolonomicFunction(sol, x, x0, self.y0)
def to_hyper(self, as_list=False, _recur=None):
r"""
Returns a hypergeometric function (or linear combination of them)
representing the given holonomic function.
Returns an answer of the form:
        `a_1 \cdot x^{b_1} \cdot hyper() + a_2 \cdot x^{b_2} \cdot hyper() + \cdots`
This is very useful as one can now use ``hyperexpand`` to find the
symbolic expressions/functions.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
>>> # sin(x)
>>> HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]).to_hyper()
x*hyper((), (3/2,), -x**2/4)
>>> # exp(x)
>>> HolonomicFunction(Dx - 1, x, 0, [1]).to_hyper()
hyper((), (), x)
See Also
========
from_hyper, from_meijerg
"""
if _recur is None:
recurrence = self.to_sequence()
else:
recurrence = _recur
if isinstance(recurrence, tuple) and len(recurrence) == 2:
smallest_n = recurrence[1]
recurrence = recurrence[0]
constantpower = 0
elif isinstance(recurrence, tuple) and len(recurrence) == 3:
smallest_n = recurrence[2]
constantpower = recurrence[1]
recurrence = recurrence[0]
elif len(recurrence) == 1 and len(recurrence[0]) == 2:
smallest_n = recurrence[0][1]
recurrence = recurrence[0][0]
constantpower = 0
elif len(recurrence) == 1 and len(recurrence[0]) == 3:
smallest_n = recurrence[0][2]
constantpower = recurrence[0][1]
recurrence = recurrence[0][0]
else:
sol = self.to_hyper(as_list=as_list, _recur=recurrence[0])
for i in recurrence[1:]:
sol += self.to_hyper(as_list=as_list, _recur=i)
return sol
u0 = recurrence.u0
r = recurrence.recurrence
x = self.x
x0 = self.x0
# order of the recurrence relation
m = r.order
        # when no recurrence exists and the power series has finitely many terms
if m == 0:
nonzeroterms = roots(r.parent.base.to_sympy(r.listofpoly[0]), recurrence.n, filter='R')
sol = S.Zero
for j, i in enumerate(nonzeroterms):
if i < 0 or int(i) != i:
continue
i = int(i)
if i < len(u0):
if isinstance(u0[i], (PolyElement, FracElement)):
u0[i] = u0[i].as_expr()
sol += u0[i] * x**i
else:
sol += Symbol('C_%s' %j) * x**i
if isinstance(sol, (PolyElement, FracElement)):
sol = sol.as_expr() * x**constantpower
else:
sol = sol * x**constantpower
if as_list:
if x0 != 0:
return [(sol.subs(x, x - x0), )]
return [(sol, )]
if x0 != 0:
return sol.subs(x, x - x0)
return sol
if smallest_n + m > len(u0):
raise NotImplementedError("Can't compute sufficient Initial Conditions")
# check if the recurrence represents a hypergeometric series
is_hyper = True
for i in range(1, len(r.listofpoly)-1):
if r.listofpoly[i] != r.parent.base.zero:
is_hyper = False
break
if not is_hyper:
raise NotHyperSeriesError(self, self.x0)
a = r.listofpoly[0]
b = r.listofpoly[-1]
# the constant multiple of argument of hypergeometric function
if isinstance(a.rep[0], (PolyElement, FracElement)):
c = - (S(a.rep[0].as_expr()) * m**(a.degree())) / (S(b.rep[0].as_expr()) * m**(b.degree()))
else:
c = - (S(a.rep[0]) * m**(a.degree())) / (S(b.rep[0]) * m**(b.degree()))
sol = 0
arg1 = roots(r.parent.base.to_sympy(a), recurrence.n)
arg2 = roots(r.parent.base.to_sympy(b), recurrence.n)
# iterate through the initial conditions to find
# the hypergeometric representation of the given
# function.
# The answer will be a linear combination
# of different hypergeometric series which satisfies
# the recurrence.
if as_list:
listofsol = []
for i in range(smallest_n + m):
# if the recurrence relation doesn't hold for `n = i`,
# then a Hypergeometric representation doesn't exist.
# add the algebraic term a * x**i to the solution,
# where a is u0[i]
if i < smallest_n:
if as_list:
listofsol.append(((S(u0[i]) * x**(i+constantpower)).subs(x, x-x0), ))
else:
sol += S(u0[i]) * x**i
continue
# if the coefficient u0[i] is zero, then the
            # independent hypergeometric series starting with
# x**i is not a part of the answer.
if S(u0[i]) == 0:
continue
ap = []
bq = []
# substitute m * n + i for n
for k in ordered(arg1.keys()):
ap.extend([nsimplify((i - k) / m)] * arg1[k])
for k in ordered(arg2.keys()):
bq.extend([nsimplify((i - k) / m)] * arg2[k])
# convention of (k + 1) in the denominator
if 1 in bq:
bq.remove(1)
else:
ap.append(1)
if as_list:
listofsol.append(((S(u0[i])*x**(i+constantpower)).subs(x, x-x0), (hyper(ap, bq, c*x**m)).subs(x, x-x0)))
else:
sol += S(u0[i]) * hyper(ap, bq, c * x**m) * x**i
if as_list:
return listofsol
sol = sol * x**constantpower
if x0 != 0:
return sol.subs(x, x - x0)
return sol
def to_expr(self):
"""
Converts a Holonomic Function back to elementary functions.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols, S
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(x**2*Dx**2 + x*Dx + (x**2 - 1), x, 0, [0, S(1)/2]).to_expr()
besselj(1, x)
>>> HolonomicFunction((1 + x)*Dx**3 + Dx**2, x, 0, [1, 1, 1]).to_expr()
x*log(x + 1) + log(x + 1) + 1
"""
return hyperexpand(self.to_hyper()).simplify()
def change_ics(self, b, lenics=None):
"""
Changes the point `x0` to `b` for initial conditions.
Examples
========
>>> from sympy.holonomic import expr_to_holonomic
>>> from sympy import symbols, sin, cos, exp
>>> x = symbols('x')
>>> expr_to_holonomic(sin(x)).change_ics(1)
HolonomicFunction((1) + (1)*Dx**2, x, 1, [sin(1), cos(1)])
>>> expr_to_holonomic(exp(x)).change_ics(2)
HolonomicFunction((-1) + (1)*Dx, x, 2, [exp(2)])
"""
symbolic = True
if lenics is None and len(self.y0) > self.annihilator.order:
lenics = len(self.y0)
dom = self.annihilator.parent.base.domain
try:
sol = expr_to_holonomic(self.to_expr(), x=self.x, x0=b, lenics=lenics, domain=dom)
except (NotPowerSeriesError, NotHyperSeriesError):
symbolic = False
if symbolic and sol.x0 == b:
return sol
y0 = self.evalf(b, derivatives=True)
return HolonomicFunction(self.annihilator, self.x, b, y0)
def to_meijerg(self):
"""
Returns a linear combination of Meijer G-functions.
Examples
========
>>> from sympy.holonomic import expr_to_holonomic
>>> from sympy import sin, cos, hyperexpand, log, symbols
>>> x = symbols('x')
>>> hyperexpand(expr_to_holonomic(cos(x) + sin(x)).to_meijerg())
sin(x) + cos(x)
>>> hyperexpand(expr_to_holonomic(log(x)).to_meijerg()).simplify()
log(x)
See Also
========
to_hyper()
"""
# convert to hypergeometric first
rep = self.to_hyper(as_list=True)
sol = S.Zero
for i in rep:
if len(i) == 1:
sol += i[0]
elif len(i) == 2:
sol += i[0] * _hyper_to_meijerg(i[1])
return sol
def from_hyper(func, x0=0, evalf=False):
r"""
Converts a hypergeometric function to holonomic.
``func`` is the Hypergeometric Function and ``x0`` is the point at
which initial conditions are required.
Examples
========
>>> from sympy.holonomic.holonomic import from_hyper, DifferentialOperators
>>> from sympy import symbols, hyper, S
>>> x = symbols('x')
>>> from_hyper(hyper([], [S(3)/2], x**2/4))
HolonomicFunction((-x) + (2)*Dx + (x)*Dx**2, x, 1, [sinh(1), -sinh(1) + cosh(1)])
"""
a = func.ap
b = func.bq
z = func.args[2]
x = z.atoms(Symbol).pop()
R, Dx = DifferentialOperators(QQ.old_poly_ring(x), 'Dx')
# generalized hypergeometric differential equation
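    # with theta = x*Dx the standard equation is
    # (theta*prod(theta + b_i - 1) - x*prod(theta + a_i)) y = 0; dividing by x
    # (and changing sign) gives the operator r1 - r2 constructed below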
r1 = 1
for i in range(len(a)):
r1 = r1 * (x * Dx + a[i])
r2 = Dx
for i in range(len(b)):
r2 = r2 * (x * Dx + b[i] - 1)
sol = r1 - r2
simp = hyperexpand(func)
if isinstance(simp, Infinity) or isinstance(simp, NegativeInfinity):
return HolonomicFunction(sol, x).composition(z)
def _find_conditions(simp, x, x0, order, evalf=False):
y0 = []
for i in range(order):
if evalf:
val = simp.subs(x, x0).evalf()
else:
val = simp.subs(x, x0)
# return None if it is Infinite or NaN
if val.is_finite is False or isinstance(val, NaN):
return None
y0.append(val)
simp = simp.diff(x)
return y0
# if the function is known symbolically
if not isinstance(simp, hyper):
y0 = _find_conditions(simp, x, x0, sol.order)
while not y0:
            # if values don't exist at 0, then try to find initial
            # conditions at 1. If they don't exist at 1 either,
            # try 2 and so on.
x0 += 1
y0 = _find_conditions(simp, x, x0, sol.order)
return HolonomicFunction(sol, x).composition(z, x0, y0)
if isinstance(simp, hyper):
x0 = 1
# use evalf if the function can't be simplified
y0 = _find_conditions(simp, x, x0, sol.order, evalf)
while not y0:
x0 += 1
y0 = _find_conditions(simp, x, x0, sol.order, evalf)
return HolonomicFunction(sol, x).composition(z, x0, y0)
return HolonomicFunction(sol, x).composition(z)
def from_meijerg(func, x0=0, evalf=False, initcond=True, domain=QQ):
"""
Converts a Meijer G-function to Holonomic.
``func`` is the G-Function and ``x0`` is the point at
which initial conditions are required.
Examples
========
>>> from sympy.holonomic.holonomic import from_meijerg, DifferentialOperators
>>> from sympy import symbols, meijerg, S
>>> x = symbols('x')
>>> from_meijerg(meijerg(([], []), ([S(1)/2], [0]), x**2/4))
HolonomicFunction((1) + (1)*Dx**2, x, 0, [0, 1/sqrt(pi)])
"""
a = func.ap
b = func.bq
n = len(func.an)
m = len(func.bm)
p = len(a)
z = func.args[2]
x = z.atoms(Symbol).pop()
R, Dx = DifferentialOperators(domain.old_poly_ring(x), 'Dx')
# compute the differential equation satisfied by the
# Meijer G-function.
mnp = (-1)**(m + n - p)
r1 = x * mnp
for i in range(len(a)):
r1 *= x * Dx + 1 - a[i]
r2 = 1
for i in range(len(b)):
r2 *= x * Dx - b[i]
sol = r1 - r2
if not initcond:
return HolonomicFunction(sol, x).composition(z)
simp = hyperexpand(func)
if isinstance(simp, Infinity) or isinstance(simp, NegativeInfinity):
return HolonomicFunction(sol, x).composition(z)
def _find_conditions(simp, x, x0, order, evalf=False):
y0 = []
for i in range(order):
if evalf:
val = simp.subs(x, x0).evalf()
else:
val = simp.subs(x, x0)
if val.is_finite is False or isinstance(val, NaN):
return None
y0.append(val)
simp = simp.diff(x)
return y0
# computing initial conditions
if not isinstance(simp, meijerg):
y0 = _find_conditions(simp, x, x0, sol.order)
while not y0:
x0 += 1
y0 = _find_conditions(simp, x, x0, sol.order)
return HolonomicFunction(sol, x).composition(z, x0, y0)
if isinstance(simp, meijerg):
x0 = 1
y0 = _find_conditions(simp, x, x0, sol.order, evalf)
while not y0:
x0 += 1
y0 = _find_conditions(simp, x, x0, sol.order, evalf)
return HolonomicFunction(sol, x).composition(z, x0, y0)
return HolonomicFunction(sol, x).composition(z)
x_1 = Dummy('x_1')
_lookup_table = None
domain_for_table = None
from sympy.integrals.meijerint import _mytype
def expr_to_holonomic(func, x=None, x0=0, y0=None, lenics=None, domain=None, initcond=True):
"""
Converts a function or an expression to a holonomic function.
Parameters
==========
func:
The expression to be converted.
x:
        Variable of the function.
x0:
        Point at which the initial conditions are computed.
y0:
        Optional initial conditions, to be provided if the method
        cannot compute them automatically.
lenics:
Number of terms in the initial condition. By default it is
equal to the order of the annihilator.
domain:
Ground domain for the polynomials in `x` appearing as coefficients
in the annihilator.
initcond:
        Set to False if the initial conditions should not be computed.
Examples
========
>>> from sympy.holonomic.holonomic import expr_to_holonomic
>>> from sympy import sin, exp, symbols
>>> x = symbols('x')
>>> expr_to_holonomic(sin(x))
HolonomicFunction((1) + (1)*Dx**2, x, 0, [0, 1])
>>> expr_to_holonomic(exp(x))
HolonomicFunction((-1) + (1)*Dx, x, 0, [1])
See Also
========
sympy.integrals.meijerint._rewrite1, _convert_poly_rat_alg, _create_table
"""
func = sympify(func)
syms = func.free_symbols
if not x:
if len(syms) == 1:
x= syms.pop()
else:
raise ValueError("Specify the variable for the function")
elif x in syms:
syms.remove(x)
extra_syms = list(syms)
if domain is None:
if func.has(Float):
domain = RR
else:
domain = QQ
if len(extra_syms) != 0:
domain = domain[extra_syms].get_field()
# try to convert if the function is polynomial or rational
solpoly = _convert_poly_rat_alg(func, x, x0=x0, y0=y0, lenics=lenics, domain=domain, initcond=initcond)
if solpoly:
return solpoly
# create the lookup table
global _lookup_table, domain_for_table
if not _lookup_table:
domain_for_table = domain
_lookup_table = {}
_create_table(_lookup_table, domain=domain)
elif domain != domain_for_table:
domain_for_table = domain
_lookup_table = {}
_create_table(_lookup_table, domain=domain)
# use the table directly to convert to Holonomic
if func.is_Function:
f = func.subs(x, x_1)
t = _mytype(f, x_1)
if t in _lookup_table:
l = _lookup_table[t]
sol = l[0][1].change_x(x)
else:
sol = _convert_meijerint(func, x, initcond=False, domain=domain)
if not sol:
raise NotImplementedError
if y0:
sol.y0 = y0
if y0 or not initcond:
sol.x0 = x0
return sol
if not lenics:
lenics = sol.annihilator.order
_y0 = _find_conditions(func, x, x0, lenics)
while not _y0:
x0 += 1
_y0 = _find_conditions(func, x, x0, lenics)
return HolonomicFunction(sol.annihilator, x, x0, _y0)
if y0 or not initcond:
sol = sol.composition(func.args[0])
if y0:
sol.y0 = y0
sol.x0 = x0
return sol
if not lenics:
lenics = sol.annihilator.order
_y0 = _find_conditions(func, x, x0, lenics)
while not _y0:
x0 += 1
_y0 = _find_conditions(func, x, x0, lenics)
return sol.composition(func.args[0], x0, _y0)
# iterate through the expression recursively
args = func.args
f = func.func
from sympy.core import Add, Mul, Pow
sol = expr_to_holonomic(args[0], x=x, initcond=False, domain=domain)
if f is Add:
for i in range(1, len(args)):
sol += expr_to_holonomic(args[i], x=x, initcond=False, domain=domain)
elif f is Mul:
for i in range(1, len(args)):
sol *= expr_to_holonomic(args[i], x=x, initcond=False, domain=domain)
elif f is Pow:
sol = sol**args[1]
sol.x0 = x0
if not sol:
raise NotImplementedError
if y0:
sol.y0 = y0
if y0 or not initcond:
return sol
if sol.y0:
return sol
if not lenics:
lenics = sol.annihilator.order
if sol.annihilator.is_singular(x0):
r = sol._indicial()
l = list(r)
if len(r) == 1 and r[l[0]] == S.One:
r = l[0]
g = func / (x - x0)**r
singular_ics = _find_conditions(g, x, x0, lenics)
singular_ics = [j / factorial(i) for i, j in enumerate(singular_ics)]
y0 = {r:singular_ics}
return HolonomicFunction(sol.annihilator, x, x0, y0)
_y0 = _find_conditions(func, x, x0, lenics)
while not _y0:
x0 += 1
_y0 = _find_conditions(func, x, x0, lenics)
return HolonomicFunction(sol.annihilator, x, x0, _y0)
## Some helper functions ##
def _normalize(list_of, parent, negative=True):
"""
Normalize a given annihilator
"""
num = []
denom = []
base = parent.base
K = base.get_field()
lcm_denom = base.from_sympy(S.One)
list_of_coeff = []
# convert polynomials to the elements of associated
# fraction field
for i, j in enumerate(list_of):
if isinstance(j, base.dtype):
list_of_coeff.append(K.new(j.rep))
elif not isinstance(j, K.dtype):
list_of_coeff.append(K.from_sympy(sympify(j)))
else:
list_of_coeff.append(j)
# corresponding numerators of the sequence of polynomials
num.append(list_of_coeff[i].numer())
# corresponding denominators
denom.append(list_of_coeff[i].denom())
# lcm of denominators in the coefficients
for i in denom:
lcm_denom = i.lcm(lcm_denom)
if negative:
lcm_denom = -lcm_denom
lcm_denom = K.new(lcm_denom.rep)
# multiply the coefficients with lcm
for i, j in enumerate(list_of_coeff):
list_of_coeff[i] = j * lcm_denom
gcd_numer = base((list_of_coeff[-1].numer() / list_of_coeff[-1].denom()).rep)
# gcd of numerators in the coefficients
for i in num:
gcd_numer = i.gcd(gcd_numer)
gcd_numer = K.new(gcd_numer.rep)
# divide all the coefficients by the gcd
for i, j in enumerate(list_of_coeff):
frac_ans = j / gcd_numer
list_of_coeff[i] = base((frac_ans.numer() / frac_ans.denom()).rep)
return DifferentialOperator(list_of_coeff, parent)
def _derivate_diff_eq(listofpoly):
"""
    Given a differential equation a0(x)y(x) + a1(x)y'(x) + ... = 0,
    where a0, a1, ... are polynomials or rational functions, the function
    returns b0, b1, b2, ... such that the differential equation
    b0(x)y(x) + b1(x)y'(x) + ... = 0 is the one obtained by differentiating
    the former equation.
"""
sol = []
a = len(listofpoly) - 1
sol.append(DMFdiff(listofpoly[0]))
for i, j in enumerate(listofpoly[1:]):
sol.append(DMFdiff(j) + listofpoly[i])
sol.append(listofpoly[a])
return sol
def _hyper_to_meijerg(func):
"""
Converts a `hyper` to meijerg.
"""
ap = func.ap
bq = func.bq
ispoly = any(i <= 0 and int(i) == i for i in ap)
if ispoly:
return hyperexpand(func)
z = func.args[2]
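    # standard identity (the polynomial case is handled above):
    # hyper(ap, bq, z) = prod(gamma(b))/prod(gamma(a)) *
    #     meijerg([1 - a for a in ap], [], [0], [1 - b for b in bq], -z)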
# parameters of the `meijerg` function.
an = (1 - i for i in ap)
anp = ()
bm = (S.Zero, )
bmq = (1 - i for i in bq)
k = S.One
for i in bq:
k = k * gamma(i)
for i in ap:
k = k / gamma(i)
return k * meijerg(an, anp, bm, bmq, -z)
def _add_lists(list1, list2):
"""Takes polynomial sequences of two annihilators a and b and returns
the list of polynomials of sum of a and b.
"""
if len(list1) <= len(list2):
sol = [a + b for a, b in zip(list1, list2)] + list2[len(list1):]
else:
sol = [a + b for a, b in zip(list1, list2)] + list1[len(list2):]
return sol
def _extend_y0(Holonomic, n):
"""
Tries to find more initial conditions by substituting the initial
value point in the differential equation.
"""
if Holonomic.annihilator.is_singular(Holonomic.x0) or Holonomic.is_singularics() == True:
return Holonomic.y0
annihilator = Holonomic.annihilator
a = annihilator.order
listofpoly = []
y0 = Holonomic.y0
R = annihilator.parent.base
K = R.get_field()
for i, j in enumerate(annihilator.listofpoly):
if isinstance(j, annihilator.parent.base.dtype):
listofpoly.append(K.new(j.rep))
if len(y0) < a or n <= len(y0):
return y0
else:
list_red = [-listofpoly[i] / listofpoly[a]
for i in range(a)]
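        # The ODE gives y^(a) = Sum(list_red[i]*y^(i), (i, 0, a - 1)); evaluating
        # at x0 yields the next derivative, and differentiating the relation
        # (via _derivate_diff_eq) produces the higher derivatives in turn.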
if len(y0) > a:
y1 = [y0[i] for i in range(a)]
else:
y1 = [i for i in y0]
for i in range(n - a):
sol = 0
for a, b in zip(y1, list_red):
r = DMFsubs(b, Holonomic.x0)
if not getattr(r, 'is_finite', True):
return y0
if isinstance(r, (PolyElement, FracElement)):
r = r.as_expr()
sol += a * r
y1.append(sol)
list_red = _derivate_diff_eq(list_red)
return y0 + y1[len(y0):]
def DMFdiff(frac):
# differentiate a DMF object represented as p/q
if not isinstance(frac, DMF):
return frac.diff()
K = frac.ring
p = K.numer(frac)
q = K.denom(frac)
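    # quotient rule: (p/q)' = (p'*q - p*q')/q**2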
sol_num = - p * q.diff() + q * p.diff()
sol_denom = q**2
return K((sol_num.rep, sol_denom.rep))
def DMFsubs(frac, x0, mpm=False):
# substitute the point x0 in DMF object of the form p/q
if not isinstance(frac, DMF):
return frac
p = frac.num
q = frac.den
sol_p = S.Zero
sol_q = S.Zero
if mpm:
from mpmath import mp
for i, j in enumerate(reversed(p)):
if mpm:
j = sympify(j)._to_mpmath(mp.prec)
sol_p += j * x0**i
for i, j in enumerate(reversed(q)):
if mpm:
j = sympify(j)._to_mpmath(mp.prec)
sol_q += j * x0**i
if isinstance(sol_p, (PolyElement, FracElement)):
sol_p = sol_p.as_expr()
if isinstance(sol_q, (PolyElement, FracElement)):
sol_q = sol_q.as_expr()
return sol_p / sol_q
def _convert_poly_rat_alg(func, x, x0=0, y0=None, lenics=None, domain=QQ, initcond=True):
"""
Converts polynomials, rationals and algebraic functions to holonomic.
"""
ispoly = func.is_polynomial()
if not ispoly:
israt = func.is_rational_function()
else:
israt = True
if not (ispoly or israt):
basepoly, ratexp = func.as_base_exp()
if basepoly.is_polynomial() and ratexp.is_Number:
if isinstance(ratexp, Float):
ratexp = nsimplify(ratexp)
m, n = ratexp.p, ratexp.q
is_alg = True
else:
is_alg = False
else:
is_alg = True
if not (ispoly or israt or is_alg):
return None
R = domain.old_poly_ring(x)
_, Dx = DifferentialOperators(R, 'Dx')
# if the function is constant
if not func.has(x):
return HolonomicFunction(Dx, x, 0, [func])
if ispoly:
# differential equation satisfied by polynomial
sol = func * Dx - func.diff(x)
sol = _normalize(sol.listofpoly, sol.parent, negative=False)
is_singular = sol.is_singular(x0)
# try to compute the conditions for singular points
if y0 is None and x0 == 0 and is_singular:
rep = R.from_sympy(func).rep
for i, j in enumerate(reversed(rep)):
if j == 0:
continue
else:
coeff = list(reversed(rep))[i:]
indicial = i
break
for i, j in enumerate(coeff):
if isinstance(j, (PolyElement, FracElement)):
coeff[i] = j.as_expr()
y0 = {indicial: S(coeff)}
elif israt:
p, q = func.as_numer_denom()
# differential equation satisfied by rational
sol = p * q * Dx + p * q.diff(x) - q * p.diff(x)
sol = _normalize(sol.listofpoly, sol.parent, negative=False)
elif is_alg:
sol = n * (x / m) * Dx - 1
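        # y = x**(m/n) satisfies n*x*y' = m*y, i.e. ((n*x/m)*Dx - 1) y = 0;
        # composing with basepoly then annihilates basepoly**(m/n)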
sol = HolonomicFunction(sol, x).composition(basepoly).annihilator
is_singular = sol.is_singular(x0)
# try to compute the conditions for singular points
if y0 is None and x0 == 0 and is_singular and \
(lenics is None or lenics <= 1):
rep = R.from_sympy(basepoly).rep
for i, j in enumerate(reversed(rep)):
if j == 0:
continue
if isinstance(j, (PolyElement, FracElement)):
j = j.as_expr()
coeff = S(j)**ratexp
indicial = S(i) * ratexp
break
if isinstance(coeff, (PolyElement, FracElement)):
coeff = coeff.as_expr()
y0 = {indicial: S([coeff])}
if y0 or not initcond:
return HolonomicFunction(sol, x, x0, y0)
if not lenics:
lenics = sol.order
if sol.is_singular(x0):
r = HolonomicFunction(sol, x, x0)._indicial()
l = list(r)
if len(r) == 1 and r[l[0]] == S.One:
r = l[0]
g = func / (x - x0)**r
singular_ics = _find_conditions(g, x, x0, lenics)
singular_ics = [j / factorial(i) for i, j in enumerate(singular_ics)]
y0 = {r:singular_ics}
return HolonomicFunction(sol, x, x0, y0)
y0 = _find_conditions(func, x, x0, lenics)
while not y0:
x0 += 1
y0 = _find_conditions(func, x, x0, lenics)
return HolonomicFunction(sol, x, x0, y0)
def _convert_meijerint(func, x, initcond=True, domain=QQ):
args = meijerint._rewrite1(func, x)
if args:
fac, po, g, _ = args
else:
return None
# lists for sum of meijerg functions
fac_list = [fac * i[0] for i in g]
t = po.as_base_exp()
s = t[1] if t[0] is x else S.Zero
po_list = [s + i[1] for i in g]
G_list = [i[2] for i in g]
# finds meijerg representation of x**s * meijerg(a1 ... ap, b1 ... bq, z)
def _shift(func, s):
z = func.args[-1]
if z.has(I):
z = z.subs(exp_polar, exp)
d = z.collect(x, evaluate=False)
b = list(d)[0]
a = d[b]
t = b.as_base_exp()
b = t[1] if t[0] is x else S.Zero
r = s / b
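        # absorb x**s into the G-function: with z = a*x**b and r = s/b,
        # x**s * meijerg(params, z) = a**(-r) * meijerg(params + r, z)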
an = (i + r for i in func.args[0][0])
ap = (i + r for i in func.args[0][1])
bm = (i + r for i in func.args[1][0])
bq = (i + r for i in func.args[1][1])
return a**-r, meijerg((an, ap), (bm, bq), z)
coeff, m = _shift(G_list[0], po_list[0])
sol = fac_list[0] * coeff * from_meijerg(m, initcond=initcond, domain=domain)
# add all the meijerg functions after converting to holonomic
for i in range(1, len(G_list)):
coeff, m = _shift(G_list[i], po_list[i])
sol += fac_list[i] * coeff * from_meijerg(m, initcond=initcond, domain=domain)
return sol
def _create_table(table, domain=QQ):
"""
Creates the look-up table. For a similar implementation
see meijerint._create_lookup_table.
"""
def add(formula, annihilator, arg, x0=0, y0=[]):
"""
Adds a formula in the dictionary
"""
table.setdefault(_mytype(formula, x_1), []).append((formula,
HolonomicFunction(annihilator, arg, x0, y0)))
R = domain.old_poly_ring(x_1)
_, Dx = DifferentialOperators(R, 'Dx')
from sympy import (sin, cos, exp, log, erf, sqrt, pi,
sinh, cosh, sinc, erfc, Si, Ci, Shi, erfi)
# add some basic functions
add(sin(x_1), Dx**2 + 1, x_1, 0, [0, 1])
add(cos(x_1), Dx**2 + 1, x_1, 0, [1, 0])
add(exp(x_1), Dx - 1, x_1, 0, 1)
add(log(x_1), Dx + x_1*Dx**2, x_1, 1, [0, 1])
add(erf(x_1), 2*x_1*Dx + Dx**2, x_1, 0, [0, 2/sqrt(pi)])
add(erfc(x_1), 2*x_1*Dx + Dx**2, x_1, 0, [1, -2/sqrt(pi)])
add(erfi(x_1), -2*x_1*Dx + Dx**2, x_1, 0, [0, 2/sqrt(pi)])
add(sinh(x_1), Dx**2 - 1, x_1, 0, [0, 1])
add(cosh(x_1), Dx**2 - 1, x_1, 0, [1, 0])
add(sinc(x_1), x_1 + 2*Dx + x_1*Dx**2, x_1)
add(Si(x_1), x_1*Dx + 2*Dx**2 + x_1*Dx**3, x_1)
add(Ci(x_1), x_1*Dx + 2*Dx**2 + x_1*Dx**3, x_1)
add(Shi(x_1), -x_1*Dx + 2*Dx**2 + x_1*Dx**3, x_1)
def _find_conditions(func, x, x0, order):
y0 = []
for i in range(order):
val = func.subs(x, x0)
if isinstance(val, NaN):
val = limit(func, x, x0)
if val.is_finite is False or isinstance(val, NaN):
return None
y0.append(val)
func = func.diff(x)
return y0
|
44f87735c0247ff6ace572a3eac63e01fb2494c04c943fbae6e64e87aa55897b | from sympy.printing import pycode, ccode, fcode
from sympy.external import import_module
from sympy.utilities.decorator import doctest_depends_on
lfortran = import_module('lfortran')
cin = import_module('clang.cindex', import_kwargs = {'fromlist': ['cindex']})
if not lfortran and not cin:
class SymPyExpression(object):
def __init__(self, *args, **kwargs):
raise ImportError('Module not available.')
else:
if lfortran:
from sympy.parsing.fortran.fortran_parser import src_to_sympy
if cin:
from sympy.parsing.c.c_parser import parse_c
@doctest_depends_on(modules=['lfortran', 'clang.cindex'])
class SymPyExpression(object): # type: ignore
"""Class to store and handle SymPy expressions
This class will hold SymPy Expressions and handle the API for the
conversion to and from different languages.
        It works with the C and Fortran parsers to generate SymPy expressions
        which are stored here and which can be converted to source code in
        multiple languages.
Notes
=====
        The module and its API are currently under development and experimental;
        they may change during development.
        The Fortran parser does not support numeric assignments, so all the
        variables have been initialized to zero.
The module also depends on external dependencies:
- LFortran which is required to use the Fortran parser
- Clang which is required for the C parser
Examples
========
Example of parsing C code:
>>> from sympy.parsing.sym_expr import SymPyExpression
>>> src = '''
... int a,b;
... float c = 2, d =4;
... '''
>>> a = SymPyExpression(src, 'c')
>>> a.return_expr()
[Declaration(Variable(a, type=integer, value=0)),
Declaration(Variable(b, type=integer, value=0)),
Declaration(Variable(c, type=real, value=2.0)),
Declaration(Variable(d, type=real, value=4.0))]
        An example of variable definition:
>>> from sympy.parsing.sym_expr import SymPyExpression
>>> src2 = '''
... integer :: a, b, c, d
... real :: p, q, r, s
... '''
>>> p = SymPyExpression()
>>> p.convert_to_expr(src2, 'f')
>>> p.convert_to_c()
['int a = 0', 'int b = 0', 'int c = 0', 'int d = 0', 'double p = 0.0', 'double q = 0.0', 'double r = 0.0', 'double s = 0.0']
An example of Assignment:
>>> from sympy.parsing.sym_expr import SymPyExpression
>>> src3 = '''
... integer :: a, b, c, d, e
... d = a + b - c
... e = b * d + c * e / a
... '''
>>> p = SymPyExpression(src3, 'f')
>>> p.convert_to_python()
['a = 0', 'b = 0', 'c = 0', 'd = 0', 'e = 0', 'd = a + b - c', 'e = b*d + c*e/a']
An example of function definition:
>>> from sympy.parsing.sym_expr import SymPyExpression
>>> src = '''
... integer function f(a,b)
... integer, intent(in) :: a, b
... integer :: r
... end function
... '''
>>> a = SymPyExpression(src, 'f')
>>> a.convert_to_python()
['def f(a, b):\\n f = 0\\n r = 0\\n return f']
"""
def __init__(self, source_code = None, mode = None):
"""Constructor for SymPyExpression class"""
super(SymPyExpression, self).__init__()
if not(mode or source_code):
self._expr = []
elif mode:
if source_code:
if mode.lower() == 'f':
if lfortran:
self._expr = src_to_sympy(source_code)
else:
raise ImportError("LFortran is not installed, cannot parse Fortran code")
elif mode.lower() == 'c':
if cin:
self._expr = parse_c(source_code)
else:
raise ImportError("Clang is not installed, cannot parse C code")
else:
raise NotImplementedError(
'Parser for specified language is not implemented'
)
else:
raise ValueError('Source code not present')
else:
raise ValueError('Please specify a mode for conversion')
def convert_to_expr(self, src_code, mode):
"""Converts the given source code to sympy Expressions
Attributes
==========
src_code : String
the source code or filename of the source code that is to be
converted
mode: String
the mode to determine which parser is to be used according to
the language of the source code
f or F for Fortran
c or C for C/C++
Examples
========
>>> from sympy.parsing.sym_expr import SymPyExpression
>>> src3 = '''
... integer function f(a,b) result(r)
... integer, intent(in) :: a, b
... integer :: x
... r = a + b -x
... end function
... '''
>>> p = SymPyExpression()
>>> p.convert_to_expr(src3, 'f')
>>> p.return_expr()
[FunctionDefinition(integer, name=f, parameters=(Variable(a), Variable(b)), body=CodeBlock(
Declaration(Variable(r, type=integer, value=0)),
Declaration(Variable(x, type=integer, value=0)),
Assignment(Variable(r), a + b - x),
Return(Variable(r))
))]
"""
if mode.lower() == 'f':
if lfortran:
self._expr = src_to_sympy(src_code)
else:
raise ImportError("LFortran is not installed, cannot parse Fortran code")
elif mode.lower() == 'c':
if cin:
self._expr = parse_c(src_code)
else:
raise ImportError("Clang is not installed, cannot parse C code")
else:
raise NotImplementedError(
"Parser for specified language has not been implemented"
)
def convert_to_python(self):
"""Returns a list with python code for the sympy expressions
Examples
========
>>> from sympy.parsing.sym_expr import SymPyExpression
>>> src2 = '''
... integer :: a, b, c, d
... real :: p, q, r, s
... c = a/b
... d = c/a
... s = p/q
... r = q/p
... '''
>>> p = SymPyExpression(src2, 'f')
>>> p.convert_to_python()
['a = 0', 'b = 0', 'c = 0', 'd = 0', 'p = 0.0', 'q = 0.0', 'r = 0.0', 's = 0.0', 'c = a/b', 'd = c/a', 's = p/q', 'r = q/p']
"""
self._pycode = []
for iter in self._expr:
self._pycode.append(pycode(iter))
return self._pycode
def convert_to_c(self):
"""Returns a list with the c source code for the sympy expressions
Examples
========
>>> from sympy.parsing.sym_expr import SymPyExpression
>>> src2 = '''
... integer :: a, b, c, d
... real :: p, q, r, s
... c = a/b
... d = c/a
... s = p/q
... r = q/p
... '''
>>> p = SymPyExpression()
>>> p.convert_to_expr(src2, 'f')
>>> p.convert_to_c()
['int a = 0', 'int b = 0', 'int c = 0', 'int d = 0', 'double p = 0.0', 'double q = 0.0', 'double r = 0.0', 'double s = 0.0', 'c = a/b;', 'd = c/a;', 's = p/q;', 'r = q/p;']
"""
self._ccode = []
for iter in self._expr:
self._ccode.append(ccode(iter))
return self._ccode
def convert_to_fortran(self):
"""Returns a list with the fortran source code for the sympy expressions
Examples
========
>>> from sympy.parsing.sym_expr import SymPyExpression
>>> src2 = '''
... integer :: a, b, c, d
... real :: p, q, r, s
... c = a/b
... d = c/a
... s = p/q
... r = q/p
... '''
>>> p = SymPyExpression(src2, 'f')
>>> p.convert_to_fortran()
[' integer*4 a', ' integer*4 b', ' integer*4 c', ' integer*4 d', ' real*8 p', ' real*8 q', ' real*8 r', ' real*8 s', ' c = a/b', ' d = c/a', ' s = p/q', ' r = q/p']
"""
self._fcode = []
for iter in self._expr:
self._fcode.append(fcode(iter))
return self._fcode
def return_expr(self):
"""Returns the expression list
Examples
========
>>> from sympy.parsing.sym_expr import SymPyExpression
>>> src3 = '''
... integer function f(a,b)
... integer, intent(in) :: a, b
... integer :: r
... r = a+b
... f = r
... end function
... '''
>>> p = SymPyExpression()
>>> p.convert_to_expr(src3, 'f')
>>> p.return_expr()
[FunctionDefinition(integer, name=f, parameters=(Variable(a), Variable(b)), body=CodeBlock(
Declaration(Variable(f, type=integer, value=0)),
Declaration(Variable(r, type=integer, value=0)),
Assignment(Variable(f), Variable(r)),
Return(Variable(f))
))]
"""
return self._expr
|
6a31e72ad809a3d7abf892efb198b6346271fa4d17dab8196dfa090ee6093bad | from __future__ import print_function, division
from typing import Any, Dict, Tuple
from itertools import product
import re
from sympy import sympify
def mathematica(s, additional_translations=None):
'''Users can add their own translation dictionary
# Example
In [1]: mathematica('Log3[9]', {'Log3[x]':'log(x,3)'})
Out[1]: 2
In [2]: mathematica('F[7,5,3]', {'F[*x]':'Max(*x)*Min(*x)'})
Out[2]: 21
    A variable-length argument needs the '*' character.'''
parser = MathematicaParser(additional_translations)
return sympify(parser.parse(s))
def _deco(cls):
cls._initialize_class()
return cls
@_deco
class MathematicaParser(object):
'''An instance of this class converts a string of a basic Mathematica
    expression to SymPy style. The output is a string.'''
# left: Mathematica, right: SymPy
CORRESPONDENCES = {
'Sqrt[x]': 'sqrt(x)',
'Exp[x]': 'exp(x)',
'Log[x]': 'log(x)',
'Log[x,y]': 'log(y,x)',
'Log2[x]': 'log(x,2)',
'Log10[x]': 'log(x,10)',
'Mod[x,y]': 'Mod(x,y)',
'Max[*x]': 'Max(*x)',
'Min[*x]': 'Min(*x)',
}
    # trigonometric functions, etc.
for arc, tri, h in product(('', 'Arc'), (
'Sin', 'Cos', 'Tan', 'Cot', 'Sec', 'Csc'), ('', 'h')):
fm = arc + tri + h + '[x]'
if arc: # arc func
fs = 'a' + tri.lower() + h + '(x)'
else: # non-arc func
fs = tri.lower() + h + '(x)'
CORRESPONDENCES.update({fm: fs})
REPLACEMENTS = {
' ': '',
'^': '**',
'{': '[',
'}': ']',
}
RULES = {
# a single whitespace to '*'
'whitespace': (
re.compile(r'''
(?<=[a-zA-Z\d]) # a letter or a number
\ # a whitespace
(?=[a-zA-Z\d]) # a letter or a number
''', re.VERBOSE),
'*'),
# add omitted '*' character
'add*_1': (
re.compile(r'''
(?<=[])\d]) # ], ) or a number
# ''
(?=[(a-zA-Z]) # ( or a single letter
''', re.VERBOSE),
'*'),
# add omitted '*' character (variable letter preceding)
'add*_2': (
re.compile(r'''
(?<=[a-zA-Z]) # a letter
\( # ( as a character
(?=.) # any characters
''', re.VERBOSE),
'*('),
# convert 'Pi' to 'pi'
'Pi': (
re.compile(r'''
(?:
\A|(?<=[^a-zA-Z])
)
Pi # 'Pi' is 3.14159... in Mathematica
(?=[^a-zA-Z])
''', re.VERBOSE),
'pi'),
}
# Mathematica function name pattern
FM_PATTERN = re.compile(r'''
(?:
\A|(?<=[^a-zA-Z]) # at the top or a non-letter
)
[A-Z][a-zA-Z\d]* # Function
(?=\[) # [ as a character
''', re.VERBOSE)
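    # e.g. matches 'Log' in '3*Log[9]' but not in 'aLog[9]' (a letter precedes the name)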
# list or matrix pattern (for future usage)
ARG_MTRX_PATTERN = re.compile(r'''
\{.*\}
''', re.VERBOSE)
# regex string for function argument pattern
ARGS_PATTERN_TEMPLATE = r'''
(?:
\A|(?<=[^a-zA-Z])
)
{arguments} # model argument like x, y,...
(?=[^a-zA-Z])
'''
# will contain transformed CORRESPONDENCES dictionary
TRANSLATIONS = {} # type: Dict[Tuple[str, int], Dict[str, Any]]
# cache for a raw users' translation dictionary
cache_original = {} # type: Dict[Tuple[str, int], Dict[str, Any]]
# cache for a compiled users' translation dictionary
cache_compiled = {} # type: Dict[Tuple[str, int], Dict[str, Any]]
@classmethod
def _initialize_class(cls):
# get a transformed CORRESPONDENCES dictionary
d = cls._compile_dictionary(cls.CORRESPONDENCES)
cls.TRANSLATIONS.update(d)
def __init__(self, additional_translations=None):
self.translations = {}
# update with TRANSLATIONS (class constant)
self.translations.update(self.TRANSLATIONS)
if additional_translations is None:
additional_translations = {}
# check the latest added translations
if self.__class__.cache_original != additional_translations:
if not isinstance(additional_translations, dict):
raise ValueError('The argument must be dict type')
# get a transformed additional_translations dictionary
d = self._compile_dictionary(additional_translations)
# update cache
self.__class__.cache_original = additional_translations
self.__class__.cache_compiled = d
# merge user's own translations
self.translations.update(self.__class__.cache_compiled)
@classmethod
def _compile_dictionary(cls, dic):
# for return
d = {}
for fm, fs in dic.items():
# check function form
cls._check_input(fm)
cls._check_input(fs)
# uncover '*' hiding behind a whitespace
fm = cls._apply_rules(fm, 'whitespace')
fs = cls._apply_rules(fs, 'whitespace')
# remove whitespace(s)
fm = cls._replace(fm, ' ')
fs = cls._replace(fs, ' ')
# search Mathematica function name
m = cls.FM_PATTERN.search(fm)
# if no-hit
if m is None:
err = "'{f}' function form is invalid.".format(f=fm)
raise ValueError(err)
# get Mathematica function name like 'Log'
fm_name = m.group()
# get arguments of Mathematica function
args, end = cls._get_args(m)
# function side check. (e.g.) '2*Func[x]' is invalid.
if m.start() != 0 or end != len(fm):
err = "'{f}' function form is invalid.".format(f=fm)
raise ValueError(err)
# check the last argument's 1st character
if args[-1][0] == '*':
key_arg = '*'
else:
key_arg = len(args)
key = (fm_name, key_arg)
# convert '*x' to '\\*x' for regex
re_args = [x if x[0] != '*' else '\\' + x for x in args]
# for regex. Example: (?:(x|y|z))
xyz = '(?:(' + '|'.join(re_args) + '))'
# string for regex compile
patStr = cls.ARGS_PATTERN_TEMPLATE.format(arguments=xyz)
pat = re.compile(patStr, re.VERBOSE)
# update dictionary
d[key] = {}
d[key]['fs'] = fs # SymPy function template
d[key]['args'] = args # args are ['x', 'y'] for example
d[key]['pat'] = pat
return d
def _convert_function(self, s):
'''Parse Mathematica function to SymPy one'''
# compiled regex object
pat = self.FM_PATTERN
scanned = '' # converted string
cur = 0 # position cursor
while True:
m = pat.search(s)
if m is None:
# append the rest of string
scanned += s
break
# get Mathematica function name
fm = m.group()
# get arguments, and the end position of fm function
args, end = self._get_args(m)
# the start position of fm function
bgn = m.start()
# convert Mathematica function to SymPy one
s = self._convert_one_function(s, fm, args, bgn, end)
# update cursor
cur = bgn
# append converted part
scanned += s[:cur]
# shrink s
s = s[cur:]
return scanned
def _convert_one_function(self, s, fm, args, bgn, end):
# no variable-length argument
if (fm, len(args)) in self.translations:
key = (fm, len(args))
# x, y,... model arguments
x_args = self.translations[key]['args']
# make CORRESPONDENCES between model arguments and actual ones
d = {k: v for k, v in zip(x_args, args)}
# with variable-length argument
elif (fm, '*') in self.translations:
key = (fm, '*')
# x, y,..*args (model arguments)
x_args = self.translations[key]['args']
# make CORRESPONDENCES between model arguments and actual ones
d = {}
for i, x in enumerate(x_args):
if x[0] == '*':
d[x] = ','.join(args[i:])
break
d[x] = args[i]
# out of self.translations
else:
err = "'{f}' is out of the whitelist.".format(f=fm)
raise ValueError(err)
# template string of converted function
template = self.translations[key]['fs']
# regex pattern for x_args
pat = self.translations[key]['pat']
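        # e.g. for 'Log[2,3]' the template 'log(y,x)' becomes 'log(3,2)' once the
        # model arguments x and y are replaced by the actual ones below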
scanned = ''
cur = 0
while True:
m = pat.search(template)
if m is None:
scanned += template
break
# get model argument
x = m.group()
# get a start position of the model argument
xbgn = m.start()
# add the corresponding actual argument
scanned += template[:xbgn] + d[x]
# update cursor to the end of the model argument
cur = m.end()
# shrink template
template = template[cur:]
# update to swapped string
s = s[:bgn] + scanned + s[end:]
return s
@classmethod
def _get_args(cls, m):
'''Get arguments of a Mathematica function'''
s = m.string # whole string
anc = m.end() + 1 # pointing the first letter of arguments
        square, curly = [], []  # stacks for brackets
args = []
# current cursor
cur = anc
for i, c in enumerate(s[anc:], anc):
# extract one argument
if c == ',' and (not square) and (not curly):
args.append(s[cur:i]) # add an argument
cur = i + 1 # move cursor
# handle list or matrix (for future usage)
if c == '{':
curly.append(c)
elif c == '}':
curly.pop()
            # seek the corresponding ']', skipping irrelevant ones
if c == '[':
square.append(c)
elif c == ']':
if square:
square.pop()
else: # empty stack
args.append(s[cur:i])
break
# the next position to ']' bracket (the function end)
func_end = i + 1
return args, func_end
@classmethod
def _replace(cls, s, bef):
aft = cls.REPLACEMENTS[bef]
s = s.replace(bef, aft)
return s
@classmethod
def _apply_rules(cls, s, bef):
pat, aft = cls.RULES[bef]
return pat.sub(aft, s)
@classmethod
def _check_input(cls, s):
for bracket in (('[', ']'), ('{', '}'), ('(', ')')):
if s.count(bracket[0]) != s.count(bracket[1]):
err = "'{f}' function form is invalid.".format(f=s)
raise ValueError(err)
if '{' in s:
err = "Currently list is not supported."
raise ValueError(err)
def parse(self, s):
# input check
self._check_input(s)
# uncover '*' hiding behind a whitespace
s = self._apply_rules(s, 'whitespace')
# remove whitespace(s)
s = self._replace(s, ' ')
# add omitted '*' character
s = self._apply_rules(s, 'add*_1')
s = self._apply_rules(s, 'add*_2')
# translate function
s = self._convert_function(s)
# '^' to '**'
s = self._replace(s, '^')
# 'Pi' to 'pi'
s = self._apply_rules(s, 'Pi')
# '{', '}' to '[', ']', respectively
# s = cls._replace(s, '{') # currently list is not taken into account
# s = cls._replace(s, '}')
return s
|
d8a627b85bcb20ee26c198e770004e285d17cd500ecb6adb532ffc6252cb1b28 | # -*- coding: utf-8 -*-
r"""
Wigner, Clebsch-Gordan, Racah, and Gaunt coefficients
Collection of functions for calculating Wigner 3j, 6j, 9j,
Clebsch-Gordan, Racah as well as Gaunt coefficients exactly, all
evaluating to a rational number times the square root of a rational
number [Rasch03]_.
Please see the description of the individual functions for further
details and examples.
References
~~~~~~~~~~
.. [Regge58] 'Symmetry Properties of Clebsch-Gordan Coefficients',
T. Regge, Nuovo Cimento, Volume 10, pp. 544 (1958)
.. [Regge59] 'Symmetry Properties of Racah Coefficients',
T. Regge, Nuovo Cimento, Volume 11, pp. 116 (1959)
.. [Edmonds74] A. R. Edmonds. Angular momentum in quantum mechanics.
    Investigations in physics, no. 4.
Princeton, N.J., Princeton University Press, 1957.
.. [Rasch03] J. Rasch and A. C. H. Yu, 'Efficient Storage Scheme for
Pre-calculated Wigner 3j, 6j and Gaunt Coefficients', SIAM
J. Sci. Comput. Volume 25, Issue 4, pp. 1416-1428 (2003)
.. [Liberatodebrito82] 'FORTRAN program for the integral of three
spherical harmonics', A. Liberato de Brito,
Comput. Phys. Commun., Volume 25, pp. 81-85 (1982)
Credits and Copyright
~~~~~~~~~~~~~~~~~~~~~
This code was taken from Sage with the permission of all authors:
https://groups.google.com/forum/#!topic/sage-devel/M4NZdu-7O38
AUTHORS:
- Jens Rasch (2009-03-24): initial version for Sage
- Jens Rasch (2009-05-31): updated to sage-4.0
- Oscar Gerardo Lazo Arjona (2017-06-18): added Wigner D matrices
Copyright (C) 2008 Jens Rasch <[email protected]>
"""
from __future__ import print_function, division
from sympy import (Integer, pi, sqrt, sympify, Dummy, S, Sum, Ynm, zeros,
Function, sin, cos, exp, I, factorial, binomial,
Add, ImmutableMatrix)
# This list of precomputed factorials is needed to massively
# accelerate future calculations of the various coefficients
_Factlist = [1]
def _calc_factlist(nn):
r"""
Function calculates a list of precomputed factorials in order to
massively accelerate future calculations of the various
coefficients.
INPUT:
- ``nn`` - integer, highest factorial to be computed
OUTPUT:
list of integers -- the list of precomputed factorials
EXAMPLES:
Calculate list of factorials::
sage: from sage.functions.wigner import _calc_factlist
sage: _calc_factlist(10)
[1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880, 3628800]
"""
if nn >= len(_Factlist):
for ii in range(len(_Factlist), int(nn + 1)):
_Factlist.append(_Factlist[ii - 1] * ii)
return _Factlist[:int(nn) + 1]
def wigner_3j(j_1, j_2, j_3, m_1, m_2, m_3):
r"""
Calculate the Wigner 3j symbol `\operatorname{Wigner3j}(j_1,j_2,j_3,m_1,m_2,m_3)`.
INPUT:
- ``j_1``, ``j_2``, ``j_3``, ``m_1``, ``m_2``, ``m_3`` - integer or half integer
OUTPUT:
Rational number times the square root of a rational number.
Examples
========
>>> from sympy.physics.wigner import wigner_3j
>>> wigner_3j(2, 6, 4, 0, 0, 0)
sqrt(715)/143
>>> wigner_3j(2, 6, 4, 0, 0, 1)
0
It is an error to have arguments that are not integer or half
integer values::
sage: wigner_3j(2.1, 6, 4, 0, 0, 0)
Traceback (most recent call last):
...
ValueError: j values must be integer or half integer
sage: wigner_3j(2, 6, 4, 1, 0, -1.1)
Traceback (most recent call last):
...
ValueError: m values must be integer or half integer
NOTES:
The Wigner 3j symbol obeys the following symmetry rules:
- invariant under any permutation of the columns (with the
exception of a sign change where `J:=j_1+j_2+j_3`):
.. math::
\begin{aligned}
\operatorname{Wigner3j}(j_1,j_2,j_3,m_1,m_2,m_3)
&=\operatorname{Wigner3j}(j_3,j_1,j_2,m_3,m_1,m_2) \\
&=\operatorname{Wigner3j}(j_2,j_3,j_1,m_2,m_3,m_1) \\
&=(-1)^J \operatorname{Wigner3j}(j_3,j_2,j_1,m_3,m_2,m_1) \\
&=(-1)^J \operatorname{Wigner3j}(j_1,j_3,j_2,m_1,m_3,m_2) \\
&=(-1)^J \operatorname{Wigner3j}(j_2,j_1,j_3,m_2,m_1,m_3)
\end{aligned}
- invariant under space inflection, i.e.
.. math::
\operatorname{Wigner3j}(j_1,j_2,j_3,m_1,m_2,m_3)
=(-1)^J \operatorname{Wigner3j}(j_1,j_2,j_3,-m_1,-m_2,-m_3)
- symmetric with respect to the 72 additional symmetries based on
the work by [Regge58]_
- zero for `j_1`, `j_2`, `j_3` not fulfilling triangle relation
- zero for `m_1 + m_2 + m_3 \neq 0`
- zero for violating any one of the conditions
`j_1 \ge |m_1|`, `j_2 \ge |m_2|`, `j_3 \ge |m_3|`
ALGORITHM:
This function uses the algorithm of [Edmonds74]_ to calculate the
value of the 3j symbol exactly. Note that the formula contains
alternating sums over large factorials and is therefore unsuitable
for finite precision arithmetic and only useful for a computer
algebra system [Rasch03]_.
AUTHORS:
- Jens Rasch (2009-03-24): initial version
"""
if int(j_1 * 2) != j_1 * 2 or int(j_2 * 2) != j_2 * 2 or \
int(j_3 * 2) != j_3 * 2:
raise ValueError("j values must be integer or half integer")
if int(m_1 * 2) != m_1 * 2 or int(m_2 * 2) != m_2 * 2 or \
int(m_3 * 2) != m_3 * 2:
raise ValueError("m values must be integer or half integer")
if m_1 + m_2 + m_3 != 0:
return 0
prefid = Integer((-1) ** int(j_1 - j_2 - m_3))
m_3 = -m_3
a1 = j_1 + j_2 - j_3
if a1 < 0:
return 0
a2 = j_1 - j_2 + j_3
if a2 < 0:
return 0
a3 = -j_1 + j_2 + j_3
if a3 < 0:
return 0
if (abs(m_1) > j_1) or (abs(m_2) > j_2) or (abs(m_3) > j_3):
return 0
maxfact = max(j_1 + j_2 + j_3 + 1, j_1 + abs(m_1), j_2 + abs(m_2),
j_3 + abs(m_3))
_calc_factlist(int(maxfact))
argsqrt = Integer(_Factlist[int(j_1 + j_2 - j_3)] *
_Factlist[int(j_1 - j_2 + j_3)] *
_Factlist[int(-j_1 + j_2 + j_3)] *
_Factlist[int(j_1 - m_1)] *
_Factlist[int(j_1 + m_1)] *
_Factlist[int(j_2 - m_2)] *
_Factlist[int(j_2 + m_2)] *
_Factlist[int(j_3 - m_3)] *
_Factlist[int(j_3 + m_3)]) / \
_Factlist[int(j_1 + j_2 + j_3 + 1)]
ressqrt = sqrt(argsqrt)
if ressqrt.is_complex or ressqrt.is_infinite:
ressqrt = ressqrt.as_real_imag()[0]
imin = max(-j_3 + j_1 + m_2, -j_3 + j_2 - m_1, 0)
imax = min(j_2 + m_2, j_1 - m_1, j_1 + j_2 - j_3)
sumres = 0
for ii in range(int(imin), int(imax) + 1):
den = _Factlist[ii] * \
_Factlist[int(ii + j_3 - j_1 - m_2)] * \
_Factlist[int(j_2 + m_2 - ii)] * \
_Factlist[int(j_1 - ii - m_1)] * \
_Factlist[int(ii + j_3 - j_2 + m_1)] * \
_Factlist[int(j_1 + j_2 - j_3 - ii)]
sumres = sumres + Integer((-1) ** ii) / den
res = ressqrt * sumres * prefid
return res
def clebsch_gordan(j_1, j_2, j_3, m_1, m_2, m_3):
r"""
Calculates the Clebsch-Gordan coefficient
`\left\langle j_1 m_1 \; j_2 m_2 | j_3 m_3 \right\rangle`.
The reference for this function is [Edmonds74]_.
INPUT:
- ``j_1``, ``j_2``, ``j_3``, ``m_1``, ``m_2``, ``m_3`` - integer or half integer
OUTPUT:
Rational number times the square root of a rational number.
EXAMPLES::
>>> from sympy import S
>>> from sympy.physics.wigner import clebsch_gordan
>>> clebsch_gordan(S(3)/2, S(1)/2, 2, S(3)/2, S(1)/2, 2)
1
>>> clebsch_gordan(S(3)/2, S(1)/2, 1, S(3)/2, -S(1)/2, 1)
sqrt(3)/2
>>> clebsch_gordan(S(3)/2, S(1)/2, 1, -S(1)/2, S(1)/2, 0)
-sqrt(2)/2
NOTES:
The Clebsch-Gordan coefficient will be evaluated via its relation
to Wigner 3j symbols:
.. math::
\left\langle j_1 m_1 \; j_2 m_2 | j_3 m_3 \right\rangle
=(-1)^{j_1-j_2+m_3} \sqrt{2j_3+1}
\operatorname{Wigner3j}(j_1,j_2,j_3,m_1,m_2,-m_3)
See also the documentation on Wigner 3j symbols which exhibit much
higher symmetry relations than the Clebsch-Gordan coefficient.
AUTHORS:
- Jens Rasch (2009-03-24): initial version
"""
res = (-1) ** sympify(j_1 - j_2 + m_3) * sqrt(2 * j_3 + 1) * \
wigner_3j(j_1, j_2, j_3, m_1, m_2, -m_3)
return res
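# Illustrative cross-check of the Wigner 3j relation quoted in the NOTES above
# (a sketch using the first documented example; both sides evaluate to 1):
#     clebsch_gordan(S(3)/2, S(1)/2, 2, S(3)/2, S(1)/2, 2)
#     == (-1)**(S(3)/2 - S(1)/2 + 2) * sqrt(2*2 + 1)
#        * wigner_3j(S(3)/2, S(1)/2, 2, S(3)/2, S(1)/2, -2)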
def _big_delta_coeff(aa, bb, cc, prec=None):
r"""
Calculates the Delta coefficient of the 3 angular momenta for
Racah symbols. Also checks that the differences are of integer
value.
INPUT:
- ``aa`` - first angular momentum, integer or half integer
- ``bb`` - second angular momentum, integer or half integer
- ``cc`` - third angular momentum, integer or half integer
- ``prec`` - precision of the ``sqrt()`` calculation
OUTPUT:
    Rational number times the square root of a rational number
    (unless ``prec`` is given, in which case a real number is returned).
EXAMPLES::
sage: from sage.functions.wigner import _big_delta_coeff
sage: _big_delta_coeff(1,1,1)
1/2*sqrt(1/6)
"""
if int(aa + bb - cc) != (aa + bb - cc):
raise ValueError("j values must be integer or half integer and fulfill the triangle relation")
if int(aa + cc - bb) != (aa + cc - bb):
raise ValueError("j values must be integer or half integer and fulfill the triangle relation")
if int(bb + cc - aa) != (bb + cc - aa):
raise ValueError("j values must be integer or half integer and fulfill the triangle relation")
if (aa + bb - cc) < 0:
return 0
if (aa + cc - bb) < 0:
return 0
if (bb + cc - aa) < 0:
return 0
maxfact = max(aa + bb - cc, aa + cc - bb, bb + cc - aa, aa + bb + cc + 1)
_calc_factlist(maxfact)
argsqrt = Integer(_Factlist[int(aa + bb - cc)] *
_Factlist[int(aa + cc - bb)] *
_Factlist[int(bb + cc - aa)]) / \
Integer(_Factlist[int(aa + bb + cc + 1)])
ressqrt = sqrt(argsqrt)
if prec:
ressqrt = ressqrt.evalf(prec).as_real_imag()[0]
return ressqrt
def racah(aa, bb, cc, dd, ee, ff, prec=None):
r"""
Calculate the Racah symbol `W(a,b,c,d;e,f)`.
INPUT:
- ``a``, ..., ``f`` - integer or half integer
- ``prec`` - precision, default: ``None``. Providing a precision can
drastically speed up the calculation.
OUTPUT:
Rational number times the square root of a rational number
(if ``prec=None``), or real number if a precision is given.
Examples
========
>>> from sympy.physics.wigner import racah
>>> racah(3,3,3,3,3,3)
-1/14
NOTES:
The Racah symbol is related to the Wigner 6j symbol:
.. math::
\operatorname{Wigner6j}(j_1,j_2,j_3,j_4,j_5,j_6)
=(-1)^{j_1+j_2+j_4+j_5} W(j_1,j_2,j_5,j_4,j_3,j_6)
Please see the 6j symbol for its much richer symmetries and for
additional properties.
ALGORITHM:
This function uses the algorithm of [Edmonds74]_ to calculate the
value of the 6j symbol exactly. Note that the formula contains
alternating sums over large factorials and is therefore unsuitable
for finite precision arithmetic and only useful for a computer
algebra system [Rasch03]_.
AUTHORS:
- Jens Rasch (2009-03-24): initial version
"""
prefac = _big_delta_coeff(aa, bb, ee, prec) * \
_big_delta_coeff(cc, dd, ee, prec) * \
_big_delta_coeff(aa, cc, ff, prec) * \
_big_delta_coeff(bb, dd, ff, prec)
if prefac == 0:
return 0
imin = max(aa + bb + ee, cc + dd + ee, aa + cc + ff, bb + dd + ff)
imax = min(aa + bb + cc + dd, aa + dd + ee + ff, bb + cc + ee + ff)
maxfact = max(imax + 1, aa + bb + cc + dd, aa + dd + ee + ff,
bb + cc + ee + ff)
_calc_factlist(maxfact)
sumres = 0
for kk in range(int(imin), int(imax) + 1):
den = _Factlist[int(kk - aa - bb - ee)] * \
_Factlist[int(kk - cc - dd - ee)] * \
_Factlist[int(kk - aa - cc - ff)] * \
_Factlist[int(kk - bb - dd - ff)] * \
_Factlist[int(aa + bb + cc + dd - kk)] * \
_Factlist[int(aa + dd + ee + ff - kk)] * \
_Factlist[int(bb + cc + ee + ff - kk)]
sumres = sumres + Integer((-1) ** kk * _Factlist[kk + 1]) / den
res = prefac * sumres * (-1) ** int(aa + bb + cc + dd)
return res
def wigner_6j(j_1, j_2, j_3, j_4, j_5, j_6, prec=None):
r"""
Calculate the Wigner 6j symbol `\operatorname{Wigner6j}(j_1,j_2,j_3,j_4,j_5,j_6)`.
INPUT:
- ``j_1``, ..., ``j_6`` - integer or half integer
- ``prec`` - precision, default: ``None``. Providing a precision can
drastically speed up the calculation.
OUTPUT:
Rational number times the square root of a rational number
(if ``prec=None``), or real number if a precision is given.
Examples
========
>>> from sympy.physics.wigner import wigner_6j
>>> wigner_6j(3,3,3,3,3,3)
-1/14
>>> wigner_6j(5,5,5,5,5,5)
1/52
It is an error to have arguments that are not integer or half
integer values or do not fulfill the triangle relation::
sage: wigner_6j(2.5,2.5,2.5,2.5,2.5,2.5)
Traceback (most recent call last):
...
ValueError: j values must be integer or half integer and fulfill the triangle relation
sage: wigner_6j(0.5,0.5,1.1,0.5,0.5,1.1)
Traceback (most recent call last):
...
ValueError: j values must be integer or half integer and fulfill the triangle relation
NOTES:
The Wigner 6j symbol is related to the Racah symbol but exhibits
more symmetries as detailed below.
.. math::
\operatorname{Wigner6j}(j_1,j_2,j_3,j_4,j_5,j_6)
=(-1)^{j_1+j_2+j_4+j_5} W(j_1,j_2,j_5,j_4,j_3,j_6)
The Wigner 6j symbol obeys the following symmetry rules:
- Wigner 6j symbols are left invariant under any permutation of
the columns:
.. math::
\begin{aligned}
\operatorname{Wigner6j}(j_1,j_2,j_3,j_4,j_5,j_6)
&=\operatorname{Wigner6j}(j_3,j_1,j_2,j_6,j_4,j_5) \\
&=\operatorname{Wigner6j}(j_2,j_3,j_1,j_5,j_6,j_4) \\
&=\operatorname{Wigner6j}(j_3,j_2,j_1,j_6,j_5,j_4) \\
&=\operatorname{Wigner6j}(j_1,j_3,j_2,j_4,j_6,j_5) \\
&=\operatorname{Wigner6j}(j_2,j_1,j_3,j_5,j_4,j_6)
\end{aligned}
- They are invariant under the exchange of the upper and lower
arguments in each of any two columns, i.e.
.. math::
\operatorname{Wigner6j}(j_1,j_2,j_3,j_4,j_5,j_6)
=\operatorname{Wigner6j}(j_1,j_5,j_6,j_4,j_2,j_3)
=\operatorname{Wigner6j}(j_4,j_2,j_6,j_1,j_5,j_3)
=\operatorname{Wigner6j}(j_4,j_5,j_3,j_1,j_2,j_6)
- additional 6 symmetries [Regge59]_ giving rise to 144 symmetries
in total
- only non-zero if any triple of `j`'s fulfill a triangle relation
ALGORITHM:
This function uses the algorithm of [Edmonds74]_ to calculate the
value of the 6j symbol exactly. Note that the formula contains
alternating sums over large factorials and is therefore unsuitable
for finite precision arithmetic and only useful for a computer
algebra system [Rasch03]_.
"""
res = (-1) ** int(j_1 + j_2 + j_4 + j_5) * \
racah(j_1, j_2, j_5, j_4, j_3, j_6, prec)
return res
def wigner_9j(j_1, j_2, j_3, j_4, j_5, j_6, j_7, j_8, j_9, prec=None):
r"""
Calculate the Wigner 9j symbol
`\operatorname{Wigner9j}(j_1,j_2,j_3,j_4,j_5,j_6,j_7,j_8,j_9)`.
INPUT:
- ``j_1``, ..., ``j_9`` - integer or half integer
- ``prec`` - precision, default: ``None``. Providing a precision can
drastically speed up the calculation.
OUTPUT:
Rational number times the square root of a rational number
(if ``prec=None``), or real number if a precision is given.
Examples
========
>>> from sympy.physics.wigner import wigner_9j
>>> wigner_9j(1,1,1, 1,1,1, 1,1,0 ,prec=64) # ==1/18
0.05555555...
>>> wigner_9j(1/2,1/2,0, 1/2,3/2,1, 0,1,1 ,prec=64) # ==1/6
0.1666666...
It is an error to have arguments that are not integer or half
integer values or do not fulfill the triangle relation::
sage: wigner_9j(0.5,0.5,0.5, 0.5,0.5,0.5, 0.5,0.5,0.5,prec=64)
Traceback (most recent call last):
...
ValueError: j values must be integer or half integer and fulfill the triangle relation
sage: wigner_9j(1,1,1, 0.5,1,1.5, 0.5,1,2.5,prec=64)
Traceback (most recent call last):
...
ValueError: j values must be integer or half integer and fulfill the triangle relation
ALGORITHM:
This function uses the algorithm of [Edmonds74]_ to calculate the
    value of the 9j symbol exactly. Note that the formula contains
alternating sums over large factorials and is therefore unsuitable
for finite precision arithmetic and only useful for a computer
algebra system [Rasch03]_.
"""
imax = int(min(j_1 + j_9, j_2 + j_6, j_4 + j_8) * 2)
imin = imax % 2
sumres = 0
for kk in range(imin, int(imax) + 1, 2):
sumres = sumres + (kk + 1) * \
racah(j_1, j_2, j_9, j_6, j_3, kk / 2, prec) * \
racah(j_4, j_6, j_8, j_2, j_5, kk / 2, prec) * \
racah(j_1, j_4, j_9, j_8, j_7, kk / 2, prec)
return sumres
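# Note on the sum in ``wigner_9j`` above: with k = kk/2, the loop evaluates the
# standard contraction of three Racah symbols,
#     Wigner9j(j_1, ..., j_9) = sum_k (2*k + 1)
#         * W(j_1, j_2, j_9, j_6; j_3, k)
#         * W(j_4, j_6, j_8, j_2; j_5, k)
#         * W(j_1, j_4, j_9, j_8; j_7, k),
# where k runs in unit steps up to min(j_1 + j_9, j_2 + j_6, j_4 + j_8); terms
# violating a triangle condition vanish because the Racah symbols are zero there.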
def gaunt(l_1, l_2, l_3, m_1, m_2, m_3, prec=None):
r"""
Calculate the Gaunt coefficient.
The Gaunt coefficient is defined as the integral over three
spherical harmonics:
.. math::
\begin{aligned}
\operatorname{Gaunt}(l_1,l_2,l_3,m_1,m_2,m_3)
&=\int Y_{l_1,m_1}(\Omega)
Y_{l_2,m_2}(\Omega) Y_{l_3,m_3}(\Omega) \,d\Omega \\
&=\sqrt{\frac{(2l_1+1)(2l_2+1)(2l_3+1)}{4\pi}}
\operatorname{Wigner3j}(l_1,l_2,l_3,0,0,0)
\operatorname{Wigner3j}(l_1,l_2,l_3,m_1,m_2,m_3)
\end{aligned}
INPUT:
- ``l_1``, ``l_2``, ``l_3``, ``m_1``, ``m_2``, ``m_3`` - integer
- ``prec`` - precision, default: ``None``. Providing a precision can
drastically speed up the calculation.
OUTPUT:
Rational number times the square root of a rational number
(if ``prec=None``), or real number if a precision is given.
Examples
========
>>> from sympy.physics.wigner import gaunt
>>> gaunt(1,0,1,1,0,-1)
-1/(2*sqrt(pi))
>>> gaunt(1000,1000,1200,9,3,-12).n(64)
0.00689500421922113448...
It is an error to use non-integer values for `l` and `m`::
sage: gaunt(1.2,0,1.2,0,0,0)
Traceback (most recent call last):
...
ValueError: l values must be integer
sage: gaunt(1,0,1,1.1,0,-1.1)
Traceback (most recent call last):
...
ValueError: m values must be integer
NOTES:
The Gaunt coefficient obeys the following symmetry rules:
- invariant under any permutation of the columns
.. math::
\begin{aligned}
Y(l_1,l_2,l_3,m_1,m_2,m_3)
&=Y(l_3,l_1,l_2,m_3,m_1,m_2) \\
&=Y(l_2,l_3,l_1,m_2,m_3,m_1) \\
&=Y(l_3,l_2,l_1,m_3,m_2,m_1) \\
&=Y(l_1,l_3,l_2,m_1,m_3,m_2) \\
&=Y(l_2,l_1,l_3,m_2,m_1,m_3)
\end{aligned}
- invariant under space inflection, i.e.
.. math::
Y(l_1,l_2,l_3,m_1,m_2,m_3)
=Y(l_1,l_2,l_3,-m_1,-m_2,-m_3)
- symmetric with respect to the 72 Regge symmetries as inherited
for the `3j` symbols [Regge58]_
- zero for `l_1`, `l_2`, `l_3` not fulfilling triangle relation
- zero for violating any one of the conditions: `l_1 \ge |m_1|`,
`l_2 \ge |m_2|`, `l_3 \ge |m_3|`
- non-zero only for an even sum of the `l_i`, i.e.
`L = l_1 + l_2 + l_3 = 2n` for `n` in `\mathbb{N}`
ALGORITHM:
This function uses the algorithm of [Liberatodebrito82]_ to
calculate the value of the Gaunt coefficient exactly. Note that
the formula contains alternating sums over large factorials and is
therefore unsuitable for finite precision arithmetic and only
useful for a computer algebra system [Rasch03]_.
AUTHORS:
- Jens Rasch (2009-03-24): initial version for Sage
"""
if int(l_1) != l_1 or int(l_2) != l_2 or int(l_3) != l_3:
raise ValueError("l values must be integer")
if int(m_1) != m_1 or int(m_2) != m_2 or int(m_3) != m_3:
raise ValueError("m values must be integer")
sumL = l_1 + l_2 + l_3
bigL = sumL // 2
a1 = l_1 + l_2 - l_3
if a1 < 0:
return 0
a2 = l_1 - l_2 + l_3
if a2 < 0:
return 0
a3 = -l_1 + l_2 + l_3
if a3 < 0:
return 0
if sumL % 2:
return 0
if (m_1 + m_2 + m_3) != 0:
return 0
if (abs(m_1) > l_1) or (abs(m_2) > l_2) or (abs(m_3) > l_3):
return 0
imin = max(-l_3 + l_1 + m_2, -l_3 + l_2 - m_1, 0)
imax = min(l_2 + m_2, l_1 - m_1, l_1 + l_2 - l_3)
maxfact = max(l_1 + l_2 + l_3 + 1, imax + 1)
_calc_factlist(maxfact)
argsqrt = (2 * l_1 + 1) * (2 * l_2 + 1) * (2 * l_3 + 1) * \
_Factlist[l_1 - m_1] * _Factlist[l_1 + m_1] * _Factlist[l_2 - m_2] * \
_Factlist[l_2 + m_2] * _Factlist[l_3 - m_3] * _Factlist[l_3 + m_3] / \
(4*pi)
ressqrt = sqrt(argsqrt)
prefac = Integer(_Factlist[bigL] * _Factlist[l_2 - l_1 + l_3] *
_Factlist[l_1 - l_2 + l_3] * _Factlist[l_1 + l_2 - l_3])/ \
_Factlist[2 * bigL + 1]/ \
(_Factlist[bigL - l_1] *
_Factlist[bigL - l_2] * _Factlist[bigL - l_3])
sumres = 0
for ii in range(int(imin), int(imax) + 1):
den = _Factlist[ii] * _Factlist[ii + l_3 - l_1 - m_2] * \
_Factlist[l_2 + m_2 - ii] * _Factlist[l_1 - ii - m_1] * \
_Factlist[ii + l_3 - l_2 + m_1] * _Factlist[l_1 + l_2 - l_3 - ii]
sumres = sumres + Integer((-1) ** ii) / den
res = ressqrt * prefac * sumres * Integer((-1) ** (bigL + l_3 + m_1 - m_2))
if prec is not None:
res = res.n(prec)
return res
class Wigner3j(Function):
def doit(self, **hints):
if all(obj.is_number for obj in self.args):
return wigner_3j(*self.args)
else:
return self
def dot_rot_grad_Ynm(j, p, l, m, theta, phi):
r"""
Returns dot product of rotational gradients of spherical harmonics.
This function returns the right hand side of the following expression:
.. math ::
\vec{R}Y{_j^{p}} \cdot \vec{R}Y{_l^{m}} = (-1)^{m+p}
\sum\limits_{k=|l-j|}^{l+j}Y{_k^{m+p}} * \alpha_{l,m,j,p,k} *
\frac{1}{2} (k^2-j^2-l^2+k-j-l)
Arguments
=========
j, p, l, m .... indices in spherical harmonics (expressions or integers)
theta, phi .... angle arguments in spherical harmonics
Example
=======
>>> from sympy import symbols
>>> from sympy.physics.wigner import dot_rot_grad_Ynm
>>> theta, phi = symbols("theta phi")
>>> dot_rot_grad_Ynm(3, 2, 2, 0, theta, phi).doit()
3*sqrt(55)*Ynm(5, 2, theta, phi)/(11*sqrt(pi))
"""
j = sympify(j)
p = sympify(p)
l = sympify(l)
m = sympify(m)
theta = sympify(theta)
phi = sympify(phi)
k = Dummy("k")
def alpha(l,m,j,p,k):
return sqrt((2*l+1)*(2*j+1)*(2*k+1)/(4*pi)) * \
Wigner3j(j, l, k, S.Zero, S.Zero, S.Zero) * \
Wigner3j(j, l, k, p, m, -m-p)
return (S.NegativeOne)**(m+p) * Sum(Ynm(k, m+p, theta, phi) * alpha(l,m,j,p,k) / 2 \
*(k**2-j**2-l**2+k-j-l), (k, abs(l-j), l+j))
def wigner_d_small(J, beta):
u"""Return the small Wigner d matrix for angular momentum J.
INPUT:
- ``J`` - An integer, half-integer, or sympy symbol for the total angular
momentum of the angular momentum space being rotated.
- ``beta`` - A real number representing the Euler angle of rotation about
the so-called line of nodes. See [Edmonds74]_.
OUTPUT:
    A matrix representing the corresponding Euler angle rotation (in the basis
of eigenvectors of `J_z`).
.. math ::
\\mathcal{d}_{\\beta} = \\exp\\big( \\frac{i\\beta}{\\hbar} J_y\\big)
The components are calculated using the general form [Edmonds74]_,
equation 4.1.15.
Examples
========
>>> from sympy import Integer, symbols, pi, pprint
>>> from sympy.physics.wigner import wigner_d_small
>>> half = 1/Integer(2)
>>> beta = symbols("beta", real=True)
>>> pprint(wigner_d_small(half, beta), use_unicode=True)
⎡ ⎛β⎞ ⎛β⎞⎤
⎢cos⎜─⎟ sin⎜─⎟⎥
⎢ ⎝2⎠ ⎝2⎠⎥
⎢ ⎥
⎢ ⎛β⎞ ⎛β⎞⎥
⎢-sin⎜─⎟ cos⎜─⎟⎥
⎣ ⎝2⎠ ⎝2⎠⎦
>>> pprint(wigner_d_small(2*half, beta), use_unicode=True)
⎡ 2⎛β⎞ ⎛β⎞ ⎛β⎞ 2⎛β⎞ ⎤
⎢ cos ⎜─⎟ √2⋅sin⎜─⎟⋅cos⎜─⎟ sin ⎜─⎟ ⎥
⎢ ⎝2⎠ ⎝2⎠ ⎝2⎠ ⎝2⎠ ⎥
⎢ ⎥
⎢ ⎛β⎞ ⎛β⎞ 2⎛β⎞ 2⎛β⎞ ⎛β⎞ ⎛β⎞⎥
⎢-√2⋅sin⎜─⎟⋅cos⎜─⎟ - sin ⎜─⎟ + cos ⎜─⎟ √2⋅sin⎜─⎟⋅cos⎜─⎟⎥
⎢ ⎝2⎠ ⎝2⎠ ⎝2⎠ ⎝2⎠ ⎝2⎠ ⎝2⎠⎥
⎢ ⎥
⎢ 2⎛β⎞ ⎛β⎞ ⎛β⎞ 2⎛β⎞ ⎥
⎢ sin ⎜─⎟ -√2⋅sin⎜─⎟⋅cos⎜─⎟ cos ⎜─⎟ ⎥
⎣ ⎝2⎠ ⎝2⎠ ⎝2⎠ ⎝2⎠ ⎦
From table 4 in [Edmonds74]_
>>> pprint(wigner_d_small(half, beta).subs({beta:pi/2}), use_unicode=True)
⎡ √2 √2⎤
⎢ ── ──⎥
⎢ 2 2 ⎥
⎢ ⎥
⎢-√2 √2⎥
⎢──── ──⎥
⎣ 2 2 ⎦
>>> pprint(wigner_d_small(2*half, beta).subs({beta:pi/2}),
... use_unicode=True)
⎡ √2 ⎤
⎢1/2 ── 1/2⎥
⎢ 2 ⎥
⎢ ⎥
⎢-√2 √2 ⎥
⎢──── 0 ── ⎥
⎢ 2 2 ⎥
⎢ ⎥
⎢ -√2 ⎥
⎢1/2 ──── 1/2⎥
⎣ 2 ⎦
>>> pprint(wigner_d_small(3*half, beta).subs({beta:pi/2}),
... use_unicode=True)
⎡ √2 √6 √6 √2⎤
⎢ ── ── ── ──⎥
⎢ 4 4 4 4 ⎥
⎢ ⎥
⎢-√6 -√2 √2 √6⎥
⎢──── ──── ── ──⎥
⎢ 4 4 4 4 ⎥
⎢ ⎥
⎢ √6 -√2 -√2 √6⎥
⎢ ── ──── ──── ──⎥
⎢ 4 4 4 4 ⎥
⎢ ⎥
⎢-√2 √6 -√6 √2⎥
⎢──── ── ──── ──⎥
⎣ 4 4 4 4 ⎦
>>> pprint(wigner_d_small(4*half, beta).subs({beta:pi/2}),
... use_unicode=True)
⎡ √6 ⎤
⎢1/4 1/2 ── 1/2 1/4⎥
⎢ 4 ⎥
⎢ ⎥
⎢-1/2 -1/2 0 1/2 1/2⎥
⎢ ⎥
⎢ √6 √6 ⎥
⎢ ── 0 -1/2 0 ── ⎥
⎢ 4 4 ⎥
⎢ ⎥
⎢-1/2 1/2 0 -1/2 1/2⎥
⎢ ⎥
⎢ √6 ⎥
⎢1/4 -1/2 ── -1/2 1/4⎥
⎣ 4 ⎦
"""
M = [J-i for i in range(2*J+1)]
d = zeros(2*J+1)
for i, Mi in enumerate(M):
for j, Mj in enumerate(M):
# We get the maximum and minimum value of sigma.
sigmamax = max([-Mi-Mj, J-Mj])
sigmamin = min([0, J-Mi])
dij = sqrt(factorial(J+Mi)*factorial(J-Mi) /
factorial(J+Mj)/factorial(J-Mj))
terms = [(-1)**(J-Mi-s) *
binomial(J+Mj, J-Mi-s) *
binomial(J-Mj, s) *
cos(beta/2)**(2*s+Mi+Mj) *
sin(beta/2)**(2*J-2*s-Mj-Mi)
for s in range(sigmamin, sigmamax+1)]
d[i, j] = dij*Add(*terms)
return ImmutableMatrix(d)
def wigner_d(J, alpha, beta, gamma):
u"""Return the Wigner D matrix for angular momentum J.
INPUT:
- ``J`` - An integer, half-integer, or sympy symbol for the total angular
momentum of the angular momentum space being rotated.
- ``alpha``, ``beta``, ``gamma`` - Real numbers representing the Euler
angles of rotation about the so-called vertical, line of nodes, and
figure axes. See [Edmonds74]_.
OUTPUT:
    A matrix representing the corresponding Euler angle rotation (in the basis
of eigenvectors of `J_z`).
.. math ::
\\mathcal{D}_{\\alpha \\beta \\gamma} =
\\exp\\big( \\frac{i\\alpha}{\\hbar} J_z\\big)
\\exp\\big( \\frac{i\\beta}{\\hbar} J_y\\big)
\\exp\\big( \\frac{i\\gamma}{\\hbar} J_z\\big)
The components are calculated using the general form [Edmonds74]_,
equation 4.1.12.
Examples
========
The simplest possible example:
>>> from sympy.physics.wigner import wigner_d
>>> from sympy import Integer, symbols, pprint
>>> from sympy.physics.wigner import wigner_d_small
>>> half = 1/Integer(2)
>>> alpha, beta, gamma = symbols("alpha, beta, gamma", real=True)
>>> pprint(wigner_d(half, alpha, beta, gamma), use_unicode=True)
⎡ ⅈ⋅α ⅈ⋅γ ⅈ⋅α -ⅈ⋅γ ⎤
⎢ ─── ─── ─── ───── ⎥
⎢ 2 2 ⎛β⎞ 2 2 ⎛β⎞ ⎥
⎢ ℯ ⋅ℯ ⋅cos⎜─⎟ ℯ ⋅ℯ ⋅sin⎜─⎟ ⎥
⎢ ⎝2⎠ ⎝2⎠ ⎥
⎢ ⎥
⎢ -ⅈ⋅α ⅈ⋅γ -ⅈ⋅α -ⅈ⋅γ ⎥
⎢ ───── ─── ───── ───── ⎥
⎢ 2 2 ⎛β⎞ 2 2 ⎛β⎞⎥
⎢-ℯ ⋅ℯ ⋅sin⎜─⎟ ℯ ⋅ℯ ⋅cos⎜─⎟⎥
⎣ ⎝2⎠ ⎝2⎠⎦
"""
d = wigner_d_small(J, beta)
M = [J-i for i in range(2*J+1)]
D = [[exp(I*Mi*alpha)*d[i, j]*exp(I*Mj*gamma)
for j, Mj in enumerate(M)] for i, Mi in enumerate(M)]
return ImmutableMatrix(D)
|
ee35fe60f1c97a4238a4718b44de1a361d2a5fdaf8e60c9affb596ae04458e4a | """
This module implements Pauli algebra by subclassing Symbol. Only algebraic
properties of Pauli matrices are used (we don't use the Matrix class).
See the documentation to the class Pauli for examples.
References
~~~~~~~~~~
.. [1] https://en.wikipedia.org/wiki/Pauli_matrices
"""
from __future__ import print_function, division
from sympy import Symbol, I, Mul, Pow, Add
from sympy.physics.quantum import TensorProduct
__all__ = ['evaluate_pauli_product']
def delta(i, j):
"""
Returns 1 if i == j, else 0.
This is used in the multiplication of Pauli matrices.
Examples
========
>>> from sympy.physics.paulialgebra import delta
>>> delta(1, 1)
1
>>> delta(2, 3)
0
"""
if i == j:
return 1
else:
return 0
def epsilon(i, j, k):
"""
Return 1 if i,j,k is equal to (1,2,3), (2,3,1), or (3,1,2);
-1 if i,j,k is equal to (1,3,2), (3,2,1), or (2,1,3);
else return 0.
This is used in the multiplication of Pauli matrices.
Examples
========
>>> from sympy.physics.paulialgebra import epsilon
>>> epsilon(1, 2, 3)
1
>>> epsilon(1, 3, 2)
-1
"""
if (i, j, k) in [(1, 2, 3), (2, 3, 1), (3, 1, 2)]:
return 1
elif (i, j, k) in [(1, 3, 2), (3, 2, 1), (2, 1, 3)]:
return -1
else:
return 0
class Pauli(Symbol):
"""
The class representing algebraic properties of Pauli matrices.
The symbol used to display the Pauli matrices can be changed with an
optional parameter ``label="sigma"``. Pauli matrices with different
``label`` attributes cannot multiply together.
    If you need to multiply a Pauli matrix by a symbol or a number on the left,
    use parentheses to separate the Pauli product from the commutative factors
    (for example: 2*I*(Pauli(3)*Pauli(2))).
Another variant is to use evaluate_pauli_product function to evaluate
the product of Pauli matrices and other symbols (with commutative
multiply rules).
See Also
========
evaluate_pauli_product
Examples
========
>>> from sympy.physics.paulialgebra import Pauli
>>> Pauli(1)
sigma1
>>> Pauli(1)*Pauli(2)
I*sigma3
>>> Pauli(1)*Pauli(1)
1
>>> Pauli(3)**4
1
>>> Pauli(1)*Pauli(2)*Pauli(3)
I
>>> from sympy.physics.paulialgebra import Pauli
>>> Pauli(1, label="tau")
tau1
>>> Pauli(1)*Pauli(2, label="tau")
sigma1*tau2
>>> Pauli(1, label="tau")*Pauli(2, label="tau")
I*tau3
>>> from sympy import I
>>> I*(Pauli(2)*Pauli(3))
-sigma1
>>> from sympy.physics.paulialgebra import evaluate_pauli_product
>>> f = I*Pauli(2)*Pauli(3)
>>> f
I*sigma2*sigma3
>>> evaluate_pauli_product(f)
-sigma1
"""
__slots__ = ("i", "label")
def __new__(cls, i, label="sigma"):
        if i not in [1, 2, 3]:
raise IndexError("Invalid Pauli index")
obj = Symbol.__new__(cls, "%s%d" %(label,i), commutative=False, hermitian=True)
obj.i = i
obj.label = label
return obj
def __getnewargs__(self):
return (self.i,self.label,)
    # FIXME: does not work for -I*Pauli(2)*Pauli(3)
def __mul__(self, other):
if isinstance(other, Pauli):
j = self.i
k = other.i
jlab = self.label
klab = other.label
if jlab == klab:
return delta(j, k) \
+ I*epsilon(j, k, 1)*Pauli(1,jlab) \
+ I*epsilon(j, k, 2)*Pauli(2,jlab) \
+ I*epsilon(j, k, 3)*Pauli(3,jlab)
return super(Pauli, self).__mul__(other)
def _eval_power(b, e):
if e.is_Integer and e.is_positive:
return super(Pauli, b).__pow__(int(e) % 2)
def evaluate_pauli_product(arg):
    '''Helper function to evaluate products of Pauli matrices
    with symbolic objects.
Parameters
==========
    arg: symbolic expression that contains Pauli matrices
Examples
========
>>> from sympy.physics.paulialgebra import Pauli, evaluate_pauli_product
>>> from sympy import I
>>> evaluate_pauli_product(I*Pauli(1)*Pauli(2))
-sigma3
>>> from sympy.abc import x,y
>>> evaluate_pauli_product(x**2*Pauli(2)*Pauli(1))
-I*x**2*sigma3
'''
start = arg
end = arg
if isinstance(arg, Pow) and isinstance(arg.args[0], Pauli):
if arg.args[1].is_odd:
return arg.args[0]
else:
return 1
if isinstance(arg, Add):
return Add(*[evaluate_pauli_product(part) for part in arg.args])
if isinstance(arg, TensorProduct):
return TensorProduct(*[evaluate_pauli_product(part) for part in arg.args])
elif not(isinstance(arg, Mul)):
return arg
    while (not (start == end)) or ((start == arg) and (end == arg)):
start = end
tmp = start.as_coeff_mul()
sigma_product = 1
com_product = 1
keeper = 1
for el in tmp[1]:
if isinstance(el, Pauli):
sigma_product *= el
elif not(el.is_commutative):
if isinstance(el, Pow) and isinstance(el.args[0], Pauli):
if el.args[1].is_odd:
sigma_product *= el.args[0]
elif isinstance(el, TensorProduct):
keeper = keeper*sigma_product*\
TensorProduct(
*[evaluate_pauli_product(part) for part in el.args]
)
sigma_product = 1
else:
keeper = keeper*sigma_product*el
sigma_product = 1
else:
com_product *= el
end = (tmp[0]*keeper*sigma_product*com_product)
if end == arg: break
return end
|
4ee1af6d82eb7ea666d1730535e1ca824252ed785736ac3da58522beeb0e15bb | from __future__ import print_function, division
from sympy.core import S, pi, Rational
from sympy.functions import assoc_laguerre, sqrt, exp, factorial, factorial2
def R_nl(n, l, nu, r):
"""
Returns the radial wavefunction R_{nl} for a 3d isotropic harmonic
oscillator.
``n``
the "nodal" quantum number. Corresponds to the number of nodes in
the wavefunction. n >= 0
``l``
the quantum number for orbital angular momentum
``nu``
mass-scaled frequency: nu = m*omega/(2*hbar) where `m` is the mass
and `omega` the frequency of the oscillator.
(in atomic units nu == omega/2)
``r``
Radial coordinate
Examples
========
>>> from sympy.physics.sho import R_nl
>>> from sympy import var
>>> var("r nu l")
(r, nu, l)
>>> R_nl(0, 0, 1, r)
2*2**(3/4)*exp(-r**2)/pi**(1/4)
>>> R_nl(1, 0, 1, r)
4*2**(1/4)*sqrt(3)*(3/2 - 2*r**2)*exp(-r**2)/(3*pi**(1/4))
l, nu and r may be symbolic:
>>> R_nl(0, 0, nu, r)
2*2**(3/4)*sqrt(nu**(3/2))*exp(-nu*r**2)/pi**(1/4)
>>> R_nl(0, l, 1, r)
r**l*sqrt(2**(l + 3/2)*2**(l + 2)/factorial2(2*l + 1))*exp(-r**2)/pi**(1/4)
The normalization of the radial wavefunction is:
>>> from sympy import Integral, oo
>>> Integral(R_nl(0, 0, 1, r)**2*r**2, (r, 0, oo)).n()
1.00000000000000
>>> Integral(R_nl(1, 0, 1, r)**2*r**2, (r, 0, oo)).n()
1.00000000000000
>>> Integral(R_nl(1, 1, 1, r)**2*r**2, (r, 0, oo)).n()
1.00000000000000
"""
n, l, nu, r = map(S, [n, l, nu, r])
# formula uses n >= 1 (instead of nodal n >= 0)
n = n + 1
C = sqrt(
((2*nu)**(l + Rational(3, 2))*2**(n + l + 1)*factorial(n - 1))/
(sqrt(pi)*(factorial2(2*n + 2*l - 1)))
)
return C*r**(l)*exp(-nu*r**2)*assoc_laguerre(n - 1, l + S.Half, 2*nu*r**2)
def E_nl(n, l, hw):
"""
Returns the Energy of an isotropic harmonic oscillator
``n``
the "nodal" quantum number
``l``
the orbital angular momentum
``hw``
the harmonic oscillator parameter.
The unit of the returned value matches the unit of hw, since the energy is
calculated as:
E_nl = (2*n + l + 3/2)*hw
Examples
========
>>> from sympy.physics.sho import E_nl
>>> from sympy import symbols
>>> x, y, z = symbols('x, y, z')
>>> E_nl(x, y, z)
z*(2*x + y + 3/2)
"""
return (2*n + l + Rational(3, 2))*hw
|
5b1d1f85c5f359d866aae0008b6fbdf71f449c549754196cab071d844e44c2fb | """Known matrices related to physics"""
from __future__ import print_function, division
from sympy import Matrix, I, pi, sqrt
from sympy.functions import exp
def msigma(i):
r"""Returns a Pauli matrix `\sigma_i` with `i=1,2,3`
References
==========
.. [1] https://en.wikipedia.org/wiki/Pauli_matrices
Examples
========
>>> from sympy.physics.matrices import msigma
>>> msigma(1)
Matrix([
[0, 1],
[1, 0]])
"""
if i == 1:
mat = ( (
(0, 1),
(1, 0)
) )
elif i == 2:
mat = ( (
(0, -I),
(I, 0)
) )
elif i == 3:
mat = ( (
(1, 0),
(0, -1)
) )
else:
raise IndexError("Invalid Pauli index")
return Matrix(mat)
def pat_matrix(m, dx, dy, dz):
"""Returns the Parallel Axis Theorem matrix to translate the inertia
matrix a distance of `(dx, dy, dz)` for a body of mass m.
Examples
========
To translate a body having a mass of 2 units a distance of 1 unit along
the `x`-axis we get:
>>> from sympy.physics.matrices import pat_matrix
>>> pat_matrix(2, 1, 0, 0)
Matrix([
[0, 0, 0],
[0, 2, 0],
[0, 0, 2]])
"""
dxdy = -dx*dy
dydz = -dy*dz
dzdx = -dz*dx
dxdx = dx**2
dydy = dy**2
dzdz = dz**2
mat = ((dydy + dzdz, dxdy, dzdx),
(dxdy, dxdx + dzdz, dydz),
(dzdx, dydz, dydy + dxdx))
return m*Matrix(mat)
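# Illustrative note: for the displacement d = Matrix([dx, dy, dz]), the result
# above equals m*(d.dot(d)*eye(3) - d*d.T), the standard parallel-axis shift of
# the inertia tensor.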
def mgamma(mu, lower=False):
r"""Returns a Dirac gamma matrix `\gamma^\mu` in the standard
(Dirac) representation.
    If you want `\gamma_\mu`, use ``mgamma(mu, True)``.
We use a convention:
`\gamma^5 = i \cdot \gamma^0 \cdot \gamma^1 \cdot \gamma^2 \cdot \gamma^3`
`\gamma_5 = i \cdot \gamma_0 \cdot \gamma_1 \cdot \gamma_2 \cdot \gamma_3 = - \gamma^5`
References
==========
.. [1] https://en.wikipedia.org/wiki/Gamma_matrices
Examples
========
>>> from sympy.physics.matrices import mgamma
>>> mgamma(1)
Matrix([
[ 0, 0, 0, 1],
[ 0, 0, 1, 0],
[ 0, -1, 0, 0],
[-1, 0, 0, 0]])
"""
    if mu not in [0, 1, 2, 3, 5]:
raise IndexError("Invalid Dirac index")
if mu == 0:
mat = (
(1, 0, 0, 0),
(0, 1, 0, 0),
(0, 0, -1, 0),
(0, 0, 0, -1)
)
elif mu == 1:
mat = (
(0, 0, 0, 1),
(0, 0, 1, 0),
(0, -1, 0, 0),
(-1, 0, 0, 0)
)
elif mu == 2:
mat = (
(0, 0, 0, -I),
(0, 0, I, 0),
(0, I, 0, 0),
(-I, 0, 0, 0)
)
elif mu == 3:
mat = (
(0, 0, 1, 0),
(0, 0, 0, -1),
(-1, 0, 0, 0),
(0, 1, 0, 0)
)
elif mu == 5:
mat = (
(0, 0, 1, 0),
(0, 0, 0, 1),
(1, 0, 0, 0),
(0, 1, 0, 0)
)
m = Matrix(mat)
if lower:
if mu in [1, 2, 3, 5]:
m = -m
return m
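# Illustrative consistency check for the gamma^5 convention documented above
# (a sketch, not a doctest): with the Dirac representation used here,
#     mgamma(5) == I*mgamma(0)*mgamma(1)*mgamma(2)*mgamma(3)
# should hold as a Matrix identity.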
#Minkowski tensor using the convention (+,-,-,-) used in the Quantum Field
#Theory
minkowski_tensor = Matrix( (
(1, 0, 0, 0),
(0, -1, 0, 0),
(0, 0, -1, 0),
(0, 0, 0, -1)
))
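# Usage sketch: with the metric above, lowering a vector index only flips the
# sign of the spatial components, which is what ``mgamma(mu, lower=True)``
# implements for mu = 0, 1, 2, 3; for instance,
#     mgamma(1, True) == minkowski_tensor[1, 1]*mgamma(1)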
def mdft(n):
r"""
    Returns the n x n matrix that expresses a discrete Fourier transform as a
    matrix multiplication.
References
==========
.. [1] https://en.wikipedia.org/wiki/DFT_matrix
Examples
========
>>> from sympy.physics.matrices import mdft
>>> mdft(3)
Matrix([
[sqrt(3)/3, sqrt(3)/3, sqrt(3)/3],
[sqrt(3)/3, sqrt(3)*exp(-2*I*pi/3)/3, sqrt(3)*exp(2*I*pi/3)/3],
[sqrt(3)/3, sqrt(3)*exp(2*I*pi/3)/3, sqrt(3)*exp(-2*I*pi/3)/3]])
"""
mat = [[None for x in range(n)] for y in range(n)]
base = exp(-2*pi*I/n)
mat[0] = [1]*n
for i in range(n):
mat[i][0] = 1
for i in range(1, n):
for j in range(i, n):
mat[i][j] = mat[j][i] = base**(i*j)
return (1/sqrt(n))*Matrix(mat)
|
69748e006f794510de45f08dc925843a019f5d1d3bf34e4b82874b95ce9b374f | """
Second quantization operators and states for bosons.
This follows the formulation of Fetter and Walecka, "Quantum Theory
of Many-Particle Systems."
"""
from __future__ import print_function, division
from collections import defaultdict
from sympy import (Add, Basic, cacheit, Dummy, Expr, Function, I,
KroneckerDelta, Mul, Pow, S, sqrt, Symbol, sympify, Tuple,
zeros)
from sympy.printing.str import StrPrinter
from sympy.utilities.iterables import has_dups
from sympy.utilities import default_sort_key
__all__ = [
'Dagger',
'KroneckerDelta',
'BosonicOperator',
'AnnihilateBoson',
'CreateBoson',
'AnnihilateFermion',
'CreateFermion',
'FockState',
'FockStateBra',
'FockStateKet',
'FockStateBosonKet',
'FockStateBosonBra',
'FockStateFermionKet',
'FockStateFermionBra',
'BBra',
'BKet',
'FBra',
'FKet',
'F',
'Fd',
'B',
'Bd',
'apply_operators',
'InnerProduct',
'BosonicBasis',
'VarBosonicBasis',
'FixedBosonicBasis',
'Commutator',
'matrix_rep',
'contraction',
'wicks',
'NO',
'evaluate_deltas',
'AntiSymmetricTensor',
'substitute_dummies',
'PermutationOperator',
'simplify_index_permutations',
]
class SecondQuantizationError(Exception):
pass
class AppliesOnlyToSymbolicIndex(SecondQuantizationError):
pass
class ContractionAppliesOnlyToFermions(SecondQuantizationError):
pass
class ViolationOfPauliPrinciple(SecondQuantizationError):
pass
class SubstitutionOfAmbigousOperatorFailed(SecondQuantizationError):
pass
class WicksTheoremDoesNotApply(SecondQuantizationError):
pass
class Dagger(Expr):
"""
Hermitian conjugate of creation/annihilation operators.
Examples
========
>>> from sympy import I
>>> from sympy.physics.secondquant import Dagger, B, Bd
>>> Dagger(2*I)
-2*I
>>> Dagger(B(0))
CreateBoson(0)
>>> Dagger(Bd(0))
AnnihilateBoson(0)
"""
def __new__(cls, arg):
arg = sympify(arg)
r = cls.eval(arg)
if isinstance(r, Basic):
return r
obj = Basic.__new__(cls, arg)
return obj
@classmethod
def eval(cls, arg):
"""
Evaluates the Dagger instance.
Examples
========
>>> from sympy import I
>>> from sympy.physics.secondquant import Dagger, B, Bd
>>> Dagger(2*I)
-2*I
>>> Dagger(B(0))
CreateBoson(0)
>>> Dagger(Bd(0))
AnnihilateBoson(0)
The eval() method is called automatically.
"""
dagger = getattr(arg, '_dagger_', None)
if dagger is not None:
return dagger()
if isinstance(arg, Basic):
if arg.is_Add:
return Add(*tuple(map(Dagger, arg.args)))
if arg.is_Mul:
return Mul(*tuple(map(Dagger, reversed(arg.args))))
if arg.is_Number:
return arg
if arg.is_Pow:
return Pow(Dagger(arg.args[0]), arg.args[1])
if arg == I:
return -arg
else:
return None
def _dagger_(self):
return self.args[0]
class TensorSymbol(Expr):
is_commutative = True
class AntiSymmetricTensor(TensorSymbol):
"""Stores upper and lower indices in separate Tuple's.
Each group of indices is assumed to be antisymmetric.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import AntiSymmetricTensor
>>> i, j = symbols('i j', below_fermi=True)
>>> a, b = symbols('a b', above_fermi=True)
>>> AntiSymmetricTensor('v', (a, i), (b, j))
AntiSymmetricTensor(v, (a, i), (b, j))
>>> AntiSymmetricTensor('v', (i, a), (b, j))
-AntiSymmetricTensor(v, (a, i), (b, j))
As you can see, the indices are automatically sorted to a canonical form.
"""
def __new__(cls, symbol, upper, lower):
try:
upper, signu = _sort_anticommuting_fermions(
upper, key=cls._sortkey)
lower, signl = _sort_anticommuting_fermions(
lower, key=cls._sortkey)
except ViolationOfPauliPrinciple:
return S.Zero
symbol = sympify(symbol)
upper = Tuple(*upper)
lower = Tuple(*lower)
if (signu + signl) % 2:
return -TensorSymbol.__new__(cls, symbol, upper, lower)
else:
return TensorSymbol.__new__(cls, symbol, upper, lower)
@classmethod
def _sortkey(cls, index):
"""Key for sorting of indices.
particle < hole < general
        FIXME: This is a bottleneck; can we do it faster?
"""
h = hash(index)
label = str(index)
if isinstance(index, Dummy):
if index.assumptions0.get('above_fermi'):
return (20, label, h)
elif index.assumptions0.get('below_fermi'):
return (21, label, h)
else:
return (22, label, h)
if index.assumptions0.get('above_fermi'):
return (10, label, h)
elif index.assumptions0.get('below_fermi'):
return (11, label, h)
else:
return (12, label, h)
def _latex(self, printer):
return "%s^{%s}_{%s}" % (
self.symbol,
"".join([ i.name for i in self.args[1]]),
"".join([ i.name for i in self.args[2]])
)
@property
def symbol(self):
"""
Returns the symbol of the tensor.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import AntiSymmetricTensor
>>> i, j = symbols('i,j', below_fermi=True)
>>> a, b = symbols('a,b', above_fermi=True)
>>> AntiSymmetricTensor('v', (a, i), (b, j))
AntiSymmetricTensor(v, (a, i), (b, j))
>>> AntiSymmetricTensor('v', (a, i), (b, j)).symbol
v
"""
return self.args[0]
@property
def upper(self):
"""
Returns the upper indices.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import AntiSymmetricTensor
>>> i, j = symbols('i,j', below_fermi=True)
>>> a, b = symbols('a,b', above_fermi=True)
>>> AntiSymmetricTensor('v', (a, i), (b, j))
AntiSymmetricTensor(v, (a, i), (b, j))
>>> AntiSymmetricTensor('v', (a, i), (b, j)).upper
(a, i)
"""
return self.args[1]
@property
def lower(self):
"""
Returns the lower indices.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import AntiSymmetricTensor
>>> i, j = symbols('i,j', below_fermi=True)
>>> a, b = symbols('a,b', above_fermi=True)
>>> AntiSymmetricTensor('v', (a, i), (b, j))
AntiSymmetricTensor(v, (a, i), (b, j))
>>> AntiSymmetricTensor('v', (a, i), (b, j)).lower
(b, j)
"""
return self.args[2]
def __str__(self):
return "%s(%s,%s)" % self.args
def doit(self, **kw_args):
"""
Returns self.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import AntiSymmetricTensor
>>> i, j = symbols('i,j', below_fermi=True)
>>> a, b = symbols('a,b', above_fermi=True)
>>> AntiSymmetricTensor('v', (a, i), (b, j)).doit()
AntiSymmetricTensor(v, (a, i), (b, j))
"""
return self
class SqOperator(Expr):
"""
Base class for Second Quantization operators.
"""
op_symbol = 'sq'
is_commutative = False
def __new__(cls, k):
obj = Basic.__new__(cls, sympify(k))
return obj
@property
def state(self):
"""
Returns the state index related to this operator.
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F, Fd, B, Bd
>>> p = Symbol('p')
>>> F(p).state
p
>>> Fd(p).state
p
>>> B(p).state
p
>>> Bd(p).state
p
"""
return self.args[0]
@property
def is_symbolic(self):
"""
Returns True if the state is a symbol (as opposed to a number).
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> p = Symbol('p')
>>> F(p).is_symbolic
True
>>> F(1).is_symbolic
False
"""
if self.state.is_Integer:
return False
else:
return True
def doit(self, **kw_args):
"""
FIXME: hack to prevent crash further up...
"""
return self
def __repr__(self):
return NotImplemented
def __str__(self):
return "%s(%r)" % (self.op_symbol, self.state)
def apply_operator(self, state):
"""
Applies an operator to itself.
"""
raise NotImplementedError('implement apply_operator in a subclass')
class BosonicOperator(SqOperator):
pass
class Annihilator(SqOperator):
pass
class Creator(SqOperator):
pass
class AnnihilateBoson(BosonicOperator, Annihilator):
"""
Bosonic annihilation operator.
Examples
========
>>> from sympy.physics.secondquant import B
>>> from sympy.abc import x
>>> B(x)
AnnihilateBoson(x)
"""
op_symbol = 'b'
def _dagger_(self):
return CreateBoson(self.state)
def apply_operator(self, state):
"""
        Apply self to state if self is not symbolic and state is a FockStateKet;
        otherwise, multiply self by state.
Examples
========
>>> from sympy.physics.secondquant import B, BKet
>>> from sympy.abc import x, y, n
>>> B(x).apply_operator(y)
y*AnnihilateBoson(x)
>>> B(0).apply_operator(BKet((n,)))
sqrt(n)*FockStateBosonKet((n - 1,))
"""
if not self.is_symbolic and isinstance(state, FockStateKet):
element = self.state
amp = sqrt(state[element])
return amp*state.down(element)
else:
return Mul(self, state)
def __repr__(self):
return "AnnihilateBoson(%s)" % self.state
def _latex(self, printer):
return "b_{%s}" % self.state.name
class CreateBoson(BosonicOperator, Creator):
"""
Bosonic creation operator.
"""
op_symbol = 'b+'
def _dagger_(self):
return AnnihilateBoson(self.state)
def apply_operator(self, state):
"""
        Apply self to state if self is not symbolic and state is a FockStateKet;
        otherwise, multiply self by state.
Examples
========
>>> from sympy.physics.secondquant import B, Dagger, BKet
>>> from sympy.abc import x, y, n
>>> Dagger(B(x)).apply_operator(y)
y*CreateBoson(x)
>>> B(0).apply_operator(BKet((n,)))
sqrt(n)*FockStateBosonKet((n - 1,))
"""
if not self.is_symbolic and isinstance(state, FockStateKet):
element = self.state
amp = sqrt(state[element] + 1)
return amp*state.up(element)
else:
return Mul(self, state)
def __repr__(self):
return "CreateBoson(%s)" % self.state
def _latex(self, printer):
return "b^\\dagger_{%s}" % self.state.name
B = AnnihilateBoson
Bd = CreateBoson
class FermionicOperator(SqOperator):
@property
def is_restricted(self):
"""
Is this FermionicOperator restricted with respect to fermi level?
Return values:
1 : restricted to orbits above fermi
0 : no restriction
-1 : restricted to orbits below fermi
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F, Fd
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_restricted
1
>>> Fd(a).is_restricted
1
>>> F(i).is_restricted
-1
>>> Fd(i).is_restricted
-1
>>> F(p).is_restricted
0
>>> Fd(p).is_restricted
0
"""
ass = self.args[0].assumptions0
if ass.get("below_fermi"):
return -1
if ass.get("above_fermi"):
return 1
return 0
@property
def is_above_fermi(self):
"""
Does the index of this FermionicOperator allow values above fermi?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_above_fermi
True
>>> F(i).is_above_fermi
False
>>> F(p).is_above_fermi
True
The same applies to creation operators Fd
"""
return not self.args[0].assumptions0.get("below_fermi")
@property
def is_below_fermi(self):
"""
Does the index of this FermionicOperator allow values below fermi?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_below_fermi
False
>>> F(i).is_below_fermi
True
>>> F(p).is_below_fermi
True
The same applies to creation operators Fd
"""
return not self.args[0].assumptions0.get("above_fermi")
@property
def is_only_below_fermi(self):
"""
Is the index of this FermionicOperator restricted to values below fermi?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_only_below_fermi
False
>>> F(i).is_only_below_fermi
True
>>> F(p).is_only_below_fermi
False
The same applies to creation operators Fd
"""
return self.is_below_fermi and not self.is_above_fermi
@property
def is_only_above_fermi(self):
"""
Is the index of this FermionicOperator restricted to values above fermi?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_only_above_fermi
True
>>> F(i).is_only_above_fermi
False
>>> F(p).is_only_above_fermi
False
The same applies to creation operators Fd
"""
return self.is_above_fermi and not self.is_below_fermi
def _sortkey(self):
h = hash(self)
label = str(self.args[0])
if self.is_only_q_creator:
return 1, label, h
if self.is_only_q_annihilator:
return 4, label, h
if isinstance(self, Annihilator):
return 3, label, h
if isinstance(self, Creator):
return 2, label, h
class AnnihilateFermion(FermionicOperator, Annihilator):
"""
Fermionic annihilation operator.
"""
op_symbol = 'f'
def _dagger_(self):
return CreateFermion(self.state)
def apply_operator(self, state):
"""
        Apply self to state if state is a FockStateFermionKet (or a Mul
        containing one); otherwise, multiply self by state.
Examples
========
>>> from sympy.physics.secondquant import B, Dagger, BKet
>>> from sympy.abc import x, y, n
>>> Dagger(B(x)).apply_operator(y)
y*CreateBoson(x)
>>> B(0).apply_operator(BKet((n,)))
sqrt(n)*FockStateBosonKet((n - 1,))
"""
if isinstance(state, FockStateFermionKet):
element = self.state
return state.down(element)
elif isinstance(state, Mul):
c_part, nc_part = state.args_cnc()
if isinstance(nc_part[0], FockStateFermionKet):
element = self.state
return Mul(*(c_part + [nc_part[0].down(element)] + nc_part[1:]))
else:
return Mul(self, state)
else:
return Mul(self, state)
@property
def is_q_creator(self):
"""
Can we create a quasi-particle? (create hole or create particle)
If so, would that be above or below the fermi surface?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_q_creator
0
>>> F(i).is_q_creator
-1
>>> F(p).is_q_creator
-1
"""
if self.is_below_fermi:
return -1
return 0
@property
def is_q_annihilator(self):
"""
Can we destroy a quasi-particle? (annihilate hole or annihilate particle)
If so, would that be above or below the fermi surface?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=1)
>>> i = Symbol('i', below_fermi=1)
>>> p = Symbol('p')
>>> F(a).is_q_annihilator
1
>>> F(i).is_q_annihilator
0
>>> F(p).is_q_annihilator
1
"""
if self.is_above_fermi:
return 1
return 0
@property
def is_only_q_creator(self):
"""
Always create a quasi-particle? (create hole or create particle)
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_only_q_creator
False
>>> F(i).is_only_q_creator
True
>>> F(p).is_only_q_creator
False
"""
return self.is_only_below_fermi
@property
def is_only_q_annihilator(self):
"""
Always destroy a quasi-particle? (annihilate hole or annihilate particle)
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_only_q_annihilator
True
>>> F(i).is_only_q_annihilator
False
>>> F(p).is_only_q_annihilator
False
"""
return self.is_only_above_fermi
def __repr__(self):
return "AnnihilateFermion(%s)" % self.state
def _latex(self, printer):
return "a_{%s}" % self.state.name
class CreateFermion(FermionicOperator, Creator):
"""
Fermionic creation operator.
"""
op_symbol = 'f+'
def _dagger_(self):
return AnnihilateFermion(self.state)
def apply_operator(self, state):
"""
        Apply self to state if state is a FockStateFermionKet (or a Mul
        containing one); otherwise, multiply self by state.
Examples
========
>>> from sympy.physics.secondquant import B, Dagger, BKet
>>> from sympy.abc import x, y, n
>>> Dagger(B(x)).apply_operator(y)
y*CreateBoson(x)
>>> B(0).apply_operator(BKet((n,)))
sqrt(n)*FockStateBosonKet((n - 1,))
"""
if isinstance(state, FockStateFermionKet):
element = self.state
return state.up(element)
elif isinstance(state, Mul):
c_part, nc_part = state.args_cnc()
if isinstance(nc_part[0], FockStateFermionKet):
element = self.state
return Mul(*(c_part + [nc_part[0].up(element)] + nc_part[1:]))
return Mul(self, state)
@property
def is_q_creator(self):
"""
Can we create a quasi-particle? (create hole or create particle)
If so, would that be above or below the fermi surface?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import Fd
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> Fd(a).is_q_creator
1
>>> Fd(i).is_q_creator
0
>>> Fd(p).is_q_creator
1
"""
if self.is_above_fermi:
return 1
return 0
@property
def is_q_annihilator(self):
"""
Can we destroy a quasi-particle? (annihilate hole or annihilate particle)
If so, would that be above or below the fermi surface?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import Fd
>>> a = Symbol('a', above_fermi=1)
>>> i = Symbol('i', below_fermi=1)
>>> p = Symbol('p')
>>> Fd(a).is_q_annihilator
0
>>> Fd(i).is_q_annihilator
-1
>>> Fd(p).is_q_annihilator
-1
"""
if self.is_below_fermi:
return -1
return 0
@property
def is_only_q_creator(self):
"""
Always create a quasi-particle? (create hole or create particle)
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import Fd
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> Fd(a).is_only_q_creator
True
>>> Fd(i).is_only_q_creator
False
>>> Fd(p).is_only_q_creator
False
"""
return self.is_only_above_fermi
@property
def is_only_q_annihilator(self):
"""
Always destroy a quasi-particle? (annihilate hole or annihilate particle)
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import Fd
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> Fd(a).is_only_q_annihilator
False
>>> Fd(i).is_only_q_annihilator
True
>>> Fd(p).is_only_q_annihilator
False
"""
return self.is_only_below_fermi
def __repr__(self):
return "CreateFermion(%s)" % self.state
def _latex(self, printer):
return "a^\\dagger_{%s}" % self.state.name
Fd = CreateFermion
F = AnnihilateFermion
class FockState(Expr):
"""
Many particle Fock state with a sequence of occupation numbers.
Anywhere you can have a FockState, you can also have S.Zero.
All code must check for this!
Base class to represent FockStates.
"""
is_commutative = False
def __new__(cls, occupations):
"""
occupations is a list with two possible meanings:
- For bosons it is a list of occupation numbers.
Element i is the number of particles in state i.
- For fermions it is a list of occupied orbits.
Element 0 is the state that was occupied first, element i
is the i'th occupied state.
"""
occupations = list(map(sympify, occupations))
obj = Basic.__new__(cls, Tuple(*occupations))
return obj
def __getitem__(self, i):
i = int(i)
return self.args[0][i]
def __repr__(self):
return ("FockState(%r)") % (self.args)
def __str__(self):
return "%s%r%s" % (self.lbracket, self._labels(), self.rbracket)
def _labels(self):
return self.args[0]
def __len__(self):
return len(self.args[0])
class BosonState(FockState):
"""
Base class for FockStateBoson(Ket/Bra).
"""
def up(self, i):
"""
Performs the action of a creation operator.
Examples
========
>>> from sympy.physics.secondquant import BBra
>>> b = BBra([1, 2])
>>> b
FockStateBosonBra((1, 2))
>>> b.up(1)
FockStateBosonBra((1, 3))
"""
i = int(i)
new_occs = list(self.args[0])
new_occs[i] = new_occs[i] + S.One
return self.__class__(new_occs)
def down(self, i):
"""
Performs the action of an annihilation operator.
Examples
========
>>> from sympy.physics.secondquant import BBra
>>> b = BBra([1, 2])
>>> b
FockStateBosonBra((1, 2))
>>> b.down(1)
FockStateBosonBra((1, 1))
"""
i = int(i)
new_occs = list(self.args[0])
if new_occs[i] == S.Zero:
return S.Zero
else:
new_occs[i] = new_occs[i] - S.One
return self.__class__(new_occs)
class FermionState(FockState):
"""
Base class for FockStateFermion(Ket/Bra).
"""
fermi_level = 0
def __new__(cls, occupations, fermi_level=0):
occupations = list(map(sympify, occupations))
if len(occupations) > 1:
try:
(occupations, sign) = _sort_anticommuting_fermions(
occupations, key=hash)
except ViolationOfPauliPrinciple:
return S.Zero
else:
sign = 0
cls.fermi_level = fermi_level
if cls._count_holes(occupations) > fermi_level:
return S.Zero
if sign % 2:
return S.NegativeOne*FockState.__new__(cls, occupations)
else:
return FockState.__new__(cls, occupations)
def up(self, i):
"""
Performs the action of a creation operator.
If below fermi we try to remove a hole,
if above fermi we try to create a particle.
        If the index is a general index p, we return KroneckerDelta(p, i)*self,
        where i is a new symbol with a restriction above or below the fermi surface.
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import FKet
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> FKet([]).up(a)
FockStateFermionKet((a,))
A creator acting on vacuum below fermi vanishes
>>> FKet([]).up(i)
0
"""
present = i in self.args[0]
if self._only_above_fermi(i):
if present:
return S.Zero
else:
return self._add_orbit(i)
elif self._only_below_fermi(i):
if present:
return self._remove_orbit(i)
else:
return S.Zero
else:
if present:
hole = Dummy("i", below_fermi=True)
return KroneckerDelta(i, hole)*self._remove_orbit(i)
else:
particle = Dummy("a", above_fermi=True)
return KroneckerDelta(i, particle)*self._add_orbit(i)
def down(self, i):
"""
Performs the action of an annihilation operator.
If below fermi we try to create a hole,
if above fermi we try to remove a particle.
        If the index is a general index p, we return KroneckerDelta(p, i)*self,
        where i is a new symbol with a restriction above or below the fermi surface.
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import FKet
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
An annihilator acting on vacuum above fermi vanishes
>>> FKet([]).down(a)
0
Also below fermi, it vanishes, unless we specify a fermi level > 0
>>> FKet([]).down(i)
0
>>> FKet([],4).down(i)
FockStateFermionKet((i,))
"""
present = i in self.args[0]
if self._only_above_fermi(i):
if present:
return self._remove_orbit(i)
else:
return S.Zero
elif self._only_below_fermi(i):
if present:
return S.Zero
else:
return self._add_orbit(i)
else:
if present:
hole = Dummy("i", below_fermi=True)
return KroneckerDelta(i, hole)*self._add_orbit(i)
else:
particle = Dummy("a", above_fermi=True)
return KroneckerDelta(i, particle)*self._remove_orbit(i)
@classmethod
def _only_below_fermi(cls, i):
"""
Tests if given orbit is only below fermi surface.
If nothing can be concluded we return a conservative False.
"""
if i.is_number:
return i <= cls.fermi_level
if i.assumptions0.get('below_fermi'):
return True
return False
@classmethod
def _only_above_fermi(cls, i):
"""
Tests if given orbit is only above fermi surface.
If fermi level has not been set we return True.
If nothing can be concluded we return a conservative False.
"""
if i.is_number:
return i > cls.fermi_level
if i.assumptions0.get('above_fermi'):
return True
return not cls.fermi_level
def _remove_orbit(self, i):
"""
Removes particle/fills hole in orbit i. No input tests performed here.
"""
new_occs = list(self.args[0])
pos = new_occs.index(i)
del new_occs[pos]
if (pos) % 2:
return S.NegativeOne*self.__class__(new_occs, self.fermi_level)
else:
return self.__class__(new_occs, self.fermi_level)
def _add_orbit(self, i):
"""
Adds particle/creates hole in orbit i. No input tests performed here.
"""
return self.__class__((i,) + self.args[0], self.fermi_level)
@classmethod
def _count_holes(cls, list):
"""
returns number of identified hole states in list.
"""
return len([i for i in list if cls._only_below_fermi(i)])
def _negate_holes(self, list):
return tuple([-i if i <= self.fermi_level else i for i in list])
def __repr__(self):
if self.fermi_level:
return "FockStateKet(%r, fermi_level=%s)" % (self.args[0], self.fermi_level)
else:
return "FockStateKet(%r)" % (self.args[0],)
def _labels(self):
return self._negate_holes(self.args[0])
class FockStateKet(FockState):
"""
Representation of a ket.
"""
lbracket = '|'
rbracket = '>'
class FockStateBra(FockState):
"""
Representation of a bra.
"""
lbracket = '<'
rbracket = '|'
def __mul__(self, other):
if isinstance(other, FockStateKet):
return InnerProduct(self, other)
else:
return Expr.__mul__(self, other)
class FockStateBosonKet(BosonState, FockStateKet):
"""
Many particle Fock state with a sequence of occupation numbers.
Occupation numbers can be any integer >= 0.
Examples
========
>>> from sympy.physics.secondquant import BKet
>>> BKet([1, 2])
FockStateBosonKet((1, 2))
"""
def _dagger_(self):
return FockStateBosonBra(*self.args)
class FockStateBosonBra(BosonState, FockStateBra):
"""
Describes a collection of BosonBra particles.
Examples
========
>>> from sympy.physics.secondquant import BBra
>>> BBra([1, 2])
FockStateBosonBra((1, 2))
"""
def _dagger_(self):
return FockStateBosonKet(*self.args)
class FockStateFermionKet(FermionState, FockStateKet):
"""
Many-particle Fock state with a sequence of occupied orbits.
    Since each orbit can hold at most one fermion, we choose to store a list of
    occupied orbits rather than a tuple with occupation numbers (zeros and ones).
    States below the fermi level are holes, and are represented by negative labels
    in the occupation list.
For symbolic state labels, the fermi_level caps the number of allowed hole-
states.
Examples
========
>>> from sympy.physics.secondquant import FKet
>>> FKet([1, 2])
FockStateFermionKet((1, 2))
"""
def _dagger_(self):
return FockStateFermionBra(*self.args)
class FockStateFermionBra(FermionState, FockStateBra):
"""
See Also
========
FockStateFermionKet
Examples
========
>>> from sympy.physics.secondquant import FBra
>>> FBra([1, 2])
FockStateFermionBra((1, 2))
"""
def _dagger_(self):
return FockStateFermionKet(*self.args)
BBra = FockStateBosonBra
BKet = FockStateBosonKet
FBra = FockStateFermionBra
FKet = FockStateFermionKet
def _apply_Mul(m):
"""
Take a Mul instance with operators and apply them to states.
This method applies all operators with integer state labels
to the actual states. For symbolic state labels, nothing is done.
When inner products of FockStates are encountered (like <a|b>),
they are converted to instances of InnerProduct.
This does not currently work on double inner products like,
<a|b><c|d>.
If the argument is not a Mul, it is simply returned as is.
"""
if not isinstance(m, Mul):
return m
c_part, nc_part = m.args_cnc()
n_nc = len(nc_part)
if n_nc == 0 or n_nc == 1:
return m
else:
last = nc_part[-1]
next_to_last = nc_part[-2]
if isinstance(last, FockStateKet):
if isinstance(next_to_last, SqOperator):
if next_to_last.is_symbolic:
return m
else:
result = next_to_last.apply_operator(last)
if result == 0:
return S.Zero
else:
return _apply_Mul(Mul(*(c_part + nc_part[:-2] + [result])))
elif isinstance(next_to_last, Pow):
if isinstance(next_to_last.base, SqOperator) and \
next_to_last.exp.is_Integer:
if next_to_last.base.is_symbolic:
return m
else:
result = last
for i in range(next_to_last.exp):
result = next_to_last.base.apply_operator(result)
if result == 0:
break
if result == 0:
return S.Zero
else:
return _apply_Mul(Mul(*(c_part + nc_part[:-2] + [result])))
else:
return m
elif isinstance(next_to_last, FockStateBra):
result = InnerProduct(next_to_last, last)
if result == 0:
return S.Zero
else:
return _apply_Mul(Mul(*(c_part + nc_part[:-2] + [result])))
else:
return m
else:
return m
def apply_operators(e):
"""
Take a sympy expression with operators and states and apply the operators.
Examples
========
>>> from sympy.physics.secondquant import apply_operators
>>> from sympy import sympify
>>> apply_operators(sympify(3)+4)
7
"""
e = e.expand()
muls = e.atoms(Mul)
subs_list = [(m, _apply_Mul(m)) for m in iter(muls)]
return e.subs(subs_list)
class InnerProduct(Basic):
"""
An unevaluated inner product between a bra and ket.
Currently this class just reduces things to a product of
Kronecker Deltas. In the future, we could introduce abstract
states like ``|a>`` and ``|b>``, and leave the inner product unevaluated as
``<a|b>``.
"""
is_commutative = True
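    # Illustrative sketch: InnerProduct(FBra([1, 2]), FKet([1, 2])) reduces,
    # via eval() below, to KroneckerDelta(1, 1)*KroneckerDelta(2, 2) == 1.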
def __new__(cls, bra, ket):
if not isinstance(bra, FockStateBra):
raise TypeError("must be a bra")
if not isinstance(ket, FockStateKet):
            raise TypeError("must be a ket")
return cls.eval(bra, ket)
@classmethod
def eval(cls, bra, ket):
result = S.One
for i, j in zip(bra.args[0], ket.args[0]):
result *= KroneckerDelta(i, j)
if result == 0:
break
return result
@property
def bra(self):
"""Returns the bra part of the state"""
return self.args[0]
@property
def ket(self):
"""Returns the ket part of the state"""
return self.args[1]
def __repr__(self):
sbra = repr(self.bra)
sket = repr(self.ket)
return "%s|%s" % (sbra[:-1], sket[1:])
def __str__(self):
return self.__repr__()
def matrix_rep(op, basis):
"""
Find the representation of an operator in a basis.
Examples
========
>>> from sympy.physics.secondquant import VarBosonicBasis, B, matrix_rep
>>> b = VarBosonicBasis(5)
>>> o = B(0)
>>> matrix_rep(o, b)
Matrix([
[0, 1, 0, 0, 0],
[0, 0, sqrt(2), 0, 0],
[0, 0, 0, sqrt(3), 0],
[0, 0, 0, 0, 2],
[0, 0, 0, 0, 0]])
"""
a = zeros(len(basis))
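    # each element below is a[i, j] = <basis[i]| op |basis[j]>, evaluated
    # with apply_operators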
for i in range(len(basis)):
for j in range(len(basis)):
a[i, j] = apply_operators(Dagger(basis[i])*op*basis[j])
return a
class BosonicBasis(object):
"""
Base class for a basis set of bosonic Fock states.
"""
pass
class VarBosonicBasis(object):
"""
A single state, variable particle number basis set.
Examples
========
>>> from sympy.physics.secondquant import VarBosonicBasis
>>> b = VarBosonicBasis(5)
>>> b
[FockState((0,)), FockState((1,)), FockState((2,)),
FockState((3,)), FockState((4,))]
"""
def __init__(self, n_max):
self.n_max = n_max
self._build_states()
def _build_states(self):
self.basis = []
for i in range(self.n_max):
self.basis.append(FockStateBosonKet([i]))
self.n_basis = len(self.basis)
def index(self, state):
"""
Returns the index of state in basis.
Examples
========
>>> from sympy.physics.secondquant import VarBosonicBasis
>>> b = VarBosonicBasis(3)
>>> state = b.state(1)
>>> b
[FockState((0,)), FockState((1,)), FockState((2,))]
>>> state
FockStateBosonKet((1,))
>>> b.index(state)
1
"""
return self.basis.index(state)
def state(self, i):
"""
The state of a single basis.
Examples
========
>>> from sympy.physics.secondquant import VarBosonicBasis
>>> b = VarBosonicBasis(5)
>>> b.state(3)
FockStateBosonKet((3,))
"""
return self.basis[i]
def __getitem__(self, i):
return self.state(i)
def __len__(self):
return len(self.basis)
def __repr__(self):
return repr(self.basis)
class FixedBosonicBasis(BosonicBasis):
"""
Fixed particle number basis set.
Examples
========
>>> from sympy.physics.secondquant import FixedBosonicBasis
>>> b = FixedBosonicBasis(2, 2)
>>> state = b.state(1)
>>> b
[FockState((2, 0)), FockState((1, 1)), FockState((0, 2))]
>>> state
FockStateBosonKet((1, 1))
>>> b.index(state)
1
"""
def __init__(self, n_particles, n_levels):
self.n_particles = n_particles
self.n_levels = n_levels
self._build_particle_locations()
self._build_states()
def _build_particle_locations(self):
tup = ["i%i" % i for i in range(self.n_particles)]
first_loop = "for i0 in range(%i)" % self.n_levels
other_loops = ''
for cur, prev in zip(tup[1:], tup):
temp = "for %s in range(%s + 1) " % (cur, prev)
other_loops = other_loops + temp
tup_string = "(%s)" % ", ".join(tup)
list_comp = "[%s %s %s]" % (tup_string, first_loop, other_loops)
result = eval(list_comp)
if self.n_particles == 1:
result = [(item,) for item in result]
self.particle_locations = result
def _build_states(self):
self.basis = []
for tuple_of_indices in self.particle_locations:
occ_numbers = self.n_levels*[0]
for level in tuple_of_indices:
occ_numbers[level] += 1
self.basis.append(FockStateBosonKet(occ_numbers))
self.n_basis = len(self.basis)
def index(self, state):
"""Returns the index of state in basis.
Examples
========
>>> from sympy.physics.secondquant import FixedBosonicBasis
>>> b = FixedBosonicBasis(2, 3)
>>> b.index(b.state(3))
3
"""
return self.basis.index(state)
def state(self, i):
"""Returns the state that lies at index i of the basis
Examples
========
>>> from sympy.physics.secondquant import FixedBosonicBasis
>>> b = FixedBosonicBasis(2, 3)
>>> b.state(3)
FockStateBosonKet((1, 0, 1))
"""
return self.basis[i]
def __getitem__(self, i):
return self.state(i)
def __len__(self):
return len(self.basis)
def __repr__(self):
return repr(self.basis)
class Commutator(Function):
"""
The Commutator: [A, B] = A*B - B*A
    The arguments are ordered according to their sort keys.
>>> from sympy import symbols
>>> from sympy.physics.secondquant import Commutator
>>> A, B = symbols('A,B', commutative=False)
>>> Commutator(B, A)
-Commutator(A, B)
Evaluate the commutator with .doit()
>>> comm = Commutator(A,B); comm
Commutator(A, B)
>>> comm.doit()
A*B - B*A
For two second quantization operators the commutator is evaluated
immediately:
>>> from sympy.physics.secondquant import Fd, F
>>> a = symbols('a', above_fermi=True)
>>> i = symbols('i', below_fermi=True)
>>> p,q = symbols('p,q')
>>> Commutator(Fd(a),Fd(i))
2*NO(CreateFermion(a)*CreateFermion(i))
But for more complicated expressions, the evaluation is triggered by
a call to .doit()
>>> comm = Commutator(Fd(p)*Fd(q),F(i)); comm
Commutator(CreateFermion(p)*CreateFermion(q), AnnihilateFermion(i))
>>> comm.doit(wicks=True)
-KroneckerDelta(i, p)*CreateFermion(q) +
KroneckerDelta(i, q)*CreateFermion(p)
"""
is_commutative = False
@classmethod
def eval(cls, a, b):
"""
        The Commutator [A,B] is in canonical form if A < B.
Examples
========
>>> from sympy.physics.secondquant import Commutator, F, Fd
>>> from sympy.abc import x
>>> c1 = Commutator(F(x), Fd(x))
>>> c2 = Commutator(Fd(x), F(x))
>>> Commutator.eval(c1, c2)
0
"""
if not (a and b):
return S.Zero
if a == b:
return S.Zero
if a.is_commutative or b.is_commutative:
return S.Zero
#
# [A+B,C] -> [A,C] + [B,C]
#
a = a.expand()
if isinstance(a, Add):
return Add(*[cls(term, b) for term in a.args])
b = b.expand()
if isinstance(b, Add):
return Add(*[cls(a, term) for term in b.args])
#
# [xA,yB] -> xy*[A,B]
#
ca, nca = a.args_cnc()
cb, ncb = b.args_cnc()
c_part = list(ca) + list(cb)
if c_part:
return Mul(Mul(*c_part), cls(Mul._from_args(nca), Mul._from_args(ncb)))
#
# single second quantization operators
#
if isinstance(a, BosonicOperator) and isinstance(b, BosonicOperator):
if isinstance(b, CreateBoson) and isinstance(a, AnnihilateBoson):
return KroneckerDelta(a.state, b.state)
if isinstance(a, CreateBoson) and isinstance(b, AnnihilateBoson):
return S.NegativeOne*KroneckerDelta(a.state, b.state)
else:
return S.Zero
if isinstance(a, FermionicOperator) and isinstance(b, FermionicOperator):
return wicks(a*b) - wicks(b*a)
#
# Canonical ordering of arguments
#
if a.sort_key() > b.sort_key():
return S.NegativeOne*cls(b, a)
def doit(self, **hints):
"""
Enables the computation of complex expressions.
Examples
========
>>> from sympy.physics.secondquant import Commutator, F, Fd
>>> from sympy import symbols
>>> i, j = symbols('i,j', below_fermi=True)
>>> a, b = symbols('a,b', above_fermi=True)
>>> c = Commutator(Fd(a)*F(i),Fd(b)*F(j))
>>> c.doit(wicks=True)
0
"""
a = self.args[0]
b = self.args[1]
if hints.get("wicks"):
a = a.doit(**hints)
b = b.doit(**hints)
try:
return wicks(a*b) - wicks(b*a)
except ContractionAppliesOnlyToFermions:
pass
except WicksTheoremDoesNotApply:
pass
return (a*b - b*a).doit(**hints)
def __repr__(self):
return "Commutator(%s,%s)" % (self.args[0], self.args[1])
def __str__(self):
return "[%s,%s]" % (self.args[0], self.args[1])
def _latex(self, printer):
return "\\left[%s,%s\\right]" % tuple([
printer._print(arg) for arg in self.args])
class NO(Expr):
"""
    This object is used to represent normal ordering brackets,
    i.e. {abcd}, sometimes written :abcd:
Applying the function NO(arg) to an argument means that all operators in
the argument will be assumed to anticommute, and have vanishing
contractions. This allows an immediate reordering to canonical form
upon object creation.
>>> from sympy import symbols
>>> from sympy.physics.secondquant import NO, F, Fd
>>> p,q = symbols('p,q')
>>> NO(Fd(p)*F(q))
NO(CreateFermion(p)*AnnihilateFermion(q))
>>> NO(F(q)*Fd(p))
-NO(CreateFermion(p)*AnnihilateFermion(q))
Note:
If you want to generate a normal ordered equivalent of an expression, you
should use the function wicks(). This class only indicates that all
operators inside the brackets anticommute, and have vanishing contractions.
Nothing more, nothing less.
"""
is_commutative = False
def __new__(cls, arg):
"""
Use anticommutation to get canonical form of operators.
Employ associativity of normal ordered product: {ab{cd}} = {abcd}
but note that {ab}{cd} /= {abcd}.
We also employ distributivity: {ab + cd} = {ab} + {cd}.
Canonical form also implies expand() {ab(c+d)} = {abc} + {abd}.
"""
# {ab + cd} = {ab} + {cd}
arg = sympify(arg)
arg = arg.expand()
if arg.is_Add:
return Add(*[ cls(term) for term in arg.args])
if arg.is_Mul:
# take coefficient outside of normal ordering brackets
c_part, seq = arg.args_cnc()
if c_part:
coeff = Mul(*c_part)
if not seq:
return coeff
else:
coeff = S.One
# {ab{cd}} = {abcd}
newseq = []
foundit = False
for fac in seq:
if isinstance(fac, NO):
newseq.extend(fac.args)
foundit = True
else:
newseq.append(fac)
if foundit:
return coeff*cls(Mul(*newseq))
            # We assume that the user doesn't mix B and F operators
if isinstance(seq[0], BosonicOperator):
raise NotImplementedError
try:
newseq, sign = _sort_anticommuting_fermions(seq)
except ViolationOfPauliPrinciple:
return S.Zero
if sign % 2:
return (S.NegativeOne*coeff)*cls(Mul(*newseq))
elif sign:
return coeff*cls(Mul(*newseq))
else:
                pass  # since sign == 0, no permutations were necessary
# if we couldn't do anything with Mul object, we just
# mark it as normal ordered
if coeff != S.One:
return coeff*cls(Mul(*newseq))
return Expr.__new__(cls, Mul(*newseq))
if isinstance(arg, NO):
return arg
# if object was not Mul or Add, normal ordering does not apply
return arg
@property
def has_q_creators(self):
"""
        Return 0 if the leftmost argument of the first argument is not a
        q_creator, else 1 if it is above fermi or -1 if it is below fermi.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import NO, F, Fd
>>> a = symbols('a', above_fermi=True)
>>> i = symbols('i', below_fermi=True)
>>> NO(Fd(a)*Fd(i)).has_q_creators
1
>>> NO(F(i)*F(a)).has_q_creators
-1
>>> NO(Fd(i)*F(a)).has_q_creators #doctest: +SKIP
0
"""
return self.args[0].args[0].is_q_creator
@property
def has_q_annihilators(self):
"""
        Return 0 if the rightmost argument of the first argument is not a
        q_annihilator, else 1 if it is above fermi or -1 if it is below fermi.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import NO, F, Fd
>>> a = symbols('a', above_fermi=True)
>>> i = symbols('i', below_fermi=True)
>>> NO(Fd(a)*Fd(i)).has_q_annihilators
-1
>>> NO(F(i)*F(a)).has_q_annihilators
1
>>> NO(Fd(a)*F(i)).has_q_annihilators
0
"""
return self.args[0].args[-1].is_q_annihilator
def doit(self, **kw_args):
"""
Either removes the brackets or enables complex computations
in its arguments.
Examples
========
>>> from sympy.physics.secondquant import NO, Fd, F
>>> from textwrap import fill
>>> from sympy import symbols, Dummy
>>> p,q = symbols('p,q', cls=Dummy)
>>> print(fill(str(NO(Fd(p)*F(q)).doit())))
KroneckerDelta(_a, _p)*KroneckerDelta(_a,
_q)*CreateFermion(_a)*AnnihilateFermion(_a) + KroneckerDelta(_a,
_p)*KroneckerDelta(_i, _q)*CreateFermion(_a)*AnnihilateFermion(_i) -
KroneckerDelta(_a, _q)*KroneckerDelta(_i,
_p)*AnnihilateFermion(_a)*CreateFermion(_i) - KroneckerDelta(_i,
_p)*KroneckerDelta(_i, _q)*AnnihilateFermion(_i)*CreateFermion(_i)
"""
if kw_args.get("remove_brackets", True):
return self._remove_brackets()
else:
return self.__new__(type(self), self.args[0].doit(**kw_args))
def _remove_brackets(self):
"""
Returns the sorted string without normal order brackets.
        The returned string has the property that no nonzero
contractions exist.
"""
# check if any creator is also an annihilator
subslist = []
for i in self.iter_q_creators():
if self[i].is_q_annihilator:
assume = self[i].state.assumptions0
# only operators with a dummy index can be split in two terms
if isinstance(self[i].state, Dummy):
# create indices with fermi restriction
assume.pop("above_fermi", None)
assume["below_fermi"] = True
below = Dummy('i', **assume)
assume.pop("below_fermi", None)
assume["above_fermi"] = True
above = Dummy('a', **assume)
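                    # split the ambiguous operator as
                    # op(p) = op(i)*KroneckerDelta(i, p) + op(a)*KroneckerDelta(a, p)
                    # with i below and a above fermi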
cls = type(self[i])
split = (
self[i].__new__(cls, below)
* KroneckerDelta(below, self[i].state)
+ self[i].__new__(cls, above)
* KroneckerDelta(above, self[i].state)
)
subslist.append((self[i], split))
else:
raise SubstitutionOfAmbigousOperatorFailed(self[i])
if subslist:
result = NO(self.subs(subslist))
if isinstance(result, Add):
return Add(*[term.doit() for term in result.args])
else:
return self.args[0]
def _expand_operators(self):
"""
Returns a sum of NO objects that contain no ambiguous q-operators.
If an index q has range both above and below fermi, the operator F(q)
is ambiguous in the sense that it can be both a q-creator and a q-annihilator.
If q is dummy, it is assumed to be a summation variable and this method
rewrites it into a sum of NO terms with unambiguous operators:
{Fd(p)*F(q)} = {Fd(a)*F(b)} + {Fd(a)*F(i)} + {Fd(j)*F(b)} -{F(i)*Fd(j)}
where a,b are above and i,j are below fermi level.
"""
        return NO(self._remove_brackets())
def __getitem__(self, i):
if isinstance(i, slice):
indices = i.indices(len(self))
return [self.args[0].args[i] for i in range(*indices)]
else:
return self.args[0].args[i]
def __len__(self):
return len(self.args[0].args)
def iter_q_annihilators(self):
"""
Iterates over the annihilation operators.
Examples
========
>>> from sympy import symbols
>>> i, j = symbols('i j', below_fermi=True)
>>> a, b = symbols('a b', above_fermi=True)
>>> from sympy.physics.secondquant import NO, F, Fd
>>> no = NO(Fd(a)*F(i)*F(b)*Fd(j))
>>> no.iter_q_creators()
<generator object... at 0x...>
>>> list(no.iter_q_creators())
[0, 1]
>>> list(no.iter_q_annihilators())
[3, 2]
"""
ops = self.args[0].args
iter = range(len(ops) - 1, -1, -1)
for i in iter:
if ops[i].is_q_annihilator:
yield i
else:
break
def iter_q_creators(self):
"""
Iterates over the creation operators.
Examples
========
>>> from sympy import symbols
>>> i, j = symbols('i j', below_fermi=True)
>>> a, b = symbols('a b', above_fermi=True)
>>> from sympy.physics.secondquant import NO, F, Fd
>>> no = NO(Fd(a)*F(i)*F(b)*Fd(j))
>>> no.iter_q_creators()
<generator object... at 0x...>
>>> list(no.iter_q_creators())
[0, 1]
>>> list(no.iter_q_annihilators())
[3, 2]
"""
ops = self.args[0].args
iter = range(0, len(ops))
for i in iter:
if ops[i].is_q_creator:
yield i
else:
break
def get_subNO(self, i):
"""
Returns a NO() without FermionicOperator at index i.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import F, NO
>>> p, q, r = symbols('p,q,r')
>>> NO(F(p)*F(q)*F(r)).get_subNO(1)
NO(AnnihilateFermion(p)*AnnihilateFermion(r))
"""
arg0 = self.args[0] # it's a Mul by definition of how it's created
mul = arg0._new_rawargs(*(arg0.args[:i] + arg0.args[i + 1:]))
return NO(mul)
def _latex(self, printer):
return "\\left\\{%s\\right\\}" % printer._print(self.args[0])
def __repr__(self):
return "NO(%s)" % self.args[0]
def __str__(self):
return ":%s:" % self.args[0]
def contraction(a, b):
"""
Calculates contraction of Fermionic operators a and b.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import F, Fd, contraction
>>> p, q = symbols('p,q')
>>> a, b = symbols('a,b', above_fermi=True)
>>> i, j = symbols('i,j', below_fermi=True)
A contraction is non-zero only if a quasi-creator is to the right of a
quasi-annihilator:
>>> contraction(F(a),Fd(b))
KroneckerDelta(a, b)
>>> contraction(Fd(i),F(j))
KroneckerDelta(i, j)
For general indices a non-zero result restricts the indices to below/above
the fermi surface:
>>> contraction(Fd(p),F(q))
KroneckerDelta(_i, q)*KroneckerDelta(p, q)
>>> contraction(F(p),Fd(q))
KroneckerDelta(_a, q)*KroneckerDelta(p, q)
    The contraction of two creators or two annihilators always vanishes:
>>> contraction(F(p),F(q))
0
>>> contraction(Fd(p),Fd(q))
0
"""
if isinstance(b, FermionicOperator) and isinstance(a, FermionicOperator):
if isinstance(a, AnnihilateFermion) and isinstance(b, CreateFermion):
if b.state.assumptions0.get("below_fermi"):
return S.Zero
if a.state.assumptions0.get("below_fermi"):
return S.Zero
if b.state.assumptions0.get("above_fermi"):
return KroneckerDelta(a.state, b.state)
if a.state.assumptions0.get("above_fermi"):
return KroneckerDelta(a.state, b.state)
return (KroneckerDelta(a.state, b.state)*
KroneckerDelta(b.state, Dummy('a', above_fermi=True)))
if isinstance(b, AnnihilateFermion) and isinstance(a, CreateFermion):
if b.state.assumptions0.get("above_fermi"):
return S.Zero
if a.state.assumptions0.get("above_fermi"):
return S.Zero
if b.state.assumptions0.get("below_fermi"):
return KroneckerDelta(a.state, b.state)
if a.state.assumptions0.get("below_fermi"):
return KroneckerDelta(a.state, b.state)
return (KroneckerDelta(a.state, b.state)*
KroneckerDelta(b.state, Dummy('i', below_fermi=True)))
# vanish if 2xAnnihilator or 2xCreator
return S.Zero
else:
#not fermion operators
t = ( isinstance(i, FermionicOperator) for i in (a, b) )
raise ContractionAppliesOnlyToFermions(*t)
def _sqkey(sq_operator):
"""Generates key for canonical sorting of SQ operators."""
return sq_operator._sortkey()
def _sort_anticommuting_fermions(string1, key=_sqkey):
"""Sort fermionic operators to canonical order, assuming all pairs anticommute.
Uses a bidirectional bubble sort. Items in string1 are not referenced
so in principle they may be any comparable objects. The sorting depends on the
operators '>' and '=='.
If the Pauli principle is violated, an exception is raised.
Returns
=======
tuple (sorted_str, sign)
sorted_str: list containing the sorted operators
sign: int telling how many times the sign should be changed
(if sign==0 the string was already sorted)
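    For example (illustrative), a string [b, a] where a < b according to the
    key is returned as ([a, b], 1), i.e. one swap and an overall factor
    (-1)**1 for the caller to apply.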
"""
verified = False
sign = 0
rng = list(range(len(string1) - 1))
rev = list(range(len(string1) - 3, -1, -1))
keys = list(map(key, string1))
key_val = dict(list(zip(keys, string1)))
while not verified:
verified = True
for i in rng:
left = keys[i]
right = keys[i + 1]
if left == right:
raise ViolationOfPauliPrinciple([left, right])
if left > right:
verified = False
keys[i:i + 2] = [right, left]
sign = sign + 1
if verified:
break
for i in rev:
left = keys[i]
right = keys[i + 1]
if left == right:
raise ViolationOfPauliPrinciple([left, right])
if left > right:
verified = False
keys[i:i + 2] = [right, left]
sign = sign + 1
string1 = [ key_val[k] for k in keys ]
return (string1, sign)
def evaluate_deltas(e):
"""
We evaluate KroneckerDelta symbols in the expression assuming Einstein summation.
If one index is repeated it is summed over and in effect substituted with
the other one. If both indices are repeated we substitute according to what
    is the preferred index. This is determined by
KroneckerDelta.preferred_index and KroneckerDelta.killable_index.
In case there are no possible substitutions or if a substitution would
imply a loss of information, nothing is done.
In case an index appears in more than one KroneckerDelta, the resulting
substitution depends on the order of the factors. Since the ordering is platform
dependent, the literal expression resulting from this function may be hard to
predict.
Examples
========
We assume the following:
>>> from sympy import symbols, Function, Dummy, KroneckerDelta
>>> from sympy.physics.secondquant import evaluate_deltas
>>> i,j = symbols('i j', below_fermi=True, cls=Dummy)
>>> a,b = symbols('a b', above_fermi=True, cls=Dummy)
>>> p,q = symbols('p q', cls=Dummy)
>>> f = Function('f')
>>> t = Function('t')
The order of preference for these indices according to KroneckerDelta is
(a, b, i, j, p, q).
Trivial cases:
>>> evaluate_deltas(KroneckerDelta(i,j)*f(i)) # d_ij f(i) -> f(j)
f(_j)
>>> evaluate_deltas(KroneckerDelta(i,j)*f(j)) # d_ij f(j) -> f(i)
f(_i)
>>> evaluate_deltas(KroneckerDelta(i,p)*f(p)) # d_ip f(p) -> f(i)
f(_i)
>>> evaluate_deltas(KroneckerDelta(q,p)*f(p)) # d_qp f(p) -> f(q)
f(_q)
>>> evaluate_deltas(KroneckerDelta(q,p)*f(q)) # d_qp f(q) -> f(p)
f(_p)
More interesting cases:
>>> evaluate_deltas(KroneckerDelta(i,p)*t(a,i)*f(p,q))
f(_i, _q)*t(_a, _i)
>>> evaluate_deltas(KroneckerDelta(a,p)*t(a,i)*f(p,q))
f(_a, _q)*t(_a, _i)
>>> evaluate_deltas(KroneckerDelta(p,q)*f(p,q))
f(_p, _p)
Finally, here are some cases where nothing is done, because that would
imply a loss of information:
>>> evaluate_deltas(KroneckerDelta(i,p)*f(q))
f(_q)*KroneckerDelta(_i, _p)
>>> evaluate_deltas(KroneckerDelta(i,p)*f(i))
f(_i)*KroneckerDelta(_i, _p)
"""
# We treat Deltas only in mul objects
# for general function objects we don't evaluate KroneckerDeltas in arguments,
# but here we hard code exceptions to this rule
accepted_functions = (
Add,
)
if isinstance(e, accepted_functions):
return e.func(*[evaluate_deltas(arg) for arg in e.args])
elif isinstance(e, Mul):
# find all occurrences of delta function and count each index present in
# expression.
deltas = []
indices = {}
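        # indices[s] ends up nonzero only when s occurs in more than one
        # factor; a delta index is only substituted if it also appears
        # elsewhere in the product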
for i in e.args:
for s in i.free_symbols:
if s in indices:
indices[s] += 1
else:
indices[s] = 0 # geek counting simplifies logic below
if isinstance(i, KroneckerDelta):
deltas.append(i)
for d in deltas:
# If we do something, and there are more deltas, we should recurse
# to treat the resulting expression properly
if d.killable_index.is_Symbol and indices[d.killable_index]:
e = e.subs(d.killable_index, d.preferred_index)
if len(deltas) > 1:
return evaluate_deltas(e)
elif (d.preferred_index.is_Symbol and indices[d.preferred_index]
and d.indices_contain_equal_information):
e = e.subs(d.preferred_index, d.killable_index)
if len(deltas) > 1:
return evaluate_deltas(e)
else:
pass
return e
# nothing to do, maybe we hit a Symbol or a number
else:
return e
def substitute_dummies(expr, new_indices=False, pretty_indices={}):
"""
Collect terms by substitution of dummy variables.
This routine allows simplification of Add expressions containing terms
which differ only due to dummy variables.
The idea is to substitute all dummy variables consistently depending on
the structure of the term. For each term, we obtain a sequence of all
dummy variables, where the order is determined by the index range, what
factors the index belongs to and its position in each factor. See
_get_ordered_dummies() for more information about the sorting of dummies.
The index sequence is then substituted consistently in each term.
Examples
========
>>> from sympy import symbols, Function, Dummy
>>> from sympy.physics.secondquant import substitute_dummies
>>> a,b,c,d = symbols('a b c d', above_fermi=True, cls=Dummy)
>>> i,j = symbols('i j', below_fermi=True, cls=Dummy)
>>> f = Function('f')
>>> expr = f(a,b) + f(c,d); expr
f(_a, _b) + f(_c, _d)
Since a, b, c and d are equivalent summation indices, the expression can be
simplified to a single term (for which the dummy indices are still summed over)
>>> substitute_dummies(expr)
2*f(_a, _b)
Controlling output:
By default the dummy symbols that are already present in the expression
will be reused in a different permutation. However, if new_indices=True,
new dummies will be generated and inserted. The keyword 'pretty_indices'
can be used to control this generation of new symbols.
    By default the new dummies will be generated in the form i_1, i_2, a_1,
etc. If you supply a dictionary with key:value pairs in the form:
{ index_group: string_of_letters }
The letters will be used as labels for the new dummy symbols. The
index_groups must be one of 'above', 'below' or 'general'.
>>> expr = f(a,b,i,j)
>>> my_dummies = { 'above':'st', 'below':'uv' }
>>> substitute_dummies(expr, new_indices=True, pretty_indices=my_dummies)
f(_s, _t, _u, _v)
If we run out of letters, or if there is no keyword for some index_group
the default dummy generator will be used as a fallback:
>>> p,q = symbols('p q', cls=Dummy) # general indices
>>> expr = f(p,q)
>>> substitute_dummies(expr, new_indices=True, pretty_indices=my_dummies)
f(_p_0, _p_1)
"""
# setup the replacing dummies
if new_indices:
letters_above = pretty_indices.get('above', "")
letters_below = pretty_indices.get('below', "")
letters_general = pretty_indices.get('general', "")
len_above = len(letters_above)
len_below = len(letters_below)
len_general = len(letters_general)
def _i(number):
try:
return letters_below[number]
except IndexError:
return 'i_' + str(number - len_below)
def _a(number):
try:
return letters_above[number]
except IndexError:
return 'a_' + str(number - len_above)
def _p(number):
try:
return letters_general[number]
except IndexError:
return 'p_' + str(number - len_general)
aboves = []
belows = []
generals = []
dummies = expr.atoms(Dummy)
if not new_indices:
dummies = sorted(dummies, key=default_sort_key)
# generate lists with the dummies we will insert
a = i = p = 0
for d in dummies:
assum = d.assumptions0
if assum.get("above_fermi"):
if new_indices:
sym = _a(a)
a += 1
l1 = aboves
elif assum.get("below_fermi"):
if new_indices:
sym = _i(i)
i += 1
l1 = belows
else:
if new_indices:
sym = _p(p)
p += 1
l1 = generals
if new_indices:
l1.append(Dummy(sym, **assum))
else:
l1.append(d)
expr = expr.expand()
terms = Add.make_args(expr)
new_terms = []
for term in terms:
i = iter(belows)
a = iter(aboves)
p = iter(generals)
ordered = _get_ordered_dummies(term)
subsdict = {}
for d in ordered:
if d.assumptions0.get('below_fermi'):
subsdict[d] = next(i)
elif d.assumptions0.get('above_fermi'):
subsdict[d] = next(a)
else:
subsdict[d] = next(p)
subslist = []
final_subs = []
for k, v in subsdict.items():
if k == v:
continue
if v in subsdict:
# We check if the sequence of substitutions end quickly. In
# that case, we can avoid temporary symbols if we ensure the
# correct substitution order.
if subsdict[v] in subsdict:
# (x, y) -> (y, x), we need a temporary variable
x = Dummy('x')
subslist.append((k, x))
final_subs.append((x, v))
else:
# (x, y) -> (y, a), x->y must be done last
# but before temporary variables are resolved
final_subs.insert(0, (k, v))
else:
subslist.append((k, v))
subslist.extend(final_subs)
new_terms.append(term.subs(subslist))
return Add(*new_terms)
class KeyPrinter(StrPrinter):
"""Printer for which only equal objects are equal in print"""
def _print_Dummy(self, expr):
return "(%s_%i)" % (expr.name, expr.dummy_index)
def __kprint(expr):
p = KeyPrinter()
return p.doprint(expr)
def _get_ordered_dummies(mul, verbose=False):
"""Returns all dummies in the mul sorted in canonical order
The purpose of the canonical ordering is that dummies can be substituted
consistently across terms with the result that equivalent terms can be
simplified.
It is not possible to determine if two terms are equivalent based solely on
the dummy order. However, a consistent substitution guided by the ordered
dummies should lead to trivially (non-)equivalent terms, thereby revealing
the equivalence. This also means that if two terms have identical sequences of
dummies, the (non-)equivalence should already be apparent.
Strategy
--------
    The canonical order is given by an arbitrary sorting rule. A sort key
is determined for each dummy as a tuple that depends on all factors where
the index is present. The dummies are thereby sorted according to the
contraction structure of the term, instead of sorting based solely on the
dummy symbol itself.
    After all dummies in the term have been assigned a key, we check for identical
keys, i.e. unorderable dummies. If any are found, we call a specialized
method, _determine_ambiguous(), that will determine a unique order based
on recursive calls to _get_ordered_dummies().
Key description
---------------
A high level description of the sort key:
1. Range of the dummy index
2. Relation to external (non-dummy) indices
3. Position of the index in the first factor
4. Position of the index in the second factor
The sort key is a tuple with the following components:
1. A single character indicating the range of the dummy (above, below
or general.)
2. A list of strings with fully masked string representations of all
factors where the dummy is present. By masked, we mean that dummies
are represented by a symbol to indicate either below fermi, above or
general. No other information is displayed about the dummies at
this point. The list is sorted stringwise.
3. An integer number indicating the position of the index, in the first
factor as sorted in 2.
4. An integer number indicating the position of the index, in the second
factor as sorted in 2.
If a factor is either of type AntiSymmetricTensor or SqOperator, the index
position in items 3 and 4 is indicated as 'upper' or 'lower' only.
(Creation operators are considered upper and annihilation operators lower.)
If the masked factors are identical, the two factors cannot be ordered
unambiguously in item 2. In this case, items 3, 4 are left out. If several
indices are contracted between the unorderable factors, it will be handled by
_determine_ambiguous()
"""
# setup dicts to avoid repeated calculations in key()
args = Mul.make_args(mul)
fac_dum = dict([ (fac, fac.atoms(Dummy)) for fac in args] )
fac_repr = dict([ (fac, __kprint(fac)) for fac in args] )
all_dums = set().union(*fac_dum.values())
mask = {}
for d in all_dums:
if d.assumptions0.get('below_fermi'):
mask[d] = '0'
elif d.assumptions0.get('above_fermi'):
mask[d] = '1'
else:
mask[d] = '2'
dum_repr = {d: __kprint(d) for d in all_dums}
def _key(d):
dumstruct = [ fac for fac in fac_dum if d in fac_dum[fac] ]
other_dums = set().union(*[fac_dum[fac] for fac in dumstruct])
fac = dumstruct[-1]
if other_dums is fac_dum[fac]:
other_dums = fac_dum[fac].copy()
other_dums.remove(d)
masked_facs = [ fac_repr[fac] for fac in dumstruct ]
for d2 in other_dums:
masked_facs = [ fac.replace(dum_repr[d2], mask[d2])
for fac in masked_facs ]
all_masked = [ fac.replace(dum_repr[d], mask[d])
for fac in masked_facs ]
masked_facs = dict(list(zip(dumstruct, masked_facs)))
# dummies for which the ordering cannot be determined
if has_dups(all_masked):
all_masked.sort()
return mask[d], tuple(all_masked) # positions are ambiguous
# sort factors according to fully masked strings
keydict = dict(list(zip(dumstruct, all_masked)))
dumstruct.sort(key=lambda x: keydict[x])
all_masked.sort()
pos_val = []
for fac in dumstruct:
if isinstance(fac, AntiSymmetricTensor):
if d in fac.upper:
pos_val.append('u')
if d in fac.lower:
pos_val.append('l')
elif isinstance(fac, Creator):
pos_val.append('u')
elif isinstance(fac, Annihilator):
pos_val.append('l')
elif isinstance(fac, NO):
ops = [ op for op in fac if op.has(d) ]
for op in ops:
if isinstance(op, Creator):
pos_val.append('u')
else:
pos_val.append('l')
else:
# fallback to position in string representation
facpos = -1
while 1:
facpos = masked_facs[fac].find(dum_repr[d], facpos + 1)
if facpos == -1:
break
pos_val.append(facpos)
return (mask[d], tuple(all_masked), pos_val[0], pos_val[-1])
dumkey = dict(list(zip(all_dums, list(map(_key, all_dums)))))
result = sorted(all_dums, key=lambda x: dumkey[x])
if has_dups(iter(dumkey.values())):
# We have ambiguities
unordered = defaultdict(set)
for d, k in dumkey.items():
unordered[k].add(d)
for k in [ k for k in unordered if len(unordered[k]) < 2 ]:
del unordered[k]
unordered = [ unordered[k] for k in sorted(unordered) ]
result = _determine_ambiguous(mul, result, unordered)
return result
def _determine_ambiguous(term, ordered, ambiguous_groups):
# We encountered a term for which the dummy substitution is ambiguous.
# This happens for terms with 2 or more contractions between factors that
# cannot be uniquely ordered independent of summation indices. For
# example:
#
# Sum(p, q) v^{p, .}_{q, .}v^{q, .}_{p, .}
#
# Assuming that the indices represented by . are dummies with the
# same range, the factors cannot be ordered, and there is no
# way to determine a consistent ordering of p and q.
#
# The strategy employed here, is to relabel all unambiguous dummies with
# non-dummy symbols and call _get_ordered_dummies again. This procedure is
# applied to the entire term so there is a possibility that
# _determine_ambiguous() is called again from a deeper recursion level.
# break recursion if there are no ordered dummies
all_ambiguous = set()
for dummies in ambiguous_groups:
all_ambiguous |= dummies
all_ordered = set(ordered) - all_ambiguous
if not all_ordered:
# FIXME: If we arrive here, there are no ordered dummies. A method to
# handle this needs to be implemented. In order to return something
# useful nevertheless, we choose arbitrarily the first dummy and
# determine the rest from this one. This method is dependent on the
# actual dummy labels which violates an assumption for the
# canonicalization procedure. A better implementation is needed.
group = [ d for d in ordered if d in ambiguous_groups[0] ]
d = group[0]
all_ordered.add(d)
ambiguous_groups[0].remove(d)
stored_counter = _symbol_factory._counter
subslist = []
for d in [ d for d in ordered if d in all_ordered ]:
nondum = _symbol_factory._next()
subslist.append((d, nondum))
newterm = term.subs(subslist)
neworder = _get_ordered_dummies(newterm)
_symbol_factory._set_counter(stored_counter)
# update ordered list with new information
for group in ambiguous_groups:
ordered_group = [ d for d in neworder if d in group ]
ordered_group.reverse()
result = []
for d in ordered:
if d in group:
result.append(ordered_group.pop())
else:
result.append(d)
ordered = result
return ordered
class _SymbolFactory(object):
def __init__(self, label):
self._counterVar = 0
self._label = label
def _set_counter(self, value):
"""
Sets counter to value.
"""
self._counterVar = value
@property
def _counter(self):
"""
What counter is currently at.
"""
return self._counterVar
def _next(self):
"""
Generates the next symbols and increments counter by 1.
"""
s = Symbol("%s%i" % (self._label, self._counterVar))
self._counterVar += 1
return s
_symbol_factory = _SymbolFactory('_]"]_') # most certainly a unique label
@cacheit
def _get_contractions(string1, keep_only_fully_contracted=False):
"""
Returns Add-object with contracted terms.
Uses recursion to find all contractions. -- Internal helper function --
    Will find nonzero contractions between the operators in string1.
"""
# Should we store current level of contraction?
if keep_only_fully_contracted and string1:
result = []
else:
result = [NO(Mul(*string1))]
for i in range(len(string1) - 1):
for j in range(i + 1, len(string1)):
c = contraction(string1[i], string1[j])
if c:
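                # contracting string1[i] with string1[j] requires moving
                # string1[j] past j-i-1 operators; the resulting factor
                # (-1)**(j-i-1) has the same parity as (j - i + 1) % 2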
sign = (j - i + 1) % 2
if sign:
coeff = S.NegativeOne*c
else:
coeff = c
#
# Call next level of recursion
# ============================
#
# We now need to find more contractions among operators
#
# oplist = string1[:i]+ string1[i+1:j] + string1[j+1:]
#
# To prevent overcounting, we don't allow contractions
# we have already encountered. i.e. contractions between
# string1[:i] <---> string1[i+1:j]
# and string1[:i] <---> string1[j+1:].
#
# This leaves the case:
oplist = string1[i + 1:j] + string1[j + 1:]
if oplist:
result.append(coeff*NO(
Mul(*string1[:i])*_get_contractions( oplist,
keep_only_fully_contracted=keep_only_fully_contracted)))
else:
result.append(coeff*NO( Mul(*string1[:i])))
if keep_only_fully_contracted:
break # next iteration over i leaves leftmost operator string1[0] uncontracted
return Add(*result)
def wicks(e, **kw_args):
"""
    Returns the normal-ordered equivalent of an expression using Wick's theorem.
Examples
========
>>> from sympy import symbols, Function, Dummy
>>> from sympy.physics.secondquant import wicks, F, Fd, NO
>>> p, q, r = symbols('p,q,r')
>>> wicks(Fd(p)*F(q))
KroneckerDelta(_i, q)*KroneckerDelta(p, q) + NO(CreateFermion(p)*AnnihilateFermion(q))
By default, the expression is expanded:
>>> wicks(F(p)*(F(q)+F(r)))
NO(AnnihilateFermion(p)*AnnihilateFermion(q)) + NO(AnnihilateFermion(p)*AnnihilateFermion(r))
With the keyword 'keep_only_fully_contracted=True', only fully contracted
terms are returned.
By request, the result can be simplified in the following order:
-- KroneckerDelta functions are evaluated
-- Dummy variables are substituted consistently across terms
>>> p, q, r = symbols('p q r', cls=Dummy)
>>> wicks(Fd(p)*(F(q)+F(r)), keep_only_fully_contracted=True)
KroneckerDelta(_i, _q)*KroneckerDelta(_p, _q) + KroneckerDelta(_i, _r)*KroneckerDelta(_p, _r)
"""
if not e:
return S.Zero
opts = {
'simplify_kronecker_deltas': False,
'expand': True,
'simplify_dummies': False,
'keep_only_fully_contracted': False
}
opts.update(kw_args)
# check if we are already normally ordered
if isinstance(e, NO):
if opts['keep_only_fully_contracted']:
return S.Zero
else:
return e
elif isinstance(e, FermionicOperator):
if opts['keep_only_fully_contracted']:
return S.Zero
else:
return e
# break up any NO-objects, and evaluate commutators
e = e.doit(wicks=True)
# make sure we have only one term to consider
e = e.expand()
if isinstance(e, Add):
if opts['simplify_dummies']:
return substitute_dummies(Add(*[ wicks(term, **kw_args) for term in e.args]))
else:
return Add(*[ wicks(term, **kw_args) for term in e.args])
# For Mul-objects we can actually do something
if isinstance(e, Mul):
# we don't want to mess around with commuting part of Mul
# so we factorize it out before starting recursion
c_part = []
string1 = []
for factor in e.args:
if factor.is_commutative:
c_part.append(factor)
else:
string1.append(factor)
n = len(string1)
# catch trivial cases
if n == 0:
result = e
elif n == 1:
if opts['keep_only_fully_contracted']:
return S.Zero
else:
result = e
else: # non-trivial
if isinstance(string1[0], BosonicOperator):
raise NotImplementedError
string1 = tuple(string1)
# recursion over higher order contractions
result = _get_contractions(string1,
keep_only_fully_contracted=opts['keep_only_fully_contracted'] )
result = Mul(*c_part)*result
if opts['expand']:
result = result.expand()
if opts['simplify_kronecker_deltas']:
result = evaluate_deltas(result)
return result
# there was nothing to do
return e
class PermutationOperator(Expr):
"""
Represents the index permutation operator P(ij).
P(ij)*f(i)*g(j) = f(i)*g(j) - f(j)*g(i)
"""
is_commutative = True
def __new__(cls, i, j):
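        # the indices are stored in sorted order, so P(q, p) and P(p, q)
        # construct the same object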
i, j = sorted(map(sympify, (i, j)), key=default_sort_key)
obj = Basic.__new__(cls, i, j)
return obj
def get_permuted(self, expr):
"""
Returns -expr with permuted indices.
>>> from sympy import symbols, Function
>>> from sympy.physics.secondquant import PermutationOperator
>>> p,q = symbols('p,q')
>>> f = Function('f')
>>> PermutationOperator(p,q).get_permuted(f(p,q))
-f(q, p)
"""
i = self.args[0]
j = self.args[1]
if expr.has(i) and expr.has(j):
tmp = Dummy()
expr = expr.subs(i, tmp)
expr = expr.subs(j, i)
expr = expr.subs(tmp, j)
return S.NegativeOne*expr
else:
return expr
def _latex(self, printer):
return "P(%s%s)" % self.args
def simplify_index_permutations(expr, permutation_operators):
"""
Performs simplification by introducing PermutationOperators where appropriate.
Schematically:
[abij] - [abji] - [baij] + [baji] -> P(ab)*P(ij)*[abij]
permutation_operators is a list of PermutationOperators to consider.
If permutation_operators=[P(ab),P(ij)] we will try to introduce the
permutation operators P(ij) and P(ab) in the expression. If there are other
possible simplifications, we ignore them.
>>> from sympy import symbols, Function
>>> from sympy.physics.secondquant import simplify_index_permutations
>>> from sympy.physics.secondquant import PermutationOperator
>>> p,q,r,s = symbols('p,q,r,s')
>>> f = Function('f')
>>> g = Function('g')
>>> expr = f(p)*g(q) - f(q)*g(p); expr
f(p)*g(q) - f(q)*g(p)
>>> simplify_index_permutations(expr,[PermutationOperator(p,q)])
f(p)*g(q)*PermutationOperator(p, q)
>>> PermutList = [PermutationOperator(p,q),PermutationOperator(r,s)]
>>> expr = f(p,r)*g(q,s) - f(q,r)*g(p,s) + f(q,s)*g(p,r) - f(p,s)*g(q,r)
>>> simplify_index_permutations(expr,PermutList)
f(p, r)*g(q, s)*PermutationOperator(p, q)*PermutationOperator(r, s)
"""
def _get_indices(expr, ind):
"""
Collects indices recursively in predictable order.
"""
result = []
for arg in expr.args:
if arg in ind:
result.append(arg)
else:
if arg.args:
result.extend(_get_indices(arg, ind))
return result
def _choose_one_to_keep(a, b, ind):
# we keep the one where indices in ind are in order ind[0] < ind[1]
return min(a, b, key=lambda x: default_sort_key(_get_indices(x, ind)))
expr = expr.expand()
if isinstance(expr, Add):
terms = set(expr.args)
for P in permutation_operators:
new_terms = set([])
on_hold = set([])
while terms:
term = terms.pop()
permuted = P.get_permuted(term)
if permuted in terms | on_hold:
try:
terms.remove(permuted)
except KeyError:
on_hold.remove(permuted)
keep = _choose_one_to_keep(term, permuted, P.args)
new_terms.add(P*keep)
else:
# Some terms must get a second chance because the permuted
# term may already have canonical dummy ordering. Then
# substitute_dummies() does nothing. However, the other
# term, if it exists, will be able to match with us.
permuted1 = permuted
permuted = substitute_dummies(permuted)
if permuted1 == permuted:
on_hold.add(term)
elif permuted in terms | on_hold:
try:
terms.remove(permuted)
except KeyError:
on_hold.remove(permuted)
keep = _choose_one_to_keep(term, permuted, P.args)
new_terms.add(P*keep)
else:
new_terms.add(term)
terms = new_terms | on_hold
return Add(*terms)
return expr
|
de92d06aa653805bfc1eac5312245231785df4166d0b2c22ab377bdd2beef5be | from sympy import Symbol, Number, sympify
from sympy import MutableDenseNDimArray, S
from sympy.tensor.tensor import (Tensor, TensExpr, TensAdd, TensMul,
TensorIndex)
class PartialDerivative(TensExpr):
"""
Partial derivative for tensor expressions.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, TensorHead
>>> from sympy.tensor.toperators import PartialDerivative
>>> from sympy import symbols
>>> L = TensorIndexType("L")
>>> A = TensorHead("A", [L])
>>> i, j = symbols("i j")
>>> expr = PartialDerivative(A(i), A(j))
>>> expr
PartialDerivative(A(i), A(j))
The ``PartialDerivative`` object behaves like a tensorial expression:
>>> expr.get_indices()
[i, -j]
Indices can be contracted:
>>> expr = PartialDerivative(A(i), A(i))
>>> expr
PartialDerivative(A(L_0), A(L_0))
>>> expr.get_indices()
[L_0, -L_0]
"""
def __new__(cls, expr, *variables):
# Flatten:
if isinstance(expr, PartialDerivative):
variables = expr.variables + variables
expr = expr.expr
args, indices, free, dum = cls._contract_indices_for_derivative(
S(expr), variables)
obj = TensExpr.__new__(cls, *args)
obj._indices = indices
obj._free = free
obj._dum = dum
return obj
@property
def coeff(self):
return S.One
@property
def nocoeff(self):
return self
@classmethod
def _contract_indices_for_derivative(cls, expr, variables):
variables_opposite_valence = []
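        # The derivative with respect to a tensor with free index j behaves
        # like a tensor carrying the opposite valence -j (cf. the class
        # docstring), so variables are re-expressed with flipped indices
        # before contraction with ``expr``.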
for i in variables:
if isinstance(i, Tensor):
i_free_indices = i.get_free_indices()
variables_opposite_valence.append(
i.xreplace({k: -k for k in i_free_indices}))
elif isinstance(i, Symbol):
variables_opposite_valence.append(i)
args, indices, free, dum = TensMul._tensMul_contract_indices(
[expr] + variables_opposite_valence, replace_indices=True)
for i in range(1, len(args)):
args_i = args[i]
if isinstance(args_i, Tensor):
i_indices = args[i].get_free_indices()
args[i] = args[i].xreplace({k: -k for k in i_indices})
return args, indices, free, dum
def doit(self):
args, indices, free, dum = self._contract_indices_for_derivative(self.expr, self.variables)
obj = self.func(*args)
obj._indices = indices
obj._free = free
obj._dum = dum
return obj
def _expand_partial_derivative(self):
args, indices, free, dum = self._contract_indices_for_derivative(self.expr, self.variables)
obj = self.func(*args)
obj._indices = indices
obj._free = free
obj._dum = dum
result = obj
if not args[0].free_symbols:
return S.Zero
elif isinstance(obj.expr, TensAdd):
# take care of sums of multi PDs
result = obj.expr.func(*[
self.func(a, *obj.variables)._expand_partial_derivative()
for a in result.expr.args])
elif isinstance(obj.expr, TensMul):
# take care of products of multi PDs
if len(obj.variables) == 1:
# derivative with respect to single variable
terms = []
mulargs = list(obj.expr.args)
for ind in range(len(mulargs)):
if not isinstance(sympify(mulargs[ind]), Number):
# a number coefficient is not considered for
# expansion of PartialDerivative
d = self.func(mulargs[ind], *obj.variables)._expand_partial_derivative()
terms.append(TensMul(*(mulargs[:ind]
+ [d]
+ mulargs[(ind + 1):])))
result = TensAdd.fromiter(terms)
else:
# derivative with respect to multiple variables
# decompose:
# partial(expr, (u, v))
# = partial(partial(expr, u).doit(), v).doit()
result = obj.expr # init with expr
for v in obj.variables:
result = self.func(result, v)._expand_partial_derivative()
# then throw PD on it
return result
def _perform_derivative(self):
result = self.expr
for v in self.variables:
if isinstance(result, TensExpr):
result = result._eval_partial_derivative(v)
else:
if v._diff_wrt:
result = result._eval_derivative(v)
else:
result = S.Zero
return result
def get_indices(self):
return self._indices
def get_free_indices(self):
free = sorted(self._free, key=lambda x: x[1])
return [i[0] for i in free]
def _replace_indices(self, repl):
expr = self.expr.xreplace(repl)
mirrored = {-k: -v for k, v in repl.items()}
variables = [i.xreplace(mirrored) for i in self.variables]
return self.func(expr, *variables)
@property
def expr(self):
return self.args[0]
@property
def variables(self):
return self.args[1:]
def _extract_data(self, replacement_dict):
from .array import derive_by_array, tensorcontraction
indices, array = self.expr._extract_data(replacement_dict)
for variable in self.variables:
var_indices, var_array = variable._extract_data(replacement_dict)
var_indices = [-i for i in var_indices]
coeff_array, var_array = zip(*[i.as_coeff_Mul() for i in var_array])
array = derive_by_array(array, var_array)
array = array.as_mutable() # type: MutableDenseNDimArray
varindex = var_indices[0] # type: TensorIndex
# Remove coefficients of base vector:
coeff_index = [0] + [slice(None) for i in range(len(indices))]
for i, coeff in enumerate(coeff_array):
coeff_index[0] = i
array[tuple(coeff_index)] /= coeff
if -varindex in indices:
pos = indices.index(-varindex)
array = tensorcontraction(array, (0, pos+1))
indices.pop(pos)
else:
indices.append(varindex)
return indices, array
|
ac2dbaf0eccb9c4a774fb3860adffb751165d794d1a6a2bad20af4bbdf08802f | """
This module defines tensors with abstract index notation.
The abstract index notation has been first formalized by Penrose.
Tensor indices are formal objects, with a tensor type; there is no
notion of index range, it is only possible to assign the dimension,
used to trace the Kronecker delta; the dimension can be a Symbol.
The Einstein summation convention is used.
The covariant indices are indicated with a minus sign in front of the index.
For instance the tensor ``t = p(a)*A(b,c)*q(-c)`` has the index ``c``
contracted.
A tensor expression ``t`` can be called; called with its
indices in sorted order it is equal to itself:
in the above example ``t(a, b) == t``;
one can call ``t`` with different indices; ``t(c, d) == p(c)*A(d,a)*q(-a)``.
The contracted indices are dummy indices, internally they have no name,
the indices being represented by a graph-like structure.
Tensors are put in canonical form using ``canon_bp``, which uses
the Butler-Portugal algorithm for canonicalization using the monoterm
symmetries of the tensors.
If there is a (anti)symmetric metric, the indices can be raised and
lowered when the tensor is put in canonical form.
"""
from __future__ import print_function, division
from typing import Any, Dict as tDict, List, Set
from abc import abstractmethod, ABCMeta
from collections import defaultdict
import operator
import itertools
from sympy import Rational, prod, Integer, default_sort_key
from sympy.combinatorics import Permutation
from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, \
bsgs_direct_product, canonicalize, riemann_bsgs
from sympy.core import Basic, Expr, sympify, Add, Mul, S
from sympy.core.assumptions import ManagedProperties
from sympy.core.compatibility import reduce, SYMPY_INTS
from sympy.core.containers import Tuple, Dict
from sympy.core.decorators import deprecated
from sympy.core.symbol import Symbol, symbols
from sympy.core.sympify import CantSympify, _sympify
from sympy.core.operations import AssocOp
from sympy.matrices import eye
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.decorator import memoize_property
import warnings
@deprecated(useinstead=".replace_with_arrays", issue=15276, deprecated_since_version="1.4")
def deprecate_data():
pass
@deprecated(useinstead=".substitute_indices()", issue=17515,
deprecated_since_version="1.5")
def deprecate_fun_eval():
pass
@deprecated(useinstead="tensor_heads()", issue=17108,
deprecated_since_version="1.5")
def deprecate_TensorType():
pass
class _IndexStructure(CantSympify):
"""
This class handles the indices (free and dummy ones). It contains the
algorithms to manage the dummy indices replacements and contractions of
    free indices under multiplications of tensor expressions, as well as the
    machinery for canonicalization sorting, getting the permutation of the
expression and so on. It also includes tools to get the ``TensorIndex``
objects corresponding to the given index structure.
"""
def __init__(self, free, dum, index_types, indices, canon_bp=False):
self.free = free
self.dum = dum
self.index_types = index_types
self.indices = indices
self._ext_rank = len(self.free) + 2*len(self.dum)
self.dum.sort(key=lambda x: x[0])
@staticmethod
def from_indices(*indices):
"""
        Create a new ``_IndexStructure`` object from a list of ``indices``.
        ``indices``: ``TensorIndex`` objects, the indices. Contractions are
        detected upon construction.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, _IndexStructure
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz)
>>> _IndexStructure.from_indices(m0, m1, -m1, m3)
_IndexStructure([(m0, 0), (m3, 3)], [(1, 2)], [Lorentz, Lorentz, Lorentz, Lorentz])
"""
free, dum = _IndexStructure._free_dum_from_indices(*indices)
index_types = [i.tensor_index_type for i in indices]
indices = _IndexStructure._replace_dummy_names(indices, free, dum)
return _IndexStructure(free, dum, index_types, indices)
@staticmethod
def from_components_free_dum(components, free, dum):
index_types = []
for component in components:
index_types.extend(component.index_types)
indices = _IndexStructure.generate_indices_from_free_dum_index_types(free, dum, index_types)
return _IndexStructure(free, dum, index_types, indices)
@staticmethod
def _free_dum_from_indices(*indices):
"""
        Convert ``indices`` into ``free``, ``dum`` for a single component tensor.
        ``free``: list of tuples ``(index, pos)``, where ``pos`` is the
        position of the index in the list of indices formed by the component
        tensors.
        ``dum``: list of tuples ``(pos_contr, pos_cov)``.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, \
_IndexStructure
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz)
>>> _IndexStructure._free_dum_from_indices(m0, m1, -m1, m3)
([(m0, 0), (m3, 3)], [(1, 2)])
"""
n = len(indices)
if n == 1:
return [(indices[0], 0)], []
# find the positions of the free indices and of the dummy indices
free = [True]*len(indices)
index_dict = {}
dum = []
for i, index in enumerate(indices):
name = index.name
typ = index.tensor_index_type
contr = index.is_up
if (name, typ) in index_dict:
# found a pair of dummy indices
is_contr, pos = index_dict[(name, typ)]
# check consistency and update free
if is_contr:
if contr:
raise ValueError('two equal contravariant indices in slots %d and %d' %(pos, i))
else:
free[pos] = False
free[i] = False
else:
if contr:
free[pos] = False
free[i] = False
else:
raise ValueError('two equal covariant indices in slots %d and %d' %(pos, i))
if contr:
dum.append((i, pos))
else:
dum.append((pos, i))
else:
index_dict[(name, typ)] = index.is_up, i
free = [(index, i) for i, index in enumerate(indices) if free[i]]
free.sort()
return free, dum
def get_indices(self):
"""
Get a list of indices, creating new tensor indices to complete dummy indices.
"""
return self.indices[:]
@staticmethod
def generate_indices_from_free_dum_index_types(free, dum, index_types):
indices = [None]*(len(free)+2*len(dum))
for idx, pos in free:
indices[pos] = idx
generate_dummy_name = _IndexStructure._get_generator_for_dummy_indices(free)
for pos1, pos2 in dum:
typ1 = index_types[pos1]
indname = generate_dummy_name(typ1)
indices[pos1] = TensorIndex(indname, typ1, True)
indices[pos2] = TensorIndex(indname, typ1, False)
return _IndexStructure._replace_dummy_names(indices, free, dum)
@staticmethod
def _get_generator_for_dummy_indices(free):
cdt = defaultdict(int)
# if the free indices have names with dummy_name, start with an
# index higher than those for the dummy indices
# to avoid name collisions
for indx, ipos in free:
if indx.name.split('_')[0] == indx.tensor_index_type.dummy_name:
cdt[indx.tensor_index_type] = max(cdt[indx.tensor_index_type], int(indx.name.split('_')[1]) + 1)
def dummy_name_gen(tensor_index_type):
nd = str(cdt[tensor_index_type])
cdt[tensor_index_type] += 1
return tensor_index_type.dummy_name + '_' + nd
return dummy_name_gen
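# Illustrative sketch (comment only, not executed): with no free indices the
# generator simply counts up from 0 for each index type, using that type's
# ``dummy_name`` as the head.  Assuming ``Lorentz = TensorIndexType('Lorentz',
# dummy_name='L')``:
#
#     >>> gen = _IndexStructure._get_generator_for_dummy_indices([])
#     >>> gen(Lorentz), gen(Lorentz)
#     ('L_0', 'L_1')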
@staticmethod
def _replace_dummy_names(indices, free, dum):
dum.sort(key=lambda x: x[0])
new_indices = [ind for ind in indices]
assert len(indices) == len(free) + 2*len(dum)
generate_dummy_name = _IndexStructure._get_generator_for_dummy_indices(free)
for ipos1, ipos2 in dum:
typ1 = new_indices[ipos1].tensor_index_type
indname = generate_dummy_name(typ1)
new_indices[ipos1] = TensorIndex(indname, typ1, True)
new_indices[ipos2] = TensorIndex(indname, typ1, False)
return new_indices
def get_free_indices(self): # type: () -> List[TensorIndex]
"""
Get a list of free indices.
"""
# get sorted indices according to their position:
free = sorted(self.free, key=lambda x: x[1])
return [i[0] for i in free]
def __str__(self):
return "_IndexStructure({0}, {1}, {2})".format(self.free, self.dum, self.index_types)
def __repr__(self):
return self.__str__()
def _get_sorted_free_indices_for_canon(self):
sorted_free = self.free[:]
sorted_free.sort(key=lambda x: x[0])
return sorted_free
def _get_sorted_dum_indices_for_canon(self):
return sorted(self.dum, key=lambda x: x[0])
def _get_lexicographically_sorted_index_types(self):
permutation = self.indices_canon_args()[0]
index_types = [None]*self._ext_rank
for i, it in enumerate(self.index_types):
index_types[permutation(i)] = it
return index_types
def _get_lexicographically_sorted_indices(self):
permutation = self.indices_canon_args()[0]
indices = [None]*self._ext_rank
for i, it in enumerate(self.indices):
indices[permutation(i)] = it
return indices
def perm2tensor(self, g, is_canon_bp=False):
"""
Returns a ``_IndexStructure`` instance corresponding to the permutation ``g``
``g`` permutation corresponding to the tensor in the representation
used in canonicalization
``is_canon_bp`` if True, then ``g`` is the permutation
corresponding to the canonical form of the tensor
"""
sorted_free = [i[0] for i in self._get_sorted_free_indices_for_canon()]
lex_index_types = self._get_lexicographically_sorted_index_types()
lex_indices = self._get_lexicographically_sorted_indices()
nfree = len(sorted_free)
rank = self._ext_rank
dum = [[None]*2 for i in range((rank - nfree)//2)]
free = []
index_types = [None]*rank
indices = [None]*rank
for i in range(rank):
gi = g[i]
index_types[i] = lex_index_types[gi]
indices[i] = lex_indices[gi]
if gi < nfree:
ind = sorted_free[gi]
assert index_types[i] == sorted_free[gi].tensor_index_type
free.append((ind, i))
else:
j = gi - nfree
idum, cov = divmod(j, 2)
if cov:
dum[idum][1] = i
else:
dum[idum][0] = i
dum = [tuple(x) for x in dum]
return _IndexStructure(free, dum, index_types, indices)
def indices_canon_args(self):
"""
Returns ``(g, dummies, msym, v)``, the entries of ``canonicalize``
see ``canonicalize`` in ``tensor_can.py`` in combinatorics module
"""
# to be called after sorted_components
from sympy.combinatorics.permutations import _af_new
n = self._ext_rank
g = [None]*n + [n, n+1]
# Converts the symmetry of the metric into msym from .canonicalize()
# method in the combinatorics module
def metric_symmetry_to_msym(metric):
if metric is None:
return None
sym = metric.symmetry
if sym == TensorSymmetry.fully_symmetric(2):
return 0
if sym == TensorSymmetry.fully_symmetric(-2):
return 1
return None
# ordered indices: first the free indices, ordered by types
# then the dummy indices, ordered by types and contravariant before
# covariant
# g[position in tensor] = position in ordered indices
for i, (indx, ipos) in enumerate(self._get_sorted_free_indices_for_canon()):
g[ipos] = i
pos = len(self.free)
j = len(self.free)
dummies = []
prev = None
a = []
msym = []
for ipos1, ipos2 in self._get_sorted_dum_indices_for_canon():
g[ipos1] = j
g[ipos2] = j + 1
j += 2
typ = self.index_types[ipos1]
if typ != prev:
if a:
dummies.append(a)
a = [pos, pos + 1]
prev = typ
msym.append(metric_symmetry_to_msym(typ.metric))
else:
a.extend([pos, pos + 1])
pos += 2
if a:
dummies.append(a)
return _af_new(g), dummies, msym
def components_canon_args(components):
numtyp = []
prev = None
for t in components:
if t == prev:
numtyp[-1][1] += 1
else:
prev = t
numtyp.append([prev, 1])
v = []
for h, n in numtyp:
if h.comm == 0 or h.comm == 1:
comm = h.comm
else:
comm = TensorManager.get_comm(h.comm, h.comm)
v.append((h.symmetry.base, h.symmetry.generators, n, comm))
return v
class _TensorDataLazyEvaluator(CantSympify):
"""
EXPERIMENTAL: do not rely on this class, it may change without deprecation
warnings in future versions of SymPy.
This object contains the logic to associate components data to a tensor
expression. Components data are set via the ``.data`` property of tensor
expressions, is stored inside this class as a mapping between the tensor
expression and the ``ndarray``.
Computations are executed lazily: whereas the tensor expressions can have
contractions, tensor products, and additions, components data are not
computed until they are accessed by reading the ``.data`` property
associated to the tensor expression.
"""
_substitutions_dict = dict() # type: tDict[Any, Any]
_substitutions_dict_tensmul = dict() # type: tDict[Any, Any]
def __getitem__(self, key):
dat = self._get(key)
if dat is None:
return None
from .array import NDimArray
if not isinstance(dat, NDimArray):
return dat
if dat.rank() == 0:
return dat[()]
elif dat.rank() == 1 and len(dat) == 1:
return dat[0]
return dat
def _get(self, key):
"""
Retrieve ``data`` associated with ``key``.
This algorithm looks into ``self._substitutions_dict`` for all
``TensorHead`` in the ``TensExpr`` (or just ``TensorHead`` if key is a
TensorHead instance). It reconstructs the components data that the
tensor expression should have by performing on components data the
operations that correspond to the abstract tensor operations applied.
Metric tensor is handled in a different manner: it is pre-computed in
``self._substitutions_dict_tensmul``.
"""
if key in self._substitutions_dict:
return self._substitutions_dict[key]
if isinstance(key, TensorHead):
return None
if isinstance(key, Tensor):
# special case to handle metrics. Metric tensors cannot be
# constructed through contraction by the metric, their
# components show if they are a matrix or its inverse.
signature = tuple([i.is_up for i in key.get_indices()])
srch = (key.component,) + signature
if srch in self._substitutions_dict_tensmul:
return self._substitutions_dict_tensmul[srch]
array_list = [self.data_from_tensor(key)]
return self.data_contract_dum(array_list, key.dum, key.ext_rank)
if isinstance(key, TensMul):
tensmul_args = key.args
if len(tensmul_args) == 1 and len(tensmul_args[0].components) == 1:
# special case to handle metrics. Metric tensors cannot be
# constructed through contraction by the metric, their
# components show if they are a matrix or its inverse.
signature = tuple([i.is_up for i in tensmul_args[0].get_indices()])
srch = (tensmul_args[0].components[0],) + signature
if srch in self._substitutions_dict_tensmul:
return self._substitutions_dict_tensmul[srch]
#data_list = [self.data_from_tensor(i) for i in tensmul_args if isinstance(i, TensExpr)]
data_list = [self.data_from_tensor(i) if isinstance(i, Tensor) else i.data for i in tensmul_args if isinstance(i, TensExpr)]
coeff = prod([i for i in tensmul_args if not isinstance(i, TensExpr)])
if all([i is None for i in data_list]):
return None
if any([i is None for i in data_list]):
raise ValueError("Mixing tensors with associated components "\
"data with tensors without components data")
data_result = self.data_contract_dum(data_list, key.dum, key.ext_rank)
return coeff*data_result
if isinstance(key, TensAdd):
data_list = []
free_args_list = []
for arg in key.args:
if isinstance(arg, TensExpr):
data_list.append(arg.data)
free_args_list.append([x[0] for x in arg.free])
else:
data_list.append(arg)
free_args_list.append([])
if all([i is None for i in data_list]):
return None
if any([i is None for i in data_list]):
raise ValueError("Mixing tensors with associated components "\
"data with tensors without components data")
sum_list = []
from .array import permutedims
for data, free_args in zip(data_list, free_args_list):
if len(free_args) < 2:
sum_list.append(data)
else:
free_args_pos = {y: x for x, y in enumerate(free_args)}
axes = [free_args_pos[arg] for arg in key.free_args]
sum_list.append(permutedims(data, axes))
return reduce(lambda x, y: x+y, sum_list)
return None
@staticmethod
def data_contract_dum(ndarray_list, dum, ext_rank):
from .array import tensorproduct, tensorcontraction, MutableDenseNDimArray
arrays = list(map(MutableDenseNDimArray, ndarray_list))
prodarr = tensorproduct(*arrays)
return tensorcontraction(prodarr, *dum)
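# Illustrative sketch (comment only, not executed): the arrays are joined by a
# tensor product and the dummy pairs are contracted, so two hypothetical
# rank-1 arrays contracted over the pair ``(0, 1)`` give their dot product:
#
#     >>> _TensorDataLazyEvaluator.data_contract_dum([[1, 2], [3, 4]], [(0, 1)], 2)
#     11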
def data_tensorhead_from_tensmul(self, data, tensmul, tensorhead):
"""
This method is used when assigning components data to a ``TensMul``
object, it converts components data to a fully contravariant ndarray,
which is then stored according to the ``TensorHead`` key.
"""
if data is None:
return None
return self._correct_signature_from_indices(
data,
tensmul.get_indices(),
tensmul.free,
tensmul.dum,
True)
def data_from_tensor(self, tensor):
"""
This method corrects the components data to the right signature
(covariant/contravariant) using the metric associated with each
``TensorIndexType``.
"""
tensorhead = tensor.component
if tensorhead.data is None:
return None
return self._correct_signature_from_indices(
tensorhead.data,
tensor.get_indices(),
tensor.free,
tensor.dum)
def _assign_data_to_tensor_expr(self, key, data):
if isinstance(key, TensAdd):
raise ValueError('cannot assign data to TensAdd')
# here it is assumed that `key` is a `TensMul` instance.
if len(key.components) != 1:
raise ValueError('cannot assign data to TensMul with multiple components')
tensorhead = key.components[0]
newdata = self.data_tensorhead_from_tensmul(data, key, tensorhead)
return tensorhead, newdata
def _check_permutations_on_data(self, tens, data):
from .array import permutedims
from .array.arrayop import Flatten
if isinstance(tens, TensorHead):
rank = tens.rank
generators = tens.symmetry.generators
elif isinstance(tens, Tensor):
rank = tens.rank
generators = tens.components[0].symmetry.generators
elif isinstance(tens, TensorIndexType):
rank = tens.metric.rank
generators = tens.metric.symmetry.generators
# Every generator is a permutation, check that by permuting the array
# by that permutation, the array will be the same, except for a
# possible sign change if the permutation admits it.
for gener in generators:
sign_change = +1 if (gener(rank) == rank) else -1
data_swapped = data
last_data = data
permute_axes = list(map(gener, list(range(rank))))
# the order of a permutation is the number of times to get the
# identity by applying that permutation.
for i in range(gener.order()-1):
data_swapped = permutedims(data_swapped, permute_axes)
# if any value in the difference array is non-zero, raise an error:
if any(Flatten(last_data - sign_change*data_swapped)):
raise ValueError("Component data symmetry structure error")
last_data = data_swapped
def __setitem__(self, key, value):
"""
Set the components data of a tensor object/expression.
Components data are transformed to the all-contravariant form and stored
with the corresponding ``TensorHead`` object. If a ``TensorHead`` object
cannot be uniquely identified, it will raise an error.
"""
data = _TensorDataLazyEvaluator.parse_data(value)
self._check_permutations_on_data(key, data)
# TensorHead and TensorIndexType can be assigned data directly, while
# TensMul must first convert data to a fully contravariant form, and
# assign it to its corresponding TensorHead single component.
if not isinstance(key, (TensorHead, TensorIndexType)):
key, data = self._assign_data_to_tensor_expr(key, data)
if isinstance(key, TensorHead):
for dim, indextype in zip(data.shape, key.index_types):
if indextype.data is None:
raise ValueError("index type {} has no components data"\
" associated (needed to raise/lower index)".format(indextype))
if not indextype.dim.is_number:
continue
if dim != indextype.dim:
raise ValueError("wrong dimension of ndarray")
self._substitutions_dict[key] = data
def __delitem__(self, key):
del self._substitutions_dict[key]
def __contains__(self, key):
return key in self._substitutions_dict
def add_metric_data(self, metric, data):
"""
Assign data to the ``metric`` tensor. The metric tensor behaves in an
anomalous way when raising and lowering indices.
A fully covariant metric is the inverse transpose of the fully
contravariant metric (inverse in the matrix sense). If the metric is
symmetric, the transpose is not necessary and the mixed
covariant/contravariant metrics are Kronecker deltas.
"""
# hard assignment, data should not be added to `TensorHead` for metric:
# the problem with `TensorHead` is that the metric is anomalous, i.e.
# raising and lowering the index means considering the metric or its
# inverse, this is not the case for other tensors.
self._substitutions_dict_tensmul[metric, True, True] = data
inverse_transpose = self.inverse_transpose_matrix(data)
# in symmetric spaces, the transpose is the same as the original matrix,
# the full covariant metric tensor is the inverse transpose, so this
# code will be able to handle non-symmetric metrics.
self._substitutions_dict_tensmul[metric, False, False] = inverse_transpose
# now mixed cases, these are identical to the unit matrix if the metric
# is symmetric.
m = data.tomatrix()
invt = inverse_transpose.tomatrix()
self._substitutions_dict_tensmul[metric, True, False] = m * invt
self._substitutions_dict_tensmul[metric, False, True] = invt * m
@staticmethod
def _flip_index_by_metric(data, metric, pos):
from .array import tensorproduct, tensorcontraction
mdim = metric.rank()
ddim = data.rank()
if pos == 0:
data = tensorcontraction(
tensorproduct(
metric,
data
),
(1, mdim+pos)
)
else:
data = tensorcontraction(
tensorproduct(
data,
metric
),
(pos, ddim)
)
return data
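# Illustrative sketch (comment only, not executed): lowering the first index
# of a hypothetical 4-vector with the Minkowski metric flips the sign of its
# spatial components:
#
#     >>> from sympy import Array, diag, symbols
#     >>> E, px, py, pz = symbols('E p_x p_y p_z')
#     >>> eta = Array(diag(1, -1, -1, -1))
#     >>> _TensorDataLazyEvaluator._flip_index_by_metric(Array([E, px, py, pz]), eta, 0)
#     [E, -p_x, -p_y, -p_z]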
@staticmethod
def inverse_matrix(ndarray):
m = ndarray.tomatrix().inv()
return _TensorDataLazyEvaluator.parse_data(m)
@staticmethod
def inverse_transpose_matrix(ndarray):
m = ndarray.tomatrix().inv().T
return _TensorDataLazyEvaluator.parse_data(m)
@staticmethod
def _correct_signature_from_indices(data, indices, free, dum, inverse=False):
"""
Utility function to correct the values inside the components data
ndarray according to whether indices are covariant or contravariant.
It uses the metric matrix to lower values of covariant indices.
"""
# change the ndarray values according to the covariance/contravariance of the indices
# use the metric
for i, indx in enumerate(indices):
if not indx.is_up and not inverse:
data = _TensorDataLazyEvaluator._flip_index_by_metric(data, indx.tensor_index_type.data, i)
elif not indx.is_up and inverse:
data = _TensorDataLazyEvaluator._flip_index_by_metric(
data,
_TensorDataLazyEvaluator.inverse_matrix(indx.tensor_index_type.data),
i
)
return data
@staticmethod
def _sort_data_axes(old, new):
from .array import permutedims
new_data = old.data.copy()
old_free = [i[0] for i in old.free]
new_free = [i[0] for i in new.free]
for i in range(len(new_free)):
for j in range(i, len(old_free)):
if old_free[j] == new_free[i]:
old_free[i], old_free[j] = old_free[j], old_free[i]
new_data = permutedims(new_data, (i, j))
break
return new_data
@staticmethod
def add_rearrange_tensmul_parts(new_tensmul, old_tensmul):
def sorted_compo():
return _TensorDataLazyEvaluator._sort_data_axes(old_tensmul, new_tensmul)
_TensorDataLazyEvaluator._substitutions_dict[new_tensmul] = sorted_compo()
@staticmethod
def parse_data(data):
"""
Transform ``data`` to array. The parameter ``data`` may
contain data in various formats, e.g. nested lists, sympy ``Matrix``,
and so on.
Examples
========
>>> from sympy.tensor.tensor import _TensorDataLazyEvaluator
>>> _TensorDataLazyEvaluator.parse_data([1, 3, -6, 12])
[1, 3, -6, 12]
>>> _TensorDataLazyEvaluator.parse_data([[1, 2], [4, 7]])
[[1, 2], [4, 7]]
"""
from .array import MutableDenseNDimArray
if not isinstance(data, MutableDenseNDimArray):
if len(data) == 2 and hasattr(data[0], '__call__'):
data = MutableDenseNDimArray(data[0], data[1])
else:
data = MutableDenseNDimArray(data)
return data
_tensor_data_substitution_dict = _TensorDataLazyEvaluator()
class _TensorManager(object):
"""
Class to manage tensor properties.
Notes
=====
Tensors belong to tensor commutation groups; each group has a label
``comm``; there are predefined labels:
``0`` tensors commuting with any other tensor
``1`` tensors anticommuting among themselves
``2`` tensors not commuting, apart from those with ``comm=0``
Other groups can be defined using ``set_comm``; tensors in those
groups commute with those with ``comm=0``; by default they
do not commute with any other group.
"""
def __init__(self):
self._comm_init()
def _comm_init(self):
self._comm = [{} for i in range(3)]
for i in range(3):
self._comm[0][i] = 0
self._comm[i][0] = 0
self._comm[1][1] = 1
self._comm[2][1] = None
self._comm[1][2] = None
self._comm_symbols2i = {0:0, 1:1, 2:2}
self._comm_i2symbol = {0:0, 1:1, 2:2}
@property
def comm(self):
return self._comm
def comm_symbols2i(self, i):
"""
get the commutation group number corresponding to ``i``
``i`` can be a symbol or a number or a string
If ``i`` is not already defined its commutation group number
is set.
"""
if i not in self._comm_symbols2i:
n = len(self._comm)
self._comm.append({})
self._comm[n][0] = 0
self._comm[0][n] = 0
self._comm_symbols2i[i] = n
self._comm_i2symbol[n] = i
return n
return self._comm_symbols2i[i]
def comm_i2symbol(self, i):
"""
Returns the symbol corresponding to the commutation group number.
"""
return self._comm_i2symbol[i]
def set_comm(self, i, j, c):
"""
set the commutation parameter ``c`` for commutation groups ``i, j``
Parameters
==========
i, j : symbols representing commutation groups
c : group commutation number
Notes
=====
``i, j`` can be symbols, strings or numbers,
apart from ``0, 1`` and ``2`` which are reserved respectively
for commuting, anticommuting tensors and tensors not commuting
with any other group apart from the commuting tensors.
For the remaining cases, use this method to set the commutation rules;
by default ``c=None``.
The group commutation number ``c`` is assigned in correspondence
to the group commutation symbols; it can be
0 commuting
1 anticommuting
None no commutation property
Examples
========
``G`` and ``GH`` do not commute with themselves and commute with
each other; A is commuting.
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, TensorManager, TensorSymmetry
>>> Lorentz = TensorIndexType('Lorentz')
>>> i0,i1,i2,i3,i4 = tensor_indices('i0:5', Lorentz)
>>> A = TensorHead('A', [Lorentz])
>>> G = TensorHead('G', [Lorentz], TensorSymmetry.no_symmetry(1), 'Gcomm')
>>> GH = TensorHead('GH', [Lorentz], TensorSymmetry.no_symmetry(1), 'GHcomm')
>>> TensorManager.set_comm('Gcomm', 'GHcomm', 0)
>>> (GH(i1)*G(i0)).canon_bp()
G(i0)*GH(i1)
>>> (G(i1)*G(i0)).canon_bp()
G(i1)*G(i0)
>>> (G(i1)*A(i0)).canon_bp()
A(i0)*G(i1)
"""
if c not in (0, 1, None):
raise ValueError('`c` can assume only the values 0, 1 or None')
if i not in self._comm_symbols2i:
n = len(self._comm)
self._comm.append({})
self._comm[n][0] = 0
self._comm[0][n] = 0
self._comm_symbols2i[i] = n
self._comm_i2symbol[n] = i
if j not in self._comm_symbols2i:
n = len(self._comm)
self._comm.append({})
self._comm[0][n] = 0
self._comm[n][0] = 0
self._comm_symbols2i[j] = n
self._comm_i2symbol[n] = j
ni = self._comm_symbols2i[i]
nj = self._comm_symbols2i[j]
self._comm[ni][nj] = c
self._comm[nj][ni] = c
def set_comms(self, *args):
"""
set the commutation group numbers ``c`` for symbols ``i, j``
Parameters
==========
args : sequence of ``(i, j, c)``
"""
for i, j, c in args:
self.set_comm(i, j, c)
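# Minimal usage sketch (comment only, not executed): each triple is forwarded
# to ``set_comm``, so the two calls below are equivalent.  ``Gcomm`` and
# ``GHcomm`` are hypothetical group symbols:
#
#     >>> TensorManager.set_comms(('Gcomm', 'GHcomm', 0))
#     >>> TensorManager.set_comm('Gcomm', 'GHcomm', 0)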
def get_comm(self, i, j):
"""
Return the commutation parameter for commutation group numbers ``i, j``
see ``_TensorManager.set_comm``
"""
return self._comm[i].get(j, 0 if i == 0 or j == 0 else None)
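# Minimal usage sketch (comment only, not executed): with the predefined
# groups, ``0`` commutes with everything and ``1`` anticommutes with itself;
# any pair not covered by the table defaults to ``None`` (no commutation
# property):
#
#     >>> TensorManager.get_comm(0, 1)
#     0
#     >>> TensorManager.get_comm(1, 1)
#     1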
def clear(self):
"""
Clear the TensorManager.
"""
self._comm_init()
TensorManager = _TensorManager()
class TensorIndexType(Basic):
"""
A TensorIndexType is characterized by its name and its metric.
Parameters
==========
name : name of the tensor type
dummy_name : name of the head of dummy indices
dim : dimension, it can be a symbol or an integer or ``None``
eps_dim : dimension of the epsilon tensor
metric_symmetry : integer that denotes metric symmetry or ``None`` for no metric
metric_name : string with the name of the metric tensor
Attributes
==========
``metric`` : the metric tensor
``delta`` : ``Kronecker delta``
``epsilon`` : the ``Levi-Civita epsilon`` tensor
``data`` : (deprecated) a property to add ``ndarray`` values, to work in a specified basis.
Notes
=====
The possible values of the `metric_symmetry` parameter are:
``1`` : metric tensor is fully symmetric
``0`` : metric tensor possesses no index symmetry
``-1`` : metric tensor is fully antisymmetric
``None``: there is no metric tensor (metric equals ``None``)
The metric is assumed to be symmetric by default. It can also be set
to a custom tensor by the `.set_metric()` method.
If there is a metric the metric is used to raise and lower indices.
In the case of non-symmetric metric, the following raising and
lowering conventions will be adopted:
``psi(a) = g(a, b)*psi(-b); chi(-a) = chi(b)*g(-b, -a)``
From these it is easy to find:
``g(-a, b) = delta(-a, b)``
where ``delta(-a, b) = delta(b, -a)`` is the ``Kronecker delta``
(see ``TensorIndex`` for the conventions on indices).
For antisymmetric metrics there is also the following equality:
``g(a, -b) = -delta(a, -b)``
If there is no metric it is not possible to raise or lower indices;
e.g. the index of the defining representation of ``SU(N)``
is 'covariant' and the conjugate representation is
'contravariant'; for ``N > 2`` they are linearly independent.
``eps_dim`` is by default equal to ``dim``, if the latter is an integer;
else it can be assigned (for use in naive dimensional regularization);
if ``eps_dim`` is not an integer ``epsilon`` is ``None``.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> Lorentz.metric
metric(Lorentz,Lorentz)
"""
def __new__(cls, name, dummy_name=None, dim=None, eps_dim=None,
metric_symmetry=1, metric_name='metric', **kwargs):
if 'dummy_fmt' in kwargs:
SymPyDeprecationWarning(useinstead="dummy_name",
feature="dummy_fmt", issue=17517,
deprecated_since_version="1.5").warn()
dummy_name = kwargs.get('dummy_fmt')
if isinstance(name, str):
name = Symbol(name)
if dummy_name is None:
dummy_name = str(name)[0]
if isinstance(dummy_name, str):
dummy_name = Symbol(dummy_name)
if dim is None:
dim = Symbol("dim_" + dummy_name.name)
else:
dim = sympify(dim)
if eps_dim is None:
eps_dim = dim
else:
eps_dim = sympify(eps_dim)
metric_symmetry = sympify(metric_symmetry)
if isinstance(metric_name, str):
metric_name = Symbol(metric_name)
if 'metric' in kwargs:
SymPyDeprecationWarning(useinstead="metric_symmetry or .set_metric()",
feature="metric argument", issue=17517,
deprecated_since_version="1.5").warn()
metric = kwargs.get('metric')
if metric is not None:
if metric in (True, False, 0, 1):
metric_name = 'metric'
#metric_antisym = metric
else:
metric_name = metric.name
#metric_antisym = metric.antisym
if metric:
metric_symmetry = -1
else:
metric_symmetry = 1
obj = Basic.__new__(cls, name, dummy_name, dim, eps_dim,
metric_symmetry, metric_name)
obj._autogenerated = []
return obj
@property
def name(self):
return self.args[0].name
@property
def dummy_name(self):
return self.args[1].name
@property
def dim(self):
return self.args[2]
@property
def eps_dim(self):
return self.args[3]
@memoize_property
def metric(self):
metric_symmetry = self.args[4]
metric_name = self.args[5]
if metric_symmetry is None:
return None
if metric_symmetry == 0:
symmetry = TensorSymmetry.no_symmetry(2)
elif metric_symmetry == 1:
symmetry = TensorSymmetry.fully_symmetric(2)
elif metric_symmetry == -1:
symmetry = TensorSymmetry.fully_symmetric(-2)
return TensorHead(metric_name, [self]*2, symmetry)
@memoize_property
def delta(self):
return TensorHead('KD', [self]*2, TensorSymmetry.fully_symmetric(2))
@memoize_property
def epsilon(self):
if not isinstance(self.eps_dim, (SYMPY_INTS, Integer)):
return None
symmetry = TensorSymmetry.fully_symmetric(-self.eps_dim)
return TensorHead('Eps', [self]*self.eps_dim, symmetry)
def set_metric(self, tensor):
self._metric = tensor
def __lt__(self, other):
return self.name < other.name
def __str__(self):
return self.name
__repr__ = __str__
# Everything below this line is deprecated
@property
def data(self):
deprecate_data()
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
deprecate_data()
# This assignment is a bit controversial, should metric components be assigned
# to the metric only or also to the TensorIndexType object? The advantage here
# is the ability to assign a 1D array and transform it to a 2D diagonal array.
from .array import MutableDenseNDimArray
data = _TensorDataLazyEvaluator.parse_data(data)
if data.rank() > 2:
raise ValueError("data have to be of rank 1 (diagonal metric) or 2.")
if data.rank() == 1:
if self.dim.is_number:
nda_dim = data.shape[0]
if nda_dim != self.dim:
raise ValueError("Dimension mismatch")
dim = data.shape[0]
newndarray = MutableDenseNDimArray.zeros(dim, dim)
for i, val in enumerate(data):
newndarray[i, i] = val
data = newndarray
dim1, dim2 = data.shape
if dim1 != dim2:
raise ValueError("Non-square matrix tensor.")
if self.dim.is_number:
if self.dim != dim1:
raise ValueError("Dimension mismatch")
_tensor_data_substitution_dict[self] = data
_tensor_data_substitution_dict.add_metric_data(self.metric, data)
delta = self.get_kronecker_delta()
i1 = TensorIndex('i1', self)
i2 = TensorIndex('i2', self)
delta(i1, -i2).data = _TensorDataLazyEvaluator.parse_data(eye(dim1))
@data.deleter
def data(self):
deprecate_data()
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
if self.metric in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self.metric]
@deprecated(useinstead=".delta", issue=17517,
deprecated_since_version="1.5")
def get_kronecker_delta(self):
sym2 = TensorSymmetry(get_symmetric_group_sgs(2))
delta = TensorHead('KD', [self]*2, sym2)
return delta
@deprecated(useinstead=".delta", issue=17517,
deprecated_since_version="1.5")
def get_epsilon(self):
if not isinstance(self._eps_dim, (SYMPY_INTS, Integer)):
return None
sym = TensorSymmetry(get_symmetric_group_sgs(self._eps_dim, 1))
epsilon = TensorHead('Eps', [self]*self._eps_dim, sym)
return epsilon
def _components_data_full_destroy(self):
"""
EXPERIMENTAL: do not rely on this API method.
This destroys components data associated to the ``TensorIndexType``, if
any, specifically:
* metric tensor data
* Kronecker tensor data
"""
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
def delete_tensmul_data(key):
if key in _tensor_data_substitution_dict._substitutions_dict_tensmul:
del _tensor_data_substitution_dict._substitutions_dict_tensmul[key]
# delete metric data:
delete_tensmul_data((self.metric, True, True))
delete_tensmul_data((self.metric, True, False))
delete_tensmul_data((self.metric, False, True))
delete_tensmul_data((self.metric, False, False))
# delete delta tensor data:
delta = self.get_kronecker_delta()
if delta in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[delta]
class TensorIndex(Basic):
"""
Represents a tensor index
Parameters
==========
name : name of the index, or ``True`` if you want it to be automatically assigned
tensor_index_type : ``TensorIndexType`` of the index
is_up : flag for contravariant index (is_up=True by default)
Attributes
==========
``name``
``tensor_index_type``
``is_up``
Notes
=====
Tensor indices are contracted with the Einstein summation convention.
An index can be in contravariant or in covariant form; in the latter
case it is represented by prepending a ``-`` to the index name. Adding
``-`` to a covariant (is_up=False) index makes it contravariant.
Dummy indices have a name with head given by
``tensor_index_type.dummy_name`` with an underscore and a number.
Similar to ``symbols`` multiple contravariant indices can be created
at once using ``tensor_indices(s, typ)``, where ``s`` is a string
of names.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, TensorIndex, TensorHead, tensor_indices
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> mu = TensorIndex('mu', Lorentz, is_up=False)
>>> nu, rho = tensor_indices('nu, rho', Lorentz)
>>> A = TensorHead('A', [Lorentz, Lorentz])
>>> A(mu, nu)
A(-mu, nu)
>>> A(-mu, -rho)
A(mu, -rho)
>>> A(mu, -mu)
A(-L_0, L_0)
"""
def __new__(cls, name, tensor_index_type, is_up=True):
if isinstance(name, str):
name_symbol = Symbol(name)
elif isinstance(name, Symbol):
name_symbol = name
elif name is True:
name = "_i{0}".format(len(tensor_index_type._autogenerated))
name_symbol = Symbol(name)
tensor_index_type._autogenerated.append(name_symbol)
else:
raise ValueError("invalid name")
is_up = sympify(is_up)
return Basic.__new__(cls, name_symbol, tensor_index_type, is_up)
@property
def name(self):
return self.args[0].name
@property
def tensor_index_type(self):
return self.args[1]
@property
def is_up(self):
return self.args[2]
def _print(self):
s = self.name
if not self.is_up:
s = '-%s' % s
return s
def __lt__(self, other):
return ((self.tensor_index_type, self.name) <
(other.tensor_index_type, other.name))
def __neg__(self):
t1 = TensorIndex(self.name, self.tensor_index_type,
(not self.is_up))
return t1
def tensor_indices(s, typ):
"""
Returns list of tensor indices given their names and their types
Parameters
==========
s : string of comma separated names of indices
typ : ``TensorIndexType`` of the indices
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> a, b, c, d = tensor_indices('a,b,c,d', Lorentz)
"""
if isinstance(s, str):
a = [x.name for x in symbols(s, seq=True)]
else:
raise ValueError('expecting a string')
tilist = [TensorIndex(i, typ) for i in a]
if len(tilist) == 1:
return tilist[0]
return tilist
class TensorSymmetry(Basic):
"""
Monoterm symmetry of a tensor (i.e. any symmetric or anti-symmetric
index permutation). For the relevant terminology see ``tensor_can.py``
section of the combinatorics module.
Parameters
==========
bsgs : tuple ``(base, sgs)`` BSGS of the symmetry of the tensor
Attributes
==========
``base`` : base of the BSGS
``generators`` : generators of the BSGS
``rank`` : rank of the tensor
Notes
=====
A tensor can have an arbitrary monoterm symmetry provided by its BSGS.
Multiterm symmetries, like the cyclic symmetry of the Riemann tensor
(i.e., Bianchi identity), are not covered. See combinatorics module for
information on how to generate BSGS for a general index permutation group.
Simple symmetries can be generated using built-in methods.
See Also
========
sympy.combinatorics.tensor_can.get_symmetric_group_sgs
Examples
========
Define a symmetric tensor of rank 2
>>> from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, get_symmetric_group_sgs, TensorHead
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> sym = TensorSymmetry(get_symmetric_group_sgs(2))
>>> T = TensorHead('T', [Lorentz]*2, sym)
Note, that the same can also be done using built-in TensorSymmetry methods
>>> sym2 = TensorSymmetry.fully_symmetric(2)
>>> sym == sym2
True
"""
def __new__(cls, *args, **kw_args):
if len(args) == 1:
base, generators = args[0]
elif len(args) == 2:
base, generators = args
else:
raise TypeError("bsgs required, either two separate parameters or one tuple")
if not isinstance(base, Tuple):
base = Tuple(*base)
if not isinstance(generators, Tuple):
generators = Tuple(*generators)
return Basic.__new__(cls, base, generators, **kw_args)
@property
def base(self):
return self.args[0]
@property
def generators(self):
return self.args[1]
@property
def rank(self):
return self.generators[0].size - 2
@classmethod
def fully_symmetric(cls, rank):
"""
Returns a fully symmetric (antisymmetric if ``rank``<0)
TensorSymmetry object for ``abs(rank)`` indices.
"""
if rank > 0:
bsgs = get_symmetric_group_sgs(rank, False)
elif rank < 0:
bsgs = get_symmetric_group_sgs(-rank, True)
elif rank == 0:
bsgs = ([], [Permutation(1)])
return TensorSymmetry(bsgs)
@classmethod
def direct_product(cls, *args):
"""
Returns a TensorSymmetry object that is being a direct product of
fully (anti-)symmetric index permutation groups.
Notes
=====
Some examples for different values of ``(*args)``:
``(1)`` vector, equivalent to ``TensorSymmetry.fully_symmetric(1)``
``(2)`` tensor with 2 symmetric indices, equivalent to ``.fully_symmetric(2)``
``(-2)`` tensor with 2 antisymmetric indices, equivalent to ``.fully_symmetric(-2)``
``(2, -2)`` tensor with the first 2 indices symmetric and the last 2 antisymmetric
``(1, 1, 1)`` tensor with 3 indices without any symmetry
"""
base, sgs = [], [Permutation(1)]
for arg in args:
if arg > 0:
bsgs2 = get_symmetric_group_sgs(arg, False)
elif arg < 0:
bsgs2 = get_symmetric_group_sgs(-arg, True)
else:
continue
base, sgs = bsgs_direct_product(base, sgs, *bsgs2)
return TensorSymmetry(base, sgs)
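# Minimal usage sketch (comment only, not executed): a rank-4 tensor that is
# symmetric in its first two indices and antisymmetric in its last two:
#
#     >>> sym = TensorSymmetry.direct_product(2, -2)
#     >>> sym.rank
#     4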
@classmethod
def riemann(cls):
"""
Returns the monoterm symmetry of the Riemann tensor
"""
return TensorSymmetry(riemann_bsgs)
@classmethod
def no_symmetry(cls, rank):
"""
TensorSymmetry object for ``rank`` indices with no symmetry
"""
return TensorSymmetry([], [Permutation(rank+1)])
@deprecated(useinstead="TensorSymmetry class constructor and methods", issue=17108,
deprecated_since_version="1.5")
def tensorsymmetry(*args):
"""
Returns a ``TensorSymmetry`` object. This method is deprecated, use
``TensorSymmetry.direct_product()`` or ``.riemann()`` instead.
One can represent a tensor with any monoterm slot symmetry group
using a BSGS.
``args`` can be a BSGS
``args[0]`` base
``args[1]`` sgs
Usually tensors are in (direct products of) representations
of the symmetric group;
``args`` can be a list of lists representing the shapes of Young tableaux
Notes
=====
For instance:
``[[1]]`` vector
``[[1]*n]`` symmetric tensor of rank ``n``
``[[n]]`` antisymmetric tensor of rank ``n``
``[[2, 2]]`` monoterm slot symmetry of the Riemann tensor
``[[1],[1]]`` vector*vector
``[[2],[1],[1]]`` (antisymmetric tensor)*vector*vector
Notice that with the shape ``[2, 2]`` we associate only the monoterm
symmetries of the Riemann tensor; this is an abuse of notation,
since the shape ``[2, 2]`` corresponds usually to the irreducible
representation characterized by the monoterm symmetries and by the
cyclic symmetry.
"""
from sympy.combinatorics import Permutation
def tableau2bsgs(a):
if len(a) == 1:
# antisymmetric vector
n = a[0]
bsgs = get_symmetric_group_sgs(n, 1)
else:
if all(x == 1 for x in a):
# symmetric vector
n = len(a)
bsgs = get_symmetric_group_sgs(n)
elif a == [2, 2]:
bsgs = riemann_bsgs
else:
raise NotImplementedError
return bsgs
if not args:
return TensorSymmetry(Tuple(), Tuple(Permutation(1)))
if len(args) == 2 and isinstance(args[1][0], Permutation):
return TensorSymmetry(args)
base, sgs = tableau2bsgs(args[0])
for a in args[1:]:
basex, sgsx = tableau2bsgs(a)
base, sgs = bsgs_direct_product(base, sgs, basex, sgsx)
return TensorSymmetry(Tuple(base, sgs))
class TensorType(Basic):
"""
Class of tensor types. Deprecated, use tensor_heads() instead.
Parameters
==========
index_types : list of ``TensorIndexType`` of the tensor indices
symmetry : ``TensorSymmetry`` of the tensor
Attributes
==========
``index_types``
``symmetry``
``types`` : list of ``TensorIndexType`` without repetitions
"""
is_commutative = False
def __new__(cls, index_types, symmetry, **kw_args):
deprecate_TensorType()
assert symmetry.rank == len(index_types)
obj = Basic.__new__(cls, Tuple(*index_types), symmetry, **kw_args)
return obj
@property
def index_types(self):
return self.args[0]
@property
def symmetry(self):
return self.args[1]
@property
def types(self):
return sorted(set(self.index_types), key=lambda x: x.name)
def __str__(self):
return 'TensorType(%s)' % ([str(x) for x in self.index_types])
def __call__(self, s, comm=0):
"""
Return a TensorHead object or a list of TensorHead objects.
``s`` name or string of names
``comm``: commutation group number
see ``_TensorManager.set_comm``
"""
if isinstance(s, str):
names = [x.name for x in symbols(s, seq=True)]
else:
raise ValueError('expecting a string')
if len(names) == 1:
return TensorHead(names[0], self.index_types, self.symmetry, comm)
else:
return [TensorHead(name, self.index_types, self.symmetry, comm) for name in names]
@deprecated(useinstead="TensorHead class constructor or tensor_heads()",
issue=17108, deprecated_since_version="1.5")
def tensorhead(name, typ, sym=None, comm=0):
"""
Function generating tensorhead(s). This method is deprecated,
use TensorHead constructor or tensor_heads() instead.
Parameters
==========
name : name or sequence of names (as in ``symbols``)
typ : index types
sym : same as ``*args`` in ``tensorsymmetry``
comm : commutation group number
see ``_TensorManager.set_comm``
"""
if sym is None:
sym = [[1] for i in range(len(typ))]
sym = tensorsymmetry(*sym)
return TensorHead(name, typ, sym, comm)
class TensorHead(Basic):
"""
Tensor head of the tensor
Parameters
==========
name : name of the tensor
index_types : list of TensorIndexType
symmetry : TensorSymmetry of the tensor
comm : commutation group number
Attributes
==========
``name``
``index_types``
``rank`` : total number of indices
``symmetry``
``comm`` : commutation group
Notes
=====
Similar to ``symbols``, multiple TensorHeads can be created using
``tensor_heads(s, index_types, symmetry=None, comm=0)``, where ``s``
is a string of names and ``symmetry`` is the monoterm tensor symmetry
(see ``TensorSymmetry``).
A ``TensorHead`` belongs to a commutation group, defined by a
symbol or number ``comm`` (see ``_TensorManager.set_comm``);
tensors in a commutation group have the same commutation properties;
by default ``comm`` is ``0``, the group of the commuting tensors.
Examples
========
Define a fully antisymmetric tensor of rank 2:
>>> from sympy.tensor.tensor import TensorIndexType, TensorHead, TensorSymmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> asym2 = TensorSymmetry.fully_symmetric(-2)
>>> A = TensorHead('A', [Lorentz, Lorentz], asym2)
Examples with ndarray values: the components data assigned to the
``TensorHead`` object are assumed to be in a fully contravariant
representation. If it is necessary to assign components data that
represent the values of a tensor which is not fully contravariant,
see the examples below.
>>> from sympy.tensor.tensor import tensor_indices
>>> from sympy import diag
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> i0, i1 = tensor_indices('i0:2', Lorentz)
Specify a replacement dictionary to keep track of the arrays to use for
replacements in the tensorial expression. The ``TensorIndexType`` is
associated to the metric used for contractions (in fully covariant form):
>>> repl = {Lorentz: diag(1, -1, -1, -1)}
Let's see some examples of working with components with the electromagnetic
tensor:
>>> from sympy import symbols
>>> Ex, Ey, Ez, Bx, By, Bz = symbols('E_x E_y E_z B_x B_y B_z')
>>> c = symbols('c', positive=True)
Let's define `F`, an antisymmetric tensor:
>>> F = TensorHead('F', [Lorentz, Lorentz], asym2)
Let's update the dictionary to contain the matrix to use in the
replacements:
>>> repl.update({F(-i0, -i1): [
... [0, Ex/c, Ey/c, Ez/c],
... [-Ex/c, 0, -Bz, By],
... [-Ey/c, Bz, 0, -Bx],
... [-Ez/c, -By, Bx, 0]]})
Now it is possible to retrieve the contravariant form of the Electromagnetic
tensor:
>>> F(i0, i1).replace_with_arrays(repl, [i0, i1])
[[0, -E_x/c, -E_y/c, -E_z/c], [E_x/c, 0, -B_z, B_y], [E_y/c, B_z, 0, -B_x], [E_z/c, -B_y, B_x, 0]]
and the mixed contravariant-covariant form:
>>> F(i0, -i1).replace_with_arrays(repl, [i0, -i1])
[[0, E_x/c, E_y/c, E_z/c], [E_x/c, 0, B_z, -B_y], [E_y/c, -B_z, 0, B_x], [E_z/c, B_y, -B_x, 0]]
Energy-momentum of a particle may be represented as:
>>> from sympy import symbols
>>> P = TensorHead('P', [Lorentz], TensorSymmetry.no_symmetry(1))
>>> E, px, py, pz = symbols('E p_x p_y p_z', positive=True)
>>> repl.update({P(i0): [E, px, py, pz]})
The contravariant and covariant components are, respectively:
>>> P(i0).replace_with_arrays(repl, [i0])
[E, p_x, p_y, p_z]
>>> P(-i0).replace_with_arrays(repl, [-i0])
[E, -p_x, -p_y, -p_z]
The contraction of a 1-index tensor by itself:
>>> expr = P(i0)*P(-i0)
>>> expr.replace_with_arrays(repl, [])
E**2 - p_x**2 - p_y**2 - p_z**2
"""
is_commutative = False
def __new__(cls, name, index_types, symmetry=None, comm=0):
if isinstance(name, str):
name_symbol = Symbol(name)
elif isinstance(name, Symbol):
name_symbol = name
else:
raise ValueError("invalid name")
if symmetry is None:
symmetry = TensorSymmetry.no_symmetry(len(index_types))
else:
assert symmetry.rank == len(index_types)
obj = Basic.__new__(cls, name_symbol, Tuple(*index_types), symmetry)
obj.comm = TensorManager.comm_symbols2i(comm)
return obj
@property
def name(self):
return self.args[0].name
@property
def index_types(self):
return list(self.args[1])
@property
def symmetry(self):
return self.args[2]
@property
def rank(self):
return len(self.index_types)
def __lt__(self, other):
return (self.name, self.index_types) < (other.name, other.index_types)
def commutes_with(self, other):
"""
Returns ``0`` if ``self`` and ``other`` commute, ``1`` if they anticommute.
Returns ``None`` if ``self`` and ``other`` neither commute nor anticommute.
"""
r = TensorManager.get_comm(self.comm, other.comm)
return r
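# Minimal usage sketch (comment only, not executed): two heads in the default
# commuting group (``comm=0``) commute, so the result is ``0``.  ``A`` and
# ``B`` are hypothetical heads on a previously defined ``Lorentz`` index type:
#
#     >>> A = TensorHead('A', [Lorentz])
#     >>> B = TensorHead('B', [Lorentz])
#     >>> A.commutes_with(B)
#     0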
def _print(self):
return '%s(%s)' %(self.name, ','.join([str(x) for x in self.index_types]))
def __call__(self, *indices, **kw_args):
"""
Returns a tensor with indices.
There is a special behavior for indices denoted by ``True``: they are
considered auto-matrix indices, their slots are automatically filled,
and they confer to the tensor the behavior of a matrix or vector upon
multiplication with another tensor containing auto-matrix indices of
the same ``TensorIndexType``. This means the indices are summed over
in the same way as in matrix multiplication. For matrix behavior,
define two auto-matrix indices; for vector behavior, define just one.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorSymmetry, TensorHead
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> a, b = tensor_indices('a,b', Lorentz)
>>> A = TensorHead('A', [Lorentz]*2, TensorSymmetry.no_symmetry(2))
>>> t = A(a, -b)
>>> t
A(a, -b)
"""
tensor = Tensor(self, indices, **kw_args)
return tensor.doit()
# Everything below this line is deprecated
def __pow__(self, other):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=SymPyDeprecationWarning)
if self.data is None:
raise ValueError("No power on abstract tensors.")
deprecate_data()
from .array import tensorproduct, tensorcontraction
metrics = [_.data for _ in self.index_types]
marray = self.data
marraydim = marray.rank()
for metric in metrics:
marray = tensorproduct(marray, metric, marray)
marray = tensorcontraction(marray, (0, marraydim), (marraydim+1, marraydim+2))
return marray ** (other * S.Half)
@property
def data(self):
deprecate_data()
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
deprecate_data()
_tensor_data_substitution_dict[self] = data
@data.deleter
def data(self):
deprecate_data()
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
def __iter__(self):
deprecate_data()
return self.data.__iter__()
def _components_data_full_destroy(self):
"""
EXPERIMENTAL: do not rely on this API method.
Destroy components data associated to the ``TensorHead`` object, this
checks for attached components data, and destroys components data too.
"""
# do not garbage collect Kronecker tensor (it should be done by
# ``TensorIndexType`` garbage collection)
deprecate_data()
if self.name == "KD":
return
# the data attached to a tensor must be deleted only by the TensorHead
# destructor. If the TensorHead is deleted, it means that there are no
# more instances of that tensor anywhere.
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
def tensor_heads(s, index_types, symmetry=None, comm=0):
"""
Returns a sequence of TensorHeads from a string `s`
"""
if isinstance(s, str):
names = [x.name for x in symbols(s, seq=True)]
else:
raise ValueError('expecting a string')
thlist = [TensorHead(name, index_types, symmetry, comm) for name in names]
if len(thlist) == 1:
return thlist[0]
return thlist
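# Minimal usage sketch (comment only, not executed):
#
#     >>> from sympy.tensor.tensor import TensorIndexType, tensor_heads
#     >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
#     >>> p, q = tensor_heads('p,q', [Lorentz])
#     >>> p
#     p(Lorentz)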
class _TensorMetaclass(ManagedProperties, ABCMeta):
pass
class TensExpr(Expr, metaclass=_TensorMetaclass):
"""
Abstract base class for tensor expressions
Notes
=====
A tensor expression is an expression formed by tensors;
currently the sums of tensors are distributed.
A ``TensExpr`` can be a ``TensAdd`` or a ``TensMul``.
``TensMul`` objects are formed by products of component tensors,
and include a coefficient, which is a SymPy expression.
In the internal representation contracted indices are represented
by ``(ipos1, ipos2, icomp1, icomp2)``, where ``icomp1`` is the position
of the component tensor with contravariant index, ``ipos1`` is the
slot which the index occupies in that component tensor.
Contracted indices are therefore nameless in the internal representation.
"""
_op_priority = 12.0
is_commutative = False
def __neg__(self):
return self*S.NegativeOne
def __abs__(self):
raise NotImplementedError
def __add__(self, other):
return TensAdd(self, other).doit()
def __radd__(self, other):
return TensAdd(other, self).doit()
def __sub__(self, other):
return TensAdd(self, -other).doit()
def __rsub__(self, other):
return TensAdd(other, -self).doit()
def __mul__(self, other):
"""
Multiply two tensors using Einstein summation convention.
If the two tensors have an index in common, one contravariant
and the other covariant, in their product the indices are summed
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensor_heads('p,q', [Lorentz])
>>> t1 = p(m0)
>>> t2 = q(-m0)
>>> t1*t2
p(L_0)*q(-L_0)
"""
return TensMul(self, other).doit()
def __rmul__(self, other):
return TensMul(other, self).doit()
def __div__(self, other):
other = _sympify(other)
if isinstance(other, TensExpr):
raise ValueError('cannot divide by a tensor')
return TensMul(self, S.One/other).doit()
def __rdiv__(self, other):
raise ValueError('cannot divide by a tensor')
def __pow__(self, other):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=SymPyDeprecationWarning)
if self.data is None:
raise ValueError("No power without ndarray data.")
deprecate_data()
from .array import tensorproduct, tensorcontraction
free = self.free
marray = self.data
mdim = marray.rank()
for metric in free:
marray = tensorcontraction(
tensorproduct(
marray,
metric[0].tensor_index_type.data,
marray),
(0, mdim), (mdim+1, mdim+2)
)
return marray ** (other * S.Half)
def __rpow__(self, other):
raise NotImplementedError
__truediv__ = __div__
__rtruediv__ = __rdiv__
@property
@abstractmethod
def nocoeff(self):
raise NotImplementedError("abstract method")
@property
@abstractmethod
def coeff(self):
raise NotImplementedError("abstract method")
@abstractmethod
def get_indices(self):
raise NotImplementedError("abstract method")
@abstractmethod
def get_free_indices(self): # type: () -> List[TensorIndex]
raise NotImplementedError("abstract method")
@abstractmethod
def _replace_indices(self, repl): # type: (tDict[TensorIndex, TensorIndex]) -> TensExpr
raise NotImplementedError("abstract method")
def fun_eval(self, *index_tuples):
deprecate_fun_eval()
return self.substitute_indices(*index_tuples)
def get_matrix(self):
"""
DEPRECATED: do not use.
Returns ndarray components data as a matrix, if components data are
available and ndarray dimension does not exceed 2.
"""
from sympy import Matrix
deprecate_data()
if 0 < self.rank <= 2:
rows = self.data.shape[0]
columns = self.data.shape[1] if self.rank == 2 else 1
if self.rank == 2:
mat_list = []
for i in range(rows):
mat_list.append([])
for j in range(columns):
mat_list[i].append(self[i, j])
else:
mat_list = [None] * rows
for i in range(rows):
mat_list[i] = self[i]
return Matrix(mat_list)
else:
raise NotImplementedError(
"missing multidimensional reduction to matrix.")
@staticmethod
def _get_indices_permutation(indices1, indices2):
return [indices1.index(i) for i in indices2]
def expand(self, **hints):
return _expand(self, **hints).doit()
def _expand(self, **kwargs):
return self
def _get_free_indices_set(self):
indset = set([])
for arg in self.args:
if isinstance(arg, TensExpr):
indset.update(arg._get_free_indices_set())
return indset
def _get_dummy_indices_set(self):
indset = set([])
for arg in self.args:
if isinstance(arg, TensExpr):
indset.update(arg._get_dummy_indices_set())
return indset
def _get_indices_set(self):
indset = set([])
for arg in self.args:
if isinstance(arg, TensExpr):
indset.update(arg._get_indices_set())
return indset
@property
def _iterate_dummy_indices(self):
dummy_set = self._get_dummy_indices_set()
def recursor(expr, pos):
if isinstance(expr, TensorIndex):
if expr in dummy_set:
yield (expr, pos)
elif isinstance(expr, (Tuple, TensExpr)):
for p, arg in enumerate(expr.args):
for i in recursor(arg, pos+(p,)):
yield i
return recursor(self, ())
@property
def _iterate_free_indices(self):
free_set = self._get_free_indices_set()
def recursor(expr, pos):
if isinstance(expr, TensorIndex):
if expr in free_set:
yield (expr, pos)
elif isinstance(expr, (Tuple, TensExpr)):
for p, arg in enumerate(expr.args):
for i in recursor(arg, pos+(p,)):
yield i
return recursor(self, ())
@property
def _iterate_indices(self):
def recursor(expr, pos):
if isinstance(expr, TensorIndex):
yield (expr, pos)
elif isinstance(expr, (Tuple, TensExpr)):
for p, arg in enumerate(expr.args):
for i in recursor(arg, pos+(p,)):
yield i
return recursor(self, ())
@staticmethod
def _match_indices_with_other_tensor(array, free_ind1, free_ind2, replacement_dict):
from .array import tensorcontraction, tensorproduct, permutedims
index_types1 = [i.tensor_index_type for i in free_ind1]
# Check if variance of indices needs to be fixed:
pos2up = []
pos2down = []
free2remaining = free_ind2[:]
for pos1, index1 in enumerate(free_ind1):
if index1 in free2remaining:
pos2 = free2remaining.index(index1)
free2remaining[pos2] = None
continue
if -index1 in free2remaining:
pos2 = free2remaining.index(-index1)
free2remaining[pos2] = None
free_ind2[pos2] = index1
if index1.is_up:
pos2up.append(pos2)
else:
pos2down.append(pos2)
else:
index2 = free2remaining[pos1]
if index2 is None:
raise ValueError("incompatible indices: %s and %s" % (free_ind1, free_ind2))
free2remaining[pos1] = None
free_ind2[pos1] = index1
if index1.is_up ^ index2.is_up:
if index1.is_up:
pos2up.append(pos1)
else:
pos2down.append(pos1)
if len(set(free_ind1) & set(free_ind2)) < len(free_ind1):
raise ValueError("incompatible indices: %s and %s" % (free_ind1, free_ind2))
# TODO: add possibility of metric after (spinors)
def contract_and_permute(metric, array, pos):
array = tensorcontraction(tensorproduct(metric, array), (1, 2+pos))
permu = list(range(len(free_ind1)))
permu[0], permu[pos] = permu[pos], permu[0]
return permutedims(array, permu)
# Raise indices:
for pos in pos2up:
index_type_pos = index_types1[pos] # type: TensorIndexType
if index_type_pos not in replacement_dict:
raise ValueError("No metric provided to lower index")
metric = replacement_dict[index_type_pos]
metric_inverse = _TensorDataLazyEvaluator.inverse_matrix(metric)
array = contract_and_permute(metric_inverse, array, pos)
# Lower indices:
for pos in pos2down:
index_type_pos = index_types1[pos] # type: TensorIndexType
if index_type_pos not in replacement_dict:
raise ValueError("No metric provided to lower index")
metric = replacement_dict[index_type_pos]
array = contract_and_permute(metric, array, pos)
if free_ind1:
permutation = TensExpr._get_indices_permutation(free_ind2, free_ind1)
array = permutedims(array, permutation)
if hasattr(array, "rank") and array.rank() == 0:
array = array[()]
return free_ind2, array
def replace_with_arrays(self, replacement_dict, indices=None):
"""
Replace the tensorial expressions with arrays. The final array will
correspond to the N-dimensional array with indices arranged according
to ``indices``.
Parameters
==========
replacement_dict
dictionary containing the replacement rules for tensors.
indices
the index order with respect to which the array is read. The
original index order will be used if no value is passed.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices
>>> from sympy.tensor.tensor import TensorHead
>>> from sympy import symbols, diag
>>> L = TensorIndexType("L")
>>> i, j = tensor_indices("i j", L)
>>> A = TensorHead("A", [L])
>>> A(i).replace_with_arrays({A(i): [1, 2]}, [i])
[1, 2]
Since 'indices' is optional, we can also call replace_with_arrays by
this way if no specific index order is needed:
>>> A(i).replace_with_arrays({A(i): [1, 2]})
[1, 2]
>>> expr = A(i)*A(j)
>>> expr.replace_with_arrays({A(i): [1, 2]})
[[1, 2], [2, 4]]
For contractions, specify the metric of the ``TensorIndexType``, which
in this case is ``L``, in its covariant form:
>>> expr = A(i)*A(-i)
>>> expr.replace_with_arrays({A(i): [1, 2], L: diag(1, -1)})
-3
Symmetrization of an array:
>>> H = TensorHead("H", [L, L])
>>> a, b, c, d = symbols("a b c d")
>>> expr = H(i, j)/2 + H(j, i)/2
>>> expr.replace_with_arrays({H(i, j): [[a, b], [c, d]]})
[[a, b/2 + c/2], [b/2 + c/2, d]]
Anti-symmetrization of an array:
>>> expr = H(i, j)/2 - H(j, i)/2
>>> repl = {H(i, j): [[a, b], [c, d]]}
>>> expr.replace_with_arrays(repl)
[[0, b/2 - c/2], [-b/2 + c/2, 0]]
The same expression can be read as the transpose by inverting ``i`` and
``j``:
>>> expr.replace_with_arrays(repl, [j, i])
[[0, -b/2 + c/2], [b/2 - c/2, 0]]
"""
from .array import Array
indices = indices or []
replacement_dict = {tensor: Array(array) for tensor, array in replacement_dict.items()}
# Check dimensions of replaced arrays:
for tensor, array in replacement_dict.items():
if isinstance(tensor, TensorIndexType):
expected_shape = [tensor.dim for i in range(2)]
else:
expected_shape = [index_type.dim for index_type in tensor.index_types]
if len(expected_shape) != array.rank() or (not all([dim1 == dim2 if
dim1.is_number else True for dim1, dim2 in zip(expected_shape,
array.shape)])):
raise ValueError("shapes for tensor %s expected to be %s, "\
"replacement array shape is %s" % (tensor, expected_shape,
array.shape))
ret_indices, array = self._extract_data(replacement_dict)
last_indices, array = self._match_indices_with_other_tensor(array, indices, ret_indices, replacement_dict)
return array
def _check_add_Sum(self, expr, index_symbols):
from sympy import Sum
indices = self.get_indices()
dum = self.dum
sum_indices = [ (index_symbols[i], 0,
indices[i].tensor_index_type.dim-1) for i, j in dum]
if sum_indices:
expr = Sum(expr, *sum_indices)
return expr
def _expand_partial_derivative(self):
# simply delegate the _expand_partial_derivative() to
# its arguments to expand a possibly found PartialDerivative
return self.func(*[
a._expand_partial_derivative()
if isinstance(a, TensExpr) else a
for a in self.args])
class TensAdd(TensExpr, AssocOp):
"""
Sum of tensors
Parameters
==========
free_args : list of the free indices
Attributes
==========
``args`` : tuple of addends
``rank`` : rank of the tensor
``free_args`` : list of the free indices in sorted order
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_heads, tensor_indices
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> a, b = tensor_indices('a,b', Lorentz)
>>> p, q = tensor_heads('p,q', [Lorentz])
>>> t = p(a) + q(a); t
p(a) + q(a)
Examples with components data added to the tensor expression:
>>> from sympy import symbols, diag
>>> x, y, z, t = symbols("x y z t")
>>> repl = {}
>>> repl[Lorentz] = diag(1, -1, -1, -1)
>>> repl[p(a)] = [1, 2, 3, 4]
>>> repl[q(a)] = [x, y, z, t]
The following example adds the two arrays componentwise:
>>> expr = p(a) + q(a)
>>> expr.replace_with_arrays(repl, [a])
[x + 1, y + 2, z + 3, t + 4]
"""
def __new__(cls, *args, **kw_args):
args = [_sympify(x) for x in args if x]
args = TensAdd._tensAdd_flatten(args)
args.sort(key=default_sort_key)
if not args:
return S.Zero
if len(args) == 1:
return args[0]
return Basic.__new__(cls, *args, **kw_args)
@property
def coeff(self):
return S.One
@property
def nocoeff(self):
return self
def get_free_indices(self): # type: () -> List[TensorIndex]
return self.free_indices
def _replace_indices(self, repl): # type: (tDict[TensorIndex, TensorIndex]) -> TensExpr
newargs = [arg._replace_indices(repl) if isinstance(arg, TensExpr) else arg for arg in self.args]
return self.func(*newargs)
@memoize_property
def rank(self):
if isinstance(self.args[0], TensExpr):
return self.args[0].rank
else:
return 0
@memoize_property
def free_args(self):
if isinstance(self.args[0], TensExpr):
return self.args[0].free_args
else:
return []
@memoize_property
def free_indices(self):
if isinstance(self.args[0], TensExpr):
return self.args[0].get_free_indices()
else:
return set()
def doit(self, **kwargs):
deep = kwargs.get('deep', True)
if deep:
args = [arg.doit(**kwargs) for arg in self.args]
else:
args = self.args
if not args:
return S.Zero
if len(args) == 1 and not isinstance(args[0], TensExpr):
return args[0]
# now check that all addends have the same indices:
TensAdd._tensAdd_check(args)
# if TensAdd has only 1 element in its `args`:
if len(args) == 1: # and isinstance(args[0], TensMul):
return args[0]
# Remove zeros:
args = [x for x in args if x]
# if there are no more args (i.e. have cancelled out),
# just return zero:
if not args:
return S.Zero
if len(args) == 1:
return args[0]
# Collect terms appearing more than once, differing by their coefficients:
args = TensAdd._tensAdd_collect_terms(args)
# collect canonicalized terms
def sort_key(t):
if not isinstance(t, TensExpr):
return [], [], []
if hasattr(t, "_index_structure") and hasattr(t, "components"):
x = get_index_structure(t)
return t.components, x.free, x.dum
return [], [], []
args.sort(key=sort_key)
if not args:
return S.Zero
# if there is only one component tensor, return it
if len(args) == 1:
return args[0]
obj = self.func(*args)
return obj
@staticmethod
def _tensAdd_flatten(args):
# flatten TensAdd, coerce terms which are not tensors to tensors
a = []
for x in args:
if isinstance(x, (Add, TensAdd)):
a.extend(list(x.args))
else:
a.append(x)
args = [x for x in a if x.coeff]
return args
@staticmethod
def _tensAdd_check(args):
# check that all addends have the same free indices
def get_indices_set(x): # type: (Expr) -> Set[TensorIndex]
if isinstance(x, TensExpr):
return set(x.get_free_indices())
return set()
indices0 = get_indices_set(args[0]) # type: Set[TensorIndex]
list_indices = [get_indices_set(arg) for arg in args[1:]] # type: List[Set[TensorIndex]]
if not all(x == indices0 for x in list_indices):
raise ValueError('all tensors must have the same indices')
@staticmethod
def _tensAdd_collect_terms(args):
# collect TensMul terms differing at most by their coefficient
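# e.g. (an illustrative sketch): p(a) + 2*p(a) + q(a) collects to 3*p(a) + q(a)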
terms_dict = defaultdict(list)
scalars = S.Zero
if isinstance(args[0], TensExpr):
free_indices = set(args[0].get_free_indices())
else:
free_indices = set([])
for arg in args:
if not isinstance(arg, TensExpr):
if free_indices != set([]):
raise ValueError("wrong valence")
scalars += arg
continue
if free_indices != set(arg.get_free_indices()):
raise ValueError("wrong valence")
# TODO: what is the part which is not a coeff?
# needs an implementation similar to .as_coeff_Mul()
terms_dict[arg.nocoeff].append(arg.coeff)
new_args = [TensMul(Add(*coeff), t).doit() for t, coeff in terms_dict.items() if Add(*coeff) != 0]
if isinstance(scalars, Add):
new_args = list(scalars.args) + new_args
elif scalars != 0:
new_args = [scalars] + new_args
return new_args
def get_indices(self):
indices = []
for arg in self.args:
indices.extend([i for i in get_indices(arg) if i not in indices])
return indices
def _expand(self, **hints):
return TensAdd(*[_expand(i, **hints) for i in self.args])
def __call__(self, *indices):
deprecate_fun_eval()
free_args = self.free_args
indices = list(indices)
if [x.tensor_index_type for x in indices] != [x.tensor_index_type for x in free_args]:
raise ValueError('incompatible types')
if indices == free_args:
return self
index_tuples = list(zip(free_args, indices))
a = [x.func(*x.substitute_indices(*index_tuples).args) for x in self.args]
res = TensAdd(*a).doit()
return res
def canon_bp(self):
"""
Canonicalize using the Butler-Portugal algorithm for canonicalization
under monoterm symmetries.
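Examples
========
An illustrative sketch (the index type, head, and index names below are
chosen only for this example); with a fully antisymmetric head the two
addends cancel after canonicalization:
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, TensorSymmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1 = tensor_indices('m0,m1', Lorentz)
>>> A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2))
>>> (A(m0, m1) + A(m1, m0)).canon_bp()
0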
"""
expr = self.expand()
args = [canon_bp(x) for x in expr.args]
res = TensAdd(*args).doit()
return res
def equals(self, other):
other = _sympify(other)
if isinstance(other, TensMul) and other.coeff == 0:
return all(x.coeff == 0 for x in self.args)
if isinstance(other, TensExpr):
if self.rank != other.rank:
return False
if isinstance(other, TensAdd):
if set(self.args) != set(other.args):
return False
else:
return True
t = self - other
if not isinstance(t, TensExpr):
return t == 0
else:
if isinstance(t, TensMul):
return t.coeff == 0
else:
return all(x.coeff == 0 for x in t.args)
def __getitem__(self, item):
deprecate_data()
return self.data[item]
def contract_delta(self, delta):
args = [x.contract_delta(delta) for x in self.args]
t = TensAdd(*args).doit()
return canon_bp(t)
def contract_metric(self, g):
"""
Raise or lower indices with the metric ``g``
Parameters
==========
g : metric
Notes
=====
see the ``TensorIndexType`` docstring for the contraction conventions
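Examples
========
An illustrative sketch (the index type, heads, and index names are chosen
only for this example; the printed ordering of the addends may differ):
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> a, b = tensor_indices('a,b', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensor_heads('p,q', [Lorentz])
>>> (p(a)*g(-a, -b) + q(-b)).contract_metric(g)
p(-b) + q(-b)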
"""
args = [contract_metric(x, g) for x in self.args]
t = TensAdd(*args).doit()
return canon_bp(t)
def substitute_indices(self, *index_tuples):
new_args = []
for arg in self.args:
if isinstance(arg, TensExpr):
arg = arg.substitute_indices(*index_tuples)
new_args.append(arg)
return TensAdd(*new_args).doit()
def _print(self):
a = []
args = self.args
for x in args:
a.append(str(x))
s = ' + '.join(a)
s = s.replace('+ -', '- ')
return s
def _extract_data(self, replacement_dict):
from sympy.tensor.array import Array, permutedims
args_indices, arrays = zip(*[
arg._extract_data(replacement_dict) if
isinstance(arg, TensExpr) else ([], arg) for arg in self.args
])
arrays = [Array(i) for i in arrays]
ref_indices = args_indices[0]
for i in range(1, len(args_indices)):
indices = args_indices[i]
array = arrays[i]
permutation = TensMul._get_indices_permutation(indices, ref_indices)
arrays[i] = permutedims(array, permutation)
return ref_indices, sum(arrays, Array.zeros(*array.shape))
@property
def data(self):
deprecate_data()
return _tensor_data_substitution_dict[self.expand()]
@data.setter
def data(self, data):
deprecate_data()
_tensor_data_substitution_dict[self] = data
@data.deleter
def data(self):
deprecate_data()
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
def __iter__(self):
deprecate_data()
if not self.data:
raise ValueError("No iteration on abstract tensors")
return self.data.flatten().__iter__()
def _eval_rewrite_as_Indexed(self, *args):
return Add.fromiter(args)
def _eval_partial_derivative(self, s):
# Evaluation like Add
list_addends = []
for a in self.args:
if isinstance(a, TensExpr):
list_addends.append(a._eval_partial_derivative(s))
# do not call diff if s is not a symbol
elif s._diff_wrt:
list_addends.append(a._eval_derivative(s))
return self.func(*list_addends)
class Tensor(TensExpr):
"""
Base tensor class, i.e. this represents a tensor, the single unit to be
put into an expression.
This object is usually created from a ``TensorHead``, by attaching indices
to it. Indices preceded by a minus sign are considered contravariant,
otherwise covariant.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead
>>> Lorentz = TensorIndexType("Lorentz", dummy_name="L")
>>> mu, nu = tensor_indices('mu nu', Lorentz)
>>> A = TensorHead("A", [Lorentz, Lorentz])
>>> A(mu, -nu)
A(mu, -nu)
>>> A(mu, -mu)
A(L_0, -L_0)
It is also possible to use symbols instead of indices (appropriate indices
are then generated automatically).
>>> from sympy import Symbol
>>> x = Symbol('x')
>>> A(x, mu)
A(x, mu)
>>> A(x, -x)
A(L_0, -L_0)
"""
is_commutative = False
_index_structure = None # type: _IndexStructure
def __new__(cls, tensor_head, indices, **kw_args):
is_canon_bp = kw_args.pop('is_canon_bp', False)
indices = cls._parse_indices(tensor_head, indices)
obj = Basic.__new__(cls, tensor_head, Tuple(*indices), **kw_args)
obj._index_structure = _IndexStructure.from_indices(*indices)
obj._free = obj._index_structure.free[:]
obj._dum = obj._index_structure.dum[:]
obj._ext_rank = obj._index_structure._ext_rank
obj._coeff = S.One
obj._nocoeff = obj
obj._component = tensor_head
obj._components = [tensor_head]
if tensor_head.rank != len(indices):
raise ValueError("wrong number of indices")
obj.is_canon_bp = is_canon_bp
obj._index_map = Tensor._build_index_map(indices, obj._index_structure)
return obj
@property
def free(self):
return self._free
@property
def dum(self):
return self._dum
@property
def ext_rank(self):
return self._ext_rank
@property
def coeff(self):
return self._coeff
@property
def nocoeff(self):
return self._nocoeff
@property
def component(self):
return self._component
@property
def components(self):
return self._components
@property
def head(self):
return self.args[0]
@property
def indices(self):
return self.args[1]
@property
def free_indices(self):
return set(self._index_structure.get_free_indices())
@property
def index_types(self):
return self.head.index_types
@property
def rank(self):
return len(self.free_indices)
@staticmethod
def _build_index_map(indices, index_structure):
index_map = {}
for idx in indices:
index_map[idx] = (indices.index(idx),)
return index_map
def doit(self, **kwargs):
args, indices, free, dum = TensMul._tensMul_contract_indices([self])
return args[0]
@staticmethod
def _parse_indices(tensor_head, indices):
if not isinstance(indices, (tuple, list, Tuple)):
raise TypeError("indices should be an array, got %s" % type(indices))
indices = list(indices)
for i, index in enumerate(indices):
if isinstance(index, Symbol):
indices[i] = TensorIndex(index, tensor_head.index_types[i], True)
elif isinstance(index, Mul):
c, e = index.as_coeff_Mul()
if c == -1 and isinstance(e, Symbol):
indices[i] = TensorIndex(e, tensor_head.index_types[i], False)
else:
raise ValueError("index not understood: %s" % index)
elif not isinstance(index, TensorIndex):
raise TypeError("wrong type for index: %s is %s" % (index, type(index)))
return indices
def _set_new_index_structure(self, im, is_canon_bp=False):
indices = im.get_indices()
return self._set_indices(*indices, is_canon_bp=is_canon_bp)
def _set_indices(self, *indices, **kw_args):
if len(indices) != self.ext_rank:
raise ValueError("indices length mismatch")
return self.func(self.args[0], indices, is_canon_bp=kw_args.pop('is_canon_bp', False)).doit()
def _get_free_indices_set(self):
return set([i[0] for i in self._index_structure.free])
def _get_dummy_indices_set(self):
dummy_pos = set(itertools.chain(*self._index_structure.dum))
return set(idx for i, idx in enumerate(self.args[1]) if i in dummy_pos)
def _get_indices_set(self):
return set(self.args[1].args)
@property
def free_in_args(self):
return [(ind, pos, 0) for ind, pos in self.free]
@property
def dum_in_args(self):
return [(p1, p2, 0, 0) for p1, p2 in self.dum]
@property
def free_args(self):
return sorted([x[0] for x in self.free])
def commutes_with(self, other):
"""
Returns ``0`` if ``self`` and ``other`` commute, ``1`` if they
anticommute, and ``None`` if they neither commute nor anticommute.
"""
if not isinstance(other, TensExpr):
return 0
elif isinstance(other, Tensor):
return self.component.commutes_with(other.component)
return NotImplementedError
def perm2tensor(self, g, is_canon_bp=False):
"""
Returns the tensor corresponding to the permutation ``g``
For further details, see the method in ``TIDS`` with the same name.
"""
return perm2tensor(self, g, is_canon_bp)
def canon_bp(self):
if self.is_canon_bp:
return self
expr = self.expand()
g, dummies, msym = expr._index_structure.indices_canon_args()
v = components_canon_args([expr.component])
can = canonicalize(g, dummies, msym, *v)
if can == 0:
return S.Zero
tensor = self.perm2tensor(can, True)
return tensor
def split(self):
return [self]
def _expand(self, **kwargs):
return self
def sorted_components(self):
return self
def get_indices(self): # type: () -> List[TensorIndex]
"""
Get a list of indices, corresponding to those of the tensor.
"""
return list(self.args[1])
def get_free_indices(self): # type: () -> List[TensorIndex]
"""
Get a list of free indices, corresponding to those of the tensor.
"""
return self._index_structure.get_free_indices()
def _replace_indices(self, repl): # type: (tDict[TensorIndex, TensorIndex]) -> Tensor
# TODO: this could be optimized by only swapping the indices
# instead of visiting the whole expression tree:
return self.xreplace(repl)
def as_base_exp(self):
return self, S.One
def substitute_indices(self, *index_tuples):
"""
Return a tensor with free indices substituted according to ``index_tuples``
``index_tuples`` list of tuples ``(old_index, new_index)``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads, TensorSymmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz)
>>> A, B = tensor_heads('A,B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
>>> t = A(i, k)*B(-k, -j); t
A(i, L_0)*B(-L_0, -j)
>>> t.substitute_indices((i, k),(-j, l))
A(k, L_0)*B(-L_0, l)
"""
indices = []
for index in self.indices:
for ind_old, ind_new in index_tuples:
if (index.name == ind_old.name and index.tensor_index_type ==
ind_old.tensor_index_type):
if index.is_up == ind_old.is_up:
indices.append(ind_new)
else:
indices.append(-ind_new)
break
else:
indices.append(index)
return self.head(*indices)
def __call__(self, *indices):
deprecate_fun_eval()
free_args = self.free_args
indices = list(indices)
if [x.tensor_index_type for x in indices] != [x.tensor_index_type for x in free_args]:
raise ValueError('incompatible types')
if indices == free_args:
return self
t = self.substitute_indices(*list(zip(free_args, indices)))
# object is rebuilt in order to make sure that all contracted indices
# get recognized as dummies, but only if there are contracted indices.
if len(set(i if i.is_up else -i for i in indices)) != len(indices):
return t.func(*t.args)
return t
# TODO: put this into TensExpr?
def __iter__(self):
deprecate_data()
return self.data.__iter__()
# TODO: put this into TensExpr?
def __getitem__(self, item):
deprecate_data()
return self.data[item]
def _extract_data(self, replacement_dict):
from .array import Array
for k, v in replacement_dict.items():
if isinstance(k, Tensor) and k.args[0] == self.args[0]:
other = k
array = v
break
else:
raise ValueError("%s not found in %s" % (self, replacement_dict))
# TODO: inefficient, this should be done at root level only:
replacement_dict = {k: Array(v) for k, v in replacement_dict.items()}
array = Array(array)
dum1 = self.dum
dum2 = other.dum
if len(dum2) > 0:
for pair in dum2:
# allow `dum2` if the contained values are also in `dum1`.
if pair not in dum1:
raise NotImplementedError("%s with contractions is not implemented" % other)
# Remove elements in `dum2` from `dum1`:
dum1 = [pair for pair in dum1 if pair not in dum2]
if len(dum1) > 0:
indices2 = other.get_indices()
repl = {}
for p1, p2 in dum1:
repl[indices2[p2]] = -indices2[p1]
other = other.xreplace(repl).doit()
array = _TensorDataLazyEvaluator.data_contract_dum([array], dum1, len(indices2))
free_ind1 = self.get_free_indices()
free_ind2 = other.get_free_indices()
return self._match_indices_with_other_tensor(array, free_ind1, free_ind2, replacement_dict)
@property
def data(self):
deprecate_data()
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
deprecate_data()
# TODO: check data compatibility with properties of tensor.
_tensor_data_substitution_dict[self] = data
@data.deleter
def data(self):
deprecate_data()
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
if self.metric in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self.metric]
def _print(self):
indices = [str(ind) for ind in self.indices]
component = self.component
if component.rank > 0:
return ('%s(%s)' % (component.name, ', '.join(indices)))
else:
return ('%s' % component.name)
def equals(self, other):
if other == 0:
return self.coeff == 0
other = _sympify(other)
if not isinstance(other, TensExpr):
assert not self.components
return S.One == other
def _get_compar_comp(self):
t = self.canon_bp()
r = (t.coeff, tuple(t.components), \
tuple(sorted(t.free)), tuple(sorted(t.dum)))
return r
return _get_compar_comp(self) == _get_compar_comp(other)
def contract_metric(self, g):
# if metric is not the same, ignore this step:
if self.component != g:
return self
# in case there are free indices, do not perform anything:
if len(self.free) != 0:
return self
#antisym = g.index_types[0].metric_antisym
if g.symmetry == TensorSymmetry.fully_symmetric(-2):
antisym = 1
elif g.symmetry == TensorSymmetry.fully_symmetric(2):
antisym = 0
elif g.symmetry == TensorSymmetry.no_symmetry(2):
antisym = None
else:
raise NotImplementedError
sign = S.One
typ = g.index_types[0]
if not antisym:
# g(i, -i)
sign = sign*typ.dim
else:
# g(i, -i)
sign = sign*typ.dim
dp0, dp1 = self.dum[0]
if dp0 < dp1:
# g(i, -i) = -D with antisymmetric metric
sign = -sign
return sign
def contract_delta(self, metric):
return self.contract_metric(metric)
def _eval_rewrite_as_Indexed(self, tens, indices):
from sympy import Indexed
# TODO: replace .args[0] with .name:
index_symbols = [i.args[0] for i in self.get_indices()]
expr = Indexed(tens.args[0], *index_symbols)
return self._check_add_Sum(expr, index_symbols)
def _eval_partial_derivative(self, s): # type: (Tensor) -> Expr
if not isinstance(s, Tensor):
return S.Zero
else:
# @a_i/@a_k = delta_i^k
# @a_i/@a^k = g_ij delta^j_k
# @a^i/@a^k = delta^i_k
# @a^i/@a_k = g^ij delta_j^k
# TODO: if there is no metric present, the derivative should be zero?
if self.head != s.head:
return S.Zero
# if heads are the same, provide delta and/or metric products
# for every free index pair in the appropriate tensor
# assumed that the free indices are in proper order
# A contravariant index in the derivative becomes covariant
# after performing the derivative and vice versa
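# A hedged illustration of these rules (head and index names are chosen
# only for this sketch): for a rank-1 head A over a metric index type,
# differentiating A(i) with respect to A(k) gives delta(i, -k), while
# differentiating A(i) with respect to A(-k) gives metric(i, d_0)*delta(-d_0, k).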
kronecker_delta_list = [1]
# note: this does not guarantee a correct index order
for (count, (iself, iother)) in enumerate(zip(self.get_free_indices(), s.get_free_indices())):
if iself.tensor_index_type != iother.tensor_index_type:
raise ValueError("index types not compatible")
else:
tensor_index_type = iself.tensor_index_type
tensor_metric = tensor_index_type.metric
dummy = TensorIndex("d_" + str(count), tensor_index_type,
is_up=iself.is_up)
if iself.is_up == iother.is_up:
kroneckerdelta = tensor_index_type.delta(iself, -iother)
else:
kroneckerdelta = (
TensMul(tensor_metric(iself, dummy),
tensor_index_type.delta(-dummy, -iother))
)
kronecker_delta_list.append(kroneckerdelta)
# doit is necessary to rename dummy indices accordingly
return TensMul.fromiter(kronecker_delta_list).doit()
class TensMul(TensExpr, AssocOp):
"""
Product of tensors
Parameters
==========
coeff : SymPy coefficient of the tensor
args
Attributes
==========
``components`` : list of ``TensorHead`` of the component tensors
``types`` : list of nonrepeated ``TensorIndexType``
``free`` : list of ``(ind, ipos, icomp)``, see Notes
``dum`` : list of ``(ipos1, ipos2, icomp1, icomp2)``, see Notes
``ext_rank`` : rank of the tensor counting the dummy indices
``rank`` : rank of the tensor
``coeff`` : SymPy coefficient of the tensor
``free_args`` : list of the free indices in sorted order
``is_canon_bp`` : ``True`` if the tensor is in canonical form
Notes
=====
``args[0]`` list of ``TensorHead`` of the component tensors.
``args[1]`` list of ``(ind, ipos, icomp)``
where ``ind`` is a free index, ``ipos`` is the slot position
of ``ind`` in the ``icomp``-th component tensor.
``args[2]`` list of tuples representing dummy indices.
``(ipos1, ipos2, icomp1, icomp2)`` indicates that the contravariant
dummy index is the ``ipos1``-th slot position in the ``icomp1``-th
component tensor; the corresponding covariant index is
in the ``ipos2`` slot position in the ``icomp2``-th component tensor.
"""
identity = S.One
_index_structure = None # type: _IndexStructure
def __new__(cls, *args, **kw_args):
is_canon_bp = kw_args.get('is_canon_bp', False)
args = list(map(_sympify, args))
# Flatten:
args = [i for arg in args for i in (arg.args if isinstance(arg, (TensMul, Mul)) else [arg])]
args, indices, free, dum = TensMul._tensMul_contract_indices(args, replace_indices=False)
# Data for indices:
index_types = [i.tensor_index_type for i in indices]
index_structure = _IndexStructure(free, dum, index_types, indices, canon_bp=is_canon_bp)
obj = TensExpr.__new__(cls, *args)
obj._indices = indices
obj._index_types = index_types[:]
obj._index_structure = index_structure
obj._free = index_structure.free[:]
obj._dum = index_structure.dum[:]
obj._free_indices = set([x[0] for x in obj.free])
obj._rank = len(obj.free)
obj._ext_rank = len(obj._index_structure.free) + 2*len(obj._index_structure.dum)
obj._coeff = S.One
obj._is_canon_bp = is_canon_bp
return obj
index_types = property(lambda self: self._index_types)
free = property(lambda self: self._free)
dum = property(lambda self: self._dum)
free_indices = property(lambda self: self._free_indices)
rank = property(lambda self: self._rank)
ext_rank = property(lambda self: self._ext_rank)
@staticmethod
def _indices_to_free_dum(args_indices):
free2pos1 = {}
free2pos2 = {}
dummy_data = []
indices = []
# Notation for positions (to better understand the code):
# `pos1`: position in the `args`.
# `pos2`: position in the indices.
# Example:
# A(i, j)*B(k, m, n)*C(p)
# `pos1` of `n` is 1 because it's in `B` (second `args` of TensMul).
# `pos2` of `n` is 4 because it's the fifth overall index.
# Counter for the index position wrt the whole expression:
pos2 = 0
for pos1, arg_indices in enumerate(args_indices):
for index_pos, index in enumerate(arg_indices):
if not isinstance(index, TensorIndex):
raise TypeError("expected TensorIndex")
if -index in free2pos1:
# Dummy index detected:
other_pos1 = free2pos1.pop(-index)
other_pos2 = free2pos2.pop(-index)
if index.is_up:
dummy_data.append((index, pos1, other_pos1, pos2, other_pos2))
else:
dummy_data.append((-index, other_pos1, pos1, other_pos2, pos2))
indices.append(index)
elif index in free2pos1:
raise ValueError("Repeated index: %s" % index)
else:
free2pos1[index] = pos1
free2pos2[index] = pos2
indices.append(index)
pos2 += 1
free = [(i, p) for (i, p) in free2pos2.items()]
free_names = [i.name for i in free2pos2.keys()]
dummy_data.sort(key=lambda x: x[3])
return indices, free, free_names, dummy_data
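# A hedged worked example: for the single factor A(i, -i) the loop first
# records `i` as free, then meets `-i`, detects the contraction and moves the
# pair to `dummy_data`, so the return value is expected to be
# indices=[i, -i], free=[], free_names=[], dummy_data=[(i, 0, 0, 0, 1)].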
@staticmethod
def _dummy_data_to_dum(dummy_data):
return [(p2a, p2b) for (i, p1a, p1b, p2a, p2b) in dummy_data]
@staticmethod
def _tensMul_contract_indices(args, replace_indices=True):
replacements = [{} for _ in args]
#_index_order = all([_has_index_order(arg) for arg in args])
args_indices = [get_indices(arg) for arg in args]
indices, free, free_names, dummy_data = TensMul._indices_to_free_dum(args_indices)
cdt = defaultdict(int)
def dummy_name_gen(tensor_index_type):
nd = str(cdt[tensor_index_type])
cdt[tensor_index_type] += 1
return tensor_index_type.dummy_name + '_' + nd
if replace_indices:
for old_index, pos1cov, pos1contra, pos2cov, pos2contra in dummy_data:
index_type = old_index.tensor_index_type
while True:
dummy_name = dummy_name_gen(index_type)
if dummy_name not in free_names:
break
dummy = TensorIndex(dummy_name, index_type, True)
replacements[pos1cov][old_index] = dummy
replacements[pos1contra][-old_index] = -dummy
indices[pos2cov] = dummy
indices[pos2contra] = -dummy
args = [
arg._replace_indices(repl) if isinstance(arg, TensExpr) else arg
for arg, repl in zip(args, replacements)]
dum = TensMul._dummy_data_to_dum(dummy_data)
return args, indices, free, dum
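# A hedged illustration: contracting p(a)*q(-a) renames the contracted pair
# to the first unused dummy of the index type, so (assuming dummy_name 'L'
# and no free index already called L_0) the returned indices are
# [L_0, -L_0] and dum is [(0, 1)].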
@staticmethod
def _get_components_from_args(args):
"""
Get a list of ``Tensor`` objects having the same ``TIDS`` if multiplied
by one another.
"""
components = []
for arg in args:
if not isinstance(arg, TensExpr):
continue
if isinstance(arg, TensAdd):
continue
components.extend(arg.components)
return components
@staticmethod
def _rebuild_tensors_list(args, index_structure):
indices = index_structure.get_indices()
#tensors = [None for i in components] # pre-allocate list
ind_pos = 0
for i, arg in enumerate(args):
if not isinstance(arg, TensExpr):
continue
prev_pos = ind_pos
ind_pos += arg.ext_rank
args[i] = Tensor(arg.component, indices[prev_pos:ind_pos])
def doit(self, **kwargs):
is_canon_bp = self._is_canon_bp
deep = kwargs.get('deep', True)
if deep:
args = [arg.doit(**kwargs) for arg in self.args]
else:
args = self.args
args = [arg for arg in args if arg != self.identity]
# Extract non-tensor coefficients:
coeff = reduce(lambda a, b: a*b, [arg for arg in args if not isinstance(arg, TensExpr)], S.One)
args = [arg for arg in args if isinstance(arg, TensExpr)]
if len(args) == 0:
return coeff
if coeff != self.identity:
args = [coeff] + args
if coeff == 0:
return S.Zero
if len(args) == 1:
return args[0]
args, indices, free, dum = TensMul._tensMul_contract_indices(args)
# Data for indices:
index_types = [i.tensor_index_type for i in indices]
index_structure = _IndexStructure(free, dum, index_types, indices, canon_bp=is_canon_bp)
obj = self.func(*args)
obj._index_types = index_types
obj._index_structure = index_structure
obj._ext_rank = len(obj._index_structure.free) + 2*len(obj._index_structure.dum)
obj._coeff = coeff
obj._is_canon_bp = is_canon_bp
return obj
# TODO: this method should be private
# TODO: should this method be renamed _from_components_free_dum ?
@staticmethod
def from_data(coeff, components, free, dum, **kw_args):
return TensMul(coeff, *TensMul._get_tensors_from_components_free_dum(components, free, dum), **kw_args).doit()
@staticmethod
def _get_tensors_from_components_free_dum(components, free, dum):
"""
Get a list of ``Tensor`` objects by distributing ``free`` and ``dum`` indices on the ``components``.
"""
index_structure = _IndexStructure.from_components_free_dum(components, free, dum)
indices = index_structure.get_indices()
tensors = [None for i in components] # pre-allocate list
# distribute indices on components to build a list of tensors:
ind_pos = 0
for i, component in enumerate(components):
prev_pos = ind_pos
ind_pos += component.rank
tensors[i] = Tensor(component, indices[prev_pos:ind_pos])
return tensors
def _get_free_indices_set(self):
return set([i[0] for i in self.free])
def _get_dummy_indices_set(self):
dummy_pos = set(itertools.chain(*self.dum))
return set(idx for i, idx in enumerate(self._index_structure.get_indices()) if i in dummy_pos)
def _get_position_offset_for_indices(self):
arg_offset = [None for i in range(self.ext_rank)]
counter = 0
for i, arg in enumerate(self.args):
if not isinstance(arg, TensExpr):
continue
for j in range(arg.ext_rank):
arg_offset[j + counter] = counter
counter += arg.ext_rank
return arg_offset
@property
def free_args(self):
return sorted([x[0] for x in self.free])
@property
def components(self):
return self._get_components_from_args(self.args)
@property
def free_in_args(self):
arg_offset = self._get_position_offset_for_indices()
argpos = self._get_indices_to_args_pos()
return [(ind, pos-arg_offset[pos], argpos[pos]) for (ind, pos) in self.free]
@property
def coeff(self):
# return Mul.fromiter([c for c in self.args if not isinstance(c, TensExpr)])
return self._coeff
@property
def nocoeff(self):
return self.func(*[t for t in self.args if isinstance(t, TensExpr)]).doit()
@property
def dum_in_args(self):
arg_offset = self._get_position_offset_for_indices()
argpos = self._get_indices_to_args_pos()
return [(p1-arg_offset[p1], p2-arg_offset[p2], argpos[p1], argpos[p2]) for p1, p2 in self.dum]
def equals(self, other):
if other == 0:
return self.coeff == 0
other = _sympify(other)
if not isinstance(other, TensExpr):
assert not self.components
return self.coeff == other
return self.canon_bp() == other.canon_bp()
def get_indices(self):
"""
Returns the list of indices of the tensor
The indices are listed in the order in which they appear in the
component tensors.
The dummy indices are given a name which does not collide with
the names of the free indices.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensor_heads('p,q', [Lorentz])
>>> t = p(m1)*g(m0,m2)
>>> t.get_indices()
[m1, m0, m2]
>>> t2 = p(m1)*g(-m1, m2)
>>> t2.get_indices()
[L_0, -L_0, m2]
"""
return self._indices
def get_free_indices(self): # type: () -> List[TensorIndex]
"""
Returns the list of free indices of the tensor
The indices are listed in the order in which they appear in the
component tensors.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensor_heads('p,q', [Lorentz])
>>> t = p(m1)*g(m0,m2)
>>> t.get_free_indices()
[m1, m0, m2]
>>> t2 = p(m1)*g(-m1, m2)
>>> t2.get_free_indices()
[m2]
"""
return self._index_structure.get_free_indices()
def _replace_indices(self, repl): # type: (tDict[TensorIndex, TensorIndex]) -> TensExpr
return self.func(*[arg._replace_indices(repl) if isinstance(arg, TensExpr) else arg for arg in self.args])
def split(self):
"""
Returns a list of tensors, whose product is ``self``
Dummy indices contracted among different tensor components
become free indices with the same name as the one used to
represent the dummy indices.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads, TensorSymmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> a, b, c, d = tensor_indices('a,b,c,d', Lorentz)
>>> A, B = tensor_heads('A,B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
>>> t = A(a,b)*B(-b,c)
>>> t
A(a, L_0)*B(-L_0, c)
>>> t.split()
[A(a, L_0), B(-L_0, c)]
"""
if self.args == ():
return [self]
splitp = []
res = 1
for arg in self.args:
if isinstance(arg, Tensor):
splitp.append(res*arg)
res = 1
else:
res *= arg
return splitp
def _expand(self, **hints):
# TODO: temporary solution, in the future this should be linked to
# `Expr.expand`.
args = [_expand(arg, **hints) for arg in self.args]
args1 = [arg.args if isinstance(arg, (Add, TensAdd)) else (arg,) for arg in args]
return TensAdd(*[
TensMul(*i) for i in itertools.product(*args1)]
)
def __neg__(self):
return TensMul(S.NegativeOne, self, is_canon_bp=self._is_canon_bp).doit()
def __getitem__(self, item):
deprecate_data()
return self.data[item]
def _get_args_for_traditional_printer(self):
args = list(self.args)
if (self.coeff < 0) == True:
# expressions like "-A(a)"
sign = "-"
if self.coeff == S.NegativeOne:
args = args[1:]
else:
args[0] = -args[0]
else:
sign = ""
return sign, args
def _sort_args_for_sorted_components(self):
"""
Returns the ``args`` sorted according to the components commutation
properties.
The sorting is done taking into account the commutation group
of the component tensors.
"""
cv = [arg for arg in self.args if isinstance(arg, TensExpr)]
sign = 1
n = len(cv) - 1
for i in range(n):
for j in range(n, i, -1):
c = cv[j-1].commutes_with(cv[j])
# if `c` is `None`, it neither commutes nor anticommutes, skip:
if c not in [0, 1]:
continue
typ1 = sorted(set(cv[j-1].component.index_types), key=lambda x: x.name)
typ2 = sorted(set(cv[j].component.index_types), key=lambda x: x.name)
if (typ1, cv[j-1].component.name) > (typ2, cv[j].component.name):
cv[j-1], cv[j] = cv[j], cv[j-1]
# if `c` is 1, they anticommute, so change sign:
if c:
sign = -sign
coeff = sign * self.coeff
if coeff != 1:
return [coeff] + cv
return cv
def sorted_components(self):
"""
Returns a tensor product with sorted components.
"""
return TensMul(*self._sort_args_for_sorted_components()).doit()
def perm2tensor(self, g, is_canon_bp=False):
"""
Returns the tensor corresponding to the permutation ``g``
For further details, see the method in ``TIDS`` with the same name.
"""
return perm2tensor(self, g, is_canon_bp=is_canon_bp)
def canon_bp(self):
"""
Canonicalize using the Butler-Portugal algorithm for canonicalization
under monoterm symmetries.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, TensorSymmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2))
>>> t = A(m0,-m1)*A(m1,-m0)
>>> t.canon_bp()
-A(L_0, L_1)*A(-L_0, -L_1)
>>> t = A(m0,-m1)*A(m1,-m2)*A(m2,-m0)
>>> t.canon_bp()
0
"""
if self._is_canon_bp:
return self
expr = self.expand()
if isinstance(expr, TensAdd):
return expr.canon_bp()
if not expr.components:
return expr
t = expr.sorted_components()
g, dummies, msym = t._index_structure.indices_canon_args()
v = components_canon_args(t.components)
can = canonicalize(g, dummies, msym, *v)
if can == 0:
return S.Zero
tmul = t.perm2tensor(can, True)
return tmul
def contract_delta(self, delta):
t = self.contract_metric(delta)
return t
def _get_indices_to_args_pos(self):
"""
Get a dict mapping the index position to TensMul's argument number.
"""
pos_map = dict()
pos_counter = 0
for arg_i, arg in enumerate(self.args):
if not isinstance(arg, TensExpr):
continue
assert isinstance(arg, Tensor)
for i in range(arg.ext_rank):
pos_map[pos_counter] = arg_i
pos_counter += 1
return pos_map
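# A hedged example: in 3*p(m0)*q(m1) the tensorial indices occupy overall
# positions 0 and 1, while p and q sit at argument positions 1 and 2
# (position 0 holding the scalar coefficient), so pos_map is {0: 1, 1: 2}.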
def contract_metric(self, g):
"""
Raise or lower indices with the metric ``g``
Parameters
==========
g : metric
Notes
=====
see the ``TensorIndexType`` docstring for the contraction conventions
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensor_heads('p,q', [Lorentz])
>>> t = p(m0)*q(m1)*g(-m0, -m1)
>>> t.canon_bp()
metric(L_0, L_1)*p(-L_0)*q(-L_1)
>>> t.contract_metric(g).canon_bp()
p(L_0)*q(-L_0)
"""
expr = self.expand()
if self != expr:
expr = expr.canon_bp()
return expr.contract_metric(g)
pos_map = self._get_indices_to_args_pos()
args = list(self.args)
#antisym = g.index_types[0].metric_antisym
if g.symmetry == TensorSymmetry.fully_symmetric(-2):
antisym = 1
elif g.symmetry == TensorSymmetry.fully_symmetric(2):
antisym = 0
elif g.symmetry == TensorSymmetry.no_symmetry(2):
antisym = None
else:
raise NotImplementedError
# list of positions of the metric ``g`` inside ``args``
gpos = [i for i, x in enumerate(self.args) if isinstance(x, Tensor) and x.component == g]
if not gpos:
return self
# Sign is either 1 or -1, to correct the sign after metric contraction
# (for spinor indices).
sign = 1
dum = self.dum[:]
free = self.free[:]
elim = set()
for gposx in gpos:
if gposx in elim:
continue
free1 = [x for x in free if pos_map[x[1]] == gposx]
dum1 = [x for x in dum if pos_map[x[0]] == gposx or pos_map[x[1]] == gposx]
if not dum1:
continue
elim.add(gposx)
# subs with the multiplication neutral element, that is, remove it:
args[gposx] = 1
if len(dum1) == 2:
if not antisym:
dum10, dum11 = dum1
if pos_map[dum10[1]] == gposx:
# the index with pos p0 is contravariant
p0 = dum10[0]
else:
# the index with pos p0 is covariant
p0 = dum10[1]
if pos_map[dum11[1]] == gposx:
# the index with pos p1 is contravariant
p1 = dum11[0]
else:
# the index with pos p1 is covariant
p1 = dum11[1]
dum.append((p0, p1))
else:
dum10, dum11 = dum1
# change the sign to bring the indices of the metric to contravariant
# form; change the sign if dum10 has the metric index in position 0
if pos_map[dum10[1]] == gposx:
# the index with pos p0 is contravariant
p0 = dum10[0]
if dum10[1] == 1:
sign = -sign
else:
# the index with pos p0 is covariant
p0 = dum10[1]
if dum10[0] == 0:
sign = -sign
if pos_map[dum11[1]] == gposx:
# the index with pos p1 is contravariant
p1 = dum11[0]
sign = -sign
else:
# the index with pos p1 is covariant
p1 = dum11[1]
dum.append((p0, p1))
elif len(dum1) == 1:
if not antisym:
dp0, dp1 = dum1[0]
if pos_map[dp0] == pos_map[dp1]:
# g(i, -i)
typ = g.index_types[0]
sign = sign*typ.dim
else:
# g(i0, i1)*p(-i1)
if pos_map[dp0] == gposx:
p1 = dp1
else:
p1 = dp0
ind, p = free1[0]
free.append((ind, p1))
else:
dp0, dp1 = dum1[0]
if pos_map[dp0] == pos_map[dp1]:
# g(i, -i)
typ = g.index_types[0]
sign = sign*typ.dim
if dp0 < dp1:
# g(i, -i) = -D with antisymmetric metric
sign = -sign
else:
# g(i0, i1)*p(-i1)
if pos_map[dp0] == gposx:
p1 = dp1
if dp0 == 0:
sign = -sign
else:
p1 = dp0
ind, p = free1[0]
free.append((ind, p1))
dum = [x for x in dum if x not in dum1]
free = [x for x in free if x not in free1]
# shift positions:
shift = 0
shifts = [0]*len(args)
for i in range(len(args)):
if i in elim:
shift += 2
continue
shifts[i] = shift
free = [(ind, p - shifts[pos_map[p]]) for (ind, p) in free if pos_map[p] not in elim]
dum = [(p0 - shifts[pos_map[p0]], p1 - shifts[pos_map[p1]]) for i, (p0, p1) in enumerate(dum) if pos_map[p0] not in elim and pos_map[p1] not in elim]
res = sign*TensMul(*args).doit()
if not isinstance(res, TensExpr):
return res
im = _IndexStructure.from_components_free_dum(res.components, free, dum)
return res._set_new_index_structure(im)
def _set_new_index_structure(self, im, is_canon_bp=False):
indices = im.get_indices()
return self._set_indices(*indices, is_canon_bp=is_canon_bp)
def _set_indices(self, *indices, **kw_args):
if len(indices) != self.ext_rank:
raise ValueError("indices length mismatch")
args = list(self.args)[:]
pos = 0
is_canon_bp = kw_args.pop('is_canon_bp', False)
for i, arg in enumerate(args):
if not isinstance(arg, TensExpr):
continue
assert isinstance(arg, Tensor)
ext_rank = arg.ext_rank
args[i] = arg._set_indices(*indices[pos:pos+ext_rank])
pos += ext_rank
return TensMul(*args, is_canon_bp=is_canon_bp).doit()
@staticmethod
def _index_replacement_for_contract_metric(args, free, dum):
for arg in args:
if not isinstance(arg, TensExpr):
continue
assert isinstance(arg, Tensor)
def substitute_indices(self, *index_tuples):
new_args = []
for arg in self.args:
if isinstance(arg, TensExpr):
arg = arg.substitute_indices(*index_tuples)
new_args.append(arg)
return TensMul(*new_args).doit()
def __call__(self, *indices):
deprecate_fun_eval()
free_args = self.free_args
indices = list(indices)
if [x.tensor_index_type for x in indices] != [x.tensor_index_type for x in free_args]:
raise ValueError('incompatible types')
if indices == free_args:
return self
t = self.substitute_indices(*list(zip(free_args, indices)))
# object is rebuilt in order to make sure that all contracted indices
# get recognized as dummies, but only if there are contracted indices.
if len(set(i if i.is_up else -i for i in indices)) != len(indices):
return t.func(*t.args)
return t
def _extract_data(self, replacement_dict):
args_indices, arrays = zip(*[arg._extract_data(replacement_dict) for arg in self.args if isinstance(arg, TensExpr)])
coeff = reduce(operator.mul, [a for a in self.args if not isinstance(a, TensExpr)], S.One)
indices, free, free_names, dummy_data = TensMul._indices_to_free_dum(args_indices)
dum = TensMul._dummy_data_to_dum(dummy_data)
ext_rank = self.ext_rank
free.sort(key=lambda x: x[1])
free_indices = [i[0] for i in free]
return free_indices, coeff*_TensorDataLazyEvaluator.data_contract_dum(arrays, dum, ext_rank)
@property
def data(self):
deprecate_data()
dat = _tensor_data_substitution_dict[self.expand()]
return dat
@data.setter
def data(self, data):
deprecate_data()
raise ValueError("Not possible to set component data to a tensor expression")
@data.deleter
def data(self):
deprecate_data()
raise ValueError("Not possible to delete component data to a tensor expression")
def __iter__(self):
deprecate_data()
if self.data is None:
raise ValueError("No iteration on abstract tensors")
return self.data.__iter__()
def _eval_rewrite_as_Indexed(self, *args):
from sympy import Sum
index_symbols = [i.args[0] for i in self.get_indices()]
args = [arg.args[0] if isinstance(arg, Sum) else arg for arg in args]
expr = Mul.fromiter(args)
return self._check_add_Sum(expr, index_symbols)
def _eval_partial_derivative(self, s):
# Evaluation like Mul
terms = []
for i, arg in enumerate(self.args):
# checking whether the argument being differentiated is a tensor
# instance or some other object is necessary, but ugly
if isinstance(arg, TensExpr):
d = arg._eval_partial_derivative(s)
else:
# do not call diff if s is not a symbol
if s._diff_wrt:
d = arg._eval_derivative(s)
else:
d = S.Zero
if d:
terms.append(TensMul.fromiter(self.args[:i] + (d,) + self.args[i + 1:]))
return TensAdd.fromiter(terms)
class TensorElement(TensExpr):
"""
Tensor with evaluated components.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, TensorHead, TensorSymmetry
>>> from sympy import symbols
>>> L = TensorIndexType("L")
>>> i, j, k = symbols("i j k")
>>> A = TensorHead("A", [L, L], TensorSymmetry.fully_symmetric(2))
>>> A(i, j).get_free_indices()
[i, j]
If we want to set component ``i`` to a specific value, use the
``TensorElement`` class:
>>> from sympy.tensor.tensor import TensorElement
>>> te = TensorElement(A(i, j), {i: 2})
As index ``i`` has been accessed (``{i: 2}`` is the evaluation of its 3rd
element), the free indices will only contain ``j``:
>>> te.get_free_indices()
[j]
"""
def __new__(cls, expr, index_map):
if not isinstance(expr, Tensor):
# remap
if not isinstance(expr, TensExpr):
raise TypeError("%s is not a tensor expression" % expr)
return expr.func(*[TensorElement(arg, index_map) for arg in expr.args])
expr_free_indices = expr.get_free_indices()
name_translation = {i.args[0]: i for i in expr_free_indices}
index_map = {name_translation.get(index, index): value for index, value in index_map.items()}
index_map = {index: value for index, value in index_map.items() if index in expr_free_indices}
if len(index_map) == 0:
return expr
free_indices = [i for i in expr_free_indices if i not in index_map.keys()]
index_map = Dict(index_map)
obj = TensExpr.__new__(cls, expr, index_map)
obj._free_indices = free_indices
return obj
@property
def free(self):
return [(index, i) for i, index in enumerate(self.get_free_indices())]
@property
def dum(self):
# TODO: inherit dummies from expr
return []
@property
def expr(self):
return self._args[0]
@property
def index_map(self):
return self._args[1]
@property
def coeff(self):
return S.One
@property
def nocoeff(self):
return self
def get_free_indices(self):
return self._free_indices
def _replace_indices(self, repl): # type: (tDict[TensorIndex, TensorIndex]) -> TensExpr
# TODO: can be improved:
return self.xreplace(repl)
def get_indices(self):
return self.get_free_indices()
def _extract_data(self, replacement_dict):
ret_indices, array = self.expr._extract_data(replacement_dict)
index_map = self.index_map
slice_tuple = tuple(index_map.get(i, slice(None)) for i in ret_indices)
ret_indices = [i for i in ret_indices if i not in index_map]
array = array.__getitem__(slice_tuple)
return ret_indices, array
def canon_bp(p):
"""
Butler-Portugal canonicalization. See ``tensor_can.py`` from the
combinatorics module for the details.
"""
if isinstance(p, TensExpr):
return p.canon_bp()
return p
def tensor_mul(*a):
"""
product of tensors
"""
if not a:
return TensMul.from_data(S.One, [], [], [])
t = a[0]
for tx in a[1:]:
t = t*tx
return t
def riemann_cyclic_replace(t_r):
"""
replace Riemann tensor with an equivalent expression
``R(m,n,p,q) -> 2/3*R(m,n,p,q) - 1/3*R(m,q,n,p) + 1/3*R(m,p,n,q)``
"""
free = sorted(t_r.free, key=lambda x: x[1])
m, n, p, q = [x[0] for x in free]
t0 = t_r*Rational(2, 3)
t1 = -t_r.substitute_indices((m,m),(n,q),(p,n),(q,p))*Rational(1, 3)
t2 = t_r.substitute_indices((m,m),(n,p),(p,n),(q,q))*Rational(1, 3)
t3 = t0 + t1 + t2
return t3
def riemann_cyclic(t2):
"""
replace each Riemann tensor with an equivalent expression
satisfying the cyclic identity.
This trick is discussed in the reference guide to Cadabra.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, riemann_cyclic, TensorSymmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz)
>>> R = TensorHead('R', [Lorentz]*4, TensorSymmetry.riemann())
>>> t = R(i,j,k,l)*(R(-i,-j,-k,-l) - 2*R(-i,-k,-j,-l))
>>> riemann_cyclic(t)
0
"""
t2 = t2.expand()
if isinstance(t2, (TensMul, Tensor)):
args = [t2]
else:
args = t2.args
a1 = [x.split() for x in args]
a2 = [[riemann_cyclic_replace(tx) for tx in y] for y in a1]
a3 = [tensor_mul(*v) for v in a2]
t3 = TensAdd(*a3).doit()
if not t3:
return t3
else:
return canon_bp(t3)
def get_lines(ex, index_type):
"""
returns ``(lines, traces, rest)`` for an index type,
where ``lines`` is the list of lists of positions forming a matrix line,
``traces`` is the list of lists of traced matrix lines,
``rest`` is the rest of the elements of the tensor.
"""
def _join_lines(a):
i = 0
while i < len(a):
x = a[i]
xend = x[-1]
xstart = x[0]
hit = True
while hit:
hit = False
for j in range(i + 1, len(a)):
if j >= len(a):
break
if a[j][0] == xend:
hit = True
x.extend(a[j][1:])
xend = x[-1]
a.pop(j)
continue
if a[j][0] == xstart:
hit = True
a[i] = list(reversed(a[j][1:])) + x
x = a[i]
xstart = a[i][0]
a.pop(j)
continue
if a[j][-1] == xend:
hit = True
x.extend(reversed(a[j][:-1]))
xend = x[-1]
a.pop(j)
continue
if a[j][-1] == xstart:
hit = True
a[i] = a[j][:-1] + x
x = a[i]
xstart = x[0]
a.pop(j)
continue
i += 1
return a
arguments = ex.args
dt = {}
for c in ex.args:
if not isinstance(c, TensExpr):
continue
if c in dt:
continue
index_types = c.index_types
a = []
for i in range(len(index_types)):
if index_types[i] is index_type:
a.append(i)
if len(a) > 2:
raise ValueError('at most two indices of type %s allowed' % index_type)
if len(a) == 2:
dt[c] = a
#dum = ex.dum
lines = []
traces = []
traces1 = []
#indices_to_args_pos = ex._get_indices_to_args_pos()
# TODO: add a dum_to_components_map ?
for p0, p1, c0, c1 in ex.dum_in_args:
if arguments[c0] not in dt:
continue
if c0 == c1:
traces.append([c0])
continue
ta0 = dt[arguments[c0]]
ta1 = dt[arguments[c1]]
if p0 not in ta0:
continue
if ta0.index(p0) == ta1.index(p1):
# case gamma(i,s0,-s1) in c0, gamma(j,-s0,s2) in c1;
# to deal with this case one could add to the position
# a flag for transposition;
# one could write [(c0, False), (c1, True)]
raise NotImplementedError
# if p0 == ta0[1] then G in pos c0 is mult on the right by G in c1
# if p0 == ta0[0] then G in pos c1 is mult on the right by G in c0
ta0 = dt[arguments[c0]]
b0, b1 = (c0, c1) if p0 == ta0[1] else (c1, c0)
lines1 = lines[:]
for line in lines:
if line[-1] == b0:
if line[0] == b1:
n = line.index(min(line))
traces1.append(line)
traces.append(line[n:] + line[:n])
else:
line.append(b1)
break
elif line[0] == b1:
line.insert(0, b0)
break
else:
lines1.append([b0, b1])
lines = [x for x in lines1 if x not in traces1]
lines = _join_lines(lines)
rest = []
for line in lines:
for y in line:
rest.append(y)
for line in traces:
for y in line:
rest.append(y)
rest = [x for x in range(len(arguments)) if x not in rest]
return lines, traces, rest
def get_free_indices(t):
if not isinstance(t, TensExpr):
return ()
return t.get_free_indices()
def get_indices(t):
if not isinstance(t, TensExpr):
return ()
return t.get_indices()
def get_index_structure(t):
if isinstance(t, TensExpr):
return t._index_structure
return _IndexStructure([], [], [], [])
def get_coeff(t):
if isinstance(t, Tensor):
return S.One
if isinstance(t, TensMul):
return t.coeff
if isinstance(t, TensExpr):
raise ValueError("no coefficient associated to this tensor expression")
return t
def contract_metric(t, g):
if isinstance(t, TensExpr):
return t.contract_metric(g)
return t
def perm2tensor(t, g, is_canon_bp=False):
"""
Returns the tensor corresponding to the permutation ``g``
For further details, see the method in ``TIDS`` with the same name.
"""
if not isinstance(t, TensExpr):
return t
elif isinstance(t, (Tensor, TensMul)):
nim = get_index_structure(t).perm2tensor(g, is_canon_bp=is_canon_bp)
res = t._set_new_index_structure(nim, is_canon_bp=is_canon_bp)
if g[-1] != len(g) - 1:
return -res
return res
raise NotImplementedError()
def substitute_indices(t, *index_tuples):
if not isinstance(t, TensExpr):
return t
return t.substitute_indices(*index_tuples)
def _expand(expr, **kwargs):
if isinstance(expr, TensExpr):
return expr._expand(**kwargs)
else:
return expr.expand(**kwargs)
|
66030a3aa25aa5f109cc5427192a7c3dcbb6366917a889807cd9f567cb27b201 | r"""Module that defines indexed objects
The classes ``IndexedBase``, ``Indexed``, and ``Idx`` represent a
matrix element ``M[i, j]`` as in the following diagram::
1) The Indexed class represents the entire indexed object.
|
___|___
' '
M[i, j]
/ \__\______
| |
| |
| 2) The Idx class represents indices; each Idx can
| optionally contain information about its range.
|
3) IndexedBase represents the 'stem' of an indexed object, here `M`.
The stem used by itself is usually taken to represent the entire
array.
There can be any number of indices on an Indexed object. No
transformation properties are implemented in these Base objects, but
implicit contraction of repeated indices is supported.
Note that the support for complicated (i.e. non-atomic) integer
expressions as indices is limited. (This should be improved in
future releases.)
Examples
========
To express the above matrix element example you would write:
>>> from sympy import symbols, IndexedBase, Idx
>>> M = IndexedBase('M')
>>> i, j = symbols('i j', cls=Idx)
>>> M[i, j]
M[i, j]
Repeated indices in a product imply a summation, so to express a
matrix-vector product in terms of Indexed objects:
>>> x = IndexedBase('x')
>>> M[i, j]*x[j]
M[i, j]*x[j]
If the indexed objects will be converted to component based arrays, e.g.
with the code printers or the autowrap framework, you also need to provide
(symbolic or numerical) dimensions. This can be done by passing an
optional shape parameter to IndexedBase upon construction:
>>> dim1, dim2 = symbols('dim1 dim2', integer=True)
>>> A = IndexedBase('A', shape=(dim1, 2*dim1, dim2))
>>> A.shape
(dim1, 2*dim1, dim2)
>>> A[i, j, 3].shape
(dim1, 2*dim1, dim2)
If an IndexedBase object has no shape information, it is assumed that the
array is as large as the ranges of its indices:
>>> n, m = symbols('n m', integer=True)
>>> i = Idx('i', m)
>>> j = Idx('j', n)
>>> M[i, j].shape
(m, n)
>>> M[i, j].ranges
[(0, m - 1), (0, n - 1)]
The above can be compared with the following:
>>> A[i, 2, j].shape
(dim1, 2*dim1, dim2)
>>> A[i, 2, j].ranges
[(0, m - 1), None, (0, n - 1)]
To analyze the structure of indexed expressions, you can use the methods
get_indices() and get_contraction_structure():
>>> from sympy.tensor import get_indices, get_contraction_structure
>>> get_indices(A[i, j, j])
({i}, {})
>>> get_contraction_structure(A[i, j, j])
{(j,): {A[i, j, j]}}
See the appropriate docstrings for a detailed explanation of the output.
"""
# TODO: (some ideas for improvement)
#
# o test and guarantee numpy compatibility
# - implement full support for broadcasting
# - strided arrays
#
# o more functions to analyze indexed expressions
# - identify standard constructs, e.g matrix-vector product in a subexpression
#
# o functions to generate component based arrays (numpy and sympy.Matrix)
# - generate a single array directly from Indexed
# - convert simple sub-expressions
#
# o sophisticated indexing (possibly in subclasses to preserve simplicity)
# - Idx with range smaller than dimension of Indexed
# - Idx with stepsize != 1
# - Idx with step determined by function call
from __future__ import print_function, division
from sympy.core.assumptions import StdFactKB
from sympy.core import Expr, Tuple, sympify, S
from sympy.core.symbol import _filter_assumptions, Symbol
from sympy.core.compatibility import (is_sequence, NotIterable,
Iterable)
from sympy.core.logic import fuzzy_bool, fuzzy_not
from sympy.core.sympify import _sympify
from sympy.functions.special.tensor_functions import KroneckerDelta
class IndexException(Exception):
pass
class Indexed(Expr):
"""Represents a mathematical object with indices.
>>> from sympy import Indexed, IndexedBase, Idx, symbols
>>> i, j = symbols('i j', cls=Idx)
>>> Indexed('A', i, j)
A[i, j]
It is recommended that ``Indexed`` objects be created by indexing ``IndexedBase``:
``IndexedBase('A')[i, j]`` instead of ``Indexed(IndexedBase('A'), i, j)``.
>>> A = IndexedBase('A')
>>> a_ij = A[i, j] # Prefer this,
>>> b_ij = Indexed(A, i, j) # over this.
>>> a_ij == b_ij
True
"""
is_commutative = True
is_Indexed = True
is_symbol = True
is_Atom = True
def __new__(cls, base, *args, **kw_args):
from sympy.utilities.misc import filldedent
from sympy.tensor.array.ndim_array import NDimArray
from sympy.matrices.matrices import MatrixBase
if not args:
raise IndexException("Indexed needs at least one index.")
if isinstance(base, (str, Symbol)):
base = IndexedBase(base)
elif not hasattr(base, '__getitem__') and not isinstance(base, IndexedBase):
raise TypeError(filldedent("""
The base can only be replaced with a string, Symbol,
IndexedBase or an object with a method for getting
items (i.e. an object with a `__getitem__` method).
"""))
args = list(map(sympify, args))
if isinstance(base, (NDimArray, Iterable, Tuple, MatrixBase)) and all([i.is_number for i in args]):
if len(args) == 1:
return base[args[0]]
else:
return base[args]
obj = Expr.__new__(cls, base, *args, **kw_args)
try:
IndexedBase._set_assumptions(obj, base.assumptions0)
except AttributeError:
IndexedBase._set_assumptions(obj, {})
return obj
def _hashable_content(self):
return super(Indexed, self)._hashable_content() + tuple(sorted(self.assumptions0.items()))
@property
def name(self):
return str(self)
@property
def _diff_wrt(self):
"""Allow derivatives with respect to an ``Indexed`` object."""
return True
def _eval_derivative(self, wrt):
from sympy.tensor.array.ndim_array import NDimArray
if isinstance(wrt, Indexed) and wrt.base == self.base:
if len(self.indices) != len(wrt.indices):
msg = "Different # of indices: d({!s})/d({!s})".format(self,
wrt)
raise IndexException(msg)
result = S.One
for index1, index2 in zip(self.indices, wrt.indices):
result *= KroneckerDelta(index1, index2)
return result
elif isinstance(self.base, NDimArray):
from sympy.tensor.array import derive_by_array
return Indexed(derive_by_array(self.base, wrt), *self.args[1:])
else:
if Tuple(self.indices).has(wrt):
return S.NaN
return S.Zero
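# A hedged illustration of the first branch above (base and index names are
# chosen only for this sketch):
#   >>> from sympy import IndexedBase, symbols, Idx
#   >>> A = IndexedBase('A'); i, j, k, l = symbols('i j k l', cls=Idx)
#   >>> A[i, j].diff(A[k, l])
#   KroneckerDelta(i, k)*KroneckerDelta(j, l)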
@property
def assumptions0(self):
return {k: v for k, v in self._assumptions.items() if v is not None}
@property
def base(self):
"""Returns the ``IndexedBase`` of the ``Indexed`` object.
Examples
========
>>> from sympy import Indexed, IndexedBase, Idx, symbols
>>> i, j = symbols('i j', cls=Idx)
>>> Indexed('A', i, j).base
A
>>> B = IndexedBase('B')
>>> B == B[i, j].base
True
"""
return self.args[0]
@property
def indices(self):
"""
Returns the indices of the ``Indexed`` object.
Examples
========
>>> from sympy import Indexed, Idx, symbols
>>> i, j = symbols('i j', cls=Idx)
>>> Indexed('A', i, j).indices
(i, j)
"""
return self.args[1:]
@property
def rank(self):
"""
Returns the rank of the ``Indexed`` object.
Examples
========
>>> from sympy import Indexed, Idx, symbols
>>> i, j, k, l, m = symbols('i:m', cls=Idx)
>>> Indexed('A', i, j).rank
2
>>> q = Indexed('A', i, j, k, l, m)
>>> q.rank
5
>>> q.rank == len(q.indices)
True
"""
return len(self.args) - 1
@property
def shape(self):
"""Returns a list with dimensions of each index.
Dimensions are a property of the array, not of the indices. Still, if
the ``IndexedBase`` does not define a shape attribute, it is assumed
that the ranges of the indices correspond to the shape of the array.
>>> from sympy import IndexedBase, Idx, symbols
>>> n, m = symbols('n m', integer=True)
>>> i = Idx('i', m)
>>> j = Idx('j', m)
>>> A = IndexedBase('A', shape=(n, n))
>>> B = IndexedBase('B')
>>> A[i, j].shape
(n, n)
>>> B[i, j].shape
(m, m)
"""
from sympy.utilities.misc import filldedent
if self.base.shape:
return self.base.shape
sizes = []
for i in self.indices:
upper = getattr(i, 'upper', None)
lower = getattr(i, 'lower', None)
if None in (upper, lower):
raise IndexException(filldedent("""
Range is not defined for all indices in: %s""" % self))
try:
size = upper - lower + 1
except TypeError:
raise IndexException(filldedent("""
Shape cannot be inferred from Idx with
undefined range: %s""" % self))
sizes.append(size)
return Tuple(*sizes)
@property
def ranges(self):
"""Returns a list of tuples with lower and upper range of each index.
If an index does not define the data members upper and lower, the
corresponding slot in the list contains ``None`` instead of a tuple.
Examples
========
>>> from sympy import Indexed, Idx, symbols
>>> Indexed('A', Idx('i', 2), Idx('j', 4), Idx('k', 8)).ranges
[(0, 1), (0, 3), (0, 7)]
>>> Indexed('A', Idx('i', 3), Idx('j', 3), Idx('k', 3)).ranges
[(0, 2), (0, 2), (0, 2)]
>>> x, y, z = symbols('x y z', integer=True)
>>> Indexed('A', x, y, z).ranges
[None, None, None]
"""
ranges = []
for i in self.indices:
sentinel = object()
upper = getattr(i, 'upper', sentinel)
lower = getattr(i, 'lower', sentinel)
if sentinel not in (upper, lower):
ranges.append(Tuple(lower, upper))
else:
ranges.append(None)
return ranges
def _sympystr(self, p):
indices = list(map(p.doprint, self.indices))
return "%s[%s]" % (p.doprint(self.base), ", ".join(indices))
@property
def free_symbols(self):
base_free_symbols = self.base.free_symbols
indices_free_symbols = {
fs for i in self.indices for fs in i.free_symbols}
if base_free_symbols:
return {self} | base_free_symbols | indices_free_symbols
else:
return indices_free_symbols
@property
def expr_free_symbols(self):
return {self}
class IndexedBase(Expr, NotIterable):
"""Represent the base or stem of an indexed object
The IndexedBase class represents an array that contains elements. The main purpose
of this class is to allow the convenient creation of objects of the Indexed
class. The __getitem__ method of IndexedBase returns an instance of
Indexed. Alone, without indices, the IndexedBase class can be used as a
notation for e.g. matrix equations, resembling what you could do with the
Symbol class. But, the IndexedBase class adds functionality that is not
available for Symbol instances:
- An IndexedBase object can optionally store shape information. This can
be used to check array conformance and conditions for numpy
broadcasting. (TODO)
- An IndexedBase object implements syntactic sugar that allows easy symbolic
representation of array operations, using implicit summation of
repeated indices.
- The IndexedBase object symbolizes a mathematical structure equivalent
to arrays, and is recognized as such for code generation and automatic
compilation and wrapping.
>>> from sympy.tensor import IndexedBase, Idx
>>> from sympy import symbols
>>> A = IndexedBase('A'); A
A
>>> type(A)
<class 'sympy.tensor.indexed.IndexedBase'>
When an IndexedBase object receives indices, it returns an array with named
axes, represented by an Indexed object:
>>> i, j = symbols('i j', integer=True)
>>> A[i, j, 2]
A[i, j, 2]
>>> type(A[i, j, 2])
<class 'sympy.tensor.indexed.Indexed'>
The IndexedBase constructor takes an optional shape argument. If given,
it overrides any shape information in the indices. (But not the index
ranges!)
>>> m, n, o, p = symbols('m n o p', integer=True)
>>> i = Idx('i', m)
>>> j = Idx('j', n)
>>> A[i, j].shape
(m, n)
>>> B = IndexedBase('B', shape=(o, p))
>>> B[i, j].shape
(o, p)
Assumptions can be specified with keyword arguments the same way as for Symbol:
>>> A_real = IndexedBase('A', real=True)
>>> A_real.is_real
True
>>> A != A_real
True
Assumptions can also be inherited if a Symbol is used to initialize the IndexedBase:
>>> I = symbols('I', integer=True)
>>> C_inherit = IndexedBase(I)
>>> C_explicit = IndexedBase('I', integer=True)
>>> C_inherit == C_explicit
True
"""
is_commutative = True
is_symbol = True
is_Atom = True
@staticmethod
def _set_assumptions(obj, assumptions):
"""Set assumptions on obj, making sure to apply consistent values."""
tmp_asm_copy = assumptions.copy()
is_commutative = fuzzy_bool(assumptions.get('commutative', True))
assumptions['commutative'] = is_commutative
obj._assumptions = StdFactKB(assumptions)
obj._assumptions._generator = tmp_asm_copy # Issue #8873
def __new__(cls, label, shape=None, **kw_args):
from sympy import MatrixBase, NDimArray
assumptions, kw_args = _filter_assumptions(kw_args)
if isinstance(label, str):
label = Symbol(label, **assumptions)
elif isinstance(label, Symbol):
assumptions = label._merge(assumptions)
elif isinstance(label, (MatrixBase, NDimArray)):
return label
elif isinstance(label, Iterable):
return _sympify(label)
else:
label = _sympify(label)
if is_sequence(shape):
shape = Tuple(*shape)
elif shape is not None:
shape = Tuple(shape)
offset = kw_args.pop('offset', S.Zero)
strides = kw_args.pop('strides', None)
if shape is not None:
obj = Expr.__new__(cls, label, shape)
else:
obj = Expr.__new__(cls, label)
obj._shape = shape
obj._offset = offset
obj._strides = strides
obj._name = str(label)
IndexedBase._set_assumptions(obj, assumptions)
return obj
@property
def name(self):
return self._name
def _hashable_content(self):
return super(IndexedBase, self)._hashable_content() + tuple(sorted(self.assumptions0.items()))
@property
def assumptions0(self):
return {k: v for k, v in self._assumptions.items() if v is not None}
def __getitem__(self, indices, **kw_args):
if is_sequence(indices):
# Special case needed because M[*my_tuple] is a syntax error.
if self.shape and len(self.shape) != len(indices):
raise IndexException("Rank mismatch.")
return Indexed(self, *indices, **kw_args)
else:
if self.shape and len(self.shape) != 1:
raise IndexException("Rank mismatch.")
return Indexed(self, indices, **kw_args)
@property
def shape(self):
"""Returns the shape of the ``IndexedBase`` object.
Examples
========
>>> from sympy import IndexedBase, Idx, Symbol
>>> from sympy.abc import x, y
>>> IndexedBase('A', shape=(x, y)).shape
(x, y)
Note: If the shape of the ``IndexedBase`` is specified, it will override
any shape information given by the indices.
>>> A = IndexedBase('A', shape=(x, y))
>>> B = IndexedBase('B')
>>> i = Idx('i', 2)
>>> j = Idx('j', 1)
>>> A[i, j].shape
(x, y)
>>> B[i, j].shape
(2, 1)
"""
return self._shape
@property
def strides(self):
"""Returns the strided scheme for the ``IndexedBase`` object.
Normally this is a tuple denoting the number of
steps to take in the respective dimension when traversing
an array. For code generation purposes strides='C' and
strides='F' can also be used.
strides='C' means that the code printer unrolls in row-major order,
while 'F' means it unrolls in column-major order.
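Examples
========
A minimal illustration (the stride values below are arbitrary):
>>> from sympy import IndexedBase
>>> IndexedBase('A', strides=(10, 1)).strides
(10, 1)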
"""
return self._strides
@property
def offset(self):
"""Returns the offset for the ``IndexedBase`` object.
This is the value added to the resulting index when the
2D Indexed object is unrolled to a 1D form. Used in code
generation.
Examples
========
>>> from sympy.printing import ccode
>>> from sympy.tensor import IndexedBase, Idx
>>> from sympy import symbols
>>> l, m, n, o = symbols('l m n o', integer=True)
>>> A = IndexedBase('A', strides=(l, m, n), offset=o)
>>> i, j, k = map(Idx, 'ijk')
>>> ccode(A[i, j, k])
'A[l*i + m*j + n*k + o]'
"""
return self._offset
@property
def label(self):
"""Returns the label of the ``IndexedBase`` object.
Examples
========
>>> from sympy import IndexedBase
>>> from sympy.abc import x, y
>>> IndexedBase('A', shape=(x, y)).label
A
"""
return self.args[0]
def _sympystr(self, p):
return p.doprint(self.label)
class Idx(Expr):
"""Represents an integer index as an ``Integer`` or integer expression.
There are a number of ways to create an ``Idx`` object. The constructor
takes two arguments:
``label``
An integer or a symbol that labels the index.
``range``
Optionally you can specify a range as either
* ``Symbol`` or integer: This is interpreted as a dimension. Lower and
upper bounds are set to ``0`` and ``range - 1``, respectively.
* ``tuple``: The two elements are interpreted as the lower and upper
bounds of the range, respectively.
Note: bounds of the range are assumed to be either integer or infinite (oo
and -oo are allowed to specify an unbounded range). If ``n`` is given as a
bound, then ``n.is_integer`` must not return false.
For convenience, if the label is given as a string it is automatically
converted to an integer symbol. (Note: this conversion is not done for
range or dimension arguments.)
Examples
========
>>> from sympy import IndexedBase, Idx, symbols, oo
>>> n, i, L, U = symbols('n i L U', integer=True)
If a string is given for the label an integer ``Symbol`` is created and the
bounds are both ``None``:
>>> idx = Idx('qwerty'); idx
qwerty
>>> idx.lower, idx.upper
(None, None)
Both upper and lower bounds can be specified:
>>> idx = Idx(i, (L, U)); idx
i
>>> idx.lower, idx.upper
(L, U)
When only a single bound is given it is interpreted as the dimension
and the lower bound defaults to 0:
>>> idx = Idx(i, n); idx.lower, idx.upper
(0, n - 1)
>>> idx = Idx(i, 4); idx.lower, idx.upper
(0, 3)
>>> idx = Idx(i, oo); idx.lower, idx.upper
(0, oo)
"""
is_integer = True
is_finite = True
is_real = True
is_symbol = True
is_Atom = True
_diff_wrt = True
def __new__(cls, label, range=None, **kw_args):
from sympy.utilities.misc import filldedent
if isinstance(label, str):
label = Symbol(label, integer=True)
label, range = list(map(sympify, (label, range)))
if label.is_Number:
if not label.is_integer:
raise TypeError("Index is not an integer number.")
return label
if not label.is_integer:
raise TypeError("Idx object requires an integer label.")
elif is_sequence(range):
if len(range) != 2:
raise ValueError(filldedent("""
Idx range tuple must have length 2, but got %s""" % len(range)))
for bound in range:
if (bound.is_integer is False and bound is not S.Infinity
and bound is not S.NegativeInfinity):
raise TypeError("Idx object requires integer bounds.")
args = label, Tuple(*range)
elif isinstance(range, Expr):
if range is not S.Infinity and fuzzy_not(range.is_integer):
raise TypeError("Idx object requires an integer dimension.")
args = label, Tuple(0, range - 1)
elif range:
raise TypeError(filldedent("""
The range must be an ordered iterable or
integer SymPy expression."""))
else:
args = label,
obj = Expr.__new__(cls, *args, **kw_args)
obj._assumptions["finite"] = True
obj._assumptions["real"] = True
return obj
@property
def label(self):
"""Returns the label (Integer or integer expression) of the Idx object.
Examples
========
>>> from sympy import Idx, Symbol
>>> x = Symbol('x', integer=True)
>>> Idx(x).label
x
>>> j = Symbol('j', integer=True)
>>> Idx(j).label
j
>>> Idx(j + 1).label
j + 1
"""
return self.args[0]
@property
def lower(self):
"""Returns the lower bound of the ``Idx``.
Examples
========
>>> from sympy import Idx
>>> Idx('j', 2).lower
0
>>> Idx('j', 5).lower
0
>>> Idx('j').lower is None
True
"""
try:
return self.args[1][0]
except IndexError:
return
@property
def upper(self):
"""Returns the upper bound of the ``Idx``.
Examples
========
>>> from sympy import Idx
>>> Idx('j', 2).upper
1
>>> Idx('j', 5).upper
4
>>> Idx('j').upper is None
True
"""
try:
return self.args[1][1]
except IndexError:
return
def _sympystr(self, p):
return p.doprint(self.label)
@property
def name(self):
return self.label.name if self.label.is_Symbol else str(self.label)
@property
def free_symbols(self):
return {self}
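# The rich comparisons below first try to decide the result from the known
# index bounds before deferring to the generic Expr comparison.  For
# instance (illustrative), Idx('i', (1, 5)) <= 7 is True because the upper
# bound 5 is already known to be <= 7.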
def __le__(self, other):
if isinstance(other, Idx):
other_upper = other if other.upper is None else other.upper
other_lower = other if other.lower is None else other.lower
else:
other_upper = other
other_lower = other
if self.upper is not None and (self.upper <= other_lower) == True:
return True
if self.lower is not None and (self.lower > other_upper) == True:
return False
return super(Idx, self).__le__(other)
def __ge__(self, other):
if isinstance(other, Idx):
other_upper = other if other.upper is None else other.upper
other_lower = other if other.lower is None else other.lower
else:
other_upper = other
other_lower = other
if self.lower is not None and (self.lower >= other_upper) == True:
return True
if self.upper is not None and (self.upper < other_lower) == True:
return False
return super(Idx, self).__ge__(other)
def __lt__(self, other):
if isinstance(other, Idx):
other_upper = other if other.upper is None else other.upper
other_lower = other if other.lower is None else other.lower
else:
other_upper = other
other_lower = other
if self.upper is not None and (self.upper < other_lower) == True:
return True
if self.lower is not None and (self.lower >= other_upper) == True:
return False
return super(Idx, self).__lt__(other)
def __gt__(self, other):
if isinstance(other, Idx):
other_upper = other if other.upper is None else other.upper
other_lower = other if other.lower is None else other.lower
else:
other_upper = other
other_lower = other
if self.lower is not None and (self.lower > other_upper) == True:
return True
if self.upper is not None and (self.upper <= other_lower) == True:
return False
return super(Idx, self).__gt__(other)
|
21d1d7ce5e482275e58266c2ebf39ff24506861f1280b4d311c32d214370281f | from typing import Dict, Any
import inspect
from .dispatcher import Dispatcher, MethodDispatcher, ambiguity_warn
# XXX: This parameter to dispatch isn't documented and isn't used anywhere in
# sympy. Maybe it should just be removed.
global_namespace = dict() # type: Dict[str, Any]
def dispatch(*types, **kwargs):
""" Dispatch function on the types of the inputs
Supports dispatch on all non-keyword arguments.
Collects implementations based on the function name. Ignores namespaces.
If ambiguous type signatures occur, a warning is raised when the function is
defined, suggesting an additional method to break the ambiguity.
Examples
--------
>>> from sympy.multipledispatch import dispatch
>>> @dispatch(int)
... def f(x):
... return x + 1
>>> @dispatch(float)
... def f(x):
... return x - 1
>>> f(3)
4
>>> f(3.0)
2.0
Specify an isolated namespace with the namespace keyword argument
>>> my_namespace = dict()
>>> @dispatch(int, namespace=my_namespace)
... def foo(x):
... return x + 1
Dispatch on instance methods within classes
>>> class MyClass(object):
... @dispatch(list)
... def __init__(self, data):
... self.data = data
... @dispatch(int)
... def __init__(self, datum):
... self.data = [datum]
"""
namespace = kwargs.get('namespace', global_namespace)
on_ambiguity = kwargs.get('on_ambiguity', ambiguity_warn)
types = tuple(types)
def _(func):
name = func.__name__
if ismethod(func):
dispatcher = inspect.currentframe().f_back.f_locals.get(
name,
MethodDispatcher(name))
else:
if name not in namespace:
namespace[name] = Dispatcher(name)
dispatcher = namespace[name]
dispatcher.add(types, func, on_ambiguity=on_ambiguity)
return dispatcher
return _
def ismethod(func):
""" Is func a method?
Note that this has to work as the method is defined but before the class is
defined. At this stage methods look like functions.
"""
if hasattr(inspect, "signature"):
signature = inspect.signature(func)
return signature.parameters.get('self', None) is not None
else:
spec = inspect.getargspec(func)
return spec and spec.args and spec.args[0] == 'self'
|
1fc91a8dbab1411b118821cdd24499e3116f9df58cb5b48f1643885d1a844f3a | from typing import Set
from warnings import warn
import inspect
from .conflict import ordering, ambiguities, super_signature, AmbiguityWarning
from .utils import expand_tuples
import itertools as itl
class MDNotImplementedError(NotImplementedError):
""" A NotImplementedError for multiple dispatch """
def ambiguity_warn(dispatcher, ambiguities):
""" Raise warning when ambiguity is detected
Parameters
----------
dispatcher : Dispatcher
The dispatcher on which the ambiguity was detected
ambiguities : set
Set of type signature pairs that are ambiguous within this dispatcher
See Also:
Dispatcher.add
warning_text
"""
warn(warning_text(dispatcher.name, ambiguities), AmbiguityWarning)
_unresolved_dispatchers = set() # type: Set[Dispatcher]
_resolve = [True]
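# halt_ordering() and restart_ordering() let callers postpone the
# (potentially expensive) reordering and ambiguity checks while many
# implementations are registered, and then perform them once at the end.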
def halt_ordering():
_resolve[0] = False
def restart_ordering(on_ambiguity=ambiguity_warn):
_resolve[0] = True
while _unresolved_dispatchers:
dispatcher = _unresolved_dispatchers.pop()
dispatcher.reorder(on_ambiguity=on_ambiguity)
class Dispatcher(object):
""" Dispatch methods based on type signature
Use ``dispatch`` to add implementations
Examples
--------
>>> from sympy.multipledispatch import dispatch
>>> @dispatch(int)
... def f(x):
... return x + 1
>>> @dispatch(float)
... def f(x):
... return x - 1
>>> f(3)
4
>>> f(3.0)
2.0
"""
__slots__ = '__name__', 'name', 'funcs', 'ordering', '_cache', 'doc'
def __init__(self, name, doc=None):
self.name = self.__name__ = name
self.funcs = dict()
self._cache = dict()
self.ordering = []
self.doc = doc
def register(self, *types, **kwargs):
""" Register dispatcher with new implementation
>>> from sympy.multipledispatch.dispatcher import Dispatcher
>>> f = Dispatcher('f')
>>> @f.register(int)
... def inc(x):
... return x + 1
>>> @f.register(float)
... def dec(x):
... return x - 1
>>> @f.register(list)
... @f.register(tuple)
... def reverse(x):
... return x[::-1]
>>> f(1)
2
>>> f(1.0)
0.0
>>> f([1, 2, 3])
[3, 2, 1]
"""
def _(func):
self.add(types, func, **kwargs)
return func
return _
@classmethod
def get_func_params(cls, func):
if hasattr(inspect, "signature"):
sig = inspect.signature(func)
return sig.parameters.values()
@classmethod
def get_func_annotations(cls, func):
""" Get annotations of function positional parameters
"""
params = cls.get_func_params(func)
if params:
Parameter = inspect.Parameter
params = (param for param in params
if param.kind in
(Parameter.POSITIONAL_ONLY,
Parameter.POSITIONAL_OR_KEYWORD))
annotations = tuple(
param.annotation
for param in params)
if all(ann is not Parameter.empty for ann in annotations):
return annotations
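# For example (illustrative), for ``def f(x: int, y: float): ...`` this
# returns ``(int, float)``; if any positional parameter is missing an
# annotation, None is returned instead.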
def add(self, signature, func, on_ambiguity=ambiguity_warn):
""" Add new types/method pair to dispatcher
>>> from sympy.multipledispatch import Dispatcher
>>> D = Dispatcher('add')
>>> D.add((int, int), lambda x, y: x + y)
>>> D.add((float, float), lambda x, y: x + y)
>>> D(1, 2)
3
>>> D(1, 2.0)
Traceback (most recent call last):
...
NotImplementedError: Could not find signature for add: <int, float>
When ``add`` detects an ambiguity it calls the ``on_ambiguity`` callback
with the dispatcher itself and a set of ambiguous type signature pairs
as inputs. See ``ambiguity_warn`` for an example.
"""
# Handle annotations
if not signature:
annotations = self.get_func_annotations(func)
if annotations:
signature = annotations
# Handle union types
if any(isinstance(typ, tuple) for typ in signature):
for typs in expand_tuples(signature):
self.add(typs, func, on_ambiguity)
return
for typ in signature:
if not isinstance(typ, type):
str_sig = ', '.join(c.__name__ if isinstance(c, type)
else str(c) for c in signature)
raise TypeError("Tried to dispatch on non-type: %s\n"
"In signature: <%s>\n"
"In function: %s" %
(typ, str_sig, self.name))
self.funcs[signature] = func
self.reorder(on_ambiguity=on_ambiguity)
self._cache.clear()
def reorder(self, on_ambiguity=ambiguity_warn):
if _resolve[0]:
self.ordering = ordering(self.funcs)
amb = ambiguities(self.funcs)
if amb:
on_ambiguity(self, amb)
else:
_unresolved_dispatchers.add(self)
def __call__(self, *args, **kwargs):
types = tuple([type(arg) for arg in args])
try:
func = self._cache[types]
except KeyError:
func = self.dispatch(*types)
if not func:
raise NotImplementedError(
'Could not find signature for %s: <%s>' %
(self.name, str_signature(types)))
self._cache[types] = func
try:
return func(*args, **kwargs)
except MDNotImplementedError:
funcs = self.dispatch_iter(*types)
next(funcs) # burn first
for func in funcs:
try:
return func(*args, **kwargs)
except MDNotImplementedError:
pass
raise NotImplementedError("Matching functions for "
"%s: <%s> found, but none completed successfully"
% (self.name, str_signature(types)))
def __str__(self):
return "<dispatched %s>" % self.name
__repr__ = __str__
def dispatch(self, *types):
""" Deterimine appropriate implementation for this type signature
This method is internal. Users should call this object as a function.
Implementation resolution occurs within the ``__call__`` method.
>>> from sympy.multipledispatch import dispatch
>>> @dispatch(int)
... def inc(x):
... return x + 1
>>> implementation = inc.dispatch(int)
>>> implementation(3)
4
>>> print(inc.dispatch(float))
None
See Also:
``sympy.multipledispatch.conflict`` - module to determine resolution order
"""
if types in self.funcs:
return self.funcs[types]
try:
return next(self.dispatch_iter(*types))
except StopIteration:
return None
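# dispatch_iter yields every registered implementation whose signature
# matches ``types`` (via issubclass), following self.ordering, i.e. most
# specific signatures first.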
def dispatch_iter(self, *types):
n = len(types)
for signature in self.ordering:
if len(signature) == n and all(map(issubclass, types, signature)):
result = self.funcs[signature]
yield result
def resolve(self, types):
""" Deterimine appropriate implementation for this type signature
.. deprecated:: 0.4.4
Use ``dispatch(*types)`` instead
"""
warn("resolve() is deprecated, use dispatch(*types)",
DeprecationWarning)
return self.dispatch(*types)
def __getstate__(self):
return {'name': self.name,
'funcs': self.funcs}
def __setstate__(self, d):
self.name = d['name']
self.funcs = d['funcs']
self.ordering = ordering(self.funcs)
self._cache = dict()
@property
def __doc__(self):
docs = ["Multiply dispatched method: %s" % self.name]
if self.doc:
docs.append(self.doc)
other = []
for sig in self.ordering[::-1]:
func = self.funcs[sig]
if func.__doc__:
s = 'Inputs: <%s>\n' % str_signature(sig)
s += '-' * len(s) + '\n'
s += func.__doc__.strip()
docs.append(s)
else:
other.append(str_signature(sig))
if other:
docs.append('Other signatures:\n ' + '\n '.join(other))
return '\n\n'.join(docs)
def _help(self, *args):
return self.dispatch(*map(type, args)).__doc__
def help(self, *args, **kwargs):
""" Print docstring for the function corresponding to inputs """
print(self._help(*args))
def _source(self, *args):
func = self.dispatch(*map(type, args))
if not func:
raise TypeError("No function found")
return source(func)
def source(self, *args, **kwargs):
""" Print source code for the function corresponding to inputs """
print(self._source(*args))
def source(func):
s = 'File: %s\n\n' % inspect.getsourcefile(func)
s = s + inspect.getsource(func)
return s
class MethodDispatcher(Dispatcher):
""" Dispatch methods based on type signature
See Also:
Dispatcher
"""
@classmethod
def get_func_params(cls, func):
if hasattr(inspect, "signature"):
sig = inspect.signature(func)
return itl.islice(sig.parameters.values(), 1, None)
def __get__(self, instance, owner):
self.obj = instance
self.cls = owner
return self
def __call__(self, *args, **kwargs):
types = tuple([type(arg) for arg in args])
func = self.dispatch(*types)
if not func:
raise NotImplementedError('Could not find signature for %s: <%s>' %
(self.name, str_signature(types)))
return func(self.obj, *args, **kwargs)
def str_signature(sig):
""" String representation of type signature
>>> from sympy.multipledispatch.dispatcher import str_signature
>>> str_signature((int, float))
'int, float'
"""
return ', '.join(cls.__name__ for cls in sig)
def warning_text(name, amb):
""" The text for ambiguity warnings """
text = "\nAmbiguities exist in dispatched function %s\n\n" % (name)
text += "The following signatures may result in ambiguous behavior:\n"
for pair in amb:
text += "\t" + \
', '.join('[' + str_signature(s) + ']' for s in pair) + "\n"
text += "\n\nConsider making the following additions:\n\n"
text += '\n\n'.join(['@dispatch(' + str_signature(super_signature(s))
+ ')\ndef %s(...)' % name for s in amb])
return text
|
7b394e488a991163950fd649dfab64aaadb412f4c2a9a57fe3f5a716bf31e9f0 | from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.core.core import BasicMeta, Registry, all_classes
class ClassRegistry(Registry):
"""
Namespace for SymPy classes
This is needed to avoid problems with cyclic imports.
To get a SymPy class, use `C.<class_name>` e.g. `C.Rational`, `C.Add`.
For performance reasons, this is coupled with a set `all_classes` holding
the classes, which should not be modified directly.
"""
__slots__ = ()
def __setattr__(self, name, cls):
Registry.__setattr__(self, name, cls)
all_classes.add(cls)
def __delattr__(self, name):
cls = getattr(self, name)
Registry.__delattr__(self, name)
# The same class could have different names, so make sure
# it's really gone from C before removing it from all_classes.
if cls not in self.__class__.__dict__.values():
all_classes.remove(cls)
def __getattr__(self, name):
# Warning on hasattr(C, '__wrapped__') leads to warnings during test
# collection when running doctests under pytest.
if name != '__wrapped__':
SymPyDeprecationWarning(
feature='C, including its class ClassRegistry,',
last_supported_version='1.0',
useinstead='direct imports from the defining module',
issue=9371,
deprecated_since_version='1.0').warn(stacklevel=2)
return any(cls.__name__ == name for cls in all_classes)
@property
def _sympy_(self):
# until C is deprecated, any sympification of an expression
# with C when C has not been defined can raise this error
# since the user is trying to use C like a symbol -- and if
# we get here, it hasn't been defined as a symbol
raise NameError("name 'C' is not defined as a Symbol")
C = ClassRegistry()
C.BasicMeta = BasicMeta
|
01dc8421d08f17650833e22c75b782088754ee15a18b42661112b355cf17d1c7 | """
Boolean algebra module for SymPy
"""
from __future__ import print_function, division
from collections import defaultdict
from itertools import chain, combinations, product
from sympy.core.add import Add
from sympy.core.basic import Basic
from sympy.core.cache import cacheit
from sympy.core.compatibility import ordered, as_int
from sympy.core.function import Application, Derivative
from sympy.core.numbers import Number
from sympy.core.operations import LatticeOp
from sympy.core.singleton import Singleton, S
from sympy.core.sympify import converter, _sympify, sympify
from sympy.utilities.iterables import sift, ibin
from sympy.utilities.misc import filldedent
def as_Boolean(e):
"""Like bool, return the Boolean value of an expression, e,
which can be any instance of Boolean or bool.
Examples
========
>>> from sympy import true, false, nan
>>> from sympy.logic.boolalg import as_Boolean
>>> from sympy.abc import x
>>> as_Boolean(1) is true
True
>>> as_Boolean(x)
x
>>> as_Boolean(2)
Traceback (most recent call last):
...
TypeError: expecting bool or Boolean, not `2`.
"""
from sympy.core.symbol import Symbol
if e == True:
return S.true
if e == False:
return S.false
if isinstance(e, Symbol):
z = e.is_zero
if z is None:
return e
return S.false if z else S.true
if isinstance(e, Boolean):
return e
raise TypeError('expecting bool or Boolean, not `%s`.' % e)
class Boolean(Basic):
"""A boolean object is an object for which logic operations make sense."""
__slots__ = ()
def __and__(self, other):
"""Overloading for & operator"""
return And(self, other)
__rand__ = __and__
def __or__(self, other):
"""Overloading for |"""
return Or(self, other)
__ror__ = __or__
def __invert__(self):
"""Overloading for ~"""
return Not(self)
def __rshift__(self, other):
"""Overloading for >>"""
return Implies(self, other)
def __lshift__(self, other):
"""Overloading for <<"""
return Implies(other, self)
__rrshift__ = __lshift__
__rlshift__ = __rshift__
def __xor__(self, other):
return Xor(self, other)
__rxor__ = __xor__
def equals(self, other):
"""
Returns True if the given formulas have the same truth table.
For two formulas to be equal they must have the same literals.
Examples
========
>>> from sympy.abc import A, B, C
>>> from sympy.logic.boolalg import And, Or, Not
>>> (A >> B).equals(~B >> ~A)
True
>>> Not(And(A, B, C)).equals(And(Not(A), Not(B), Not(C)))
False
>>> Not(And(A, Not(A))).equals(Or(B, Not(B)))
False
"""
from sympy.logic.inference import satisfiable
from sympy.core.relational import Relational
if self.has(Relational) or other.has(Relational):
raise NotImplementedError('handling of relationals')
return self.atoms() == other.atoms() and \
not satisfiable(Not(Equivalent(self, other)))
def to_nnf(self, simplify=True):
# override where necessary
return self
def as_set(self):
"""
Rewrites Boolean expression in terms of real sets.
Examples
========
>>> from sympy import Symbol, Eq, Or, And
>>> x = Symbol('x', real=True)
>>> Eq(x, 0).as_set()
FiniteSet(0)
>>> (x > 0).as_set()
Interval.open(0, oo)
>>> And(-2 < x, x < 2).as_set()
Interval.open(-2, 2)
>>> Or(x < -2, 2 < x).as_set()
Union(Interval.open(-oo, -2), Interval.open(2, oo))
"""
from sympy.calculus.util import periodicity
from sympy.core.relational import Relational
free = self.free_symbols
if len(free) == 1:
x = free.pop()
reps = {}
for r in self.atoms(Relational):
if periodicity(r, x) not in (0, None):
s = r._eval_as_set()
if s in (S.EmptySet, S.UniversalSet, S.Reals):
reps[r] = s.as_relational(x)
continue
raise NotImplementedError(filldedent('''
as_set is not implemented for relationals
with periodic solutions
'''))
return self.subs(reps)._eval_as_set()
else:
raise NotImplementedError("Sorry, as_set has not yet been"
" implemented for multivariate"
" expressions")
@property
def binary_symbols(self):
from sympy.core.relational import Eq, Ne
return set().union(*[i.binary_symbols for i in self.args
if i.is_Boolean or i.is_Symbol
or isinstance(i, (Eq, Ne))])
class BooleanAtom(Boolean):
"""
Base class of BooleanTrue and BooleanFalse.
"""
is_Boolean = True
is_Atom = True
_op_priority = 11 # higher than Expr
def simplify(self, *a, **kw):
return self
def expand(self, *a, **kw):
return self
@property
def canonical(self):
return self
def _noop(self, other=None):
raise TypeError('BooleanAtom not allowed in this context.')
__add__ = _noop
__radd__ = _noop
__sub__ = _noop
__rsub__ = _noop
__mul__ = _noop
__rmul__ = _noop
__pow__ = _noop
__rpow__ = _noop
__rdiv__ = _noop
__truediv__ = _noop
__div__ = _noop
__rtruediv__ = _noop
__mod__ = _noop
__rmod__ = _noop
_eval_power = _noop
# /// drop when Py2 is no longer supported
def __lt__(self, other):
from sympy.utilities.misc import filldedent
raise TypeError(filldedent('''
A Boolean argument can only be used in
Eq and Ne; all other relationals expect
real expressions.
'''))
__le__ = __lt__
__gt__ = __lt__
__ge__ = __lt__
# \\\
class BooleanTrue(BooleanAtom, metaclass=Singleton):
"""
SymPy version of True, a singleton that can be accessed via S.true.
This is the SymPy version of True, for use in the logic module. The
primary advantage of using true instead of True is that shorthand boolean
operations like ~ and >> will work as expected on this class, whereas with
True they act bitwise on 1. Functions in the logic module will return this
class when they evaluate to true.
Notes
=====
There is liable to be some confusion as to when ``True`` should
be used and when ``S.true`` should be used in various contexts
throughout SymPy. An important thing to remember is that
``sympify(True)`` returns ``S.true``. This means that for the most
part, you can just use ``True`` and it will automatically be converted
to ``S.true`` when necessary, similar to how you can generally use 1
instead of ``S.One``.
The rule of thumb is:
"If the boolean in question can be replaced by an arbitrary symbolic
``Boolean``, like ``Or(x, y)`` or ``x > 1``, use ``S.true``.
Otherwise, use ``True``"
In other words, use ``S.true`` only on those contexts where the
boolean is being used as a symbolic representation of truth.
For example, if the object ends up in the ``.args`` of any expression,
then it must necessarily be ``S.true`` instead of ``True``, as
elements of ``.args`` must be ``Basic``. On the other hand,
``==`` is not a symbolic operation in SymPy, since it always returns
``True`` or ``False``, and does so in terms of structural equality
rather than mathematical, so it should return ``True``. The assumptions
system should use ``True`` and ``False``. Aside from not satisfying
the above rule of thumb, the assumptions system uses a three-valued logic
(``True``, ``False``, ``None``), whereas ``S.true`` and ``S.false``
represent a two-valued logic. When in doubt, use ``True``.
"``S.true == True is True``."
While "``S.true is True``" is ``False``, "``S.true == True``"
is ``True``, so if there is any doubt over whether a function or
expression will return ``S.true`` or ``True``, just use ``==``
instead of ``is`` to do the comparison, and it will work in either
case. Finally, for boolean flags, it's better to just use ``if x``
instead of ``if x is True``. To quote PEP 8:
Don't compare boolean values to ``True`` or ``False``
using ``==``.
* Yes: ``if greeting:``
* No: ``if greeting == True:``
* Worse: ``if greeting is True:``
Examples
========
>>> from sympy import sympify, true, false, Or
>>> sympify(True)
True
>>> _ is True, _ is true
(False, True)
>>> Or(true, false)
True
>>> _ is true
True
Python operators give a boolean result for true but a
bitwise result for True
>>> ~true, ~True
(False, -2)
>>> true >> true, True >> True
(True, 0)
See Also
========
sympy.logic.boolalg.BooleanFalse
"""
def __nonzero__(self):
return True
__bool__ = __nonzero__
def __hash__(self):
return hash(True)
@property
def negated(self):
return S.false
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> from sympy import true
>>> true.as_set()
UniversalSet
"""
return S.UniversalSet
class BooleanFalse(BooleanAtom, metaclass=Singleton):
"""
SymPy version of False, a singleton that can be accessed via S.false.
This is the SymPy version of False, for use in the logic module. The
primary advantage of using false instead of False is that shorthand boolean
operations like ~ and >> will work as expected on this class, whereas with
False they act bitwise on 0. Functions in the logic module will return this
class when they evaluate to false.
Notes
=====
See note in :py:class:`sympy.logic.boolalg.BooleanTrue`
Examples
========
>>> from sympy import sympify, true, false, Or
>>> sympify(False)
False
>>> _ is False, _ is false
(False, True)
>>> Or(true, false)
True
>>> _ is true
True
Python operators give a boolean result for false but a
bitwise result for False
>>> ~false, ~False
(True, -1)
>>> false >> false, False >> False
(True, 0)
See Also
========
sympy.logic.boolalg.BooleanTrue
"""
def __nonzero__(self):
return False
__bool__ = __nonzero__
def __hash__(self):
return hash(False)
@property
def negated(self):
return S.true
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> from sympy import false
>>> false.as_set()
EmptySet
"""
return S.EmptySet
true = BooleanTrue()
false = BooleanFalse()
# We want S.true and S.false to work, rather than S.BooleanTrue and
# S.BooleanFalse, but making the class and instance names the same causes some
# major issues (like the inability to import the class directly from this
# file).
S.true = true
S.false = false
converter[bool] = lambda x: S.true if x else S.false
class BooleanFunction(Application, Boolean):
"""Boolean function is a function that lives in a boolean space
It is used as base class for And, Or, Not, etc.
"""
is_Boolean = True
def _eval_simplify(self, **kwargs):
rv = self.func(*[
a._eval_simplify(**kwargs) for a in self.args])
return simplify_logic(rv)
def simplify(self, **kwargs):
from sympy.simplify.simplify import simplify
return simplify(self, **kwargs)
# /// drop when Py2 is no longer supported
def __lt__(self, other):
from sympy.utilities.misc import filldedent
raise TypeError(filldedent('''
A Boolean argument can only be used in
Eq and Ne; all other relationals expect
real expressions.
'''))
__le__ = __lt__
__ge__ = __lt__
__gt__ = __lt__
# \\\
@classmethod
def binary_check_and_simplify(self, *args):
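# Coerce every argument to a Boolean.  If a symbol used here as a binary
# (Boolean) variable also appears inside a relational, an Eq/Ne that does
# not involve true/false is replaced by false, while any other relational
# raises TypeError.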
from sympy.core.relational import Relational, Eq, Ne
args = [as_Boolean(i) for i in args]
bin = set().union(*[i.binary_symbols for i in args])
rel = set().union(*[i.atoms(Relational) for i in args])
reps = {}
for x in bin:
for r in rel:
if x in bin and x in r.free_symbols:
if isinstance(r, (Eq, Ne)):
if not (
S.true in r.args or
S.false in r.args):
reps[r] = S.false
else:
raise TypeError(filldedent('''
Incompatible use of binary symbol `%s` as a
real variable in `%s`
''' % (x, r)))
return [i.subs(reps) for i in args]
def to_nnf(self, simplify=True):
return self._to_nnf(*self.args, simplify=simplify)
def to_anf(self, deep=True):
return self._to_anf(*self.args, deep=deep)
@classmethod
def _to_nnf(cls, *args, **kwargs):
simplify = kwargs.get('simplify', True)
argset = set([])
for arg in args:
if not is_literal(arg):
arg = arg.to_nnf(simplify)
if simplify:
if isinstance(arg, cls):
arg = arg.args
else:
arg = (arg,)
for a in arg:
if Not(a) in argset:
return cls.zero
argset.add(a)
else:
argset.add(arg)
return cls(*argset)
@classmethod
def _to_anf(cls, *args, **kwargs):
deep = kwargs.get('deep', True)
argset = set([])
for arg in args:
if deep:
if not is_literal(arg) or isinstance(arg, Not):
arg = arg.to_anf(deep=deep)
argset.add(arg)
else:
argset.add(arg)
return cls(*argset, remove_true=False)
# the diff method below is copied from Expr class
def diff(self, *symbols, **assumptions):
assumptions.setdefault("evaluate", True)
return Derivative(self, *symbols, **assumptions)
def _eval_derivative(self, x):
from sympy.core.relational import Eq
from sympy.functions.elementary.piecewise import Piecewise
if x in self.binary_symbols:
return Piecewise(
(0, Eq(self.subs(x, 0), self.subs(x, 1))),
(1, True))
elif x in self.free_symbols:
# not implemented, see https://www.encyclopediaofmath.org/
# index.php/Boolean_differential_calculus
pass
else:
return S.Zero
def _apply_patternbased_simplification(self, rv, patterns, measure,
dominatingvalue,
replacementvalue=None):
"""
Replace patterns of Relational
Parameters
==========
rv : Expr
Boolean expression
patterns : tuple
Tuple of tuples, with (pattern to simplify, simplified pattern)
measure : function
Simplification measure
dominatingvalue : boolean or None
The dominating value for the function under consideration.
For example, for And S.false is dominating. As soon as one
expression is S.false in And, the whole expression is S.false.
replacementvalue : boolean or None, optional
The resulting value for the whole expression if one argument
evaluates to dominatingvalue.
For example, for Nand S.false is dominating, but in this case
the resulting value is S.true. Default is None. If replacementvalue
is None and dominatingvalue is not None,
replacementvalue = dominatingvalue
"""
from sympy.core.relational import Relational, _canonical
if replacementvalue is None and dominatingvalue is not None:
replacementvalue = dominatingvalue
# Use replacement patterns for Relationals
changed = True
Rel, nonRel = sift(rv.args, lambda i: isinstance(i, Relational),
binary=True)
if len(Rel) <= 1:
return rv
Rel, nonRealRel = sift(Rel, lambda i: all(s.is_real is not False
for s in i.free_symbols),
binary=True)
Rel = [i.canonical for i in Rel]
while changed and len(Rel) >= 2:
changed = False
# Sort based on ordered
Rel = list(ordered(Rel))
# Create a list of possible replacements
results = []
# Try all combinations
for ((i, pi), (j, pj)) in combinations(enumerate(Rel), 2):
for k, (pattern, simp) in enumerate(patterns):
res = []
# use SymPy matching
oldexpr = rv.func(pi, pj)
tmpres = oldexpr.match(pattern)
if tmpres:
res.append((tmpres, oldexpr))
# Try reversing first relational
# This and the rest should not be required with a better
# canonical
oldexpr = rv.func(pi.reversed, pj)
tmpres = oldexpr.match(pattern)
if tmpres:
res.append((tmpres, oldexpr))
# Try reversing second relational
oldexpr = rv.func(pi, pj.reversed)
tmpres = oldexpr.match(pattern)
if tmpres:
res.append((tmpres, oldexpr))
# Try reversing both relationals
oldexpr = rv.func(pi.reversed, pj.reversed)
tmpres = oldexpr.match(pattern)
if tmpres:
res.append((tmpres, oldexpr))
if res:
for tmpres, oldexpr in res:
# we have a matching, compute replacement
np = simp.subs(tmpres)
if np == dominatingvalue:
# if dominatingvalue, the whole expression
# will be replacementvalue
return replacementvalue
# add replacement
if not isinstance(np, ITE):
# We only want to use ITE replacements if
# they simplify to a relational
costsaving = measure(oldexpr) - measure(np)
if costsaving > 0:
results.append((costsaving, (i, j, np)))
if results:
# Sort results based on complexity
results = list(reversed(sorted(results,
key=lambda pair: pair[0])))
# Replace the one providing most simplification
cost, replacement = results[0]
i, j, newrel = replacement
# Remove the old relationals
del Rel[j]
del Rel[i]
if dominatingvalue is None or newrel != ~dominatingvalue:
# Insert the new one (no need to insert a value that will
# not affect the result)
Rel.append(newrel)
# We did change something so try again
changed = True
rv = rv.func(*([_canonical(i) for i in ordered(Rel)]
+ nonRel + nonRealRel))
return rv
class And(LatticeOp, BooleanFunction):
"""
Logical AND function.
It evaluates its arguments in order, giving False immediately
if any of them are False, and True if they are all True.
Examples
========
>>> from sympy.core import symbols
>>> from sympy.abc import x, y
>>> from sympy.logic.boolalg import And
>>> x & y
x & y
Notes
=====
The ``&`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise
and. Hence, ``And(a, b)`` and ``a & b`` will return different things if
``a`` and ``b`` are integers.
>>> And(x, y).subs(x, 1)
y
"""
zero = false
identity = true
nargs = None
@classmethod
def _new_args_filter(cls, args):
newargs = []
rel = []
args = BooleanFunction.binary_check_and_simplify(*args)
for x in reversed(args):
if x.is_Relational:
c = x.canonical
if c in rel:
continue
nc = c.negated.canonical
if any(r == nc for r in rel):
return [S.false]
rel.append(c)
newargs.append(x)
return LatticeOp._new_args_filter(newargs, And)
def _eval_subs(self, old, new):
args = []
bad = None
for i in self.args:
try:
i = i.subs(old, new)
except TypeError:
# store TypeError
if bad is None:
bad = i
continue
if i == False:
return S.false
elif i != True:
args.append(i)
if bad is not None:
# let it raise
bad.subs(old, new)
return self.func(*args)
def _eval_simplify(self, **kwargs):
from sympy.core.relational import Equality, Relational
from sympy.solvers.solveset import linear_coeffs
# standard simplify
rv = super(And, self)._eval_simplify(**kwargs)
if not isinstance(rv, And):
return rv
# simplify args that are equalities involving
# symbols so x == 0 & x == y -> x==0 & y == 0
Rel, nonRel = sift(rv.args, lambda i: isinstance(i, Relational),
binary=True)
if not Rel:
return rv
eqs, other = sift(Rel, lambda i: isinstance(i, Equality), binary=True)
if not eqs:
return rv
measure, ratio = kwargs['measure'], kwargs['ratio']
reps = {}
sifted = {}
if eqs:
# group by length of free symbols
sifted = sift(ordered([
(i.free_symbols, i) for i in eqs]),
lambda x: len(x[0]))
eqs = []
while 1 in sifted:
for free, e in sifted.pop(1):
x = free.pop()
if e.lhs != x or x in e.rhs.free_symbols:
try:
m, b = linear_coeffs(
e.rewrite(Add, evaluate=False), x)
enew = e.func(x, -b/m)
if measure(enew) <= ratio*measure(e):
e = enew
else:
eqs.append(e)
continue
except ValueError:
pass
if x in reps:
eqs.append(e.func(e.rhs, reps[x]))
else:
reps[x] = e.rhs
eqs.append(e)
resifted = defaultdict(list)
for k in sifted:
for f, e in sifted[k]:
e = e.subs(reps)
f = e.free_symbols
resifted[len(f)].append((f, e))
sifted = resifted
for k in sifted:
eqs.extend([e for f, e in sifted[k]])
other = [ei.subs(reps) for ei in other]
rv = rv.func(*([i.canonical for i in (eqs + other)] + nonRel))
patterns = simplify_patterns_and()
return self._apply_patternbased_simplification(rv, patterns,
measure, False)
def _eval_as_set(self):
from sympy.sets.sets import Intersection
return Intersection(*[arg.as_set() for arg in self.args])
def _eval_rewrite_as_Nor(self, *args, **kwargs):
return Nor(*[Not(arg) for arg in self.args])
def to_anf(self, deep=True):
if deep:
result = And._to_anf(*self.args, deep=deep)
return distribute_xor_over_and(result)
return self
class Or(LatticeOp, BooleanFunction):
"""
Logical OR function
It evaluates its arguments in order, giving True immediately
if any of them are True, and False if they are all False.
Examples
========
>>> from sympy.core import symbols
>>> from sympy.abc import x, y
>>> from sympy.logic.boolalg import Or
>>> x | y
x | y
Notes
=====
The ``|`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise
or. Hence, ``Or(a, b)`` and ``a | b`` will return different things if
``a`` and ``b`` are integers.
>>> Or(x, y).subs(x, 0)
y
"""
zero = true
identity = false
@classmethod
def _new_args_filter(cls, args):
newargs = []
rel = []
args = BooleanFunction.binary_check_and_simplify(*args)
for x in args:
if x.is_Relational:
c = x.canonical
if c in rel:
continue
nc = c.negated.canonical
if any(r == nc for r in rel):
return [S.true]
rel.append(c)
newargs.append(x)
return LatticeOp._new_args_filter(newargs, Or)
def _eval_subs(self, old, new):
args = []
bad = None
for i in self.args:
try:
i = i.subs(old, new)
except TypeError:
# store TypeError
if bad is None:
bad = i
continue
if i == True:
return S.true
elif i != False:
args.append(i)
if bad is not None:
# let it raise
bad.subs(old, new)
return self.func(*args)
def _eval_as_set(self):
from sympy.sets.sets import Union
return Union(*[arg.as_set() for arg in self.args])
def _eval_rewrite_as_Nand(self, *args, **kwargs):
return Nand(*[Not(arg) for arg in self.args])
def _eval_simplify(self, **kwargs):
# standard simplify
rv = super(Or, self)._eval_simplify(**kwargs)
if not isinstance(rv, Or):
return rv
patterns = simplify_patterns_or()
return self._apply_patternbased_simplification(rv, patterns,
kwargs['measure'], S.true)
def to_anf(self, deep=True):
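# In ANF, Or(a, b, ...) is the XOR over the ANDs of every non-empty
# subset of the arguments; e.g. Or(a, b) becomes a XOR b XOR (a AND b).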
args = range(1, len(self.args) + 1)
args = (combinations(self.args, j) for j in args)
args = chain.from_iterable(args) # powerset
args = (And(*arg) for arg in args)
args = map(lambda x: to_anf(x, deep=deep) if deep else x, args)
return Xor(*list(args), remove_true=False)
class Not(BooleanFunction):
"""
Logical Not function (negation)
Returns True if the statement is False
Returns False if the statement is True
Examples
========
>>> from sympy.logic.boolalg import Not, And, Or
>>> from sympy.abc import x, A, B
>>> Not(True)
False
>>> Not(False)
True
>>> Not(And(True, False))
True
>>> Not(Or(True, False))
False
>>> Not(And(And(True, x), Or(x, False)))
~x
>>> ~x
~x
>>> Not(And(Or(A, B), Or(~A, ~B)))
~((A | B) & (~A | ~B))
Notes
=====
- The ``~`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise
not. In particular, ``~a`` and ``Not(a)`` will be different if ``a`` is
an integer. Furthermore, since bools in Python subclass from ``int``,
``~True`` is the same as ``~1`` which is ``-2``, which has a boolean
value of True. To avoid this issue, use the SymPy boolean types
``true`` and ``false``.
>>> from sympy import true
>>> ~True
-2
>>> ~true
False
"""
is_Not = True
@classmethod
def eval(cls, arg):
from sympy import (
Equality, GreaterThan, LessThan,
StrictGreaterThan, StrictLessThan, Unequality)
if isinstance(arg, Number) or arg in (True, False):
return false if arg else true
if arg.is_Not:
return arg.args[0]
# Simplify Relational objects.
if isinstance(arg, Equality):
return Unequality(*arg.args)
if isinstance(arg, Unequality):
return Equality(*arg.args)
if isinstance(arg, StrictLessThan):
return GreaterThan(*arg.args)
if isinstance(arg, StrictGreaterThan):
return LessThan(*arg.args)
if isinstance(arg, LessThan):
return StrictGreaterThan(*arg.args)
if isinstance(arg, GreaterThan):
return StrictLessThan(*arg.args)
def _eval_as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> from sympy import Not, Symbol
>>> x = Symbol('x')
>>> Not(x > 0).as_set()
Interval(-oo, 0)
"""
return self.args[0].as_set().complement(S.Reals)
def to_nnf(self, simplify=True):
if is_literal(self):
return self
expr = self.args[0]
func, args = expr.func, expr.args
if func == And:
return Or._to_nnf(*[~arg for arg in args], simplify=simplify)
if func == Or:
return And._to_nnf(*[~arg for arg in args], simplify=simplify)
if func == Implies:
a, b = args
return And._to_nnf(a, ~b, simplify=simplify)
if func == Equivalent:
return And._to_nnf(Or(*args), Or(*[~arg for arg in args]),
simplify=simplify)
if func == Xor:
result = []
for i in range(1, len(args)+1, 2):
for neg in combinations(args, i):
clause = [~s if s in neg else s for s in args]
result.append(Or(*clause))
return And._to_nnf(*result, simplify=simplify)
if func == ITE:
a, b, c = args
return And._to_nnf(Or(a, ~c), Or(~a, ~b), simplify=simplify)
raise ValueError("Illegal operator %s in expression" % func)
def to_anf(self, deep=True):
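# In algebraic normal form, Not(a) is represented as true XOR a.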
return Xor._to_anf(true, self.args[0], deep=deep)
class Xor(BooleanFunction):
"""
Logical XOR (exclusive OR) function.
Returns True if an odd number of the arguments are True and the rest are
False.
Returns False if an even number of the arguments are True and the rest are
False.
Examples
========
>>> from sympy.logic.boolalg import Xor
>>> from sympy import symbols
>>> x, y = symbols('x y')
>>> Xor(True, False)
True
>>> Xor(True, True)
False
>>> Xor(True, False, True, True, False)
True
>>> Xor(True, False, True, False)
False
>>> x ^ y
x ^ y
Notes
=====
The ``^`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise xor. In
particular, ``a ^ b`` and ``Xor(a, b)`` will be different if ``a`` and
``b`` are integers.
>>> Xor(x, y).subs(y, 0)
x
"""
def __new__(cls, *args, **kwargs):
argset = set([])
remove_true = kwargs.pop('remove_true', True)
obj = super(Xor, cls).__new__(cls, *args, **kwargs)
for arg in obj._args:
if isinstance(arg, Number) or arg in (True, False):
if arg:
arg = true
else:
continue
if isinstance(arg, Xor):
for a in arg.args:
argset.remove(a) if a in argset else argset.add(a)
elif arg in argset:
argset.remove(arg)
else:
argset.add(arg)
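# Complementary pairs of Relationals (e.g. x < 1 together with x >= 1)
# always XOR to True, while pairs with equal canonical forms XOR to False;
# both kinds are removed below, with ``odd`` tracking how many Trues the
# complementary pairs contribute.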
rel = [(r, r.canonical, r.negated.canonical)
for r in argset if r.is_Relational]
odd = False # is the number of complementary pairs odd? start 0 -> False
remove = []
for i, (r, c, nc) in enumerate(rel):
for j in range(i + 1, len(rel)):
rj, cj = rel[j][:2]
if cj == nc:
odd = ~odd
break
elif cj == c:
break
else:
continue
remove.append((r, rj))
if odd:
argset.remove(true) if true in argset else argset.add(true)
for a, b in remove:
argset.remove(a)
argset.remove(b)
if len(argset) == 0:
return false
elif len(argset) == 1:
return argset.pop()
elif True in argset and remove_true:
argset.remove(True)
return Not(Xor(*argset))
else:
obj._args = tuple(ordered(argset))
obj._argset = frozenset(argset)
return obj
# XXX: This should be cached on the object rather than using cacheit
# Maybe it can be computed in __new__?
@property # type: ignore
@cacheit
def args(self):
return tuple(ordered(self._argset))
def to_nnf(self, simplify=True):
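# The negation normal form of Xor is a conjunction of clauses in which
# an even number of the arguments appear negated, e.g.
# Xor(a, b) -> (a | b) & (~a | ~b).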
args = []
for i in range(0, len(self.args)+1, 2):
for neg in combinations(self.args, i):
clause = [~s if s in neg else s for s in self.args]
args.append(Or(*clause))
return And._to_nnf(*args, simplify=simplify)
def _eval_rewrite_as_Or(self, *args, **kwargs):
a = self.args
return Or(*[_convert_to_varsSOP(x, self.args)
for x in _get_odd_parity_terms(len(a))])
def _eval_rewrite_as_And(self, *args, **kwargs):
a = self.args
return And(*[_convert_to_varsPOS(x, self.args)
for x in _get_even_parity_terms(len(a))])
def _eval_simplify(self, **kwargs):
# as standard simplify uses simplify_logic which writes things as
# And and Or, we only simplify the partial expressions before using
# patterns
rv = self.func(*[a._eval_simplify(**kwargs) for a in self.args])
if not isinstance(rv, Xor): # This shouldn't really happen here
return rv
patterns = simplify_patterns_xor()
return self._apply_patternbased_simplification(rv, patterns,
kwargs['measure'], None)
class Nand(BooleanFunction):
"""
Logical NAND function.
It evaluates its arguments in order, giving True immediately if any
of them are False, and False if they are all True.
Returns True if any of the arguments are False
Returns False if all arguments are True
Examples
========
>>> from sympy.logic.boolalg import Nand
>>> from sympy import symbols
>>> x, y = symbols('x y')
>>> Nand(False, True)
True
>>> Nand(True, True)
False
>>> Nand(x, y)
~(x & y)
"""
@classmethod
def eval(cls, *args):
return Not(And(*args))
class Nor(BooleanFunction):
"""
Logical NOR function.
It evaluates its arguments in order, giving False immediately if any
of them are True, and True if they are all False.
Returns False if any argument is True
Returns True if all arguments are False
Examples
========
>>> from sympy.logic.boolalg import Nor
>>> from sympy import symbols
>>> x, y = symbols('x y')
>>> Nor(True, False)
False
>>> Nor(True, True)
False
>>> Nor(False, True)
False
>>> Nor(False, False)
True
>>> Nor(x, y)
~(x | y)
"""
@classmethod
def eval(cls, *args):
return Not(Or(*args))
class Xnor(BooleanFunction):
"""
Logical XNOR function.
Returns False if an odd number of the arguments are True and the rest are
False.
Returns True if an even number of the arguments are True and the rest are
False.
Examples
========
>>> from sympy.logic.boolalg import Xnor
>>> from sympy import symbols
>>> x, y = symbols('x y')
>>> Xnor(True, False)
False
>>> Xnor(True, True)
True
>>> Xnor(True, False, True, True, False)
False
>>> Xnor(True, False, True, False)
True
"""
@classmethod
def eval(cls, *args):
return Not(Xor(*args))
class Implies(BooleanFunction):
"""
Logical implication.
A implies B is equivalent to ``~A | B``.
Accepts two Boolean arguments: A and B.
Returns False if A is True and B is False
Returns True otherwise.
Examples
========
>>> from sympy.logic.boolalg import Implies
>>> from sympy import symbols
>>> x, y = symbols('x y')
>>> Implies(True, False)
False
>>> Implies(False, False)
True
>>> Implies(True, True)
True
>>> Implies(False, True)
True
>>> x >> y
Implies(x, y)
>>> y << x
Implies(x, y)
Notes
=====
The ``>>`` and ``<<`` operators are provided as a convenience, but note
that their use here is different from their normal use in Python, which is
bit shifts. Hence, ``Implies(a, b)`` and ``a >> b`` will return different
things if ``a`` and ``b`` are integers. In particular, since Python
considers ``True`` and ``False`` to be integers, ``True >> True`` will be
the same as ``1 >> 1``, i.e., 0, which has a truth value of False. To
avoid this issue, use the SymPy objects ``true`` and ``false``.
>>> from sympy import true, false
>>> True >> False
1
>>> true >> false
False
"""
@classmethod
def eval(cls, *args):
try:
newargs = []
for x in args:
if isinstance(x, Number) or x in (0, 1):
newargs.append(True if x else False)
else:
newargs.append(x)
A, B = newargs
except ValueError:
raise ValueError(
"%d operand(s) used for an Implies "
"(pairs are required): %s" % (len(args), str(args)))
if A == True or A == False or B == True or B == False:
return Or(Not(A), B)
elif A == B:
return S.true
elif A.is_Relational and B.is_Relational:
if A.canonical == B.canonical:
return S.true
if A.negated.canonical == B.canonical:
return B
else:
return Basic.__new__(cls, *args)
def to_nnf(self, simplify=True):
a, b = self.args
return Or._to_nnf(~a, b, simplify=simplify)
def to_anf(self, deep=True):
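# In algebraic normal form, Implies(a, b) is true XOR a XOR (a AND b).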
a, b = self.args
return Xor._to_anf(true, a, And(a, b), deep=deep)
class Equivalent(BooleanFunction):
"""
Equivalence relation.
Equivalent(A, B) is True iff A and B are both True or both False
Returns True if all of the arguments are logically equivalent.
Returns False otherwise.
Examples
========
>>> from sympy.logic.boolalg import Equivalent, And
>>> from sympy.abc import x, y
>>> Equivalent(False, False, False)
True
>>> Equivalent(True, False, False)
False
>>> Equivalent(x, And(x, True))
True
"""
def __new__(cls, *args, **options):
from sympy.core.relational import Relational
args = [_sympify(arg) for arg in args]
argset = set(args)
for x in args:
if isinstance(x, Number) or x in [True, False]: # Includes 0, 1
argset.discard(x)
argset.add(True if x else False)
rel = []
for r in argset:
if isinstance(r, Relational):
rel.append((r, r.canonical, r.negated.canonical))
remove = []
for i, (r, c, nc) in enumerate(rel):
for j in range(i + 1, len(rel)):
rj, cj = rel[j][:2]
if cj == nc:
return false
elif cj == c:
remove.append((r, rj))
break
for a, b in remove:
argset.remove(a)
argset.remove(b)
argset.add(True)
if len(argset) <= 1:
return true
if True in argset:
argset.discard(True)
return And(*argset)
if False in argset:
argset.discard(False)
return And(*[~arg for arg in argset])
_args = frozenset(argset)
obj = super(Equivalent, cls).__new__(cls, _args)
obj._argset = _args
return obj
# XXX: This should be cached on the object rather than using cacheit
# Maybe it can be computed in __new__?
@property # type: ignore
@cacheit
def args(self):
return tuple(ordered(self._argset))
def to_nnf(self, simplify=True):
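# Equivalent(a, b, ..., z) is rewritten as the cycle of implications
# (~a | b) & (~b | c) & ... & (~z | a).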
args = []
for a, b in zip(self.args, self.args[1:]):
args.append(Or(~a, b))
args.append(Or(~self.args[-1], self.args[0]))
return And._to_nnf(*args, simplify=simplify)
def to_anf(self, deep=True):
a = And(*self.args)
b = And(*[to_anf(Not(arg), deep=False) for arg in self.args])
b = distribute_xor_over_and(b)
return Xor._to_anf(a, b, deep=deep)
class ITE(BooleanFunction):
"""
If then else clause.
ITE(A, B, C) evaluates and returns the result of B if A is true;
otherwise it returns the result of C. All args must be Booleans.
Examples
========
>>> from sympy.logic.boolalg import ITE, And, Xor, Or
>>> from sympy.abc import x, y, z
>>> ITE(True, False, True)
False
>>> ITE(Or(True, False), And(True, True), Xor(True, True))
True
>>> ITE(x, y, z)
ITE(x, y, z)
>>> ITE(True, x, y)
x
>>> ITE(False, x, y)
y
>>> ITE(x, y, y)
y
Trying to use non-Boolean args will generate a TypeError:
>>> ITE(True, [], ())
Traceback (most recent call last):
...
TypeError: expecting bool, Boolean or ITE, not `[]`
"""
def __new__(cls, *args, **kwargs):
from sympy.core.relational import Eq, Ne
if len(args) != 3:
raise ValueError('expecting exactly 3 args')
a, b, c = args
# check use of binary symbols
if isinstance(a, (Eq, Ne)):
# in this context, we can evaluate the Eq/Ne
# if one arg is a binary symbol and the other
# is true/false
b, c = map(as_Boolean, (b, c))
bin = set().union(*[i.binary_symbols for i in (b, c)])
if len(set(a.args) - bin) == 1:
# one arg is a binary symbol
_a = a
if a.lhs is S.true:
a = a.rhs
elif a.rhs is S.true:
a = a.lhs
elif a.lhs is S.false:
a = ~a.rhs
elif a.rhs is S.false:
a = ~a.lhs
else:
# binary can only equal True or False
a = S.false
if isinstance(_a, Ne):
a = ~a
else:
a, b, c = BooleanFunction.binary_check_and_simplify(
a, b, c)
rv = None
if kwargs.get('evaluate', True):
rv = cls.eval(a, b, c)
if rv is None:
rv = BooleanFunction.__new__(cls, a, b, c, evaluate=False)
return rv
@classmethod
def eval(cls, *args):
from sympy.core.relational import Eq, Ne
# do the args give a singular result?
a, b, c = args
if isinstance(a, (Ne, Eq)):
_a = a
if S.true in a.args:
a = a.lhs if a.rhs is S.true else a.rhs
elif S.false in a.args:
a = ~a.lhs if a.rhs is S.false else ~a.rhs
else:
_a = None
if _a is not None and isinstance(_a, Ne):
a = ~a
if a is S.true:
return b
if a is S.false:
return c
if b == c:
return b
else:
# or maybe the results allow the answer to be expressed
# in terms of the condition
if b is S.true and c is S.false:
return a
if b is S.false and c is S.true:
return Not(a)
if [a, b, c] != args:
return cls(a, b, c, evaluate=False)
def to_nnf(self, simplify=True):
a, b, c = self.args
return And._to_nnf(Or(~a, b), Or(a, c), simplify=simplify)
def _eval_as_set(self):
return self.to_nnf().as_set()
def _eval_rewrite_as_Piecewise(self, *args, **kwargs):
from sympy.functions import Piecewise
return Piecewise((args[1], args[0]), (args[2], True))
# end class definitions. Some useful methods
def conjuncts(expr):
"""Return a list of the conjuncts in the expr s.
Examples
========
>>> from sympy.logic.boolalg import conjuncts
>>> from sympy.abc import A, B
>>> conjuncts(A & B)
frozenset({A, B})
>>> conjuncts(A | B)
frozenset({A | B})
"""
return And.make_args(expr)
def disjuncts(expr):
"""Return a list of the disjuncts in the sentence s.
Examples
========
>>> from sympy.logic.boolalg import disjuncts
>>> from sympy.abc import A, B
>>> disjuncts(A | B)
frozenset({A, B})
>>> disjuncts(A & B)
frozenset({A & B})
"""
return Or.make_args(expr)
def distribute_and_over_or(expr):
"""
Given a sentence s consisting of conjunctions and disjunctions
of literals, return an equivalent sentence in CNF.
Examples
========
>>> from sympy.logic.boolalg import distribute_and_over_or, And, Or, Not
>>> from sympy.abc import A, B, C
>>> distribute_and_over_or(Or(A, And(Not(B), Not(C))))
(A | ~B) & (A | ~C)
"""
return _distribute((expr, And, Or))
def distribute_or_over_and(expr):
"""
Given a sentence s consisting of conjunctions and disjunctions
of literals, return an equivalent sentence in DNF.
Note that the output is NOT simplified.
Examples
========
>>> from sympy.logic.boolalg import distribute_or_over_and, And, Or, Not
>>> from sympy.abc import A, B, C
>>> distribute_or_over_and(And(Or(Not(A), B), C))
(B & C) | (C & ~A)
"""
return _distribute((expr, Or, And))
def distribute_xor_over_and(expr):
"""
Given a sentence s consisting of conjunction and
exclusive disjunctions of literals, return an
equivalent exclusive disjunction.
Note that the output is NOT simplified.
Examples
========
>>> from sympy.logic.boolalg import distribute_xor_over_and, And, Xor, Not
>>> from sympy.abc import A, B, C
>>> distribute_xor_over_and(And(Xor(Not(A), B), C))
(B & C) ^ (C & ~A)
"""
return _distribute((expr, Xor, And))
def _distribute(info):
"""
Distributes info[1] over info[2] with respect to info[0].
"""
if isinstance(info[0], info[2]):
for arg in info[0].args:
if isinstance(arg, info[1]):
conj = arg
break
else:
return info[0]
rest = info[2](*[a for a in info[0].args if a is not conj])
return info[1](*list(map(_distribute,
[(info[2](c, rest), info[1], info[2])
for c in conj.args])), remove_true=False)
elif isinstance(info[0], info[1]):
return info[1](*list(map(_distribute,
[(x, info[1], info[2])
for x in info[0].args])),
remove_true=False)
else:
return info[0]
def to_anf(expr, deep=True):
r"""
Converts expr to Algebraic Normal Form (ANF).
ANF is a canonical normal form, which means that two
equivalent formulas will convert to the same ANF.
A logical expression is in ANF if it has the form
.. math:: 1 \oplus a \oplus b \oplus ab \oplus abc
i.e. it can be:
- purely true,
- purely false,
- conjunction of variables,
- exclusive disjunction.
The exclusive disjunction can only contain true, variables
or conjunction of variables. No negations are permitted.
If ``deep`` is ``False``, arguments of the boolean
expression are considered variables, i.e. only the
top-level expression is converted to ANF.
Examples
========
>>> from sympy.logic.boolalg import And, Or, Not, Implies, Equivalent
>>> from sympy.logic.boolalg import to_anf
>>> from sympy.abc import A, B, C
>>> to_anf(Not(A))
A ^ True
>>> to_anf(And(Or(A, B), Not(C)))
A ^ B ^ (A & B) ^ (A & C) ^ (B & C) ^ (A & B & C)
>>> to_anf(Implies(Not(A), Equivalent(B, C)), deep=False)
True ^ ~A ^ (~A & (Equivalent(B, C)))
"""
expr = sympify(expr)
if is_anf(expr):
return expr
return expr.to_anf(deep=deep)
def to_nnf(expr, simplify=True):
"""
Converts expr to Negation Normal Form.
A logical expression is in Negation Normal Form (NNF) if it
contains only And, Or and Not, and Not is applied only to literals.
If simplify is True, the result contains no redundant clauses.
Examples
========
>>> from sympy.abc import A, B, C, D
>>> from sympy.logic.boolalg import Not, Equivalent, to_nnf
>>> to_nnf(Not((~A & ~B) | (C & D)))
(A | B) & (~C | ~D)
>>> to_nnf(Equivalent(A >> B, B >> A))
(A | ~B | (A & ~B)) & (B | ~A | (B & ~A))
"""
if is_nnf(expr, simplify):
return expr
return expr.to_nnf(simplify)
def to_cnf(expr, simplify=False):
"""
Convert a propositional logical sentence ``expr`` to conjunctive normal form.
That is, of the form ((A | ~B | ...) & (B | C | ...) & ...)
If simplify is True, the expr is evaluated to its simplest CNF form using
the Quine-McCluskey algorithm.
Examples
========
>>> from sympy.logic.boolalg import to_cnf
>>> from sympy.abc import A, B, D
>>> to_cnf(~(A | B) | D)
(D | ~A) & (D | ~B)
>>> to_cnf((A | B) & (A | ~A), True)
A | B
"""
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
if simplify:
return simplify_logic(expr, 'cnf', True)
# Don't convert unless we have to
if is_cnf(expr):
return expr
expr = eliminate_implications(expr)
res = distribute_and_over_or(expr)
return res
def to_dnf(expr, simplify=False):
"""
Convert a propositional logical sentence ``expr`` to disjunctive normal form.
That is, of the form ((A & ~B & ...) | (B & C & ...) | ...)
If simplify is True, the expr is evaluated to its simplest DNF form using
the Quine-McCluskey algorithm.
Examples
========
>>> from sympy.logic.boolalg import to_dnf
>>> from sympy.abc import A, B, C
>>> to_dnf(B & (A | C))
(A & B) | (B & C)
>>> to_dnf((A & B) | (A & ~B) | (B & C) | (~B & C), True)
A | C
"""
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
if simplify:
return simplify_logic(expr, 'dnf', True)
# Don't convert unless we have to
if is_dnf(expr):
return expr
expr = eliminate_implications(expr)
return distribute_or_over_and(expr)
def is_anf(expr):
r"""
Checks if expr is in Algebraic Normal Form (ANF).
A logical expression is in ANF if it has the form
.. math:: 1 \oplus a \oplus b \oplus ab \oplus abc
i.e. it is purely true, purely false, conjunction of
variables or exclusive disjunction. The exclusive
disjunction can only contain true, variables or
conjunction of variables. No negations are permitted.
Examples
========
>>> from sympy.logic.boolalg import And, Not, Xor, true, is_anf
>>> from sympy.abc import A, B, C
>>> is_anf(true)
True
>>> is_anf(A)
True
>>> is_anf(And(A, B, C))
True
>>> is_anf(Xor(A, Not(B)))
False
"""
expr = sympify(expr)
if is_literal(expr) and not isinstance(expr, Not):
return True
if isinstance(expr, And):
for arg in expr.args:
if not arg.is_Symbol:
return False
return True
elif isinstance(expr, Xor):
for arg in expr.args:
if isinstance(arg, And):
for a in arg.args:
if not a.is_Symbol:
return False
elif is_literal(arg):
if isinstance(arg, Not):
return False
else:
return False
return True
else:
return False
def is_nnf(expr, simplified=True):
"""
Checks if expr is in Negation Normal Form.
A logical expression is in Negation Normal Form (NNF) if it
contains only And, Or and Not, and Not is applied only to literals.
If simplified is True, checks if result contains no redundant clauses.
Examples
========
>>> from sympy.abc import A, B, C
>>> from sympy.logic.boolalg import Not, is_nnf
>>> is_nnf(A & B | ~C)
True
>>> is_nnf((A | ~A) & (B | C))
False
>>> is_nnf((A | ~A) & (B | C), False)
True
>>> is_nnf(Not(A & B) | C)
False
>>> is_nnf((A >> B) & (B >> A))
False
"""
expr = sympify(expr)
if is_literal(expr):
return True
stack = [expr]
while stack:
expr = stack.pop()
if expr.func in (And, Or):
if simplified:
args = expr.args
for arg in args:
if Not(arg) in args:
return False
stack.extend(expr.args)
elif not is_literal(expr):
return False
return True
def is_cnf(expr):
"""
Test whether or not an expression is in conjunctive normal form.
Examples
========
>>> from sympy.logic.boolalg import is_cnf
>>> from sympy.abc import A, B, C
>>> is_cnf(A | B | C)
True
>>> is_cnf(A & B & C)
True
>>> is_cnf((A & B) | C)
False
"""
return _is_form(expr, And, Or)
def is_dnf(expr):
"""
Test whether or not an expression is in disjunctive normal form.
Examples
========
>>> from sympy.logic.boolalg import is_dnf
>>> from sympy.abc import A, B, C
>>> is_dnf(A | B | C)
True
>>> is_dnf(A & B & C)
True
>>> is_dnf((A & B) | C)
True
>>> is_dnf(A & (B | C))
False
"""
return _is_form(expr, Or, And)
def _is_form(expr, function1, function2):
"""
Test whether or not an expression is of the required form.
"""
expr = sympify(expr)
def is_a_literal(lit):
if isinstance(lit, Not) \
and lit.args[0].is_Atom:
return True
elif lit.is_Atom:
return True
return False
vals = function1.make_args(expr) if isinstance(expr, function1) else [expr]
for lit in vals:
if isinstance(lit, function2):
vals2 = function2.make_args(lit) if isinstance(lit, function2) else [lit]
for l in vals2:
if is_a_literal(l) is False:
return False
elif is_a_literal(lit) is False:
return False
return True
def eliminate_implications(expr):
"""
Change >>, <<, and Equivalent into &, |, and ~. That is, return an
expression that is equivalent to ``expr``, but has only &, |, and ~ as logical
operators.
Examples
========
>>> from sympy.logic.boolalg import Implies, Equivalent, \
eliminate_implications
>>> from sympy.abc import A, B, C
>>> eliminate_implications(Implies(A, B))
B | ~A
>>> eliminate_implications(Equivalent(A, B))
(A | ~B) & (B | ~A)
>>> eliminate_implications(Equivalent(A, B, C))
(A | ~C) & (B | ~A) & (C | ~B)
"""
return to_nnf(expr, simplify=False)
def is_literal(expr):
"""
Returns True if expr is a literal, else False.
Examples
========
>>> from sympy import Or, Q
>>> from sympy.abc import A, B
>>> from sympy.logic.boolalg import is_literal
>>> is_literal(A)
True
>>> is_literal(~A)
True
>>> is_literal(Q.zero(A))
True
>>> is_literal(A + B)
True
>>> is_literal(Or(A, B))
False
"""
if isinstance(expr, Not):
return not isinstance(expr.args[0], BooleanFunction)
else:
return not isinstance(expr, BooleanFunction)
def to_int_repr(clauses, symbols):
"""
Takes clauses in CNF format and puts them into an integer representation.
Examples
========
>>> from sympy.logic.boolalg import to_int_repr
>>> from sympy.abc import x, y
>>> to_int_repr([x | y, y], [x, y]) == [{1, 2}, {2}]
True
"""
# Convert the symbol list into a dict
symbols = dict(list(zip(symbols, list(range(1, len(symbols) + 1)))))
def append_symbol(arg, symbols):
if isinstance(arg, Not):
return -symbols[arg.args[0]]
else:
return symbols[arg]
return [set(append_symbol(arg, symbols) for arg in Or.make_args(c))
for c in clauses]
def term_to_integer(term):
"""
Return an integer corresponding to the base-2 digits given by ``term``.
Parameters
==========
term : a string or list of ones and zeros
Examples
========
>>> from sympy.logic.boolalg import term_to_integer
>>> term_to_integer([1, 0, 0])
4
>>> term_to_integer('100')
4
"""
return int(''.join(list(map(str, list(term)))), 2)
def integer_to_term(k, n_bits=None):
"""
Return a list of the base-2 digits in the integer, ``k``.
Parameters
==========
k : int
n_bits : int
If ``n_bits`` is given and the number of digits in the binary
representation of ``k`` is smaller than ``n_bits`` then left-pad the
list with 0s.
Examples
========
>>> from sympy.logic.boolalg import integer_to_term
>>> integer_to_term(4)
[1, 0, 0]
>>> integer_to_term(4, 6)
[0, 0, 0, 1, 0, 0]
"""
s = '{0:0{1}b}'.format(abs(as_int(k)), as_int(abs(n_bits or 0)))
return list(map(int, s))
def truth_table(expr, variables, input=True):
"""
Return a generator of all possible configurations of the input variables,
and the result of the boolean expression for those values.
Parameters
==========
expr : string or boolean expression
variables : list of variables
input : boolean (default True)
indicates whether to return the input combinations.
Examples
========
>>> from sympy.logic.boolalg import truth_table
>>> from sympy.abc import x,y
>>> table = truth_table(x >> y, [x, y])
>>> for t in table:
... print('{0} -> {1}'.format(*t))
[0, 0] -> True
[0, 1] -> True
[1, 0] -> False
[1, 1] -> True
>>> table = truth_table(x | y, [x, y])
>>> list(table)
[([0, 0], False), ([0, 1], True), ([1, 0], True), ([1, 1], True)]
If input is false, truth_table returns only a list of truth values.
In this case, the corresponding input values of variables can be
deduced from the index of a given output.
>>> from sympy.logic.boolalg import integer_to_term
>>> vars = [y, x]
>>> values = truth_table(x >> y, vars, input=False)
>>> values = list(values)
>>> values
[True, False, True, True]
>>> for i, value in enumerate(values):
... print('{0} -> {1}'.format(list(zip(
... vars, integer_to_term(i, len(vars)))), value))
[(y, 0), (x, 0)] -> True
[(y, 0), (x, 1)] -> False
[(y, 1), (x, 0)] -> True
[(y, 1), (x, 1)] -> True
"""
variables = [sympify(v) for v in variables]
expr = sympify(expr)
if not isinstance(expr, BooleanFunction) and not is_literal(expr):
return
table = product([0, 1], repeat=len(variables))
for term in table:
term = list(term)
value = expr.xreplace(dict(zip(variables, term)))
if input:
yield term, value
else:
yield value
def _check_pair(minterm1, minterm2):
"""
Checks if a pair of minterms differs by only one bit. If yes, returns
the index of that bit, else returns -1.
"""
index = -1
for x, (i, j) in enumerate(zip(minterm1, minterm2)):
if i != j:
if index == -1:
index = x
else:
return -1
return index
def _convert_to_varsSOP(minterm, variables):
"""
Converts a term in the expansion of a function from binary to its
variable form (for SOP).
"""
temp = []
for i, m in enumerate(minterm):
if m == 0:
temp.append(Not(variables[i]))
elif m == 1:
temp.append(variables[i])
else:
pass # ignore the 3s
return And(*temp)
def _convert_to_varsPOS(maxterm, variables):
"""
Converts a term in the expansion of a function from binary to its
variable form (for POS).
"""
temp = []
for i, m in enumerate(maxterm):
if m == 1:
temp.append(Not(variables[i]))
elif m == 0:
temp.append(variables[i])
else:
pass # ignore the 3s
return Or(*temp)
def _convert_to_varsANF(term, variables):
"""
Converts a term in the expansion of a function from binary to its
variable form (for ANF).
Parameters
==========
term : list of 1's and 0's (complementation pattern)
variables : list of variables
"""
temp = []
for i, m in enumerate(term):
if m == 1:
temp.append(variables[i])
else:
pass # ignore 0s
if temp == []:
return BooleanTrue()
return And(*temp)
def _get_odd_parity_terms(n):
"""
Returns a list of lists, with all possible combinations of n zeros and ones
with an odd number of ones.
"""
op = []
for i in range(1, 2**n):
e = ibin(i, n)
if sum(e) % 2 == 1:
op.append(e)
return op
def _get_even_parity_terms(n):
"""
Returns a list of lists, with all possible combinations of n zeros and ones
with an even number of ones.
"""
op = []
for i in range(2**n):
e = ibin(i, n)
if sum(e) % 2 == 0:
op.append(e)
return op
def _simplified_pairs(terms):
"""
Reduces a set of minterms, if possible, to a simplified set of minterms
with one less variable in the terms, using the Quine-McCluskey method.
"""
simplified_terms = []
todo = list(range(len(terms)))
for i, ti in enumerate(terms[:-1]):
for j_i, tj in enumerate(terms[(i + 1):]):
index = _check_pair(ti, tj)
if index != -1:
todo[i] = todo[j_i + i + 1] = None
newterm = ti[:]
newterm[index] = 3
if newterm not in simplified_terms:
simplified_terms.append(newterm)
simplified_terms.extend(
[terms[i] for i in [_ for _ in todo if _ is not None]])
return simplified_terms
def _compare_term(minterm, term):
"""
Return True if the given ``term`` (which may contain 3s acting as wildcards)
covers the binary ``minterm``. Used for recognizing prime implicants.
"""
for i, x in enumerate(term):
if x != 3 and x != minterm[i]:
return False
return True
def _rem_redundancy(l1, terms):
"""
After the truth table has been sufficiently simplified, use the prime
implicant table method to recognize and eliminate redundant pairs,
and return the essential arguments.
"""
if len(terms):
# Create dominating matrix
dommatrix = [[0]*len(l1) for n in range(len(terms))]
for primei, prime in enumerate(l1):
for termi, term in enumerate(terms):
if _compare_term(term, prime):
dommatrix[termi][primei] = 1
# Non-dominated prime implicants, dominated set to None
ndprimeimplicants = list(range(len(l1)))
# Non-dominated terms, dominated set to None
ndterms = list(range(len(terms)))
# Mark dominated rows and columns
oldndterms = None
oldndprimeimplicants = None
while ndterms != oldndterms or \
ndprimeimplicants != oldndprimeimplicants:
oldndterms = ndterms[:]
oldndprimeimplicants = ndprimeimplicants[:]
for rowi, row in enumerate(dommatrix):
if ndterms[rowi] is not None:
row = [row[i] for i in
[_ for _ in ndprimeimplicants if _ is not None]]
for row2i, row2 in enumerate(dommatrix):
if rowi != row2i and ndterms[row2i] is not None:
row2 = [row2[i] for i in
[_ for _ in ndprimeimplicants
if _ is not None]]
if all(a >= b for (a, b) in zip(row2, row)):
# row2 dominating row, keep row
ndterms[row2i] = None
for coli in range(len(l1)):
if ndprimeimplicants[coli] is not None:
col = [dommatrix[a][coli] for a in range(len(terms))]
col = [col[i] for i in
[_ for _ in oldndterms if _ is not None]]
for col2i in range(len(l1)):
if coli != col2i and \
ndprimeimplicants[col2i] is not None:
col2 = [dommatrix[a][col2i]
for a in range(len(terms))]
col2 = [col2[i] for i in
[_ for _ in oldndterms if _ is not None]]
if all(a >= b for (a, b) in zip(col, col2)):
# col dominating col2, keep col
ndprimeimplicants[col2i] = None
l1 = [l1[i] for i in [_ for _ in ndprimeimplicants if _ is not None]]
return l1
else:
return []
def _input_to_binlist(inputlist, variables):
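# Normalizes every term to a list of bits. Illustrative example (editor's
# note, not a doctest): _input_to_binlist([1, [0, 1]], [x, y]) -> [[0, 1], [0, 1]];
# a dict such as {x: 1} is expanded over the unspecified variables, giving
# [[1, 0], [1, 1]].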
binlist = []
bits = len(variables)
for val in inputlist:
if isinstance(val, int):
binlist.append(ibin(val, bits))
elif isinstance(val, dict):
nonspecvars = list(variables)
for key in val.keys():
nonspecvars.remove(key)
for t in product([0, 1], repeat=len(nonspecvars)):
d = dict(zip(nonspecvars, t))
d.update(val)
binlist.append([d[v] for v in variables])
elif isinstance(val, (list, tuple)):
if len(val) != bits:
raise ValueError("Each term must contain {} bits as there are"
"\n{} variables (or be an integer)."
"".format(bits, bits))
binlist.append(list(val))
else:
raise TypeError("A term list can only contain lists,"
" ints or dicts.")
return binlist
def SOPform(variables, minterms, dontcares=None):
"""
The SOPform function uses simplified_pairs and a redundant group-
eliminating algorithm to convert the list of all input combos that
generate '1' (the minterms) into the smallest Sum of Products form.
The variables must be given as the first argument.
Return a logical Or function (i.e., the "sum of products" or "SOP"
form) that gives the desired outcome. If there are inputs that can
be ignored, pass them as a list, too.
The result will be one of the (perhaps many) functions that satisfy
the conditions.
Examples
========
>>> from sympy.logic import SOPform
>>> from sympy import symbols
>>> w, x, y, z = symbols('w x y z')
>>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1],
... [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1]]
>>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]]
>>> SOPform([w, x, y, z], minterms, dontcares)
(y & z) | (z & ~w)
The terms can also be represented as integers:
>>> minterms = [1, 3, 7, 11, 15]
>>> dontcares = [0, 2, 5]
>>> SOPform([w, x, y, z], minterms, dontcares)
(y & z) | (z & ~w)
They can also be specified using dicts, which do not have to be fully
specified:
>>> minterms = [{w: 0, x: 1}, {y: 1, z: 1, x: 0}]
>>> SOPform([w, x, y, z], minterms)
(x & ~w) | (y & z & ~x)
Or a combination:
>>> minterms = [4, 7, 11, [1, 1, 1, 1]]
>>> dontcares = [{w : 0, x : 0, y: 0}, 5]
>>> SOPform([w, x, y, z], minterms, dontcares)
(w & y & z) | (x & y & z) | (~w & ~y)
References
==========
.. [1] https://en.wikipedia.org/wiki/Quine-McCluskey_algorithm
"""
variables = [sympify(v) for v in variables]
if minterms == []:
return false
minterms = _input_to_binlist(minterms, variables)
dontcares = _input_to_binlist((dontcares or []), variables)
for d in dontcares:
if d in minterms:
raise ValueError('%s in minterms is also in dontcares' % d)
old = None
new = minterms + dontcares
while new != old:
old = new
new = _simplified_pairs(old)
essential = _rem_redundancy(new, minterms)
return Or(*[_convert_to_varsSOP(x, variables) for x in essential])
def POSform(variables, minterms, dontcares=None):
"""
The POSform function uses simplified_pairs and a redundant-group
eliminating algorithm to convert the list of all input combinations
that generate '1' (the minterms) into the smallest Product of Sums form.
The variables must be given as the first argument.
Return a logical And function (i.e., the "product of sums" or "POS"
form) that gives the desired outcome. If there are inputs that can
be ignored, pass them as a list, too.
The result will be one of the (perhaps many) functions that satisfy
the conditions.
Examples
========
>>> from sympy.logic import POSform
>>> from sympy import symbols
>>> w, x, y, z = symbols('w x y z')
>>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1],
... [1, 0, 1, 1], [1, 1, 1, 1]]
>>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]]
>>> POSform([w, x, y, z], minterms, dontcares)
z & (y | ~w)
The terms can also be represented as integers:
>>> minterms = [1, 3, 7, 11, 15]
>>> dontcares = [0, 2, 5]
>>> POSform([w, x, y, z], minterms, dontcares)
z & (y | ~w)
They can also be specified using dicts, which do not have to be fully
specified:
>>> minterms = [{w: 0, x: 1}, {y: 1, z: 1, x: 0}]
>>> POSform([w, x, y, z], minterms)
(x | y) & (x | z) & (~w | ~x)
Or a combination:
>>> minterms = [4, 7, 11, [1, 1, 1, 1]]
>>> dontcares = [{w : 0, x : 0, y: 0}, 5]
>>> POSform([w, x, y, z], minterms, dontcares)
(w | x) & (y | ~w) & (z | ~y)
References
==========
.. [1] https://en.wikipedia.org/wiki/Quine-McCluskey_algorithm
"""
variables = [sympify(v) for v in variables]
if minterms == []:
return false
minterms = _input_to_binlist(minterms, variables)
dontcares = _input_to_binlist((dontcares or []), variables)
for d in dontcares:
if d in minterms:
raise ValueError('%s in minterms is also in dontcares' % d)
maxterms = []
for t in product([0, 1], repeat=len(variables)):
t = list(t)
if (t not in minterms) and (t not in dontcares):
maxterms.append(t)
old = None
new = maxterms + dontcares
while new != old:
old = new
new = _simplified_pairs(old)
essential = _rem_redundancy(new, maxterms)
return And(*[_convert_to_varsPOS(x, variables) for x in essential])
def ANFform(variables, truthvalues):
"""
The ANFform function converts the list of truth values to
Algebraic Normal Form (ANF).
The variables must be given as the first argument.
Return True, False, a logical And function (i.e., the
"Zhegalkin monomial") or logical Xor function (i.e.,
the "Zhegalkin polynomial"). When True and False
are represented by 1 and 0, respectively, then
And is multiplication and Xor is addition.
Formally a "Zhegalkin monomial" is the product (logical
And) of a finite set of distinct variables, including
the empty set whose product is denoted 1 (True).
A "Zhegalkin polynomial" is the sum (logical Xor) of a
set of Zhegalkin monomials, with the empty set denoted
by 0 (False).
Parameters
==========
variables : list of variables
truthvalues : list of 1's and 0's (result column of truth table)
Examples
========
>>> from sympy.logic.boolalg import ANFform
>>> from sympy.abc import x, y
>>> ANFform([x], [1, 0])
x ^ True
>>> ANFform([x, y], [0, 1, 1, 1])
x ^ y ^ (x & y)
References
==========
.. [2] https://en.wikipedia.org/wiki/Zhegalkin_polynomial
"""
n_vars = len(variables)
n_values = len(truthvalues)
if n_values != 2 ** n_vars:
raise ValueError("The number of truth values must be equal to 2^%d, "
"got %d" % (n_vars, n_values))
variables = [sympify(v) for v in variables]
coeffs = anf_coeffs(truthvalues)
terms = []
for i, t in enumerate(product([0, 1], repeat=n_vars)):
if coeffs[i] == 1:
terms.append(t)
return Xor(*[_convert_to_varsANF(x, variables) for x in terms],
remove_true=False)
def anf_coeffs(truthvalues):
"""
Convert a list of truth values of some boolean expression
to the list of coefficients of the polynomial mod 2 (exclusive
disjunction) representing the boolean expression in ANF
(i.e., the "Zhegalkin polynomial").
There are 2^n possible Zhegalkin monomials in n variables, since
each monomial is fully specified by the presence or absence of
each variable.
We can enumerate all the monomials. For example, a boolean
function with four variables (a, b, c, d) can contain
up to 2^4 = 16 monomials. The 13-th monomial is the
product a & b & d, because 13 in binary is 1, 1, 0, 1.
A given monomial's presence or absence in a polynomial corresponds
to that monomial's coefficient being 1 or 0 respectively.
Examples
========
>>> from sympy.logic.boolalg import anf_coeffs, bool_monomial, Xor
>>> from sympy.abc import a, b, c
>>> truthvalues = [0, 1, 1, 0, 0, 1, 0, 1]
>>> coeffs = anf_coeffs(truthvalues)
>>> coeffs
[0, 1, 1, 0, 0, 0, 1, 0]
>>> polynomial = Xor(*[
... bool_monomial(k, [a, b, c])
... for k, coeff in enumerate(coeffs) if coeff == 1
... ])
>>> polynomial
b ^ c ^ (a & b)
"""
s = '{0:b}'.format(len(truthvalues))
n = len(s) - 1
if len(truthvalues) != 2**n:
raise ValueError("The number of truth values must be a power of two, "
"got %d" % len(truthvalues))
coeffs = [[v] for v in truthvalues]
for i in range(n):
tmp = []
for j in range(2 ** (n-i-1)):
tmp.append(coeffs[2*j] +
list(map(lambda x, y: x^y, coeffs[2*j], coeffs[2*j+1])))
coeffs = tmp
return coeffs[0]
def bool_minterm(k, variables):
"""
Return the k-th minterm.
Minterms are numbered by a binary encoding of the complementation
pattern of the variables. This convention assigns the value 1 to
the direct form and 0 to the complemented form.
Parameters
==========
k : int or list of 1's and 0's (complementation pattern)
variables : list of variables
Examples
========
>>> from sympy.logic.boolalg import bool_minterm
>>> from sympy.abc import x, y, z
>>> bool_minterm([1, 0, 1], [x, y, z])
x & z & ~y
>>> bool_minterm(6, [x, y, z])
x & y & ~z
References
==========
.. [3] https://en.wikipedia.org/wiki/Canonical_normal_form#Indexing_minterms
"""
if isinstance(k, int):
k = integer_to_term(k, len(variables))
variables = list(map(sympify, variables))
return _convert_to_varsSOP(k, variables)
def bool_maxterm(k, variables):
"""
Return the k-th maxterm.
Each maxterm is assigned an index based on the opposite
conventional binary encoding used for minterms. The maxterm
convention assigns the value 0 to the direct form and 1 to
the complemented form.
Parameters
==========
k : int or list of 1's and 0's (complementation pattern)
variables : list of variables
Examples
========
>>> from sympy.logic.boolalg import bool_maxterm
>>> from sympy.abc import x, y, z
>>> bool_maxterm([1, 0, 1], [x, y, z])
y | ~x | ~z
>>> bool_maxterm(6, [x, y, z])
z | ~x | ~y
References
==========
.. [4] https://en.wikipedia.org/wiki/Canonical_normal_form#Indexing_maxterms
"""
if isinstance(k, int):
k = integer_to_term(k, len(variables))
variables = list(map(sympify, variables))
return _convert_to_varsPOS(k, variables)
def bool_monomial(k, variables):
"""
Return the k-th monomial.
Monomials are numbered by a binary encoding of the presence and
absence of the variables. This convention assigns the value
1 to the presence of a variable and 0 to its absence.
Each boolean function can be uniquely represented by a
Zhegalkin Polynomial (Algebraic Normal Form). The Zhegalkin
Polynomial of a boolean function with n variables can contain
up to 2^n monomials. We can enumerate all the monomials.
Each monomial is fully specified by the presence or absence
of each variable.
For example, a boolean function with four variables (a, b, c, d)
can contain up to 2^4 = 16 monomials. The 13-th monomial is the
product a & b & d, because 13 in binary is 1, 1, 0, 1.
Parameters
==========
k : int or list of 1's and 0's
variables : list of variables
Examples
========
>>> from sympy.logic.boolalg import bool_monomial
>>> from sympy.abc import x, y, z
>>> bool_monomial([1, 0, 1], [x, y, z])
x & z
>>> bool_monomial(6, [x, y, z])
x & y
"""
if isinstance(k, int):
k = integer_to_term(k, len(variables))
variables = list(map(sympify, variables))
return _convert_to_varsANF(k, variables)
def _find_predicates(expr):
"""Helper to find logical predicates in BooleanFunctions.
A logical predicate is defined here as anything within a BooleanFunction
that is not a BooleanFunction itself.
"""
if not isinstance(expr, BooleanFunction):
return {expr}
return set().union(*(_find_predicates(i) for i in expr.args))
def simplify_logic(expr, form=None, deep=True, force=False):
"""
This function simplifies a boolean function to its simplified version
in SOP or POS form. The return type is an Or or And object in SymPy.
Parameters
==========
expr : string or boolean expression
form : string ('cnf' or 'dnf') or None (default).
If 'cnf' or 'dnf', the simplest expression in the corresponding
normal form is returned; if None, the answer is returned
according to the form with fewest args (in CNF by default).
deep : boolean (default True)
Indicates whether to recursively simplify any
non-boolean functions contained within the input.
force : boolean (default False)
As the simplifications require exponential time in the number of
variables, there is by default a limit on expressions with 8 variables.
When the expression has more than 8 variables, only symbolic
simplification (controlled by ``deep``) is made. By setting ``force``
to ``True``, this limit is removed. Be aware that this can lead to
very long simplification times.
Examples
========
>>> from sympy.logic import simplify_logic
>>> from sympy.abc import x, y, z
>>> from sympy import S
>>> b = (~x & ~y & ~z) | ( ~x & ~y & z)
>>> simplify_logic(b)
~x & ~y
>>> S(b)
(z & ~x & ~y) | (~x & ~y & ~z)
>>> simplify_logic(_)
~x & ~y
"""
if form not in (None, 'cnf', 'dnf'):
raise ValueError("form can be cnf or dnf only")
expr = sympify(expr)
if deep:
variables = _find_predicates(expr)
from sympy.simplify.simplify import simplify
s = [simplify(v) for v in variables]
expr = expr.xreplace(dict(zip(variables, s)))
if not isinstance(expr, BooleanFunction):
return expr
# get variables in case not deep or after doing
# deep simplification since they may have changed
variables = _find_predicates(expr)
if not force and len(variables) > 8:
return expr
# group into constants and variable values
c, v = sift(variables, lambda x: x in (True, False), binary=True)
variables = c + v
truthtable = []
# standardize constants to be 1 or 0 in keeping with truthtable
c = [1 if i == True else 0 for i in c]
for t in product([0, 1], repeat=len(v)):
if expr.xreplace(dict(zip(v, t))) == True:
truthtable.append(c + list(t))
big = len(truthtable) >= (2 ** (len(variables) - 1))
if form == 'dnf' or form is None and big:
return SOPform(variables, truthtable)
return POSform(variables, truthtable)
def _finger(eq):
"""
Assign a 5-item fingerprint to each symbol in the equation:
[
# of times it appeared as a Symbol;
# of times it appeared as a Not(symbol);
# of times it appeared as a Symbol in an And or Or;
# of times it appeared as a Not(Symbol) in an And or Or;
a sorted tuple of tuples, (i, j, k), where i is the number of arguments
in an And or Or with which it appeared as a Symbol, and j is
the number of arguments that were Not(Symbol); k is the number
of times that (i, j) was seen.
]
Examples
========
>>> from sympy.logic.boolalg import _finger as finger
>>> from sympy import And, Or, Not, Xor, to_cnf, symbols
>>> from sympy.abc import a, b, x, y
>>> eq = Or(And(Not(y), a), And(Not(y), b), And(x, y))
>>> dict(finger(eq))
{(0, 0, 1, 0, ((2, 0, 1),)): [x],
(0, 0, 1, 0, ((2, 1, 1),)): [a, b],
(0, 0, 1, 2, ((2, 0, 1),)): [y]}
>>> dict(finger(x & ~y))
{(0, 1, 0, 0, ()): [y], (1, 0, 0, 0, ()): [x]}
In the following, the (5, 2, 6) means that there were 6 Or
functions in which a symbol appeared as itself amongst 5 arguments in
which there were also 2 negated symbols, e.g. ``(a0 | a1 | a2 | ~a3 | ~a4)``
is counted once for a0, a1 and a2.
>>> dict(finger(to_cnf(Xor(*symbols('a:5')))))
{(0, 0, 8, 8, ((5, 0, 1), (5, 2, 6), (5, 4, 1))): [a0, a1, a2, a3, a4]}
The equation must not have more than one level of nesting:
>>> dict(finger(And(Or(x, y), y)))
{(0, 0, 1, 0, ((2, 0, 1),)): [x], (1, 0, 1, 0, ((2, 0, 1),)): [y]}
>>> dict(finger(And(Or(x, And(a, x)), y)))
Traceback (most recent call last):
...
NotImplementedError: unexpected level of nesting
So y and x have unique fingerprints, but a and b do not.
"""
f = eq.free_symbols
d = dict(list(zip(f, [[0]*4 + [defaultdict(int)] for fi in f])))
for a in eq.args:
if a.is_Symbol:
d[a][0] += 1
elif a.is_Not:
d[a.args[0]][1] += 1
else:
o = len(a.args), sum(isinstance(ai, Not) for ai in a.args)
for ai in a.args:
if ai.is_Symbol:
d[ai][2] += 1
d[ai][-1][o] += 1
elif ai.is_Not:
d[ai.args[0]][3] += 1
else:
raise NotImplementedError('unexpected level of nesting')
inv = defaultdict(list)
for k, v in ordered(iter(d.items())):
v[-1] = tuple(sorted([i + (j,) for i, j in v[-1].items()]))
inv[tuple(v)].append(k)
return inv
def bool_map(bool1, bool2):
"""
Return the simplified version of bool1, and the mapping of variables
that makes the two expressions bool1 and bool2 represent the same
logical behaviour for some correspondence between the variables
of each.
If more than one mapping of this sort exists, one of them
is returned.
For example, And(x, y) is logically equivalent to And(a, b) for
the mapping {x: a, y:b} or {x: b, y:a}.
If no such mapping exists, return False.
Examples
========
>>> from sympy import SOPform, bool_map, Or, And, Not, Xor
>>> from sympy.abc import w, x, y, z, a, b, c, d
>>> function1 = SOPform([x, z, y],[[1, 0, 1], [0, 0, 1]])
>>> function2 = SOPform([a, b, c],[[1, 0, 1], [1, 0, 0]])
>>> bool_map(function1, function2)
(y & ~z, {y: a, z: b})
The results are not necessarily unique, but they are canonical. Here,
``(w, z)`` could be ``(a, d)`` or ``(d, a)``:
>>> eq = Or(And(Not(y), w), And(Not(y), z), And(x, y))
>>> eq2 = Or(And(Not(c), a), And(Not(c), d), And(b, c))
>>> bool_map(eq, eq2)
((x & y) | (w & ~y) | (z & ~y), {w: a, x: b, y: c, z: d})
>>> eq = And(Xor(a, b), c, And(c,d))
>>> bool_map(eq, eq.subs(c, x))
(c & d & (a | b) & (~a | ~b), {a: a, b: b, c: d, d: x})
"""
def match(function1, function2):
"""Return the mapping that equates variables between two
simplified boolean expressions if possible.
By "simplified" we mean that a function has been denested
and is either an And (or an Or) whose arguments are either
symbols (x), negated symbols (Not(x)), or Or (or an And) whose
arguments are only symbols or negated symbols. For example,
And(x, Not(y), Or(w, Not(z))).
Basic.match is not robust enough (see issue 4835) so this is
a workaround that is valid for simplified boolean expressions
"""
# do some quick checks
if function1.__class__ != function2.__class__:
return None # maybe simplification makes them the same?
if len(function1.args) != len(function2.args):
return None # maybe simplification makes them the same?
if function1.is_Symbol:
return {function1: function2}
# get the fingerprint dictionaries
f1 = _finger(function1)
f2 = _finger(function2)
# more quick checks
if len(f1) != len(f2):
return False
# assemble the match dictionary if possible
matchdict = {}
for k in f1.keys():
if k not in f2:
return False
if len(f1[k]) != len(f2[k]):
return False
for i, x in enumerate(f1[k]):
matchdict[x] = f2[k][i]
return matchdict
a = simplify_logic(bool1)
b = simplify_logic(bool2)
m = match(a, b)
if m:
return a, m
return m
def simplify_patterns_and():
from sympy.functions.elementary.miscellaneous import Min, Max
from sympy.core import Wild
from sympy.core.relational import Eq, Ne, Ge, Gt, Le, Lt
a = Wild('a')
b = Wild('b')
c = Wild('c')
# With a better canonical fewer results are required
_matchers_and = ((And(Eq(a, b), Ge(a, b)), Eq(a, b)),
(And(Eq(a, b), Gt(a, b)), S.false),
(And(Eq(a, b), Le(a, b)), Eq(a, b)),
(And(Eq(a, b), Lt(a, b)), S.false),
(And(Ge(a, b), Gt(a, b)), Gt(a, b)),
(And(Ge(a, b), Le(a, b)), Eq(a, b)),
(And(Ge(a, b), Lt(a, b)), S.false),
(And(Ge(a, b), Ne(a, b)), Gt(a, b)),
(And(Gt(a, b), Le(a, b)), S.false),
(And(Gt(a, b), Lt(a, b)), S.false),
(And(Gt(a, b), Ne(a, b)), Gt(a, b)),
(And(Le(a, b), Lt(a, b)), Lt(a, b)),
(And(Le(a, b), Ne(a, b)), Lt(a, b)),
(And(Lt(a, b), Ne(a, b)), Lt(a, b)),
# Min/max
(And(Ge(a, b), Ge(a, c)), Ge(a, Max(b, c))),
(And(Ge(a, b), Gt(a, c)), ITE(b > c, Ge(a, b), Gt(a, c))),
(And(Gt(a, b), Gt(a, c)), Gt(a, Max(b, c))),
(And(Le(a, b), Le(a, c)), Le(a, Min(b, c))),
(And(Le(a, b), Lt(a, c)), ITE(b < c, Le(a, b), Lt(a, c))),
(And(Lt(a, b), Lt(a, c)), Lt(a, Min(b, c))),
# Sign
(And(Eq(a, b), Eq(a, -b)), And(Eq(a, S.Zero), Eq(b, S.Zero))),
)
return _matchers_and
def simplify_patterns_or():
from sympy.functions.elementary.miscellaneous import Min, Max
from sympy.core import Wild
from sympy.core.relational import Eq, Ne, Ge, Gt, Le, Lt
a = Wild('a')
b = Wild('b')
c = Wild('c')
_matchers_or = ((Or(Eq(a, b), Ge(a, b)), Ge(a, b)),
(Or(Eq(a, b), Gt(a, b)), Ge(a, b)),
(Or(Eq(a, b), Le(a, b)), Le(a, b)),
(Or(Eq(a, b), Lt(a, b)), Le(a, b)),
(Or(Ge(a, b), Gt(a, b)), Ge(a, b)),
(Or(Ge(a, b), Le(a, b)), S.true),
(Or(Ge(a, b), Lt(a, b)), S.true),
(Or(Ge(a, b), Ne(a, b)), S.true),
(Or(Gt(a, b), Le(a, b)), S.true),
(Or(Gt(a, b), Lt(a, b)), Ne(a, b)),
(Or(Gt(a, b), Ne(a, b)), Ne(a, b)),
(Or(Le(a, b), Lt(a, b)), Le(a, b)),
(Or(Le(a, b), Ne(a, b)), S.true),
(Or(Lt(a, b), Ne(a, b)), Ne(a, b)),
# Min/max
(Or(Ge(a, b), Ge(a, c)), Ge(a, Min(b, c))),
(Or(Ge(a, b), Gt(a, c)), ITE(b > c, Gt(a, c), Ge(a, b))),
(Or(Gt(a, b), Gt(a, c)), Gt(a, Min(b, c))),
(Or(Le(a, b), Le(a, c)), Le(a, Max(b, c))),
(Or(Le(a, b), Lt(a, c)), ITE(b >= c, Le(a, b), Lt(a, c))),
(Or(Lt(a, b), Lt(a, c)), Lt(a, Max(b, c))),
)
return _matchers_or
def simplify_patterns_xor():
from sympy.functions.elementary.miscellaneous import Min, Max
from sympy.core import Wild
from sympy.core.relational import Eq, Ne, Ge, Gt, Le, Lt
a = Wild('a')
b = Wild('b')
c = Wild('c')
_matchers_xor = ((Xor(Eq(a, b), Ge(a, b)), Gt(a, b)),
(Xor(Eq(a, b), Gt(a, b)), Ge(a, b)),
(Xor(Eq(a, b), Le(a, b)), Lt(a, b)),
(Xor(Eq(a, b), Lt(a, b)), Le(a, b)),
(Xor(Ge(a, b), Gt(a, b)), Eq(a, b)),
(Xor(Ge(a, b), Le(a, b)), Ne(a, b)),
(Xor(Ge(a, b), Lt(a, b)), S.true),
(Xor(Ge(a, b), Ne(a, b)), Le(a, b)),
(Xor(Gt(a, b), Le(a, b)), S.true),
(Xor(Gt(a, b), Lt(a, b)), Ne(a, b)),
(Xor(Gt(a, b), Ne(a, b)), Lt(a, b)),
(Xor(Le(a, b), Lt(a, b)), Eq(a, b)),
(Xor(Le(a, b), Ne(a, b)), Ge(a, b)),
(Xor(Lt(a, b), Ne(a, b)), Gt(a, b)),
# Min/max
(Xor(Ge(a, b), Ge(a, c)),
And(Ge(a, Min(b, c)), Lt(a, Max(b, c)))),
(Xor(Ge(a, b), Gt(a, c)),
ITE(b > c, And(Gt(a, c), Lt(a, b)),
And(Ge(a, b), Le(a, c)))),
(Xor(Gt(a, b), Gt(a, c)),
And(Gt(a, Min(b, c)), Le(a, Max(b, c)))),
(Xor(Le(a, b), Le(a, c)),
And(Le(a, Max(b, c)), Gt(a, Min(b, c)))),
(Xor(Le(a, b), Lt(a, c)),
ITE(b < c, And(Lt(a, c), Gt(a, b)),
And(Le(a, b), Ge(a, c)))),
(Xor(Lt(a, b), Lt(a, c)),
And(Lt(a, Max(b, c)), Ge(a, Min(b, c)))),
)
return _matchers_xor
|
0d1007fb70e9983ce6f4e7be737aca9a1ee8ecaece5b6a9635c387bc3336f3b8 | from __future__ import division, print_function
import os
from sympy.core.function import expand_mul
from sympy.simplify.simplify import dotprodsimp as _dotprodsimp
# The following is an internal variable for controlling the recently introduced
# dotprodsimp intermediate simplification step in matrix operations in one
# place. It is intended as an emergency switch for cases where user code does not
# like the different structure of results that comes from this simplification
# and cannot be adapted for some reason. When the intermediate simplification
# step is considered fully compatible with user code and this mechanism is no
# longer needed, it can be removed.
# The default value of `None` specifies that dotprodsimp be used in a few
# selected low-level functions but not in others. Setting this global variable
# to `False` will turn off the dotprodsimp intermediate simplifications
# everywhere and setting to `True` will turn it on everywhere in matrices where
# it can be applied.
# There are a few other places in the matrix code where dotprodsimp can probably
# help, these are places where a call is made to:
#
# dps = _get_intermediate_simp()
#
# To determine whether dotprodsimp helps in these places, testing needs to be
# done. To turn dotprodsimp on in these places by default, replace this call with:
#
# from sympy.simplify.simplify import dotprodsimp as _dotprodsimp
# dps = _get_intermediate_simp(_dotprodsimp)
#
# Or possibly:
#
# from sympy import expand_mul
# dps = _get_intermediate_simp(expand_mul, expand_mul)
#
# This second form uses lighter simplification by default but may still do
# better than nothing.
#
# The other place where dotprodsimp may be added is any place where matrices are
# multiplied via:
#
# A.multiply(B) -> A.multiply(B, dotprodsimp=True)
# True, False or None
_DOTPRODSIMP_MODE = False if os.environ.get('SYMPY_DOTPRODSIMP', '').lower() in \
('false', 'off', '0') else None
def _get_intermediate_simp(deffunc=lambda x: x, offfunc=lambda x: x,
onfunc=_dotprodsimp, dotprodsimp=None):
"""Support function for controlling intermediate simplification. Returns a
simplification function according to the global setting of dotprodsimp
operation.
``deffunc`` - Function to be used by default.
``offfunc`` - Function to be used if dotprodsimp has been turned off.
``onfunc`` - Function to be used if dotprodsimp has been turned on.
``dotprodsimp`` - True, False or None. Will be overridden by the global
_DOTPRODSIMP_MODE if that is not None.
"""
if dotprodsimp is False or _DOTPRODSIMP_MODE is False:
return offfunc
if dotprodsimp is True or _DOTPRODSIMP_MODE is True:
return onfunc
return deffunc # None, None
def _get_intermediate_simp_bool(default=False, dotprodsimp=None):
"""Same as ``_get_intermediate_simp`` but returns bools instead of functions
by default."""
return _get_intermediate_simp(default, False, True, dotprodsimp)
def _iszero(x):
"""Returns True if x is zero."""
return getattr(x, 'is_zero', None)
def _is_zero_after_expand_mul(x):
"""Tests by expand_mul only, suitable for polynomials and rational
functions."""
return expand_mul(x) == 0
|
c7d7022008af2111e966926c1eaf266a49a7b7c886bd063ab638b7486715c1c4 | from __future__ import division, print_function
import copy
from sympy.core.function import expand_mul
from sympy.functions.elementary.miscellaneous import Min, sqrt
from .common import NonSquareMatrixError, NonPositiveDefiniteMatrixError
from .utilities import _get_intermediate_simp, _iszero
from .determinant import _find_reasonable_pivot_naive
def _rank_decomposition(M, iszerofunc=_iszero, simplify=False):
r"""Returns a pair of matrices (`C`, `F`) with matching rank
such that `A = C F`.
Parameters
==========
iszerofunc : Function, optional
A function used for detecting whether an element can
act as a pivot. ``lambda x: x.is_zero`` is used by default.
simplify : Bool or Function, optional
A function used to simplify elements when looking for a
pivot. By default SymPy's ``simplify`` is used.
Returns
=======
(C, F) : Matrices
`C` and `F` are full-rank matrices with the same rank as `A`,
whose product gives `A`.
See Notes for additional mathematical details.
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix([
... [1, 3, 1, 4],
... [2, 7, 3, 9],
... [1, 5, 3, 1],
... [1, 2, 0, 8]
... ])
>>> C, F = A.rank_decomposition()
>>> C
Matrix([
[1, 3, 4],
[2, 7, 9],
[1, 5, 1],
[1, 2, 8]])
>>> F
Matrix([
[1, 0, -2, 0],
[0, 1, 1, 0],
[0, 0, 0, 1]])
>>> C * F == A
True
Notes
=====
Obtaining `F`, an RREF of `A`, is equivalent to creating a
product
.. math::
E_n E_{n-1} ... E_1 A = F
where `E_n, E_{n-1}, ... , E_1` are the elimination matrices or
permutation matrices equivalent to each row-reduction step.
The inverse of the same product of elimination matrices gives
`C`:
.. math::
C = (E_n E_{n-1} ... E_1)^{-1}
It is not necessary, however, to actually compute the inverse:
the columns of `C` are those from the original matrix with the
same column indices as the indices of the pivot columns of `F`.
References
==========
.. [1] https://en.wikipedia.org/wiki/Rank_factorization
.. [2] Piziak, R.; Odell, P. L. (1 June 1999).
"Full Rank Factorization of Matrices".
Mathematics Magazine. 72 (3): 193. doi:10.2307/2690882
See Also
========
rref
"""
F, pivot_cols = M.rref(simplify=simplify, iszerofunc=iszerofunc,
pivots=True)
rank = len(pivot_cols)
C = M.extract(range(M.rows), pivot_cols)
F = F[:rank, :]
return C, F
def _liupc(M):
"""Liu's algorithm, for pre-determination of the Elimination Tree of
the given matrix, used in row-based symbolic Cholesky factorization.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix([
... [1, 0, 3, 2],
... [0, 0, 1, 0],
... [4, 0, 0, 5],
... [0, 6, 7, 0]])
>>> S.liupc()
([[0], [], [0], [1, 2]], [4, 3, 4, 4])
References
==========
Symbolic Sparse Cholesky Factorization using Elimination Trees,
Jeroen Van Grondelle (1999)
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.39.7582
"""
# Algorithm 2.4, p 17 of reference
# get the indices of the elements that are non-zero on or below diag
R = [[] for r in range(M.rows)]
for r, c, _ in M.row_list():
if c <= r:
R[r].append(c)
inf = len(R) # nothing will be this large
parent = [inf]*M.rows
virtual = [inf]*M.rows
for r in range(M.rows):
for c in R[r][:-1]:
while virtual[c] < r:
t = virtual[c]
virtual[c] = r
c = t
if virtual[c] == inf:
parent[c] = virtual[c] = r
return R, parent
def _row_structure_symbolic_cholesky(M):
"""Symbolic cholesky factorization, for pre-determination of the
non-zero structure of the Cholesky factororization.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix([
... [1, 0, 3, 2],
... [0, 0, 1, 0],
... [4, 0, 0, 5],
... [0, 6, 7, 0]])
>>> S.row_structure_symbolic_cholesky()
[[0], [], [0], [1, 2]]
References
==========
Symbolic Sparse Cholesky Factorization using Elimination Trees,
Jeroen Van Grondelle (1999)
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.39.7582
"""
R, parent = M.liupc()
inf = len(R) # this acts as infinity
Lrow = copy.deepcopy(R)
for k in range(M.rows):
for j in R[k]:
while j != inf and j != k:
Lrow[k].append(j)
j = parent[j]
Lrow[k] = list(sorted(set(Lrow[k])))
return Lrow
def _cholesky(M, hermitian=True):
"""Returns the Cholesky-type decomposition L of a matrix A
such that L * L.H == A if hermitian flag is True,
or L * L.T == A if hermitian is False.
A must be a Hermitian positive-definite matrix if hermitian is True,
or a symmetric matrix if it is False.
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
>>> A.cholesky()
Matrix([
[ 5, 0, 0],
[ 3, 3, 0],
[-1, 1, 3]])
>>> A.cholesky() * A.cholesky().T
Matrix([
[25, 15, -5],
[15, 18, 0],
[-5, 0, 11]])
The matrix can have complex entries:
>>> from sympy import I
>>> A = Matrix(((9, 3*I), (-3*I, 5)))
>>> A.cholesky()
Matrix([
[ 3, 0],
[-I, 2]])
>>> A.cholesky() * A.cholesky().H
Matrix([
[ 9, 3*I],
[-3*I, 5]])
Non-hermitian Cholesky-type decomposition may be useful when the
matrix is not positive-definite.
>>> A = Matrix([[1, 2], [2, 1]])
>>> L = A.cholesky(hermitian=False)
>>> L
Matrix([
[1, 0],
[2, sqrt(3)*I]])
>>> L*L.T == A
True
See Also
========
sympy.matrices.dense.DenseMatrix.LDLdecomposition
LUdecomposition
QRdecomposition
"""
from .dense import MutableDenseMatrix
if not M.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if hermitian and not M.is_hermitian:
raise ValueError("Matrix must be Hermitian.")
if not hermitian and not M.is_symmetric():
raise ValueError("Matrix must be symmetric.")
dps = _get_intermediate_simp(expand_mul, expand_mul)
L = MutableDenseMatrix.zeros(M.rows, M.rows)
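# Standard Cholesky recurrence (editor's sketch of what the loops below do):
# for j < i, L[i, j] = (A[i, j] - sum_k L[i, k]*conj(L[j, k])) / L[j, j]
# and L[i, i] = sqrt(A[i, i] - sum_k L[i, k]*conj(L[i, k])), with dps()
# applying the configured intermediate simplification to each entry.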
if hermitian:
for i in range(M.rows):
for j in range(i):
L[i, j] = dps((1 / L[j, j])*(M[i, j] -
sum(L[i, k]*L[j, k].conjugate() for k in range(j))))
Lii2 = dps(M[i, i] -
sum(L[i, k]*L[i, k].conjugate() for k in range(i)))
if Lii2.is_positive is False:
raise NonPositiveDefiniteMatrixError(
"Matrix must be positive-definite")
L[i, i] = sqrt(Lii2)
else:
for i in range(M.rows):
for j in range(i):
L[i, j] = dps((1 / L[j, j])*(M[i, j] -
sum(L[i, k]*L[j, k] for k in range(j))))
L[i, i] = sqrt(dps(M[i, i] -
sum(L[i, k]**2 for k in range(i))))
return M._new(L)
def _cholesky_sparse(M, hermitian=True):
"""
Returns the Cholesky decomposition L of a matrix A
such that L * L.T = A
A must be a square, symmetric, positive-definite
and non-singular matrix
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> A = SparseMatrix(((25,15,-5),(15,18,0),(-5,0,11)))
>>> A.cholesky()
Matrix([
[ 5, 0, 0],
[ 3, 3, 0],
[-1, 1, 3]])
>>> A.cholesky() * A.cholesky().T == A
True
The matrix can have complex entries:
>>> from sympy import I
>>> A = SparseMatrix(((9, 3*I), (-3*I, 5)))
>>> A.cholesky()
Matrix([
[ 3, 0],
[-I, 2]])
>>> A.cholesky() * A.cholesky().H
Matrix([
[ 9, 3*I],
[-3*I, 5]])
Non-hermitian Cholesky-type decomposition may be useful when the
matrix is not positive-definite.
>>> A = SparseMatrix([[1, 2], [2, 1]])
>>> L = A.cholesky(hermitian=False)
>>> L
Matrix([
[1, 0],
[2, sqrt(3)*I]])
>>> L*L.T == A
True
See Also
========
sympy.matrices.sparse.SparseMatrix.LDLdecomposition
LUdecomposition
QRdecomposition
"""
from .dense import MutableDenseMatrix
if not M.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if hermitian and not M.is_hermitian:
raise ValueError("Matrix must be Hermitian.")
if not hermitian and not M.is_symmetric():
raise ValueError("Matrix must be symmetric.")
dps = _get_intermediate_simp(expand_mul, expand_mul)
Crowstruc = M.row_structure_symbolic_cholesky()
C = MutableDenseMatrix.zeros(M.rows)
for i in range(len(Crowstruc)):
for j in Crowstruc[i]:
if i != j:
C[i, j] = M[i, j]
summ = 0
for p1 in Crowstruc[i]:
if p1 < j:
for p2 in Crowstruc[j]:
if p2 < j:
if p1 == p2:
if hermitian:
summ += C[i, p1]*C[j, p1].conjugate()
else:
summ += C[i, p1]*C[j, p1]
else:
break
else:
break
C[i, j] = dps((C[i, j] - summ) / C[j, j])
else: # i == j
C[j, j] = M[j, j]
summ = 0
for k in Crowstruc[j]:
if k < j:
if hermitian:
summ += C[j, k]*C[j, k].conjugate()
else:
summ += C[j, k]**2
else:
break
Cjj2 = dps(C[j, j] - summ)
if hermitian and Cjj2.is_positive is False:
raise NonPositiveDefiniteMatrixError(
"Matrix must be positive-definite")
C[j, j] = sqrt(Cjj2)
return M._new(C)
def _LDLdecomposition(M, hermitian=True):
"""Returns the LDL Decomposition (L, D) of matrix A,
such that L * D * L.H == A if hermitian flag is True, or
L * D * L.T == A if hermitian is False.
This method eliminates the use of square root.
Further this ensures that all the diagonal entries of L are 1.
A must be a Hermitian positive-definite matrix if hermitian is True,
or a symmetric matrix otherwise.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
>>> L, D = A.LDLdecomposition()
>>> L
Matrix([
[ 1, 0, 0],
[ 3/5, 1, 0],
[-1/5, 1/3, 1]])
>>> D
Matrix([
[25, 0, 0],
[ 0, 9, 0],
[ 0, 0, 9]])
>>> L * D * L.T * A.inv() == eye(A.rows)
True
The matrix can have complex entries:
>>> from sympy import I
>>> A = Matrix(((9, 3*I), (-3*I, 5)))
>>> L, D = A.LDLdecomposition()
>>> L
Matrix([
[ 1, 0],
[-I/3, 1]])
>>> D
Matrix([
[9, 0],
[0, 4]])
>>> L*D*L.H == A
True
See Also
========
sympy.matrices.dense.DenseMatrix.cholesky
LUdecomposition
QRdecomposition
"""
from .dense import MutableDenseMatrix
if not M.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if hermitian and not M.is_hermitian:
raise ValueError("Matrix must be Hermitian.")
if not hermitian and not M.is_symmetric():
raise ValueError("Matrix must be symmetric.")
dps = _get_intermediate_simp(expand_mul, expand_mul)
D = MutableDenseMatrix.zeros(M.rows, M.rows)
L = MutableDenseMatrix.eye(M.rows)
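# LDL recurrence (editor's sketch of what the loops below do): for j < i,
# L[i, j] = (A[i, j] - sum_k L[i, k]*conj(L[j, k])*D[k, k]) / D[j, j]
# and D[i, i] = A[i, i] - sum_k L[i, k]*conj(L[i, k])*D[k, k].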
if hermitian:
for i in range(M.rows):
for j in range(i):
L[i, j] = dps((1 / D[j, j])*(M[i, j] - sum(
L[i, k]*L[j, k].conjugate()*D[k, k] for k in range(j))))
D[i, i] = dps(M[i, i] -
sum(L[i, k]*L[i, k].conjugate()*D[k, k] for k in range(i)))
if D[i, i].is_positive is False:
raise NonPositiveDefiniteMatrixError(
"Matrix must be positive-definite")
else:
for i in range(M.rows):
for j in range(i):
L[i, j] = dps((1 / D[j, j])*(M[i, j] - sum(
L[i, k]*L[j, k]*D[k, k] for k in range(j))))
D[i, i] = dps(M[i, i] - sum(L[i, k]**2*D[k, k] for k in range(i)))
return M._new(L), M._new(D)
def _LDLdecomposition_sparse(M, hermitian=True):
"""
Returns the LDL Decomposition (matrices ``L`` and ``D``) of matrix
``A``, such that ``L * D * L.T == A``. ``A`` must be a square,
symmetric, positive-definite and non-singular matrix.
This method eliminates the use of square root and ensures that all
the diagonal entries of L are 1.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
>>> L, D = A.LDLdecomposition()
>>> L
Matrix([
[ 1, 0, 0],
[ 3/5, 1, 0],
[-1/5, 1/3, 1]])
>>> D
Matrix([
[25, 0, 0],
[ 0, 9, 0],
[ 0, 0, 9]])
>>> L * D * L.T == A
True
"""
from .dense import MutableDenseMatrix
if not M.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if hermitian and not M.is_hermitian:
raise ValueError("Matrix must be Hermitian.")
if not hermitian and not M.is_symmetric():
raise ValueError("Matrix must be symmetric.")
dps = _get_intermediate_simp(expand_mul, expand_mul)
Lrowstruc = M.row_structure_symbolic_cholesky()
L = MutableDenseMatrix.eye(M.rows)
D = MutableDenseMatrix.zeros(M.rows, M.cols)
for i in range(len(Lrowstruc)):
for j in Lrowstruc[i]:
if i != j:
L[i, j] = M[i, j]
summ = 0
for p1 in Lrowstruc[i]:
if p1 < j:
for p2 in Lrowstruc[j]:
if p2 < j:
if p1 == p2:
if hermitian:
summ += L[i, p1]*L[j, p1].conjugate()*D[p1, p1]
else:
summ += L[i, p1]*L[j, p1]*D[p1, p1]
else:
break
else:
break
L[i, j] = dps((L[i, j] - summ) / D[j, j])
else: # i == j
D[i, i] = M[i, i]
summ = 0
for k in Lrowstruc[i]:
if k < i:
if hermitian:
summ += L[i, k]*L[i, k].conjugate()*D[k, k]
else:
summ += L[i, k]**2*D[k, k]
else:
break
D[i, i] = dps(D[i, i] - summ)
if hermitian and D[i, i].is_positive is False:
raise NonPositiveDefiniteMatrixError(
"Matrix must be positive-definite")
return M._new(L), M._new(D)
def _LUdecomposition(M, iszerofunc=_iszero, simpfunc=None, rankcheck=False):
"""Returns (L, U, perm) where L is a lower triangular matrix with unit
diagonal, U is an upper triangular matrix, and perm is a list of row
swap index pairs. If A is the original matrix, then
A = (L*U).permuteBkwd(perm), and the row permutation matrix P such
that P*A = L*U can be computed by P=eye(A.row).permuteFwd(perm).
See documentation for LUCombined for details about the keyword arguments
rankcheck, iszerofunc, and simpfunc.
Parameters
==========
rankcheck : bool, optional
Determines if this function should detect the rank
deficiency of the matrix and should raise a
``ValueError``.
iszerofunc : function, optional
A function which determines if a given expression is zero.
The function should be a callable that takes a single
sympy expression and returns a 3-valued boolean value
``True``, ``False``, or ``None``.
It is internally used by the pivot searching algorithm.
        See the notes section for more information about the
        pivot searching algorithm.
simpfunc : function or None, optional
A function that simplifies the input.
If this is specified as a function, this function should be
a callable that takes a single sympy expression and returns
        another sympy expression that is algebraically
equivalent.
If ``None``, it indicates that the pivot search algorithm
should not attempt to simplify any candidate pivots.
It is internally used by the pivot searching algorithm.
        See the notes section for more information about the
        pivot searching algorithm.
Examples
========
>>> from sympy import Matrix
>>> a = Matrix([[4, 3], [6, 3]])
>>> L, U, _ = a.LUdecomposition()
>>> L
Matrix([
[ 1, 0],
[3/2, 1]])
>>> U
Matrix([
[4, 3],
[0, -3/2]])
See Also
========
sympy.matrices.dense.DenseMatrix.cholesky
sympy.matrices.dense.DenseMatrix.LDLdecomposition
QRdecomposition
LUdecomposition_Simple
LUdecompositionFF
LUsolve
"""
combined, p = M.LUdecomposition_Simple(iszerofunc=iszerofunc,
simpfunc=simpfunc, rankcheck=rankcheck)
# L is lower triangular ``M.rows x M.rows``
# U is upper triangular ``M.rows x M.cols``
# L has unit diagonal. For each column in combined, the subcolumn
# below the diagonal of combined is shared by L.
# If L has more columns than combined, then the remaining subcolumns
# below the diagonal of L are zero.
# The upper triangular portion of L and combined are equal.
def entry_L(i, j):
if i < j:
# Super diagonal entry
return M.zero
elif i == j:
return M.one
elif j < combined.cols:
return combined[i, j]
# Subdiagonal entry of L with no corresponding
# entry in combined
return M.zero
def entry_U(i, j):
return M.zero if i > j else combined[i, j]
L = M._new(combined.rows, combined.rows, entry_L)
U = M._new(combined.rows, combined.cols, entry_U)
return L, U, p
def _LUdecomposition_Simple(M, iszerofunc=_iszero, simpfunc=None,
rankcheck=False):
r"""Compute the PLU decomposition of the matrix.
Parameters
==========
rankcheck : bool, optional
        Determines if this function should detect the rank
        deficiency of the matrix and should raise a
        ``ValueError``.
iszerofunc : function, optional
A function which determines if a given expression is zero.
The function should be a callable that takes a single
sympy expression and returns a 3-valued boolean value
``True``, ``False``, or ``None``.
It is internally used by the pivot searching algorithm.
        See the notes section for more information about the
        pivot searching algorithm.
simpfunc : function or None, optional
A function that simplifies the input.
If this is specified as a function, this function should be
a callable that takes a single sympy expression and returns
        another sympy expression that is algebraically
equivalent.
If ``None``, it indicates that the pivot search algorithm
should not attempt to simplify any candidate pivots.
It is internally used by the pivot searching algorithm.
        See the notes section for more information about the
        pivot searching algorithm.
Returns
=======
(lu, row_swaps) : (Matrix, list)
If the original matrix is a $m, n$ matrix:
        *lu* is a $m, n$ matrix, which contains the result of the
        decomposition in a compressed form. See the notes section
        to see how the matrix is compressed.
        *row_swaps* is a list where each element is a
        pair of row exchange indices.
        ``A = (L*U).permute_backward(perm)``, and the row
        permutation matrix $P$ from the formula $P A = L U$ can be
        computed by ``P=eye(A.rows).permute_forward(perm)``.
Raises
======
ValueError
Raised if ``rankcheck=True`` and the matrix is found to
be rank deficient during the computation.
Notes
=====
About the PLU decomposition:
PLU decomposition is a generalization of a LU decomposition
which can be extended for rank-deficient matrices.
It can further be generalized for non-square matrices, and this
is the notation that SymPy is using.
PLU decomposition is a decomposition of a $m, n$ matrix $A$ in
the form of $P A = L U$ where
* $L$ is a $m, m$ lower triangular matrix with unit diagonal
entries.
* $U$ is a $m, n$ upper triangular matrix.
* $P$ is a $m, m$ permutation matrix.
So, for a square matrix, the decomposition would look like:
.. math::
L = \begin{bmatrix}
1 & 0 & 0 & \cdots & 0 \\
L_{1, 0} & 1 & 0 & \cdots & 0 \\
L_{2, 0} & L_{2, 1} & 1 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
L_{n-1, 0} & L_{n-1, 1} & L_{n-1, 2} & \cdots & 1
\end{bmatrix}
.. math::
U = \begin{bmatrix}
U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, n-1} \\
0 & U_{1, 1} & U_{1, 2} & \cdots & U_{1, n-1} \\
0 & 0 & U_{2, 2} & \cdots & U_{2, n-1} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & U_{n-1, n-1}
\end{bmatrix}
And for a matrix with more rows than the columns,
the decomposition would look like:
.. math::
L = \begin{bmatrix}
1 & 0 & 0 & \cdots & 0 & 0 & \cdots & 0 \\
L_{1, 0} & 1 & 0 & \cdots & 0 & 0 & \cdots & 0 \\
L_{2, 0} & L_{2, 1} & 1 & \cdots & 0 & 0 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots & \vdots & \ddots
& \vdots \\
L_{n-1, 0} & L_{n-1, 1} & L_{n-1, 2} & \cdots & 1 & 0
& \cdots & 0 \\
L_{n, 0} & L_{n, 1} & L_{n, 2} & \cdots & L_{n, n-1} & 1
& \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots & \vdots
& \ddots & \vdots \\
L_{m-1, 0} & L_{m-1, 1} & L_{m-1, 2} & \cdots & L_{m-1, n-1}
& 0 & \cdots & 1 \\
\end{bmatrix}
.. math::
U = \begin{bmatrix}
U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, n-1} \\
0 & U_{1, 1} & U_{1, 2} & \cdots & U_{1, n-1} \\
0 & 0 & U_{2, 2} & \cdots & U_{2, n-1} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & U_{n-1, n-1} \\
0 & 0 & 0 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & 0
\end{bmatrix}
Finally, for a matrix with more columns than the rows, the
decomposition would look like:
.. math::
L = \begin{bmatrix}
1 & 0 & 0 & \cdots & 0 \\
L_{1, 0} & 1 & 0 & \cdots & 0 \\
L_{2, 0} & L_{2, 1} & 1 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
L_{m-1, 0} & L_{m-1, 1} & L_{m-1, 2} & \cdots & 1
\end{bmatrix}
.. math::
U = \begin{bmatrix}
U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, m-1}
& \cdots & U_{0, n-1} \\
0 & U_{1, 1} & U_{1, 2} & \cdots & U_{1, m-1}
& \cdots & U_{1, n-1} \\
0 & 0 & U_{2, 2} & \cdots & U_{2, m-1}
& \cdots & U_{2, n-1} \\
\vdots & \vdots & \vdots & \ddots & \vdots
& \cdots & \vdots \\
0 & 0 & 0 & \cdots & U_{m-1, m-1}
& \cdots & U_{m-1, n-1} \\
\end{bmatrix}
About the compressed LU storage:
The results of the decomposition are often stored in compressed
forms rather than returning $L$ and $U$ matrices individually.
    It may be less intuitive, but it is commonly used in many
    numeric libraries because of its efficiency.
The storage matrix is defined as following for this specific
method:
* The subdiagonal elements of $L$ are stored in the subdiagonal
portion of $LU$, that is $LU_{i, j} = L_{i, j}$ whenever
$i > j$.
* The elements on the diagonal of $L$ are all 1, and are not
explicitly stored.
    * $U$ is stored in the upper triangular portion of $LU$, that is
      $LU_{i, j} = U_{i, j}$ whenever $i \le j$.
    * In the case of $m > n$, the right side of the $L$ matrix is
      trivial and is not stored.
    * In the case of $m < n$, the lower side of the $U$ matrix is
      trivial and is not stored.
So, for a square matrix, the compressed output matrix would be:
.. math::
LU = \begin{bmatrix}
U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, n-1} \\
L_{1, 0} & U_{1, 1} & U_{1, 2} & \cdots & U_{1, n-1} \\
L_{2, 0} & L_{2, 1} & U_{2, 2} & \cdots & U_{2, n-1} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
L_{n-1, 0} & L_{n-1, 1} & L_{n-1, 2} & \cdots & U_{n-1, n-1}
\end{bmatrix}
For a matrix with more rows than the columns, the compressed
output matrix would be:
.. math::
LU = \begin{bmatrix}
U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, n-1} \\
L_{1, 0} & U_{1, 1} & U_{1, 2} & \cdots & U_{1, n-1} \\
L_{2, 0} & L_{2, 1} & U_{2, 2} & \cdots & U_{2, n-1} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
L_{n-1, 0} & L_{n-1, 1} & L_{n-1, 2} & \cdots
& U_{n-1, n-1} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
L_{m-1, 0} & L_{m-1, 1} & L_{m-1, 2} & \cdots
& L_{m-1, n-1} \\
\end{bmatrix}
For a matrix with more columns than the rows, the compressed
output matrix would be:
.. math::
LU = \begin{bmatrix}
U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, m-1}
& \cdots & U_{0, n-1} \\
L_{1, 0} & U_{1, 1} & U_{1, 2} & \cdots & U_{1, m-1}
& \cdots & U_{1, n-1} \\
L_{2, 0} & L_{2, 1} & U_{2, 2} & \cdots & U_{2, m-1}
& \cdots & U_{2, n-1} \\
\vdots & \vdots & \vdots & \ddots & \vdots
& \cdots & \vdots \\
L_{m-1, 0} & L_{m-1, 1} & L_{m-1, 2} & \cdots & U_{m-1, m-1}
& \cdots & U_{m-1, n-1} \\
\end{bmatrix}
About the pivot searching algorithm:
When a matrix contains symbolic entries, the pivot search algorithm
differs from the case where every entry can be categorized as zero or
nonzero.
The algorithm searches column by column through the submatrix whose
top left entry coincides with the pivot position.
If it exists, the pivot is the first entry in the current search
column that iszerofunc guarantees is nonzero.
If no such candidate exists, then each candidate pivot is simplified
if simpfunc is not None.
The search is repeated, with the difference that a candidate may be
the pivot if ``iszerofunc()`` cannot guarantee that it is nonzero.
In the second search the pivot is the first candidate that
iszerofunc can guarantee is nonzero.
If no such candidate exists, then the pivot is the first candidate
for which iszerofunc returns None.
If no such candidate exists, then the search is repeated in the next
column to the right.
The pivot search algorithm differs from the one in ``rref()``, which
relies on ``_find_reasonable_pivot()``.
    Future versions of ``LUdecomposition_Simple()`` may use
``_find_reasonable_pivot()``.
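    Examples
    ========
    A small sketch of the compressed storage described above (the
    expected output below assumes the default ``iszerofunc`` on this
    well-conditioned input):
    >>> from sympy import Matrix
    >>> A = Matrix([[4, 3], [6, 3]])
    >>> lu, row_swaps = A.LUdecomposition_Simple()
    >>> lu
    Matrix([
    [  4,    3],
    [3/2, -3/2]])
    >>> row_swaps
    []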
See Also
========
LUdecomposition
LUdecompositionFF
LUsolve
"""
if rankcheck:
# https://github.com/sympy/sympy/issues/9796
pass
if M.rows == 0 or M.cols == 0:
# Define LU decomposition of a matrix with no entries as a matrix
# of the same dimensions with all zero entries.
return M.zeros(M.rows, M.cols), []
dps = _get_intermediate_simp()
lu = M.as_mutable()
row_swaps = []
pivot_col = 0
for pivot_row in range(0, lu.rows - 1):
# Search for pivot. Prefer entry that iszeropivot determines
        # is nonzero, over entry that iszeropivot cannot guarantee
        # is nonzero.
# XXX ``_find_reasonable_pivot`` uses slow zero testing. Blocked by bug #10279
# Future versions of LUdecomposition_simple can pass iszerofunc and simpfunc
# to _find_reasonable_pivot().
# In pass 3 of _find_reasonable_pivot(), the predicate in ``if x.equals(S.Zero):``
# calls sympy.simplify(), and not the simplification function passed in via
# the keyword argument simpfunc.
iszeropivot = True
while pivot_col != M.cols and iszeropivot:
sub_col = (lu[r, pivot_col] for r in range(pivot_row, M.rows))
pivot_row_offset, pivot_value, is_assumed_non_zero, ind_simplified_pairs =\
_find_reasonable_pivot_naive(sub_col, iszerofunc, simpfunc)
iszeropivot = pivot_value is None
if iszeropivot:
# All candidate pivots in this column are zero.
# Proceed to next column.
pivot_col += 1
if rankcheck and pivot_col != pivot_row:
# All entries including and below the pivot position are
# zero, which indicates that the rank of the matrix is
# strictly less than min(num rows, num cols)
# Mimic behavior of previous implementation, by throwing a
# ValueError.
raise ValueError("Rank of matrix is strictly less than"
" number of rows or columns."
" Pass keyword argument"
" rankcheck=False to compute"
" the LU decomposition of this matrix.")
candidate_pivot_row = None if pivot_row_offset is None else pivot_row + pivot_row_offset
if candidate_pivot_row is None and iszeropivot:
# If candidate_pivot_row is None and iszeropivot is True
# after pivot search has completed, then the submatrix
# below and to the right of (pivot_row, pivot_col) is
# all zeros, indicating that Gaussian elimination is
# complete.
return lu, row_swaps
# Update entries simplified during pivot search.
for offset, val in ind_simplified_pairs:
lu[pivot_row + offset, pivot_col] = val
if pivot_row != candidate_pivot_row:
# Row swap book keeping:
# Record which rows were swapped.
# Update stored portion of L factor by multiplying L on the
# left and right with the current permutation.
# Swap rows of U.
row_swaps.append([pivot_row, candidate_pivot_row])
# Update L.
lu[pivot_row, 0:pivot_row], lu[candidate_pivot_row, 0:pivot_row] = \
lu[candidate_pivot_row, 0:pivot_row], lu[pivot_row, 0:pivot_row]
# Swap pivot row of U with candidate pivot row.
lu[pivot_row, pivot_col:lu.cols], lu[candidate_pivot_row, pivot_col:lu.cols] = \
lu[candidate_pivot_row, pivot_col:lu.cols], lu[pivot_row, pivot_col:lu.cols]
# Introduce zeros below the pivot by adding a multiple of the
# pivot row to a row under it, and store the result in the
# row under it.
# Only entries in the target row whose index is greater than
# start_col may be nonzero.
start_col = pivot_col + 1
for row in range(pivot_row + 1, lu.rows):
# Store factors of L in the subcolumn below
# (pivot_row, pivot_row).
lu[row, pivot_row] = \
dps(lu[row, pivot_col]/lu[pivot_row, pivot_col])
# Form the linear combination of the pivot row and the current
# row below the pivot row that zeros the entries below the pivot.
# Employing slicing instead of a loop here raises
# NotImplementedError: Cannot add Zero to MutableSparseMatrix
# in sympy/matrices/tests/test_sparse.py.
# c = pivot_row + 1 if pivot_row == pivot_col else pivot_col
for c in range(start_col, lu.cols):
lu[row, c] = dps(lu[row, c] - lu[row, pivot_row]*lu[pivot_row, c])
if pivot_row != pivot_col:
# matrix rank < min(num rows, num cols),
# so factors of L are not stored directly below the pivot.
# These entries are zero by construction, so don't bother
# computing them.
for row in range(pivot_row + 1, lu.rows):
lu[row, pivot_col] = M.zero
pivot_col += 1
if pivot_col == lu.cols:
# All candidate pivots are zero implies that Gaussian
# elimination is complete.
return lu, row_swaps
if rankcheck:
if iszerofunc(
lu[Min(lu.rows, lu.cols) - 1, Min(lu.rows, lu.cols) - 1]):
raise ValueError("Rank of matrix is strictly less than"
" number of rows or columns."
" Pass keyword argument"
" rankcheck=False to compute"
" the LU decomposition of this matrix.")
return lu, row_swaps
def _LUdecompositionFF(M):
"""Compute a fraction-free LU decomposition.
Returns 4 matrices P, L, D, U such that PA = L D**-1 U.
If the elements of the matrix belong to some integral domain I, then all
elements of L, D and U are guaranteed to belong to I.
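    Examples
    ========
    A hedged sketch of the fraction-free factorization (``D.inv()`` is
    used here only to verify the identity; the returned factors stay
    fraction-free for integer input):
    >>> from sympy import Matrix
    >>> A = Matrix([[4, 3], [6, 3]])
    >>> P, L, D, U = A.LUdecompositionFF()
    >>> P*A == L*D.inv()*U
    True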
See Also
========
LUdecomposition
LUdecomposition_Simple
LUsolve
References
==========
.. [1] W. Zhou & D.J. Jeffrey, "Fraction-free matrix factors: new forms
for LU and QR factors". Frontiers in Computer Science in China,
Vol 2, no. 1, pp. 67-80, 2008.
"""
from sympy.matrices import SparseMatrix
zeros = SparseMatrix.zeros
eye = SparseMatrix.eye
n, m = M.rows, M.cols
U, L, P = M.as_mutable(), eye(n), eye(n)
DD = zeros(n, n)
oldpivot = 1
for k in range(n - 1):
if U[k, k] == 0:
for kpivot in range(k + 1, n):
if U[kpivot, k]:
break
else:
raise ValueError("Matrix is not full rank")
U[k, k:], U[kpivot, k:] = U[kpivot, k:], U[k, k:]
L[k, :k], L[kpivot, :k] = L[kpivot, :k], L[k, :k]
P[k, :], P[kpivot, :] = P[kpivot, :], P[k, :]
        L[k, k] = Ukk = U[k, k]
DD[k, k] = oldpivot * Ukk
for i in range(k + 1, n):
L[i, k] = Uik = U[i, k]
for j in range(k + 1, m):
U[i, j] = (Ukk * U[i, j] - U[k, j] * Uik) / oldpivot
U[i, k] = 0
oldpivot = Ukk
DD[n - 1, n - 1] = oldpivot
return P, L, DD, U
def _QRdecomposition(M):
"""Return Q, R where A = Q*R, Q is orthogonal and R is upper triangular.
Examples
========
This is the example from wikipedia:
>>> from sympy import Matrix
>>> A = Matrix([[12, -51, 4], [6, 167, -68], [-4, 24, -41]])
>>> Q, R = A.QRdecomposition()
>>> Q
Matrix([
[ 6/7, -69/175, -58/175],
[ 3/7, 158/175, 6/175],
[-2/7, 6/35, -33/35]])
>>> R
Matrix([
[14, 21, -14],
[ 0, 175, -70],
[ 0, 0, 35]])
>>> A == Q*R
True
QR factorization of an identity matrix:
>>> A = Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> Q, R = A.QRdecomposition()
>>> Q
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> R
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
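    A rank-deficient example (a hedged sketch; with this implementation
    ``Q`` keeps only the columns corresponding to independent columns
    of ``A``):
    >>> A = Matrix([[1, 2], [2, 4]])
    >>> Q, R = A.QRdecomposition()
    >>> Q.shape, R.shape
    ((2, 1), (1, 2))
    >>> Q*R == A
    True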
See Also
========
sympy.matrices.dense.DenseMatrix.cholesky
sympy.matrices.dense.DenseMatrix.LDLdecomposition
LUdecomposition
QRsolve
"""
dps = _get_intermediate_simp(expand_mul, expand_mul)
cls = M.__class__
mat = M.as_mutable()
n = mat.rows
m = mat.cols
ranked = list()
# Pad with additional rows to make wide matrices square
# nOrig keeps track of original size so zeros can be trimmed from Q
if n < m:
nOrig = n
n = m
mat = mat.col_join(mat.zeros(n - nOrig, m))
else:
nOrig = n
Q, R = mat.zeros(n, m), mat.zeros(m)
for j in range(m): # for each column vector
tmp = mat[:, j] # take original v
for i in range(j):
            # subtract the projection of mat onto the new vector
R[i, j] = dps(Q[:, i].dot(mat[:, j], hermitian=True))
tmp -= Q[:, i] * R[i, j]
tmp = dps(tmp)
# normalize it
R[j, j] = tmp.norm()
if not R[j, j].is_zero:
ranked.append(j)
Q[:, j] = tmp / R[j, j]
if len(ranked) != 0:
return (cls(Q.extract(range(nOrig), ranked)),
cls(R.extract(ranked, range(R.cols))))
else:
# Trivial case handling for zero-rank matrix
# Force Q as matrix containing standard basis vectors
for i in range(Min(nOrig, m)):
Q[i, i] = 1
return (cls(Q.extract(range(nOrig), range(Min(nOrig, m)))),
cls(R.extract(range(Min(nOrig, m)), range(R.cols))))
|
272d661a3297c473b4015dcebc4b7fee67bb19bd7129ede1920e05e4ecf4bef2 | from __future__ import division, print_function
from types import FunctionType
from sympy.core.numbers import Float, Integer
from sympy.core.singleton import S
from sympy.core.symbol import _uniquely_named_symbol
from sympy.polys import PurePoly, cancel
from sympy.simplify.simplify import (simplify as _simplify,
dotprodsimp as _dotprodsimp)
from .common import MatrixError, NonSquareMatrixError
from .utilities import (
_get_intermediate_simp, _get_intermediate_simp_bool,
_iszero, _is_zero_after_expand_mul)
def _find_reasonable_pivot(col, iszerofunc=_iszero, simpfunc=_simplify):
""" Find the lowest index of an item in ``col`` that is
suitable for a pivot. If ``col`` consists only of
Floats, the pivot with the largest norm is returned.
Otherwise, the first element where ``iszerofunc`` returns
    False is used. If ``iszerofunc`` does not return False for any element,
items are simplified and retested until a suitable
pivot is found.
Returns a 4-tuple
(pivot_offset, pivot_val, assumed_nonzero, newly_determined)
where pivot_offset is the index of the pivot, pivot_val is
the (possibly simplified) value of the pivot, assumed_nonzero
is True if an assumption that the pivot was non-zero
was made without being proved, and newly_determined are
elements that were simplified during the process of pivot
finding."""
newly_determined = []
col = list(col)
# a column that contains a mix of floats and integers
# but at least one float is considered a numerical
# column, and so we do partial pivoting
if all(isinstance(x, (Float, Integer)) for x in col) and any(
isinstance(x, Float) for x in col):
col_abs = [abs(x) for x in col]
max_value = max(col_abs)
if iszerofunc(max_value):
# just because iszerofunc returned True, doesn't
# mean the value is numerically zero. Make sure
# to replace all entries with numerical zeros
if max_value != 0:
newly_determined = [(i, 0) for i, x in enumerate(col) if x != 0]
return (None, None, False, newly_determined)
index = col_abs.index(max_value)
return (index, col[index], False, newly_determined)
# PASS 1 (iszerofunc directly)
possible_zeros = []
for i, x in enumerate(col):
is_zero = iszerofunc(x)
        # if someone wrote a custom iszerofunc, it may return
# BooleanFalse or BooleanTrue instead of True or False,
# so use == for comparison instead of `is`
if is_zero == False:
# we found something that is definitely not zero
return (i, x, False, newly_determined)
possible_zeros.append(is_zero)
# by this point, we've found no certain non-zeros
if all(possible_zeros):
# if everything is definitely zero, we have
# no pivot
return (None, None, False, newly_determined)
# PASS 2 (iszerofunc after simplify)
# we haven't found any for-sure non-zeros, so
# go through the elements iszerofunc couldn't
# make a determination about and opportunistically
# simplify to see if we find something
for i, x in enumerate(col):
if possible_zeros[i] is not None:
continue
simped = simpfunc(x)
is_zero = iszerofunc(simped)
if is_zero == True or is_zero == False:
newly_determined.append((i, simped))
if is_zero == False:
return (i, simped, False, newly_determined)
possible_zeros[i] = is_zero
    # after simplifying, some things that were not previously
    # recognized as zeros might now be recognized as zeros
if all(possible_zeros):
# if everything is definitely zero, we have
# no pivot
return (None, None, False, newly_determined)
# PASS 3 (.equals(0))
# some expressions fail to simplify to zero, but
# ``.equals(0)`` evaluates to True. As a last-ditch
# attempt, apply ``.equals`` to these expressions
for i, x in enumerate(col):
if possible_zeros[i] is not None:
continue
if x.equals(S.Zero):
            # ``iszerofunc`` was unable to decide whether this entry
            # is zero, but ``.equals(0)`` proves that it is, so record
            # the entry as a determined zero
possible_zeros[i] = True
newly_determined.append((i, S.Zero))
if all(possible_zeros):
return (None, None, False, newly_determined)
# at this point there is nothing that could definitely
# be a pivot. To maintain compatibility with existing
    # behavior, we'll assume that an ill-determined thing is
# non-zero. We should probably raise a warning in this case
i = possible_zeros.index(None)
return (i, col[i], True, newly_determined)
def _find_reasonable_pivot_naive(col, iszerofunc=_iszero, simpfunc=None):
"""
Helper that computes the pivot value and location from a
sequence of contiguous matrix column elements. As a side effect
of the pivot search, this function may simplify some of the elements
of the input column. A list of these simplified entries and their
indices are also returned.
This function mimics the behavior of _find_reasonable_pivot(),
but does less work trying to determine if an indeterminate candidate
pivot simplifies to zero. This more naive approach can be much faster,
with the trade-off that it may erroneously return a pivot that is zero.
``col`` is a sequence of contiguous column entries to be searched for
a suitable pivot.
``iszerofunc`` is a callable that returns a Boolean that indicates
if its input is zero, or None if no such determination can be made.
``simpfunc`` is a callable that simplifies its input. It must return
its input if it does not simplify its input. Passing in
``simpfunc=None`` indicates that the pivot search should not attempt
to simplify any candidate pivots.
Returns a 4-tuple:
(pivot_offset, pivot_val, assumed_nonzero, newly_determined)
``pivot_offset`` is the sequence index of the pivot.
``pivot_val`` is the value of the pivot.
    pivot_val and col[pivot_offset] are equivalent, but will be different
    when col[pivot_offset] was simplified during the pivot search.
    ``assumed_nonzero`` is a boolean indicating if the pivot cannot be
    guaranteed to be nonzero. If assumed_nonzero is true, then the pivot
    may or may not be non-zero. If assumed_nonzero is false, then
    the pivot is guaranteed to be non-zero.
``newly_determined`` is a list of index-value pairs of pivot candidates
that were simplified during the pivot search.
"""
# indeterminates holds the index-value pairs of each pivot candidate
    # that is neither zero nor non-zero, as determined by iszerofunc().
# If iszerofunc() indicates that a candidate pivot is guaranteed
# non-zero, or that every candidate pivot is zero then the contents
# of indeterminates are unused.
# Otherwise, the only viable candidate pivots are symbolic.
# In this case, indeterminates will have at least one entry,
# and all but the first entry are ignored when simpfunc is None.
indeterminates = []
for i, col_val in enumerate(col):
col_val_is_zero = iszerofunc(col_val)
if col_val_is_zero == False:
# This pivot candidate is non-zero.
return i, col_val, False, []
elif col_val_is_zero is None:
# The candidate pivot's comparison with zero
# is indeterminate.
indeterminates.append((i, col_val))
if len(indeterminates) == 0:
# All candidate pivots are guaranteed to be zero, i.e. there is
# no pivot.
return None, None, False, []
if simpfunc is None:
# Caller did not pass in a simplification function that might
# determine if an indeterminate pivot candidate is guaranteed
# to be nonzero, so assume the first indeterminate candidate
# is non-zero.
return indeterminates[0][0], indeterminates[0][1], True, []
# newly_determined holds index-value pairs of candidate pivots
# that were simplified during the search for a non-zero pivot.
newly_determined = []
for i, col_val in indeterminates:
tmp_col_val = simpfunc(col_val)
if id(col_val) != id(tmp_col_val):
# simpfunc() simplified this candidate pivot.
newly_determined.append((i, tmp_col_val))
if iszerofunc(tmp_col_val) == False:
# Candidate pivot simplified to a guaranteed non-zero value.
return i, tmp_col_val, False, newly_determined
return indeterminates[0][0], indeterminates[0][1], True, newly_determined
# This function is a candidate for caching if it gets implemented for matrices.
def _berkowitz_toeplitz_matrix(M):
"""Return (A,T) where T the Toeplitz matrix used in the Berkowitz algorithm
corresponding to ``M`` and A is the first principal submatrix.
"""
# the 0 x 0 case is trivial
if M.rows == 0 and M.cols == 0:
return M._new(1,1, [M.one])
#
# Partition M = [ a_11 R ]
# [ C A ]
#
a, R = M[0,0], M[0, 1:]
C, A = M[1:, 0], M[1:,1:]
#
# The Toeplitz matrix looks like
#
# [ 1 ]
# [ -a 1 ]
# [ -RC -a 1 ]
# [ -RAC -RC -a 1 ]
# [ -RA**2C -RAC -RC -a 1 ]
# etc.
# Compute the diagonal entries.
# Because multiplying matrix times vector is so much
# more efficient than matrix times matrix, recursively
# compute -R * A**n * C.
diags = [C]
for i in range(M.rows - 2):
diags.append(A.multiply(diags[i], dotprodsimp=True))
diags = [(-R).multiply(d, dotprodsimp=True)[0, 0] for d in diags]
diags = [M.one, -a] + diags
def entry(i,j):
if j > i:
return M.zero
return diags[i - j]
toeplitz = M._new(M.cols + 1, M.rows, entry)
return (A, toeplitz)
# This function is a candidate for caching if it gets implemented for matrices.
def _berkowitz_vector(M):
""" Run the Berkowitz algorithm and return a vector whose entries
are the coefficients of the characteristic polynomial of ``M``.
    Given an N x N matrix, efficiently compute the
    coefficients of the characteristic polynomial of ``M``
    without division in the ground domain.
    This method is particularly useful for computing the determinant,
    principal minors and characteristic polynomial when ``M``
    has complicated coefficients, e.g. polynomials. Semi-direct
    usage of this algorithm is also important for efficiently
    computing sub-resultant PRS.
Assuming that M is a square matrix of dimension N x N and
I is N x N identity matrix, then the Berkowitz vector is
    an (N + 1) x 1 vector whose entries are coefficients of the
polynomial
charpoly(M) = det(t*I - M)
As a consequence, all polynomials generated by Berkowitz
algorithm are monic.
For more information on the implemented algorithm refer to:
[1] S.J. Berkowitz, On computing the determinant in small
parallel time using a small number of processors, ACM,
Information Processing Letters 18, 1984, pp. 147-150
[2] M. Keber, Division-Free computation of sub-resultants
using Bezout matrices, Tech. Report MPI-I-2006-1-006,
Saarbrucken, 2006
"""
# handle the trivial cases
if M.rows == 0 and M.cols == 0:
return M._new(1, 1, [M.one])
elif M.rows == 1 and M.cols == 1:
return M._new(2, 1, [M.one, -M[0,0]])
submat, toeplitz = _berkowitz_toeplitz_matrix(M)
return toeplitz.multiply(_berkowitz_vector(submat), dotprodsimp=True)
def _adjugate(M, method="berkowitz"):
"""Returns the adjugate, or classical adjoint, of
a matrix. That is, the transpose of the matrix of cofactors.
https://en.wikipedia.org/wiki/Adjugate
Parameters
==========
method : string, optional
Method to use to find the cofactors, can be "bareiss", "berkowitz" or
"lu".
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 2], [3, 4]])
>>> M.adjugate()
Matrix([
[ 4, -2],
[-3, 1]])
See Also
========
cofactor_matrix
sympy.matrices.common.MatrixCommon.transpose
"""
return M.cofactor_matrix(method=method).transpose()
# This function is a candidate for caching if it gets implemented for matrices.
def _charpoly(M, x='lambda', simplify=_simplify):
"""Computes characteristic polynomial det(x*I - M) where I is
the identity matrix.
A PurePoly is returned, so using different variables for ``x`` does
not affect the comparison or the polynomials:
Parameters
==========
x : string, optional
Name for the "lambda" variable, defaults to "lambda".
simplify : function, optional
Simplification function to use on the characteristic polynomial
calculated. Defaults to ``simplify``.
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[1, 3], [2, 0]])
>>> M.charpoly()
PurePoly(lambda**2 - lambda - 6, lambda, domain='ZZ')
>>> M.charpoly(x) == M.charpoly(y)
True
Specifying ``x`` is optional; a symbol named ``lambda`` is used by
default (which looks good when pretty-printed in unicode):
>>> M.charpoly().as_expr()
lambda**2 - lambda - 6
And if ``x`` clashes with an existing symbol, underscores will
be prepended to the name to make it unique:
>>> M = Matrix([[1, 2], [x, 0]])
>>> M.charpoly(x).as_expr()
_x**2 - _x - 2*x
Whether you pass a symbol or not, the generator can be obtained
with the gen attribute since it may not be the same as the symbol
that was passed:
>>> M.charpoly(x).gen
_x
>>> M.charpoly(x).gen == x
False
Notes
=====
The Samuelson-Berkowitz algorithm is used to compute
the characteristic polynomial efficiently and without any
division operations. Thus the characteristic polynomial over any
commutative ring without zero divisors can be computed.
    If the determinant det(x*I - M) can be found easily, as
    in the case of an upper or a lower triangular matrix, then
    instead of the Samuelson-Berkowitz algorithm, the eigenvalues are
    computed and used to form the characteristic polynomial.
See Also
========
det
"""
if not M.is_square:
raise NonSquareMatrixError()
if M.is_lower or M.is_upper:
diagonal_elements = M.diagonal()
x = _uniquely_named_symbol(x, diagonal_elements)
m = 1
for i in diagonal_elements:
m = m * (x - simplify(i))
return PurePoly(m, x)
berk_vector = _berkowitz_vector(M)
x = _uniquely_named_symbol(x, berk_vector)
return PurePoly([simplify(a) for a in berk_vector], x)
def _cofactor(M, i, j, method="berkowitz"):
"""Calculate the cofactor of an element.
Parameters
==========
method : string, optional
Method to use to find the cofactors, can be "bareiss", "berkowitz" or
"lu".
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 2], [3, 4]])
>>> M.cofactor(0, 1)
-3
See Also
========
cofactor_matrix
minor
minor_submatrix
"""
if not M.is_square or M.rows < 1:
raise NonSquareMatrixError()
return (-1)**((i + j) % 2) * M.minor(i, j, method)
def _cofactor_matrix(M, method="berkowitz"):
"""Return a matrix containing the cofactor of each element.
Parameters
==========
method : string, optional
Method to use to find the cofactors, can be "bareiss", "berkowitz" or
"lu".
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 2], [3, 4]])
>>> M.cofactor_matrix()
Matrix([
[ 4, -3],
[-2, 1]])
See Also
========
cofactor
minor
minor_submatrix
"""
if not M.is_square or M.rows < 1:
raise NonSquareMatrixError()
return M._new(M.rows, M.cols,
lambda i, j: M.cofactor(i, j, method))
# This function is a candidate for caching if it gets implemented for matrices.
def _det(M, method="bareiss", iszerofunc=None):
"""Computes the determinant of a matrix if ``M`` is a concrete matrix object
otherwise return an expressions ``Determinant(M)`` if ``M`` is a
``MatrixSymbol`` or other expression.
Parameters
==========
method : string, optional
Specifies the algorithm used for computing the matrix determinant.
If the matrix is at most 3x3, a hard-coded formula is used and the
specified method is ignored. Otherwise, it defaults to
``'bareiss'``.
        Also, if the matrix is an upper or a lower triangular matrix, the
        determinant is computed by simple multiplication of the diagonal
        elements, and the specified method is ignored.
If it is set to ``'bareiss'``, Bareiss' fraction-free algorithm will
be used.
If it is set to ``'berkowitz'``, Berkowitz' algorithm will be used.
Otherwise, if it is set to ``'lu'``, LU decomposition will be used.
.. note::
For backward compatibility, legacy keys like "bareis" and
"det_lu" can still be used to indicate the corresponding
methods.
And the keys are also case-insensitive for now. However, it is
suggested to use the precise keys for specifying the method.
iszerofunc : FunctionType or None, optional
If it is set to ``None``, it will be defaulted to ``_iszero`` if the
method is set to ``'bareiss'``, and ``_is_zero_after_expand_mul`` if
the method is set to ``'lu'``.
It can also accept any user-specified zero testing function, if it
is formatted as a function which accepts a single symbolic argument
and returns ``True`` if it is tested as zero and ``False`` if it
        is tested as non-zero, and also ``None`` if it is undecidable.
Returns
=======
det : Basic
Result of determinant.
Raises
======
ValueError
If unrecognized keys are given for ``method`` or ``iszerofunc``.
NonSquareMatrixError
If attempted to calculate determinant from a non-square matrix.
Examples
========
>>> from sympy import Matrix, MatrixSymbol, eye, det
>>> M = Matrix([[1, 2], [3, 4]])
>>> M.det()
-2
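    The ``method`` keyword selects the algorithm; for a matrix of size at
    most 3 x 3 the hard-coded formula is used regardless (as noted above),
    so every method returns the same value here:
    >>> M.det(method="berkowitz")
    -2
    >>> M.det(method="lu")
    -2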
"""
# sanitize `method`
method = method.lower()
if method == "bareis":
method = "bareiss"
elif method == "det_lu":
method = "lu"
if method not in ("bareiss", "berkowitz", "lu"):
raise ValueError("Determinant method '%s' unrecognized" % method)
if iszerofunc is None:
if method == "bareiss":
iszerofunc = _is_zero_after_expand_mul
elif method == "lu":
iszerofunc = _iszero
elif not isinstance(iszerofunc, FunctionType):
raise ValueError("Zero testing method '%s' unrecognized" % iszerofunc)
n = M.rows
if n == M.cols: # square check is done in individual method functions
if M.is_upper or M.is_lower:
m = 1
for i in range(n):
m = m * M[i, i]
return _get_intermediate_simp(_dotprodsimp)(m)
elif n == 0:
return M.one
elif n == 1:
return M[0,0]
elif n == 2:
m = M[0, 0] * M[1, 1] - M[0, 1] * M[1, 0]
return _get_intermediate_simp(_dotprodsimp)(m)
elif n == 3:
m = (M[0, 0] * M[1, 1] * M[2, 2]
+ M[0, 1] * M[1, 2] * M[2, 0]
+ M[0, 2] * M[1, 0] * M[2, 1]
- M[0, 2] * M[1, 1] * M[2, 0]
- M[0, 0] * M[1, 2] * M[2, 1]
- M[0, 1] * M[1, 0] * M[2, 2])
return _get_intermediate_simp(_dotprodsimp)(m)
if method == "bareiss":
return M._eval_det_bareiss(iszerofunc=iszerofunc)
elif method == "berkowitz":
return M._eval_det_berkowitz()
elif method == "lu":
return M._eval_det_lu(iszerofunc=iszerofunc)
else:
raise MatrixError('unknown method for calculating determinant')
# This function is a candidate for caching if it gets implemented for matrices.
def _det_bareiss(M, iszerofunc=_is_zero_after_expand_mul):
"""Compute matrix determinant using Bareiss' fraction-free
algorithm which is an extension of the well known Gaussian
elimination method. This approach is best suited for dense
symbolic matrices and will result in a determinant with
minimal number of fractions. It means that less term
rewriting is needed on resulting formulae.
Parameters
==========
iszerofunc : function, optional
        The function to use to determine zeros during the fraction-free
        elimination. Defaults to ``_is_zero_after_expand_mul``.
TODO: Implement algorithm for sparse matrices (SFF),
http://www.eecis.udel.edu/~saunders/papers/sffge/it5.ps.
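    Examples
    ========
    A hedged usage sketch via the public ``det`` entry point; based on the
    dispatch in ``_det`` above, a non-triangular matrix larger than 3 x 3
    is routed to this routine when ``method='bareiss'`` is requested:
    >>> from sympy import Matrix
    >>> M = Matrix([[1, 0, 2, 0], [0, 1, 0, 2], [3, 0, 1, 0], [0, 3, 0, 1]])
    >>> M.det(method='bareiss')
    25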
"""
# Recursively implemented Bareiss' algorithm as per Deanna Richelle Leggett's
# thesis http://www.math.usm.edu/perry/Research/Thesis_DRL.pdf
def bareiss(mat, cumm=1):
if mat.rows == 0:
return mat.one
elif mat.rows == 1:
return mat[0, 0]
# find a pivot and extract the remaining matrix
# With the default iszerofunc, _find_reasonable_pivot slows down
# the computation by the factor of 2.5 in one test.
# Relevant issues: #10279 and #13877.
pivot_pos, pivot_val, _, _ = _find_reasonable_pivot(mat[:, 0], iszerofunc=iszerofunc)
if pivot_pos is None:
return mat.zero
# if we have a valid pivot, we'll do a "row swap", so keep the
# sign of the det
sign = (-1) ** (pivot_pos % 2)
# we want every row but the pivot row and every column
rows = list(i for i in range(mat.rows) if i != pivot_pos)
cols = list(range(mat.cols))
tmp_mat = mat.extract(rows, cols)
def entry(i, j):
ret = (pivot_val*tmp_mat[i, j + 1] - mat[pivot_pos, j + 1]*tmp_mat[i, 0]) / cumm
if _get_intermediate_simp_bool(True):
return _dotprodsimp(ret)
elif not ret.is_Atom:
return cancel(ret)
return ret
return sign*bareiss(M._new(mat.rows - 1, mat.cols - 1, entry), pivot_val)
if not M.is_square:
raise NonSquareMatrixError()
if M.rows == 0:
return M.one
# sympy/matrices/tests/test_matrices.py contains a test that
# suggests that the determinant of a 0 x 0 matrix is one, by
# convention.
return bareiss(M)
def _det_berkowitz(M):
""" Use the Berkowitz algorithm to compute the determinant."""
if not M.is_square:
raise NonSquareMatrixError()
if M.rows == 0:
return M.one
# sympy/matrices/tests/test_matrices.py contains a test that
# suggests that the determinant of a 0 x 0 matrix is one, by
# convention.
berk_vector = _berkowitz_vector(M)
return (-1)**(len(berk_vector) - 1) * berk_vector[-1]
# This function is a candidate for caching if it gets implemented for matrices.
def _det_LU(M, iszerofunc=_iszero, simpfunc=None):
""" Computes the determinant of a matrix from its LU decomposition.
This function uses the LU decomposition computed by
LUDecomposition_Simple().
The keyword arguments iszerofunc and simpfunc are passed to
LUDecomposition_Simple().
iszerofunc is a callable that returns a boolean indicating if its
input is zero, or None if it cannot make the determination.
simpfunc is a callable that simplifies its input.
    The default is simpfunc=None, which indicates that the pivot search
algorithm should not attempt to simplify any candidate pivots.
If simpfunc fails to simplify its input, then it must return its input
instead of a copy.
Parameters
==========
iszerofunc : function, optional
The function to use to determine zeros when doing an LU decomposition.
Defaults to ``lambda x: x.is_zero``.
simpfunc : function, optional
The simplification function to use when looking for zeros for pivots.
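    Examples
    ========
    A hedged usage sketch via the public ``det`` entry point (the LU based
    path is selected with ``method='lu'`` for matrices larger than 3 x 3):
    >>> from sympy import Matrix
    >>> M = Matrix([[2, 1, 0, 0], [1, 1, 0, 0], [0, 0, 3, 2], [0, 0, 4, 3]])
    >>> M.det(method='lu')
    1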
"""
if not M.is_square:
raise NonSquareMatrixError()
if M.rows == 0:
return M.one
# sympy/matrices/tests/test_matrices.py contains a test that
# suggests that the determinant of a 0 x 0 matrix is one, by
# convention.
lu, row_swaps = M.LUdecomposition_Simple(iszerofunc=iszerofunc,
simpfunc=simpfunc)
# P*A = L*U => det(A) = det(L)*det(U)/det(P) = det(P)*det(U).
# Lower triangular factor L encoded in lu has unit diagonal => det(L) = 1.
# P is a permutation matrix => det(P) in {-1, 1} => 1/det(P) = det(P).
# LUdecomposition_Simple() returns a list of row exchange index pairs, rather
# than a permutation matrix, but det(P) = (-1)**len(row_swaps).
# Avoid forming the potentially time consuming product of U's diagonal entries
# if the product is zero.
# Bottom right entry of U is 0 => det(A) = 0.
# It may be impossible to determine if this entry of U is zero when it is symbolic.
if iszerofunc(lu[lu.rows-1, lu.rows-1]):
return M.zero
# Compute det(P)
det = -M.one if len(row_swaps)%2 else M.one
# Compute det(U) by calculating the product of U's diagonal entries.
# The upper triangular portion of lu is the upper triangular portion of the
# U factor in the LU decomposition.
for k in range(lu.rows):
det *= lu[k, k]
# return det(P)*det(U)
return det
def _minor(M, i, j, method="berkowitz"):
"""Return the (i,j) minor of ``M``. That is,
return the determinant of the matrix obtained by deleting
the `i`th row and `j`th column from ``M``.
Parameters
==========
i, j : int
The row and column to exclude to obtain the submatrix.
method : string, optional
Method to use to find the determinant of the submatrix, can be
"bareiss", "berkowitz" or "lu".
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> M.minor(1, 1)
-12
See Also
========
minor_submatrix
cofactor
det
"""
if not M.is_square:
raise NonSquareMatrixError()
return M.minor_submatrix(i, j).det(method=method)
def _minor_submatrix(M, i, j):
"""Return the submatrix obtained by removing the `i`th row
and `j`th column from ``M`` (works with Pythonic negative indices).
Parameters
==========
i, j : int
The row and column to exclude to obtain the submatrix.
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> M.minor_submatrix(1, 1)
Matrix([
[1, 3],
[7, 9]])
See Also
========
minor
cofactor
"""
if i < 0:
i += M.rows
if j < 0:
j += M.cols
if not 0 <= i < M.rows or not 0 <= j < M.cols:
raise ValueError("`i` and `j` must satisfy 0 <= i < ``M.rows`` "
"(%d)" % M.rows + "and 0 <= j < ``M.cols`` (%d)." % M.cols)
rows = [a for a in range(M.rows) if a != i]
cols = [a for a in range(M.cols) if a != j]
return M.extract(rows, cols)
|
80351089d8392f32e7e2f3f69bd50553eeb9a9d23f7bbba30718cc56b2dea547 | from __future__ import division, print_function
from sympy.core.numbers import mod_inverse
from .common import MatrixError, NonSquareMatrixError, NonInvertibleMatrixError
from .utilities import _iszero
def _pinv_full_rank(M):
"""Subroutine for full row or column rank matrices.
    For full row rank matrices, the inverse of ``A * A.H`` exists.
    For full column rank matrices, the inverse of ``A.H * A`` exists.
    This routine handles both cases by checking the shape and
    branching accordingly.
"""
if M.is_zero_matrix:
return M.H
if M.rows >= M.cols:
return M.H.multiply(M).inv().multiply(M.H)
else:
return M.H.multiply(M.multiply(M.H).inv())
def _pinv_rank_decomposition(M):
"""Subroutine for rank decomposition
    With rank decompositions, `A` can be decomposed into two full-
    rank matrices, and the pseudoinverse of each factor can be
    computed individually.
"""
if M.is_zero_matrix:
return M.H
B, C = M.rank_decomposition()
Bp = _pinv_full_rank(B)
Cp = _pinv_full_rank(C)
return Cp.multiply(Bp)
def _pinv_diagonalization(M):
"""Subroutine using diagonalization
This routine can sometimes fail if SymPy's eigenvalue
computation is not reliable.
"""
if M.is_zero_matrix:
return M.H
A = M
AH = M.H
try:
if M.rows >= M.cols:
P, D = AH.multiply(A).diagonalize(normalize=True)
D_pinv = D.applyfunc(lambda x: 0 if _iszero(x) else 1 / x)
return P.multiply(D_pinv).multiply(P.H).multiply(AH)
else:
P, D = A.multiply(AH).diagonalize(
normalize=True)
D_pinv = D.applyfunc(lambda x: 0 if _iszero(x) else 1 / x)
return AH.multiply(P).multiply(D_pinv).multiply(P.H)
except MatrixError:
raise NotImplementedError(
'pinv for rank-deficient matrices where '
'diagonalization of A.H*A fails is not supported yet.')
def _pinv(M, method='RD'):
"""Calculate the Moore-Penrose pseudoinverse of the matrix.
The Moore-Penrose pseudoinverse exists and is unique for any matrix.
If the matrix is invertible, the pseudoinverse is the same as the
inverse.
Parameters
==========
method : String, optional
Specifies the method for computing the pseudoinverse.
If ``'RD'``, Rank-Decomposition will be used.
If ``'ED'``, Diagonalization will be used.
Examples
========
Computing pseudoinverse by rank decomposition :
>>> from sympy import Matrix
>>> A = Matrix([[1, 2, 3], [4, 5, 6]])
>>> A.pinv()
Matrix([
[-17/18, 4/9],
[ -1/9, 1/9],
[ 13/18, -2/9]])
Computing pseudoinverse by diagonalization :
>>> B = A.pinv(method='ED')
>>> B.simplify()
>>> B
Matrix([
[-17/18, 4/9],
[ -1/9, 1/9],
[ 13/18, -2/9]])
See Also
========
inv
pinv_solve
References
==========
.. [1] https://en.wikipedia.org/wiki/Moore-Penrose_pseudoinverse
"""
# Trivial case: pseudoinverse of all-zero matrix is its transpose.
if M.is_zero_matrix:
return M.H
if method == 'RD':
return _pinv_rank_decomposition(M)
elif method == 'ED':
return _pinv_diagonalization(M)
else:
raise ValueError('invalid pinv method %s' % repr(method))
def _inv_mod(M, m):
r"""
Returns the inverse of the matrix `K` (mod `m`), if it exists.
Method to find the matrix inverse of `K` (mod `m`) implemented in this function:
* Compute `\mathrm{adj}(K) = \mathrm{cof}(K)^t`, the adjoint matrix of `K`.
* Compute `r = 1/\mathrm{det}(K) \pmod m`.
* `K^{-1} = r\cdot \mathrm{adj}(K) \pmod m`.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix(2, 2, [1, 2, 3, 4])
>>> A.inv_mod(5)
Matrix([
[3, 1],
[4, 2]])
>>> A.inv_mod(3)
Matrix([
[1, 1],
[0, 1]])
"""
if not M.is_square:
raise NonSquareMatrixError()
N = M.cols
det_K = M.det()
det_inv = None
try:
det_inv = mod_inverse(det_K, m)
except ValueError:
raise NonInvertibleMatrixError('Matrix is not invertible (mod %d)' % m)
K_adj = M.adjugate()
K_inv = M.__class__(N, N,
[det_inv * K_adj[i, j] % m for i in range(N) for j in range(N)])
return K_inv
def _verify_invertible(M, iszerofunc=_iszero):
"""Initial check to see if a matrix is invertible. Raises or returns
determinant for use in _inv_ADJ."""
if not M.is_square:
raise NonSquareMatrixError("A Matrix must be square to invert.")
d = M.det(method='berkowitz')
zero = d.equals(0)
if zero is None: # if equals() can't decide, will rref be able to?
ok = M.rref(simplify=True)[0]
zero = any(iszerofunc(ok[j, j]) for j in range(ok.rows))
if zero:
raise NonInvertibleMatrixError("Matrix det == 0; not invertible.")
return d
def _inv_ADJ(M, iszerofunc=_iszero):
"""Calculates the inverse using the adjugate matrix and a determinant.
See Also
========
inv
inverse_GE
inverse_LU
inverse_CH
inverse_LDL
"""
d = _verify_invertible(M, iszerofunc=iszerofunc)
return M.adjugate() / d
def _inv_GE(M, iszerofunc=_iszero):
"""Calculates the inverse using Gaussian elimination.
See Also
========
inv
inverse_ADJ
inverse_LU
inverse_CH
inverse_LDL
"""
from .dense import Matrix
if not M.is_square:
raise NonSquareMatrixError("A Matrix must be square to invert.")
big = Matrix.hstack(M.as_mutable(), Matrix.eye(M.rows))
red = big.rref(iszerofunc=iszerofunc, simplify=True)[0]
if any(iszerofunc(red[j, j]) for j in range(red.rows)):
raise NonInvertibleMatrixError("Matrix det == 0; not invertible.")
return M._new(red[:, big.rows:])
def _inv_LU(M, iszerofunc=_iszero):
"""Calculates the inverse using LU decomposition.
See Also
========
inv
inverse_ADJ
inverse_GE
inverse_CH
inverse_LDL
"""
_verify_invertible(M, iszerofunc=iszerofunc)
return M.LUsolve(M.eye(M.rows), iszerofunc=_iszero)
def _inv_CH(M, iszerofunc=_iszero):
"""Calculates the inverse using cholesky decomposition.
See Also
========
inv
inverse_ADJ
inverse_GE
inverse_LU
inverse_LDL
"""
_verify_invertible(M, iszerofunc=iszerofunc)
return M.cholesky_solve(M.eye(M.rows))
def _inv_LDL(M, iszerofunc=_iszero):
"""Calculates the inverse using LDL decomposition.
See Also
========
inv
inverse_ADJ
inverse_GE
inverse_LU
inverse_CH
"""
_verify_invertible(M, iszerofunc=iszerofunc)
return M.LDLsolve(M.eye(M.rows))
def _inv_QR(M, iszerofunc=_iszero):
"""Calculates the inverse using QR decomposition.
See Also
========
inv
inverse_ADJ
inverse_GE
inverse_CH
inverse_LDL
"""
_verify_invertible(M, iszerofunc=iszerofunc)
return M.QRsolve(M.eye(M.rows))
def _inv_block(M, iszerofunc=_iszero):
"""Calculates the inverse using BLOCKWISE inversion.
See Also
========
inv
inverse_ADJ
inverse_GE
inverse_CH
inverse_LDL
"""
from sympy import BlockMatrix
i = M.shape[0]
    if i <= 20:
return M.inv(method="LU", iszerofunc=_iszero)
    A = M[:i // 2, :i // 2]
B = M[:i // 2, i // 2:]
C = M[i // 2:, :i // 2]
D = M[i // 2:, i // 2:]
try:
D_inv = _inv_block(D)
except NonInvertibleMatrixError:
return M.inv(method="LU", iszerofunc=_iszero)
B_D_i = B*D_inv
BDC = B_D_i*C
A_n = A - BDC
try:
A_n = _inv_block(A_n)
except NonInvertibleMatrixError:
return M.inv(method="LU", iszerofunc=_iszero)
B_n = -A_n*B_D_i
dc = D_inv*C
C_n = -dc*A_n
D_n = D_inv + dc*-B_n
nn = BlockMatrix([[A_n, B_n], [C_n, D_n]]).as_explicit()
return nn
def _inv(M, method=None, iszerofunc=_iszero, try_block_diag=False):
"""
Return the inverse of a matrix using the method indicated. Default for
    dense matrices is Gaussian elimination, default for sparse matrices is LDL.
Parameters
==========
    method : ('GE', 'LU', 'ADJ', 'CH', 'LDL', 'QR', 'BLOCK')
iszerofunc : function, optional
Zero-testing function to use.
try_block_diag : bool, optional
If True then will try to form block diagonal matrices using the
method get_diag_blocks(), invert these individually, and then
reconstruct the full inverse matrix.
Examples
========
>>> from sympy import SparseMatrix, Matrix
>>> A = SparseMatrix([
... [ 2, -1, 0],
... [-1, 2, -1],
... [ 0, 0, 2]])
>>> A.inv('CH')
Matrix([
[2/3, 1/3, 1/6],
[1/3, 2/3, 1/3],
[ 0, 0, 1/2]])
>>> A.inv(method='LDL') # use of 'method=' is optional
Matrix([
[2/3, 1/3, 1/6],
[1/3, 2/3, 1/3],
[ 0, 0, 1/2]])
>>> A * _
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> A = Matrix(A)
>>> A.inv('CH')
Matrix([
[2/3, 1/3, 1/6],
[1/3, 2/3, 1/3],
[ 0, 0, 1/2]])
>>> A.inv('ADJ') == A.inv('GE') == A.inv('LU') == A.inv('CH') == A.inv('LDL') == A.inv('QR')
True
Notes
=====
According to the ``method`` keyword, it calls the appropriate method:
GE .... inverse_GE(); default for dense matrices
LU .... inverse_LU()
ADJ ... inverse_ADJ()
CH ... inverse_CH()
LDL ... inverse_LDL(); default for sparse matrices
QR ... inverse_QR()
Note, the GE and LU methods may require the matrix to be simplified
before it is inverted in order to properly detect zeros during
pivoting. In difficult cases a custom zero detection function can
be provided by setting the ``iszerofunc`` argument to a function that
should return True if its argument is zero. The ADJ routine computes
the determinant and uses that to detect singular matrices in addition
to testing for zeros on the diagonal.
See Also
========
inverse_ADJ
inverse_GE
inverse_LU
inverse_CH
inverse_LDL
Raises
======
ValueError
If the determinant of the matrix is zero.
"""
from sympy.matrices import diag, SparseMatrix
if method is None:
method = 'LDL' if isinstance(M, SparseMatrix) else 'GE'
if try_block_diag:
blocks = M.get_diag_blocks()
r = []
for block in blocks:
r.append(block.inv(method=method, iszerofunc=iszerofunc))
return diag(*r)
if method == "GE":
rv = M.inverse_GE(iszerofunc=iszerofunc)
elif method == "LU":
rv = M.inverse_LU(iszerofunc=iszerofunc)
elif method == "ADJ":
rv = M.inverse_ADJ(iszerofunc=iszerofunc)
elif method == "CH":
rv = M.inverse_CH(iszerofunc=iszerofunc)
elif method == "LDL":
rv = M.inverse_LDL(iszerofunc=iszerofunc)
elif method == "QR":
rv = M.inverse_QR(iszerofunc=iszerofunc)
elif method == "BLOCK":
rv = M.inverse_BLOCK(iszerofunc=iszerofunc)
else:
raise ValueError("Inversion method unrecognized")
return M._new(rv)
|
8f646d94e99c1d2f6e77849e4ac9924d354b73d5e99e7814f3b44cab6e766c27 | """
Solution of equations using dense matrices.
The dense matrix is stored as a list of lists.
"""
import copy
from sympy.core.power import isqrt
from sympy.core.symbol import symbols
from sympy.matrices.densetools import (
augment, col, conjugate_transpose, eye, rowadd, rowmul)
from sympy.utilities.exceptions import SymPyDeprecationWarning
SymPyDeprecationWarning(
feature="densesolve",
issue=12695,
deprecated_since_version="1.1").warn()
def row_echelon(matlist, K):
"""
Returns the row echelon form of a matrix with diagonal elements
reduced to 1.
Examples
========
>>> from sympy.matrices.densesolve import row_echelon
>>> from sympy import QQ
>>> a = [
... [QQ(3), QQ(7), QQ(4)],
... [QQ(2), QQ(4), QQ(5)],
... [QQ(6), QQ(2), QQ(3)]]
>>> row_echelon(a, QQ)
[[1, 7/3, 4/3], [0, 1, -7/2], [0, 0, 1]]
See Also
========
rref
"""
result_matlist = copy.deepcopy(matlist)
nrow = len(result_matlist)
for i in range(nrow):
if (result_matlist[i][i] != 1 and result_matlist[i][i] != 0):
rowmul(result_matlist, i, 1/result_matlist[i][i], K)
for j in range(i + 1, nrow):
if (result_matlist[j][i] != 0):
rowadd(result_matlist, j, i, -result_matlist[j][i], K)
return result_matlist
def rref(matlist, K):
"""
Returns the reduced row echelon form of a Matrix.
Examples
========
>>> from sympy.matrices.densesolve import rref
>>> from sympy import QQ
>>> a = [
... [QQ(1), QQ(2), QQ(1)],
... [QQ(-2), QQ(-3), QQ(1)],
... [QQ(3), QQ(5), QQ(0)]]
>>> rref(a, QQ)
[[1, 0, -5], [0, 1, 3], [0, 0, 0]]
See Also
========
row_echelon
"""
result_matlist = copy.deepcopy(matlist)
result_matlist = row_echelon(result_matlist, K)
nrow = len(result_matlist)
for i in range(nrow):
if result_matlist[i][i] == 1:
for j in range(i):
rowadd(result_matlist, j, i, -result_matlist[j][i], K)
return result_matlist
def LU(matlist, K, reverse = 0):
"""
It computes the LU decomposition of a matrix and returns L and U
matrices.
Examples
========
>>> from sympy.matrices.densesolve import LU
>>> from sympy import QQ
>>> a = [
... [QQ(1), QQ(2), QQ(3)],
... [QQ(2), QQ(-4), QQ(6)],
... [QQ(3), QQ(-9), QQ(-3)]]
>>> LU(a, QQ)
([[1, 0, 0], [2, 1, 0], [3, 15/8, 1]], [[1, 2, 3], [0, -8, 0], [0, 0, -12]])
See Also
========
upper_triangle
lower_triangle
"""
nrow = len(matlist)
new_matlist1, new_matlist2 = eye(nrow, K), copy.deepcopy(matlist)
for i in range(nrow):
for j in range(i + 1, nrow):
if (new_matlist2[j][i] != 0):
new_matlist1[j][i] = new_matlist2[j][i]/new_matlist2[i][i]
rowadd(new_matlist2, j, i, -new_matlist2[j][i]/new_matlist2[i][i], K)
return new_matlist1, new_matlist2
def cholesky(matlist, K):
"""
    Performs the Cholesky decomposition of a Hermitian matrix and
    returns L and its conjugate transpose.
Examples
========
>>> from sympy.matrices.densesolve import cholesky
>>> from sympy import QQ
>>> cholesky([[QQ(25), QQ(15), QQ(-5)], [QQ(15), QQ(18), QQ(0)], [QQ(-5), QQ(0), QQ(11)]], QQ)
([[5, 0, 0], [3, 3, 0], [-1, 1, 3]], [[5, 3, -1], [0, 3, 1], [0, 0, 3]])
See Also
========
cholesky_solve
"""
new_matlist = copy.deepcopy(matlist)
nrow = len(new_matlist)
L = eye(nrow, K)
for i in range(nrow):
for j in range(i + 1):
a = K.zero
for k in range(j):
a += L[i][k]*L[j][k]
if i == j:
L[i][j] = isqrt(new_matlist[i][j] - a)
else:
L[i][j] = (new_matlist[i][j] - a)/L[j][j]
return L, conjugate_transpose(L, K)
def LDL(matlist, K):
"""
    Performs the LDL decomposition of a Hermitian matrix and returns L, D and
transpose of L. Only applicable to rational entries.
Examples
========
>>> from sympy.matrices.densesolve import LDL
>>> from sympy import QQ
>>> a = [
... [QQ(4), QQ(12), QQ(-16)],
... [QQ(12), QQ(37), QQ(-43)],
... [QQ(-16), QQ(-43), QQ(98)]]
>>> LDL(a, QQ)
([[1, 0, 0], [3, 1, 0], [-4, 5, 1]], [[4, 0, 0], [0, 1, 0], [0, 0, 9]], [[1, 3, -4], [0, 1, 5], [0, 0, 1]])
"""
new_matlist = copy.deepcopy(matlist)
nrow = len(new_matlist)
L, D = eye(nrow, K), eye(nrow, K)
for i in range(nrow):
for j in range(i + 1):
a = K.zero
for k in range(j):
a += L[i][k]*L[j][k]*D[k][k]
if i == j:
D[j][j] = new_matlist[j][j] - a
else:
L[i][j] = (new_matlist[i][j] - a)/D[j][j]
return L, D, conjugate_transpose(L, K)
def upper_triangle(matlist, K):
"""
    Transforms a given matrix to an upper triangular matrix by performing
row operations on it.
Examples
========
>>> from sympy.matrices.densesolve import upper_triangle
>>> from sympy import QQ
>>> a = [
... [QQ(4,1), QQ(12,1), QQ(-16,1)],
... [QQ(12,1), QQ(37,1), QQ(-43,1)],
... [QQ(-16,1), QQ(-43,1), QQ(98,1)]]
>>> upper_triangle(a, QQ)
[[4, 12, -16], [0, 1, 5], [0, 0, 9]]
See Also
========
LU
"""
copy_matlist = copy.deepcopy(matlist)
lower_triangle, upper_triangle = LU(copy_matlist, K)
return upper_triangle
def lower_triangle(matlist, K):
"""
    Transforms a given matrix to a lower triangular matrix by performing
row operations on it.
Examples
========
>>> from sympy.matrices.densesolve import lower_triangle
>>> from sympy import QQ
>>> a = [
... [QQ(4,1), QQ(12,1), QQ(-16)],
... [QQ(12,1), QQ(37,1), QQ(-43,1)],
... [QQ(-16,1), QQ(-43,1), QQ(98,1)]]
>>> lower_triangle(a, QQ)
[[1, 0, 0], [3, 1, 0], [-4, 5, 1]]
See Also
========
LU
"""
copy_matlist = copy.deepcopy(matlist)
lower_triangle, upper_triangle = LU(copy_matlist, K, reverse = 1)
return lower_triangle
def rref_solve(matlist, variable, constant, K):
"""
Solves a system of equations using reduced row echelon form given
a matrix of coefficients, a vector of variables and a vector of constants.
Examples
========
>>> from sympy.matrices.densesolve import rref_solve
>>> from sympy import QQ
>>> from sympy import Dummy
>>> x, y, z = Dummy('x'), Dummy('y'), Dummy('z')
>>> coefficients = [
... [QQ(25), QQ(15), QQ(-5)],
... [QQ(15), QQ(18), QQ(0)],
... [QQ(-5), QQ(0), QQ(11)]]
>>> constants = [
... [QQ(2)],
... [QQ(3)],
... [QQ(1)]]
>>> variables = [
... [x],
... [y],
... [z]]
>>> rref_solve(coefficients, variables, constants, QQ)
[[-1/225], [23/135], [4/45]]
See Also
========
row_echelon
augment
"""
new_matlist = copy.deepcopy(matlist)
augmented = augment(new_matlist, constant, K)
solution = rref(augmented, K)
return col(solution, -1)
def LU_solve(matlist, variable, constant, K):
"""
Solves a system of equations using LU decomposition given a matrix
of coefficients, a vector of variables and a vector of constants.
Examples
========
>>> from sympy.matrices.densesolve import LU_solve
>>> from sympy import QQ
>>> from sympy import Dummy
>>> x, y, z = Dummy('x'), Dummy('y'), Dummy('z')
>>> coefficients = [
... [QQ(2), QQ(-1), QQ(-2)],
... [QQ(-4), QQ(6), QQ(3)],
... [QQ(-4), QQ(-2), QQ(8)]]
>>> variables = [
... [x],
... [y],
... [z]]
>>> constants = [
... [QQ(-1)],
... [QQ(13)],
... [QQ(-6)]]
>>> LU_solve(coefficients, variables, constants, QQ)
[[2], [3], [1]]
See Also
========
LU
forward_substitution
backward_substitution
"""
new_matlist = copy.deepcopy(matlist)
nrow = len(new_matlist)
L, U = LU(new_matlist, K)
y = [[i] for i in symbols('y:%i' % nrow)]
forward_substitution(L, y, constant, K)
backward_substitution(U, variable, y, K)
return variable
def cholesky_solve(matlist, variable, constant, K):
"""
Solves a system of equations using Cholesky decomposition given
a matrix of coefficients, a vector of variables and a vector of constants.
Examples
========
>>> from sympy.matrices.densesolve import cholesky_solve
>>> from sympy import QQ
>>> from sympy import Dummy
>>> x, y, z = Dummy('x'), Dummy('y'), Dummy('z')
>>> coefficients = [
... [QQ(25), QQ(15), QQ(-5)],
... [QQ(15), QQ(18), QQ(0)],
... [QQ(-5), QQ(0), QQ(11)]]
>>> variables = [
... [x],
... [y],
... [z]]
>>> constants = [
... [QQ(2)],
... [QQ(3)],
... [QQ(1)]]
>>> cholesky_solve([[QQ(25), QQ(15), QQ(-5)], [QQ(15), QQ(18), QQ(0)], [QQ(-5), QQ(0), QQ(11)]], [[x], [y], [z]], [[QQ(2)], [QQ(3)], [QQ(1)]], QQ)
[[-1/225], [23/135], [4/45]]
See Also
========
cholesky
forward_substitution
backward_substitution
"""
new_matlist = copy.deepcopy(matlist)
nrow = len(new_matlist)
L, Lstar = cholesky(new_matlist, K)
y = [[i] for i in symbols('y:%i' % nrow)]
forward_substitution(L, y, constant, K)
backward_substitution(Lstar, variable, y, K)
return variable
def forward_substitution(lower_triangle, variable, constant, K):
"""
Performs forward substitution given a lower triangular matrix, a
vector of variables and a vector of constants.
Examples
========
>>> from sympy.matrices.densesolve import forward_substitution
>>> from sympy import QQ
>>> from sympy import Dummy
>>> x, y, z = Dummy('x'), Dummy('y'), Dummy('z')
>>> a = [
... [QQ(1), QQ(0), QQ(0)],
... [QQ(-2), QQ(1), QQ(0)],
... [QQ(-2), QQ(-1), QQ(1)]]
>>> variables = [
... [x],
... [y],
... [z]]
>>> constants = [
... [QQ(-1)],
... [QQ(13)],
... [QQ(-6)]]
>>> forward_substitution(a, variables, constants, QQ)
[[-1], [11], [3]]
See Also
========
LU_solve
cholesky_solve
"""
copy_lower_triangle = copy.deepcopy(lower_triangle)
nrow = len(copy_lower_triangle)
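# forward substitution for L*x = b, working top down:
# x[i] = (b[i] - sum(L[i][j]*x[j] for j < i)) / L[i][i]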
for i in range(nrow):
a = K.zero
for j in range(i):
a += copy_lower_triangle[i][j]*variable[j][0]
variable[i][0] = (constant[i][0] - a)/copy_lower_triangle[i][i]
return variable
def backward_substitution(upper_triangle, variable, constant, K):
"""
Performs backward substitution given an upper triangular matrix,
a vector of variables and a vector of constants.
Examples
========
>>> from sympy.matrices.densesolve import backward_substitution
>>> from sympy import QQ
>>> from sympy import Dummy
>>> x, y, z = Dummy('x'), Dummy('y'), Dummy('z')
>>> a = [
... [QQ(2), QQ(-1), QQ(-2)],
... [QQ(0), QQ(4), QQ(-1)],
... [QQ(0), QQ(0), QQ(3)]]
>>> variables = [
... [x],
... [y],
... [z]]
>>> constants = [
... [QQ(-1)],
... [QQ(11)],
... [QQ(3)]]
>>> backward_substitution(a, variables, constants, QQ)
[[2], [3], [1]]
See Also
========
LU_solve
cholesky_solve
"""
copy_upper_triangle = copy.deepcopy(upper_triangle)
nrow = len(copy_upper_triangle)
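# backward substitution for U*x = b, working bottom up:
# x[i] = (b[i] - sum(U[i][j]*x[j] for j > i)) / U[i][i]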
for i in reversed(range(nrow)):
a = K.zero
for j in reversed(range(i + 1, nrow)):
a += copy_upper_triangle[i][j]*variable[j][0]
variable[i][0] = (constant[i][0] - a)/copy_upper_triangle[i][i]
return variable
|
fbead3c2e19dc6187676135f00970d1cc4664e880ce9eac315a51b3e069e9feb | from __future__ import division, print_function
from types import FunctionType
from sympy.simplify.simplify import (
simplify as _simplify, dotprodsimp as _dotprodsimp)
from .utilities import _get_intermediate_simp, _iszero
from .determinant import _find_reasonable_pivot
def _row_reduce_list(mat, rows, cols, one, iszerofunc, simpfunc,
normalize_last=True, normalize=True, zero_above=True):
"""Row reduce a flat list representation of a matrix and return a tuple
(rref_matrix, pivot_cols, swaps) where ``rref_matrix`` is a flat list,
``pivot_cols`` are the pivot columns and ``swaps`` are any row swaps that
were used in the process of row reduction.
Parameters
==========
mat : list
list of matrix elements, must be ``rows`` * ``cols`` in length
rows, cols : integer
number of rows and columns in flat list representation
one : SymPy object
represents the value one, from ``Matrix.one``
iszerofunc : determines if an entry can be used as a pivot
simpfunc : used to simplify elements and test if they are
zero if ``iszerofunc`` returns `None`
normalize_last : indicates whether all row reduction should
happen in a fraction-free manner and then the rows are
normalized (so that the pivots are 1), or whether
rows should be normalized along the way (like the naive
row reduction algorithm)
normalize : whether pivot rows should be normalized so that
the pivot value is 1
zero_above : whether entries above the pivot should be zeroed.
If ``zero_above=False``, an echelon matrix will be returned.
"""
def get_col(i):
return mat[i::cols]
def row_swap(i, j):
mat[i*cols:(i + 1)*cols], mat[j*cols:(j + 1)*cols] = \
mat[j*cols:(j + 1)*cols], mat[i*cols:(i + 1)*cols]
def cross_cancel(a, i, b, j):
"""Does the row op row[i] = a*row[i] - b*row[j]"""
q = (j - i)*cols
for p in range(i*cols, (i + 1)*cols):
mat[p] = isimp(a*mat[p] - b*mat[p + q])
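# isimp applies the configured intermediate simplification to the entries
# produced by the row operations below, keeping expressions compact during
# elimination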
isimp = _get_intermediate_simp(_dotprodsimp)
piv_row, piv_col = 0, 0
pivot_cols = []
swaps = []
# use a fraction free method to zero above and below each pivot
while piv_col < cols and piv_row < rows:
pivot_offset, pivot_val, \
assumed_nonzero, newly_determined = _find_reasonable_pivot(
get_col(piv_col)[piv_row:], iszerofunc, simpfunc)
# _find_reasonable_pivot may have simplified some things
# in the process. Let's not let them go to waste
for (offset, val) in newly_determined:
offset += piv_row
mat[offset*cols + piv_col] = val
if pivot_offset is None:
piv_col += 1
continue
pivot_cols.append(piv_col)
if pivot_offset != 0:
row_swap(piv_row, pivot_offset + piv_row)
swaps.append((piv_row, pivot_offset + piv_row))
# if we aren't normalizing last, we normalize
# before we zero the other rows
if normalize_last is False:
i, j = piv_row, piv_col
mat[i*cols + j] = one
for p in range(i*cols + j + 1, (i + 1)*cols):
mat[p] = isimp(mat[p] / pivot_val)
# after normalizing, the pivot value is 1
pivot_val = one
# zero above and below the pivot
for row in range(rows):
# don't zero our current row
if row == piv_row:
continue
# don't zero above the pivot unless we're told.
if zero_above is False and row < piv_row:
continue
# if we're already a zero, don't do anything
val = mat[row*cols + piv_col]
if iszerofunc(val):
continue
cross_cancel(pivot_val, row, val, piv_row)
piv_row += 1
# normalize each row
if normalize_last is True and normalize is True:
for piv_i, piv_j in enumerate(pivot_cols):
pivot_val = mat[piv_i*cols + piv_j]
mat[piv_i*cols + piv_j] = one
for p in range(piv_i*cols + piv_j + 1, (piv_i + 1)*cols):
mat[p] = isimp(mat[p] / pivot_val)
return mat, tuple(pivot_cols), tuple(swaps)
# This function is a candidate for caching if it gets implemented for matrices.
def _row_reduce(M, iszerofunc, simpfunc, normalize_last=True,
normalize=True, zero_above=True):
mat, pivot_cols, swaps = _row_reduce_list(list(M), M.rows, M.cols, M.one,
iszerofunc, simpfunc, normalize_last=normalize_last,
normalize=normalize, zero_above=zero_above)
return M._new(M.rows, M.cols, mat), pivot_cols, swaps
def _is_echelon(M, iszerofunc=_iszero):
"""Returns `True` if the matrix is in echelon form. That is, all rows of
zeros are at the bottom, and below each leading non-zero in a row are
exclusively zeros."""
if M.rows <= 0 or M.cols <= 0:
return True
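# every entry below the top of the first column must be zero; the rest of
# the matrix must itself be in echelon form, dropping the first row only
# when its leading entry is non-zero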
zeros_below = all(iszerofunc(t) for t in M[1:, 0])
if iszerofunc(M[0, 0]):
return zeros_below and _is_echelon(M[:, 1:], iszerofunc)
return zeros_below and _is_echelon(M[1:, 1:], iszerofunc)
def _echelon_form(M, iszerofunc=_iszero, simplify=False, with_pivots=False):
"""Returns a matrix row-equivalent to ``M`` that is in echelon form. Note
that the echelon form of a matrix is *not* unique; however, properties like the
row space and the null space are preserved.
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 2], [3, 4]])
>>> M.echelon_form()
Matrix([
[1, 2],
[0, -2]])
"""
simpfunc = simplify if isinstance(simplify, FunctionType) else _simplify
mat, pivots, _ = _row_reduce(M, iszerofunc, simpfunc,
normalize_last=True, normalize=False, zero_above=False)
if with_pivots:
return mat, pivots
return mat
# This function is a candidate for caching if it gets implemented for matrices.
def _rank(M, iszerofunc=_iszero, simplify=False):
"""Returns the rank of a matrix.
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x
>>> m = Matrix([[1, 2], [x, 1 - 1/x]])
>>> m.rank()
2
>>> n = Matrix(3, 3, range(1, 10))
>>> n.rank()
2
"""
def _permute_complexity_right(M, iszerofunc):
"""Permute columns with complicated elements as
far right as they can go. Since the ``sympy`` row reduction
algorithms start on the left, having complexity right-shifted
speeds things up.
Returns a tuple (mat, perm) where perm is a permutation
of the columns to perform to shift the complex columns right, and mat
is the permuted matrix."""
def complexity(i):
# the complexity of a column will be judged by how many
# element's zero-ness cannot be determined
return sum(1 if iszerofunc(e) is None else 0 for e in M[:, i])
complex = [(complexity(i), i) for i in range(M.cols)]
perm = [j for (i, j) in sorted(complex)]
return (M.permute(perm, orientation='cols'), perm)
simpfunc = simplify if isinstance(simplify, FunctionType) else _simplify
# for small matrices, try to compute the rank explicitly;
# if is_zero on the elements cannot answer the question,
# fall back to the full row-reduction routine.
if M.rows <= 0 or M.cols <= 0:
return 0
if M.rows <= 1 or M.cols <= 1:
zeros = [iszerofunc(x) for x in M]
if False in zeros:
return 1
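# a 2x2 matrix is handled via its determinant: rank 0 if every entry is
# zero, rank 1 if the determinant is zero but some entry is not, rank 2 if
# the determinant is non-zero; undetermined cases fall through below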
if M.rows == 2 and M.cols == 2:
zeros = [iszerofunc(x) for x in M]
if False not in zeros and None not in zeros:
return 0
d = M.det()
if iszerofunc(d) and False in zeros:
return 1
if iszerofunc(d) is False:
return 2
mat, _ = _permute_complexity_right(M, iszerofunc=iszerofunc)
_, pivots, _ = _row_reduce(mat, iszerofunc, simpfunc, normalize_last=True,
normalize=False, zero_above=False)
return len(pivots)
def _rref(M, iszerofunc=_iszero, simplify=False, pivots=True,
normalize_last=True):
"""Return reduced row-echelon form of matrix and indices of pivot vars.
Parameters
==========
iszerofunc : Function
A function used for detecting whether an element can
act as a pivot. ``lambda x: x.is_zero`` is used by default.
simplify : Function
A function used to simplify elements when looking for a pivot.
By default SymPy's ``simplify`` is used.
pivots : True or False
If ``True``, a tuple containing the row-reduced matrix and a tuple
of pivot columns is returned. If ``False`` just the row-reduced
matrix is returned.
normalize_last : True or False
If ``True``, no pivots are normalized to `1` until after all
entries above and below each pivot are zeroed. This means the row
reduction algorithm is fraction free until the very last step.
If ``False``, the naive row reduction procedure is used where
each pivot is normalized to be `1` before row operations are
used to zero above and below the pivot.
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x
>>> m = Matrix([[1, 2], [x, 1 - 1/x]])
>>> m.rref()
(Matrix([
[1, 0],
[0, 1]]), (0, 1))
>>> rref_matrix, rref_pivots = m.rref()
>>> rref_matrix
Matrix([
[1, 0],
[0, 1]])
>>> rref_pivots
(0, 1)
Notes
=====
The default value of ``normalize_last=True`` can provide significant
speedup to row reduction, especially on matrices with symbols. However,
if you depend on the form the row reduction algorithm leaves the entries
of the matrix in, set ``normalize_last=False``.
"""
simpfunc = simplify if isinstance(simplify, FunctionType) else _simplify
mat, pivot_cols, _ = _row_reduce(M, iszerofunc, simpfunc,
normalize_last, normalize=True, zero_above=True)
if pivots:
mat = (mat, pivot_cols)
return mat
|
105bd4d43bd98a6cb6e994e4ec2354c351b6cbfe82b5a0c3e996038d5192d9c2 | """
Fundamental operations of dense matrices.
The dense matrix is stored as a list of lists
"""
from sympy.utilities.exceptions import SymPyDeprecationWarning
SymPyDeprecationWarning(
feature="densetools",
issue=12695,
deprecated_since_version="1.1").warn()
def trace(matlist, K):
"""
Returns the trace of a matrix.
Examples
========
>>> from sympy.matrices.densetools import trace, eye
>>> from sympy import ZZ
>>> a = [
... [ZZ(3), ZZ(7), ZZ(4)],
... [ZZ(2), ZZ(4), ZZ(5)],
... [ZZ(6), ZZ(2), ZZ(3)]]
>>> b = eye(4, ZZ)
>>> trace(a, ZZ)
10
>>> trace(b, ZZ)
4
"""
result = K.zero
for i in range(len(matlist)):
result += matlist[i][i]
return result
def transpose(matlist, K):
"""
Returns the transpose of a matrix
Examples
========
>>> from sympy.matrices.densetools import transpose
>>> from sympy import ZZ
>>> a = [
... [ZZ(3), ZZ(7), ZZ(4)],
... [ZZ(2), ZZ(4), ZZ(5)],
... [ZZ(6), ZZ(2), ZZ(3)]]
>>> transpose(a, ZZ)
[[3, 2, 6], [7, 4, 2], [4, 5, 3]]
"""
return [list(a) for a in (zip(*matlist))]
def conjugate(matlist, K):
"""
Returns the conjugate of a matrix row-wise.
Examples
========
>>> from sympy.matrices.densetools import conjugate
>>> from sympy import ZZ
>>> a = [
... [ZZ(3), ZZ(2), ZZ(6)],
... [ZZ(7), ZZ(4), ZZ(2)],
... [ZZ(4), ZZ(5), ZZ(3)]]
>>> conjugate(a, ZZ)
[[3, 2, 6], [7, 4, 2], [4, 5, 3]]
See Also
========
conjugate_row
"""
return [conjugate_row(row, K) for row in matlist]
def conjugate_row(row, K):
"""
Returns the conjugate of a row element-wise
Examples
========
>>> from sympy.matrices.densetools import conjugate_row
>>> from sympy import ZZ
>>> a = [ZZ(3), ZZ(2), ZZ(6)]
>>> conjugate_row(a, ZZ)
[3, 2, 6]
"""
result = []
for r in row:
conj = getattr(r, 'conjugate', None)
if conj is not None:
conjrow = conj()
else:
conjrow = r
result.append(conjrow)
return result
def conjugate_transpose(matlist, K):
"""
Returns the conjugate-transpose of a matrix
Examples
========
>>> from sympy import ZZ
>>> from sympy.matrices.densetools import conjugate_transpose
>>> a = [
... [ZZ(3), ZZ(7), ZZ(4)],
... [ZZ(2), ZZ(4), ZZ(5)],
... [ZZ(6), ZZ(2), ZZ(3)]]
>>> conjugate_transpose(a, ZZ)
[[3, 2, 6], [7, 4, 2], [4, 5, 3]]
"""
return conjugate(transpose(matlist, K), K)
def augment(matlist, column, K):
"""
Augments a matrix and a column.
Examples
========
>>> from sympy.matrices.densetools import augment
>>> from sympy import ZZ
>>> a = [
... [ZZ(3), ZZ(7), ZZ(4)],
... [ZZ(2), ZZ(4), ZZ(5)],
... [ZZ(6), ZZ(2), ZZ(3)]]
>>> b = [
... [ZZ(4)],
... [ZZ(5)],
... [ZZ(6)]]
>>> augment(a, b, ZZ)
[[3, 7, 4, 4], [2, 4, 5, 5], [6, 2, 3, 6]]
"""
return [row + element for row, element in zip(matlist, column)]
def eye(n, K):
"""
Returns an identity matrix of size n.
Examples
========
>>> from sympy.matrices.densetools import eye
>>> from sympy import ZZ
>>> eye(3, ZZ)
[[1, 0, 0], [0, 1, 0], [0, 0, 1]]
"""
result = []
for i in range(n):
result.append([])
for j in range(n):
if (i == j):
result[i].append(K(1))
else:
result[i].append(K.zero)
return result
def row(matlist, i):
"""
Returns the ith row of a matrix
Examples
========
>>> from sympy.matrices.densetools import row
>>> from sympy import ZZ
>>> a = [
... [ZZ(3), ZZ(7), ZZ(4)],
... [ZZ(2), ZZ(4), ZZ(5)],
... [ZZ(6), ZZ(2), ZZ(3)]]
>>> row(a, 2)
[6, 2, 3]
"""
return matlist[i]
def col(matlist, i):
"""
Returns the ith column of a matrix
Note: Currently very expensive
Examples
========
>>> from sympy.matrices.densetools import col
>>> from sympy import ZZ
>>> a = [
... [ZZ(3), ZZ(7), ZZ(4)],
... [ZZ(2), ZZ(4), ZZ(5)],
... [ZZ(6), ZZ(2), ZZ(3)]]
>>> col(a, 1)
[[7], [4], [2]]
"""
matcol = [list(l) for l in zip(*matlist)]
return [[l] for l in matcol[i]]
def rowswap(matlist, index1, index2, K):
"""
Returns the matrix with the index1 and index2 rows swapped.
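Examples
========
The swap is done in place and the modified matrix is returned:
>>> from sympy.matrices.densetools import rowswap
>>> from sympy import ZZ
>>> a = [
... [ZZ(3), ZZ(7), ZZ(4)],
... [ZZ(2), ZZ(4), ZZ(5)],
... [ZZ(6), ZZ(2), ZZ(3)]]
>>> rowswap(a, 1, 2, ZZ)
[[3, 7, 4], [6, 2, 3], [2, 4, 5]]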
"""
matlist[index1], matlist[index2] = matlist[index2], matlist[index1]
return matlist
def rowmul(matlist, index, k, K):
"""
Multiplies the index row by k.
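Examples
========
The row is scaled in place and the modified matrix is returned:
>>> from sympy.matrices.densetools import rowmul
>>> from sympy import ZZ
>>> a = [
... [ZZ(3), ZZ(7), ZZ(4)],
... [ZZ(2), ZZ(4), ZZ(5)],
... [ZZ(6), ZZ(2), ZZ(3)]]
>>> rowmul(a, 0, ZZ(2), ZZ)
[[6, 14, 8], [2, 4, 5], [6, 2, 3]]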
"""
for i in range(len(matlist[index])):
matlist[index][i] = k*matlist[index][i]
return matlist
def rowadd(matlist, index1, index2, k, K):
"""
Adds k times the index2 row to the index1 row.
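Examples
========
The update is done in place and the modified matrix is returned:
>>> from sympy.matrices.densetools import rowadd
>>> from sympy import ZZ
>>> a = [
... [ZZ(3), ZZ(7), ZZ(4)],
... [ZZ(2), ZZ(4), ZZ(5)],
... [ZZ(6), ZZ(2), ZZ(3)]]
>>> rowadd(a, 0, 1, ZZ(2), ZZ)
[[7, 15, 14], [2, 4, 5], [6, 2, 3]]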
"""
for i in range(len(matlist[index1])):
matlist[index1][i] = (matlist[index1][i] + k*matlist[index2][i])
return matlist
def isHermitian(matlist, K):
"""
Checks whether a matrix is Hermitian
Examples
========
>>> from sympy.matrices.densetools import isHermitian
>>> from sympy import QQ
>>> a = [
... [QQ(2,1), QQ(-1,1), QQ(-1,1)],
... [QQ(0,1), QQ(4,1), QQ(-1,1)],
... [QQ(0,1), QQ(0,1), QQ(3,1)]]
>>> isHermitian(a, QQ)
False
"""
return conjugate_transpose(matlist, K) == matlist
|
89ebaebde2c98303b53f62f1244a32337f8ff970f0ed3e6d4bd4897b9ce97b8c | """
Fundamental arithmetic of dense matrices. The dense matrix is stored
as a list of lists.
"""
from sympy.utilities.exceptions import SymPyDeprecationWarning
SymPyDeprecationWarning(
feature="densearith",
issue=12695,
deprecated_since_version="1.1").warn()
def add(matlist1, matlist2, K):
"""
Adds matrices row-wise.
Examples
========
>>> from sympy.matrices.densearith import add
>>> from sympy import ZZ
>>> e = [
... [ZZ(12), ZZ(78)],
... [ZZ(56), ZZ(79)]]
>>> f = [
... [ZZ(1), ZZ(2)],
... [ZZ(3), ZZ(4)]]
>>> g = [
... [ZZ.zero, ZZ.zero],
... [ZZ.zero, ZZ.zero]]
>>> add(e, f, ZZ)
[[13, 80], [59, 83]]
>>> add(f, g, ZZ)
[[1, 2], [3, 4]]
See Also
========
addrow
"""
return [addrow(row1, row2, K) for row1, row2 in zip(matlist1, matlist2)]
def addrow(row1, row2, K):
"""
Adds two rows of a matrix element-wise.
Examples
========
>>> from sympy.matrices.densearith import addrow
>>> from sympy import ZZ
>>> a = [ZZ(12), ZZ(34), ZZ(56)]
>>> b = [ZZ(14), ZZ(56), ZZ(63)]
>>> c = [ZZ(0), ZZ(0), ZZ(0)]
>>> addrow(a, b, ZZ)
[26, 90, 119]
>>> addrow(b, c, ZZ)
[14, 56, 63]
"""
return [element1 + element2 for element1, element2 in zip(row1, row2)]
def sub(matlist1, matlist2, K):
"""
Subtracts two matrices by first negating the second matrix and
then adding it to the first matrix.
Examples
========
>>> from sympy.matrices.densearith import sub
>>> from sympy import ZZ
>>> e = [
... [ZZ(12), ZZ(78)],
... [ZZ(56), ZZ(79)]]
>>> f = [
... [ZZ(1), ZZ(2)],
... [ZZ(3), ZZ(4)]]
>>> g = [
... [ZZ.zero, ZZ.zero],
... [ZZ.zero, ZZ.zero]]
>>> sub(e, f, ZZ)
[[11, 76], [53, 75]]
>>> sub(f, g, ZZ)
[[1, 2], [3, 4]]
See Also
========
negate
negaterow
"""
return add(matlist1, negate(matlist2, K), K)
def negate(matlist, K):
"""
Negates the elements of a matrix row-wise.
Examples
========
>>> from sympy.matrices.densearith import negate
>>> from sympy import ZZ
>>> a = [
... [ZZ(2), ZZ(3)],
... [ZZ(4), ZZ(5)]]
>>> b = [
... [ZZ(0), ZZ(0)],
... [ZZ(0), ZZ(0)]]
>>> negate(a, ZZ)
[[-2, -3], [-4, -5]]
>>> negate(b, ZZ)
[[0, 0], [0, 0]]
See Also
========
negaterow
"""
return [negaterow(row, K) for row in matlist]
def negaterow(row, K):
"""
Negates a row element-wise.
Examples
========
>>> from sympy.matrices.densearith import negaterow
>>> from sympy import ZZ
>>> a = [ZZ(2), ZZ(3), ZZ(4)]
>>> b = [ZZ(0), ZZ(0), ZZ(0)]
>>> negaterow(a, ZZ)
[-2, -3, -4]
>>> negaterow(b, ZZ)
[0, 0, 0]
"""
return [-element for element in row]
def mulmatmat(matlist1, matlist2, K):
"""
Multiplies two matrices by taking the product of each row of the first
matrix with each column of the second. The row-column product is computed
with mulrowcol. The second matrix is first converted from a list of rows
to a list of columns using zip, and then the multiplication is done.
Examples
========
>>> from sympy.matrices.densearith import mulmatmat
>>> from sympy import ZZ
>>> from sympy.matrices.densetools import eye
>>> a = [
... [ZZ(3), ZZ(4)],
... [ZZ(5), ZZ(6)]]
>>> b = [
... [ZZ(1), ZZ(2)],
... [ZZ(7), ZZ(8)]]
>>> c = eye(2, ZZ)
>>> mulmatmat(a, b, ZZ)
[[31, 38], [47, 58]]
>>> mulmatmat(a, c, ZZ)
[[3, 4], [5, 6]]
See Also
========
mulrowcol
"""
matcol = [list(i) for i in zip(*matlist2)]
result = []
for row in matlist1:
result.append([mulrowcol(row, col, K) for col in matcol])
return result
def mulmatscaler(matlist, scaler, K):
"""
Performs scalar-matrix multiplication one row at a time. The row-scalar
multiplication is done using mulrowscaler.
Examples
========
>>> from sympy import ZZ
>>> from sympy.matrices.densearith import mulmatscaler
>>> a = [
... [ZZ(3), ZZ(7), ZZ(4)],
... [ZZ(2), ZZ(4), ZZ(5)],
... [ZZ(6), ZZ(2), ZZ(3)]]
>>> mulmatscaler(a, ZZ(1), ZZ)
[[3, 7, 4], [2, 4, 5], [6, 2, 3]]
See Also
========
mulrowscaler
"""
return [mulrowscaler(row, scaler, K) for row in matlist]
def mulrowscaler(row, scaler, K):
"""
Performs the scalar-row multiplication element-wise.
Examples
========
>>> from sympy import ZZ
>>> from sympy.matrices.densearith import mulrowscaler
>>> a = [ZZ(3), ZZ(4), ZZ(5)]
>>> mulrowscaler(a, 2, ZZ)
[6, 8, 10]
"""
return [scaler*element for element in row]
def mulrowcol(row, col, K):
"""
Multiplies two lists representing row and column element-wise.
Gotcha: Here the column is represented as a list contrary to the norm
where it is represented as a list of one element lists. The reason is
that the theoretically correct approach is too expensive. This problem
is expected to be removed later as we have a good data structure to
facilitate column operations.
Examples
========
>>> from sympy.matrices.densearith import mulrowcol
>>> from sympy import ZZ
>>> a = [ZZ(2), ZZ(4), ZZ(6)]
>>> mulrowcol(a, a, ZZ)
56
"""
result = K.zero
for i in range(len(row)):
result += row[i]*col[i]
return result
|
5839d957cd9a4be7e0f969f576c89a39fddf7ba3e4a5ed7de0cec978e3140100 | from __future__ import division, print_function
from typing import Callable
from sympy.core import Basic, Dict, Integer, S, Tuple
from sympy.core.cache import cacheit
from sympy.core.sympify import converter as sympify_converter
from sympy.matrices.dense import DenseMatrix
from sympy.matrices.expressions import MatrixExpr
from sympy.matrices.matrices import MatrixBase
from sympy.matrices.sparse import MutableSparseMatrix, SparseMatrix
def sympify_matrix(arg):
return arg.as_immutable()
sympify_converter[MatrixBase] = sympify_matrix
class ImmutableDenseMatrix(DenseMatrix, MatrixExpr): # type: ignore
"""Create an immutable version of a matrix.
Examples
========
>>> from sympy import eye
>>> from sympy.matrices import ImmutableMatrix
>>> ImmutableMatrix(eye(3))
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> _[0, 0] = 42
Traceback (most recent call last):
...
TypeError: Cannot set values of ImmutableDenseMatrix
"""
# MatrixExpr is set as NotIterable, but we want explicit matrices to be
# iterable
_iterable = True
_class_priority = 8
_op_priority = 10.001
def __new__(cls, *args, **kwargs):
return cls._new(*args, **kwargs)
__hash__ = MatrixExpr.__hash__ # type: Callable[[MatrixExpr], int]
@classmethod
def _new(cls, *args, **kwargs):
if len(args) == 1 and isinstance(args[0], ImmutableDenseMatrix):
return args[0]
if kwargs.get('copy', True) is False:
if len(args) != 3:
raise TypeError("'copy=False' requires a matrix be initialized as rows,cols,[list]")
rows, cols, flat_list = args
else:
rows, cols, flat_list = cls._handle_creation_inputs(*args, **kwargs)
flat_list = list(flat_list) # create a shallow copy
rows = Integer(rows)
cols = Integer(cols)
if not isinstance(flat_list, Tuple):
flat_list = Tuple(*flat_list)
return Basic.__new__(cls, rows, cols, flat_list)
@property
def _mat(self):
# self.args[2] is a Tuple. Accessing the elements of a plain
# tuple is significantly faster than going through the Tuple,
# so return the internal tuple.
return self.args[2].args
def _entry(self, i, j, **kwargs):
return DenseMatrix.__getitem__(self, (i, j))
def __setitem__(self, *args):
raise TypeError("Cannot set values of {}".format(self.__class__))
def _eval_Eq(self, other):
"""Helper method for Equality with matrices.
Relational automatically converts matrices to ImmutableDenseMatrix
instances, so this method only applies here. Returns True if the
matrices are definitively the same, False if they are definitively
different, and None if undetermined (e.g. if they contain Symbols).
Returning None triggers default handling of Equalities.
"""
if not hasattr(other, 'shape') or self.shape != other.shape:
return S.false
if isinstance(other, MatrixExpr) and not isinstance(
other, ImmutableDenseMatrix):
return None
diff = (self - other).is_zero_matrix
if diff is True:
return S.true
elif diff is False:
return S.false
def _eval_extract(self, rowsList, colsList):
# self.args[2] is a Tuple; indexing a plain tuple is slightly
# faster than indexing a Tuple, so grab the internal tuple via self._mat
mat = self._mat
cols = self.cols
indices = (i * cols + j for i in rowsList for j in colsList)
return self._new(len(rowsList), len(colsList),
Tuple(*(mat[i] for i in indices), sympify=False), copy=False)
@property
def cols(self):
return int(self.args[1])
@property
def rows(self):
return int(self.args[0])
@property
def shape(self):
return tuple(int(i) for i in self.args[:2])
def as_immutable(self):
return self
def is_diagonalizable(self, reals_only=False, **kwargs):
return super(ImmutableDenseMatrix, self).is_diagonalizable(
reals_only=reals_only, **kwargs)
is_diagonalizable.__doc__ = DenseMatrix.is_diagonalizable.__doc__
is_diagonalizable = cacheit(is_diagonalizable)
# make sure ImmutableDenseMatrix is aliased as ImmutableMatrix
ImmutableMatrix = ImmutableDenseMatrix
class ImmutableSparseMatrix(SparseMatrix, Basic):
"""Create an immutable version of a sparse matrix.
Examples
========
>>> from sympy import eye
>>> from sympy.matrices.immutable import ImmutableSparseMatrix
>>> ImmutableSparseMatrix(1, 1, {})
Matrix([[0]])
>>> ImmutableSparseMatrix(eye(3))
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> _[0, 0] = 42
Traceback (most recent call last):
...
TypeError: Cannot set values of ImmutableSparseMatrix
>>> _.shape
(3, 3)
"""
is_Matrix = True
_class_priority = 9
@classmethod
def _new(cls, *args, **kwargs):
s = MutableSparseMatrix(*args)
rows = Integer(s.rows)
cols = Integer(s.cols)
mat = Dict(s._smat)
obj = Basic.__new__(cls, rows, cols, mat)
obj.rows = s.rows
obj.cols = s.cols
obj._smat = s._smat
return obj
def __new__(cls, *args, **kwargs):
return cls._new(*args, **kwargs)
def __setitem__(self, *args):
raise TypeError("Cannot set values of ImmutableSparseMatrix")
def __hash__(self):
return hash((type(self).__name__,) + (self.shape, tuple(self._smat)))
_eval_Eq = ImmutableDenseMatrix._eval_Eq
def as_immutable(self):
return self
def is_diagonalizable(self, reals_only=False, **kwargs):
return super(ImmutableSparseMatrix, self).is_diagonalizable(
reals_only=reals_only, **kwargs)
is_diagonalizable.__doc__ = SparseMatrix.is_diagonalizable.__doc__
is_diagonalizable = cacheit(is_diagonalizable)
|
1825f2c6ed5beac71b26e4b6559be73315b0c6f4682b13979956add2873f717e | """
Basic methods common to all matrices to be used
when creating more advanced matrices (e.g., matrices over rings,
etc.).
"""
from __future__ import division, print_function
from sympy.core.logic import FuzzyBool
from collections import defaultdict
from inspect import isfunction
from sympy.assumptions.refine import refine
from sympy.core import SympifyError, Add
from sympy.core.basic import Atom
from sympy.core.compatibility import (
Iterable, as_int, is_sequence, reduce)
from sympy.core.decorators import call_highest_priority
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.core.sympify import sympify
from sympy.functions import Abs
from sympy.simplify import simplify as _simplify
from sympy.simplify.simplify import dotprodsimp as _dotprodsimp
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import flatten
from sympy.utilities.misc import filldedent
from .utilities import _get_intermediate_simp_bool
class MatrixError(Exception):
pass
class ShapeError(ValueError, MatrixError):
"""Wrong matrix shape"""
pass
class NonSquareMatrixError(ShapeError):
pass
class NonInvertibleMatrixError(ValueError, MatrixError):
"""The matrix in not invertible (division by multidimensional zero error)."""
pass
class NonPositiveDefiniteMatrixError(ValueError, MatrixError):
"""The matrix is not a positive-definite matrix."""
pass
class MatrixRequired(object):
"""All subclasses of matrix objects must implement the
required matrix properties listed here."""
rows = None # type: int
cols = None # type: int
_simplify = None
@classmethod
def _new(cls, *args, **kwargs):
"""`_new` must, at minimum, be callable as
`_new(rows, cols, mat) where mat is a flat list of the
elements of the matrix."""
raise NotImplementedError("Subclasses must implement this.")
def __eq__(self, other):
raise NotImplementedError("Subclasses must implement this.")
def __getitem__(self, key):
"""Implementations of __getitem__ should accept ints, in which
case the matrix is indexed as a flat list, tuples (i,j) in which
case the (i,j) entry is returned, slices, or mixed tuples (a,b)
where a and b are any combination of slices and integers."""
raise NotImplementedError("Subclasses must implement this.")
def __len__(self):
"""The total number of entries in the matrix."""
raise NotImplementedError("Subclasses must implement this.")
@property
def shape(self):
raise NotImplementedError("Subclasses must implement this.")
class MatrixShaping(MatrixRequired):
"""Provides basic matrix shaping and extracting of submatrices"""
def _eval_col_del(self, col):
def entry(i, j):
return self[i, j] if j < col else self[i, j + 1]
return self._new(self.rows, self.cols - 1, entry)
def _eval_col_insert(self, pos, other):
def entry(i, j):
if j < pos:
return self[i, j]
elif pos <= j < pos + other.cols:
return other[i, j - pos]
return self[i, j - other.cols]
return self._new(self.rows, self.cols + other.cols,
lambda i, j: entry(i, j))
def _eval_col_join(self, other):
rows = self.rows
def entry(i, j):
if i < rows:
return self[i, j]
return other[i - rows, j]
return classof(self, other)._new(self.rows + other.rows, self.cols,
lambda i, j: entry(i, j))
def _eval_extract(self, rowsList, colsList):
mat = list(self)
cols = self.cols
indices = (i * cols + j for i in rowsList for j in colsList)
return self._new(len(rowsList), len(colsList),
list(mat[i] for i in indices))
def _eval_get_diag_blocks(self):
sub_blocks = []
def recurse_sub_blocks(M):
i = 1
while i <= M.shape[0]:
if i == 1:
to_the_right = M[0, i:]
to_the_bottom = M[i:, 0]
else:
to_the_right = M[:i, i:]
to_the_bottom = M[i:, :i]
if any(to_the_right) or any(to_the_bottom):
i += 1
continue
else:
sub_blocks.append(M[:i, :i])
if M.shape == M[:i, :i].shape:
return
else:
recurse_sub_blocks(M[i:, i:])
return
recurse_sub_blocks(self)
return sub_blocks
def _eval_row_del(self, row):
def entry(i, j):
return self[i, j] if i < row else self[i + 1, j]
return self._new(self.rows - 1, self.cols, entry)
def _eval_row_insert(self, pos, other):
entries = list(self)
insert_pos = pos * self.cols
entries[insert_pos:insert_pos] = list(other)
return self._new(self.rows + other.rows, self.cols, entries)
def _eval_row_join(self, other):
cols = self.cols
def entry(i, j):
if j < cols:
return self[i, j]
return other[i, j - cols]
return classof(self, other)._new(self.rows, self.cols + other.cols,
lambda i, j: entry(i, j))
def _eval_tolist(self):
return [list(self[i,:]) for i in range(self.rows)]
def _eval_vec(self):
rows = self.rows
def entry(n, _):
# we want to read off the columns first
j = n // rows
i = n - j * rows
return self[i, j]
return self._new(len(self), 1, entry)
def col_del(self, col):
"""Delete the specified column."""
if col < 0:
col += self.cols
if not 0 <= col < self.cols:
raise ValueError("Column {} out of range.".format(col))
return self._eval_col_del(col)
def col_insert(self, pos, other):
"""Insert one or more columns at the given column position.
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(3, 1)
>>> M.col_insert(1, V)
Matrix([
[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 0, 0]])
See Also
========
col
row_insert
"""
# Allows you to build a matrix even if it is null matrix
if not self:
return type(self)(other)
pos = as_int(pos)
if pos < 0:
pos = self.cols + pos
if pos < 0:
pos = 0
elif pos > self.cols:
pos = self.cols
if self.rows != other.rows:
raise ShapeError(
"`self` and `other` must have the same number of rows.")
return self._eval_col_insert(pos, other)
def col_join(self, other):
"""Concatenates two matrices along self's last and other's first row.
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(1, 3)
>>> M.col_join(V)
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[1, 1, 1]])
See Also
========
col
row_join
"""
# A null matrix can always be stacked (see #10770)
if self.rows == 0 and self.cols != other.cols:
return self._new(0, other.cols, []).col_join(other)
if self.cols != other.cols:
raise ShapeError(
"`self` and `other` must have the same number of columns.")
return self._eval_col_join(other)
def col(self, j):
"""Elementary column selector.
Examples
========
>>> from sympy import eye
>>> eye(2).col(0)
Matrix([
[1],
[0]])
See Also
========
row
sympy.matrices.dense.MutableDenseMatrix.col_op
sympy.matrices.dense.MutableDenseMatrix.col_swap
col_del
col_join
col_insert
"""
return self[:, j]
def extract(self, rowsList, colsList):
"""Return a submatrix by specifying a list of rows and columns.
Negative indices can be given. All indices must be in the range
-n <= i < n where n is the number of rows or columns.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(4, 3, range(12))
>>> m
Matrix([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9, 10, 11]])
>>> m.extract([0, 1, 3], [0, 1])
Matrix([
[0, 1],
[3, 4],
[9, 10]])
Rows or columns can be repeated:
>>> m.extract([0, 0, 1], [-1])
Matrix([
[2],
[2],
[5]])
Every other row can be taken by using range to provide the indices:
>>> m.extract(range(0, m.rows, 2), [-1])
Matrix([
[2],
[8]])
RowsList or colsList can also be a list of booleans, in which case
the rows or columns corresponding to the True values will be selected:
>>> m.extract([0, 1, 2, 3], [True, False, True])
Matrix([
[0, 2],
[3, 5],
[6, 8],
[9, 11]])
"""
if not is_sequence(rowsList) or not is_sequence(colsList):
raise TypeError("rowsList and colsList must be iterable")
# ensure rowsList and colsList are lists of integers
if rowsList and all(isinstance(i, bool) for i in rowsList):
rowsList = [index for index, item in enumerate(rowsList) if item]
if colsList and all(isinstance(i, bool) for i in colsList):
colsList = [index for index, item in enumerate(colsList) if item]
# ensure everything is in range
rowsList = [a2idx(k, self.rows) for k in rowsList]
colsList = [a2idx(k, self.cols) for k in colsList]
return self._eval_extract(rowsList, colsList)
def get_diag_blocks(self):
"""Obtains the square sub-matrices on the main diagonal of a square matrix.
Useful for inverting symbolic matrices or solving systems of
linear equations which may be decoupled by having a block diagonal
structure.
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x, y, z
>>> A = Matrix([[1, 3, 0, 0], [y, z*z, 0, 0], [0, 0, x, 0], [0, 0, 0, 0]])
>>> a1, a2, a3 = A.get_diag_blocks()
>>> a1
Matrix([
[1, 3],
[y, z**2]])
>>> a2
Matrix([[x]])
>>> a3
Matrix([[0]])
"""
return self._eval_get_diag_blocks()
@classmethod
def hstack(cls, *args):
"""Return a matrix formed by joining args horizontally (i.e.
by repeated application of row_join).
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> Matrix.hstack(eye(2), 2*eye(2))
Matrix([
[1, 0, 2, 0],
[0, 1, 0, 2]])
"""
if len(args) == 0:
return cls._new()
kls = type(args[0])
return reduce(kls.row_join, args)
def reshape(self, rows, cols):
"""Reshape the matrix. Total number of elements must remain the same.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 3, lambda i, j: 1)
>>> m
Matrix([
[1, 1, 1],
[1, 1, 1]])
>>> m.reshape(1, 6)
Matrix([[1, 1, 1, 1, 1, 1]])
>>> m.reshape(3, 2)
Matrix([
[1, 1],
[1, 1],
[1, 1]])
"""
if self.rows * self.cols != rows * cols:
raise ValueError("Invalid reshape parameters %d %d" % (rows, cols))
return self._new(rows, cols, lambda i, j: self[i * cols + j])
def row_del(self, row):
"""Delete the specified row."""
if row < 0:
row += self.rows
if not 0 <= row < self.rows:
raise ValueError("Row {} out of range.".format(row))
return self._eval_row_del(row)
def row_insert(self, pos, other):
"""Insert one or more rows at the given row position.
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(1, 3)
>>> M.row_insert(1, V)
Matrix([
[0, 0, 0],
[1, 1, 1],
[0, 0, 0],
[0, 0, 0]])
See Also
========
row
col_insert
"""
# Allows you to build a matrix even if it is null matrix
if not self:
return self._new(other)
pos = as_int(pos)
if pos < 0:
pos = self.rows + pos
if pos < 0:
pos = 0
elif pos > self.rows:
pos = self.rows
if self.cols != other.cols:
raise ShapeError(
"`self` and `other` must have the same number of columns.")
return self._eval_row_insert(pos, other)
def row_join(self, other):
"""Concatenates two matrices along self's last and rhs's first column
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(3, 1)
>>> M.row_join(V)
Matrix([
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1]])
See Also
========
row
col_join
"""
# A null matrix can always be stacked (see #10770)
if self.cols == 0 and self.rows != other.rows:
return self._new(other.rows, 0, []).row_join(other)
if self.rows != other.rows:
raise ShapeError(
"`self` and `rhs` must have the same number of rows.")
return self._eval_row_join(other)
def diagonal(self, k=0):
"""Returns the kth diagonal of self. The main diagonal
corresponds to `k=0`; diagonals above and below correspond to
`k > 0` and `k < 0`, respectively. The values of `self[i, j]`
for which `j - i = k`, are returned in order of increasing
`i + j`, starting with `i + j = |k|`.
Examples
========
>>> from sympy import Matrix, SparseMatrix
>>> m = Matrix(3, 3, lambda i, j: j - i); m
Matrix([
[ 0, 1, 2],
[-1, 0, 1],
[-2, -1, 0]])
>>> _.diagonal()
Matrix([[0, 0, 0]])
>>> m.diagonal(1)
Matrix([[1, 1]])
>>> m.diagonal(-2)
Matrix([[-2]])
Even though the diagonal is returned as a Matrix, the element
retrieval can be done with a single index:
>>> Matrix.diag(1, 2, 3).diagonal()[1] # instead of [0, 1]
2
See Also
========
diag - to create a diagonal matrix
"""
rv = []
k = as_int(k)
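# starting position of the kth diagonal: (0, k) for k > 0, (-k, 0) for
# k < 0 and (0, 0) for k == 0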
r = 0 if k > 0 else -k
c = 0 if r else k
while True:
if r == self.rows or c == self.cols:
break
rv.append(self[r, c])
r += 1
c += 1
if not rv:
raise ValueError(filldedent('''
The %s diagonal is out of range [%s, %s]''' % (
k, 1 - self.rows, self.cols - 1)))
return self._new(1, len(rv), rv)
def row(self, i):
"""Elementary row selector.
Examples
========
>>> from sympy import eye
>>> eye(2).row(0)
Matrix([[1, 0]])
See Also
========
col
sympy.matrices.dense.MutableDenseMatrix.row_op
sympy.matrices.dense.MutableDenseMatrix.row_swap
row_del
row_join
row_insert
"""
return self[i, :]
@property
def shape(self):
"""The shape (dimensions) of the matrix as the 2-tuple (rows, cols).
Examples
========
>>> from sympy.matrices import zeros
>>> M = zeros(2, 3)
>>> M.shape
(2, 3)
>>> M.rows
2
>>> M.cols
3
"""
return (self.rows, self.cols)
def tolist(self):
"""Return the Matrix as a nested Python list.
Examples
========
>>> from sympy import Matrix, ones
>>> m = Matrix(3, 3, range(9))
>>> m
Matrix([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> m.tolist()
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
>>> ones(3, 0).tolist()
[[], [], []]
When there are no rows then it will not be possible to tell how
many columns were in the original matrix:
>>> ones(0, 3).tolist()
[]
"""
if not self.rows:
return []
if not self.cols:
return [[] for i in range(self.rows)]
return self._eval_tolist()
def vec(self):
"""Return the Matrix converted into a one column matrix by stacking columns
Examples
========
>>> from sympy import Matrix
>>> m=Matrix([[1, 3], [2, 4]])
>>> m
Matrix([
[1, 3],
[2, 4]])
>>> m.vec()
Matrix([
[1],
[2],
[3],
[4]])
See Also
========
vech
"""
return self._eval_vec()
@classmethod
def vstack(cls, *args):
"""Return a matrix formed by joining args vertically (i.e.
by repeated application of col_join).
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> Matrix.vstack(eye(2), 2*eye(2))
Matrix([
[1, 0],
[0, 1],
[2, 0],
[0, 2]])
"""
if len(args) == 0:
return cls._new()
kls = type(args[0])
return reduce(kls.col_join, args)
class MatrixSpecial(MatrixRequired):
"""Construction of special matrices"""
@classmethod
def _eval_diag(cls, rows, cols, diag_dict):
"""diag_dict is a defaultdict containing
all the entries of the diagonal matrix."""
def entry(i, j):
return diag_dict[(i, j)]
return cls._new(rows, cols, entry)
@classmethod
def _eval_eye(cls, rows, cols):
def entry(i, j):
return cls.one if i == j else cls.zero
return cls._new(rows, cols, entry)
@classmethod
def _eval_jordan_block(cls, rows, cols, eigenvalue, band='upper'):
if band == 'lower':
def entry(i, j):
if i == j:
return eigenvalue
elif j + 1 == i:
return cls.one
return cls.zero
else:
def entry(i, j):
if i == j:
return eigenvalue
elif i + 1 == j:
return cls.one
return cls.zero
return cls._new(rows, cols, entry)
@classmethod
def _eval_ones(cls, rows, cols):
def entry(i, j):
return cls.one
return cls._new(rows, cols, entry)
@classmethod
def _eval_zeros(cls, rows, cols):
def entry(i, j):
return cls.zero
return cls._new(rows, cols, entry)
@classmethod
def diag(kls, *args, **kwargs):
"""Returns a matrix with the specified diagonal.
If matrices are passed, a block-diagonal matrix
is created (i.e. the "direct sum" of the matrices).
kwargs
======
rows : rows of the resulting matrix; computed if
not given.
cols : columns of the resulting matrix; computed if
not given.
cls : class for the resulting matrix
unpack : bool which, when True (default), unpacks a single
sequence rather than interpreting it as a Matrix.
strict : bool which, when False (default), allows Matrices to
have variable-length rows.
Examples
========
>>> from sympy.matrices import Matrix
>>> Matrix.diag(1, 2, 3)
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
The current default is to unpack a single sequence. If this is
not desired, set `unpack=False` and it will be interpreted as
a matrix.
>>> Matrix.diag([1, 2, 3]) == Matrix.diag(1, 2, 3)
True
When more than one element is passed, each is interpreted as
something to put on the diagonal. Lists are converted to
matrices. Filling of the diagonal always continues from
the bottom right hand corner of the previous item: this
will create a block-diagonal matrix whether the matrices
are square or not.
>>> col = [1, 2, 3]
>>> row = [[4, 5]]
>>> Matrix.diag(col, row)
Matrix([
[1, 0, 0],
[2, 0, 0],
[3, 0, 0],
[0, 4, 5]])
When `unpack` is False, elements within a list need not all be
of the same length. Setting `strict` to True would raise a
ValueError for the following:
>>> Matrix.diag([[1, 2, 3], [4, 5], [6]], unpack=False)
Matrix([
[1, 2, 3],
[4, 5, 0],
[6, 0, 0]])
The type of the returned matrix can be set with the ``cls``
keyword.
>>> from sympy.matrices import ImmutableMatrix
>>> from sympy.utilities.misc import func_name
>>> func_name(Matrix.diag(1, cls=ImmutableMatrix))
'ImmutableDenseMatrix'
A zero dimension matrix can be used to position the start of
the filling at the start of an arbitrary row or column:
>>> from sympy import ones
>>> r2 = ones(0, 2)
>>> Matrix.diag(r2, 1, 2)
Matrix([
[0, 0, 1, 0],
[0, 0, 0, 2]])
See Also
========
eye
diagonal - to extract a diagonal
.dense.diag
.expressions.blockmatrix.BlockMatrix
"""
from sympy.matrices.matrices import MatrixBase
from sympy.matrices.dense import Matrix
from sympy.matrices.sparse import SparseMatrix
klass = kwargs.get('cls', kls)
strict = kwargs.get('strict', False) # lists -> Matrices
unpack = kwargs.get('unpack', True) # unpack single sequence
if unpack and len(args) == 1 and is_sequence(args[0]) and \
not isinstance(args[0], MatrixBase):
args = args[0]
# fill a default dict with the diagonal entries
diag_entries = defaultdict(int)
rmax = cmax = 0 # keep track of the biggest index seen
for m in args:
if isinstance(m, list):
if strict:
# if malformed, Matrix will raise an error
_ = Matrix(m)
r, c = _.shape
m = _.tolist()
else:
m = SparseMatrix(m)
for (i, j), _ in m._smat.items():
diag_entries[(i + rmax, j + cmax)] = _
r, c = m.shape
m = [] # to skip process below
elif hasattr(m, 'shape'): # a Matrix
# convert to list of lists
r, c = m.shape
m = m.tolist()
else: # in this case, we're a single value
diag_entries[(rmax, cmax)] = m
rmax += 1
cmax += 1
continue
# process list of lists
for i in range(len(m)):
for j, _ in enumerate(m[i]):
diag_entries[(i + rmax, j + cmax)] = _
rmax += r
cmax += c
rows = kwargs.get('rows', None)
cols = kwargs.get('cols', None)
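# size defaulting: if neither rows nor cols is given, use the computed
# extent; if only one of them is given, the result is square of that size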
if rows is None:
rows, cols = cols, rows
if rows is None:
rows, cols = rmax, cmax
else:
cols = rows if cols is None else cols
if rows < rmax or cols < cmax:
raise ValueError(filldedent('''
The constructed matrix is {} x {} but a size of {} x {}
was specified.'''.format(rmax, cmax, rows, cols)))
return klass._eval_diag(rows, cols, diag_entries)
@classmethod
def eye(kls, rows, cols=None, **kwargs):
"""Returns an identity matrix.
Args
====
rows : rows of the matrix
cols : cols of the matrix (if None, cols=rows)
kwargs
======
cls : class of the returned matrix
"""
if cols is None:
cols = rows
klass = kwargs.get('cls', kls)
rows, cols = as_int(rows), as_int(cols)
return klass._eval_eye(rows, cols)
@classmethod
def jordan_block(kls, size=None, eigenvalue=None, **kwargs):
"""Returns a Jordan block
Parameters
==========
size : Integer, optional
Specifies the shape of the Jordan block matrix.
eigenvalue : Number or Symbol
Specifies the value for the main diagonal of the matrix.
.. note::
The keyword ``eigenval`` is also accepted as an alias
of this keyword, but its use is not recommended.
The alias may be deprecated in a later release.
band : 'upper' or 'lower', optional
Specifies which off-diagonal the `1` entries are placed on.
cls : Matrix, optional
Specifies the matrix class of the output form.
If it is not specified, the class type where the method is
being executed on will be returned.
rows, cols : Integer, optional
Specifies the shape of the Jordan block matrix. See the Notes
section for details of how these keys work.
.. note::
This feature will be deprecated in the future.
Returns
=======
Matrix
A Jordan block matrix.
Raises
======
ValueError
If insufficient arguments are given for matrix size
specification, or no eigenvalue is given.
Examples
========
Creating a default Jordan block:
>>> from sympy import Matrix
>>> from sympy.abc import x
>>> Matrix.jordan_block(4, x)
Matrix([
[x, 1, 0, 0],
[0, x, 1, 0],
[0, 0, x, 1],
[0, 0, 0, x]])
Creating an alternative Jordan block matrix where `1` is on
lower off-diagonal:
>>> Matrix.jordan_block(4, x, band='lower')
Matrix([
[x, 0, 0, 0],
[1, x, 0, 0],
[0, 1, x, 0],
[0, 0, 1, x]])
Creating a Jordan block with keyword arguments
>>> Matrix.jordan_block(size=4, eigenvalue=x)
Matrix([
[x, 1, 0, 0],
[0, x, 1, 0],
[0, 0, x, 1],
[0, 0, 0, x]])
Notes
=====
.. note::
This feature will be deprecated in the future.
The keyword arguments ``size``, ``rows``, ``cols`` relate to
the Jordan block size specifications.
If you want to create a square Jordan block, specify any
one of the three arguments.
If you want to create a rectangular Jordan block, specify
``rows`` and ``cols`` individually.
+--------------------------------+---------------------+
|        Arguments Given         |    Matrix Shape     |
+----------+----------+----------+----------+----------+
|   size   |   rows   |   cols   |   rows   |   cols   |
+==========+==========+==========+==========+==========+
|   size   |         Any         |   size   |   size   |
+----------+----------+----------+----------+----------+
|          |   None   |   None   |     ValueError      |
|          +----------+----------+----------+----------+
|   None   |   rows   |   None   |   rows   |   rows   |
|          +----------+----------+----------+----------+
|          |   None   |   cols   |   cols   |   cols   |
|          +----------+----------+----------+----------+
|          |   rows   |   cols   |   rows   |   cols   |
+----------+----------+----------+----------+----------+
References
==========
.. [1] https://en.wikipedia.org/wiki/Jordan_matrix
"""
if 'rows' in kwargs or 'cols' in kwargs:
SymPyDeprecationWarning(
feature="Keyword arguments 'rows' or 'cols'",
issue=16102,
useinstead="a more generic banded matrix constructor",
deprecated_since_version="1.4"
).warn()
klass = kwargs.pop('cls', kls)
band = kwargs.pop('band', 'upper')
rows = kwargs.pop('rows', None)
cols = kwargs.pop('cols', None)
eigenval = kwargs.get('eigenval', None)
if eigenvalue is None and eigenval is None:
raise ValueError("Must supply an eigenvalue")
elif eigenvalue != eigenval and None not in (eigenval, eigenvalue):
raise ValueError(
"Inconsistent values are given: 'eigenval'={}, "
"'eigenvalue'={}".format(eigenval, eigenvalue))
else:
if eigenval is not None:
eigenvalue = eigenval
if (size, rows, cols) == (None, None, None):
raise ValueError("Must supply a matrix size")
if size is not None:
rows, cols = size, size
elif rows is not None and cols is None:
cols = rows
elif cols is not None and rows is None:
rows = cols
rows, cols = as_int(rows), as_int(cols)
return klass._eval_jordan_block(rows, cols, eigenvalue, band)
@classmethod
def ones(kls, rows, cols=None, **kwargs):
"""Returns a matrix of ones.
Args
====
rows : rows of the matrix
cols : cols of the matrix (if None, cols=rows)
kwargs
======
cls : class of the returned matrix
"""
if cols is None:
cols = rows
klass = kwargs.get('cls', kls)
rows, cols = as_int(rows), as_int(cols)
return klass._eval_ones(rows, cols)
@classmethod
def zeros(kls, rows, cols=None, **kwargs):
"""Returns a matrix of zeros.
Args
====
rows : rows of the matrix
cols : cols of the matrix (if None, cols=rows)
kwargs
======
cls : class of the returned matrix
"""
if cols is None:
cols = rows
klass = kwargs.get('cls', kls)
rows, cols = as_int(rows), as_int(cols)
return klass._eval_zeros(rows, cols)
class MatrixProperties(MatrixRequired):
"""Provides basic properties of a matrix."""
def _eval_atoms(self, *types):
result = set()
for i in self:
result.update(i.atoms(*types))
return result
def _eval_free_symbols(self):
return set().union(*(i.free_symbols for i in self))
def _eval_has(self, *patterns):
return any(a.has(*patterns) for a in self)
def _eval_is_anti_symmetric(self, simpfunc):
if not all(simpfunc(self[i, j] + self[j, i]).is_zero for i in range(self.rows) for j in range(self.cols)):
return False
return True
def _eval_is_diagonal(self):
for i in range(self.rows):
for j in range(self.cols):
if i != j and self[i, j]:
return False
return True
# _eval_is_hermitian is called by some general sympy
# routines and has a different *args signature. Make
# sure the names don't clash by adding `_matrix_` in name.
def _eval_is_matrix_hermitian(self, simpfunc):
mat = self._new(self.rows, self.cols, lambda i, j: simpfunc(self[i, j] - self[j, i].conjugate()))
return mat.is_zero_matrix
def _eval_is_Identity(self) -> FuzzyBool:
def dirac(i, j):
if i == j:
return 1
return 0
return all(self[i, j] == dirac(i, j)
for i in range(self.rows)
for j in range(self.cols))
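# lower Hessenberg: every entry strictly above the first superdiagonal
# (j > i + 1) must be zero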
def _eval_is_lower_hessenberg(self):
return all(self[i, j].is_zero
for i in range(self.rows)
for j in range(i + 2, self.cols))
def _eval_is_lower(self):
return all(self[i, j].is_zero
for i in range(self.rows)
for j in range(i + 1, self.cols))
def _eval_is_symbolic(self):
return self.has(Symbol)
def _eval_is_symmetric(self, simpfunc):
mat = self._new(self.rows, self.cols, lambda i, j: simpfunc(self[i, j] - self[j, i]))
return mat.is_zero_matrix
def _eval_is_zero_matrix(self):
if any(i.is_zero == False for i in self):
return False
if any(i.is_zero is None for i in self):
return None
return True
def _eval_is_upper_hessenberg(self):
return all(self[i, j].is_zero
for i in range(2, self.rows)
for j in range(min(self.cols, (i - 1))))
def _eval_values(self):
return [i for i in self if not i.is_zero]
def atoms(self, *types):
"""Returns the atoms that form the current object.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.matrices import Matrix
>>> Matrix([[x]])
Matrix([[x]])
>>> _.atoms()
{x}
"""
types = tuple(t if isinstance(t, type) else type(t) for t in types)
if not types:
types = (Atom,)
return self._eval_atoms(*types)
@property
def free_symbols(self):
"""Returns the free symbols within the matrix.
Examples
========
>>> from sympy.abc import x
>>> from sympy.matrices import Matrix
>>> Matrix([[x], [1]]).free_symbols
{x}
"""
return self._eval_free_symbols()
def has(self, *patterns):
"""Test whether any subexpression matches any of the patterns.
Examples
========
>>> from sympy import Matrix, SparseMatrix, Float
>>> from sympy.abc import x, y
>>> A = Matrix(((1, x), (0.2, 3)))
>>> B = SparseMatrix(((1, x), (0.2, 3)))
>>> A.has(x)
True
>>> A.has(y)
False
>>> A.has(Float)
True
>>> B.has(x)
True
>>> B.has(y)
False
>>> B.has(Float)
True
"""
return self._eval_has(*patterns)
def is_anti_symmetric(self, simplify=True):
"""Check if matrix M is an antisymmetric matrix,
that is, M is a square matrix with all M[i, j] == -M[j, i].
When ``simplify=True`` (default), the sum M[i, j] + M[j, i] is
simplified before testing to see if it is zero. By default,
the SymPy simplify function is used. To use a custom function
set simplify to a function that accepts a single argument which
returns a simplified expression. To skip simplification, set
simplify to False but note that although this will be faster,
it may induce false negatives.
Examples
========
>>> from sympy import Matrix, symbols
>>> m = Matrix(2, 2, [0, 1, -1, 0])
>>> m
Matrix([
[ 0, 1],
[-1, 0]])
>>> m.is_anti_symmetric()
True
>>> x, y = symbols('x y')
>>> m = Matrix(2, 3, [0, 0, x, -y, 0, 0])
>>> m
Matrix([
[ 0, 0, x],
[-y, 0, 0]])
>>> m.is_anti_symmetric()
False
>>> from sympy.abc import x, y
>>> m = Matrix(3, 3, [0, x**2 + 2*x + 1, y,
... -(x + 1)**2 , 0, x*y,
... -y, -x*y, 0])
Simplification of matrix elements is done by default so even
though two elements which should be equal and opposite wouldn't
pass an equality test, the matrix is still reported as
anti-symmetric:
>>> m[0, 1] == -m[1, 0]
False
>>> m.is_anti_symmetric()
True
If 'simplify=False' is used for the case when a Matrix is already
simplified, this will speed things up. Here, we see that without
simplification the matrix does not appear anti-symmetric:
>>> m.is_anti_symmetric(simplify=False)
False
But if the matrix were already expanded, then it would appear
anti-symmetric and simplification in the is_anti_symmetric routine
is not needed:
>>> m = m.expand()
>>> m.is_anti_symmetric(simplify=False)
True
"""
# accept custom simplification
simpfunc = simplify
if not isfunction(simplify):
simpfunc = _simplify if simplify else lambda x: x
if not self.is_square:
return False
return self._eval_is_anti_symmetric(simpfunc)
def is_diagonal(self):
"""Check if matrix is diagonal,
that is matrix in which the entries outside the main diagonal are all zero.
Examples
========
>>> from sympy import Matrix, diag
>>> m = Matrix(2, 2, [1, 0, 0, 2])
>>> m
Matrix([
[1, 0],
[0, 2]])
>>> m.is_diagonal()
True
>>> m = Matrix(2, 2, [1, 1, 0, 2])
>>> m
Matrix([
[1, 1],
[0, 2]])
>>> m.is_diagonal()
False
>>> m = diag(1, 2, 3)
>>> m
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> m.is_diagonal()
True
See Also
========
is_lower
is_upper
sympy.matrices.matrices.MatrixEigen.is_diagonalizable
diagonalize
"""
return self._eval_is_diagonal()
@property
def is_hermitian(self):
"""Checks if the matrix is Hermitian.
In a Hermitian matrix element i,j is the complex conjugate of
element j,i.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy import I
>>> from sympy.abc import x
>>> a = Matrix([[1, I], [-I, 1]])
>>> a
Matrix([
[ 1, I],
[-I, 1]])
>>> a.is_hermitian
True
>>> a[0, 0] = 2*I
>>> a.is_hermitian
False
>>> a[0, 0] = x
>>> a.is_hermitian
>>> a[0, 1] = a[1, 0]*I
>>> a.is_hermitian
False
"""
if not self.is_square:
return False
return self._eval_is_matrix_hermitian(_simplify)
@property
def is_Identity(self) -> FuzzyBool:
if not self.is_square:
return False
return self._eval_is_Identity()
@property
def is_lower_hessenberg(self):
r"""Checks if the matrix is in the lower-Hessenberg form.
A lower-Hessenberg matrix has zero entries
above the first superdiagonal.
Examples
========
>>> from sympy.matrices import Matrix
>>> a = Matrix([[1, 2, 0, 0], [5, 2, 3, 0], [3, 4, 3, 7], [5, 6, 1, 1]])
>>> a
Matrix([
[1, 2, 0, 0],
[5, 2, 3, 0],
[3, 4, 3, 7],
[5, 6, 1, 1]])
>>> a.is_lower_hessenberg
True
See Also
========
is_upper_hessenberg
is_lower
"""
return self._eval_is_lower_hessenberg()
@property
def is_lower(self):
"""Check if matrix is a lower triangular matrix. True can be returned
even if the matrix is not square.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, [1, 0, 0, 1])
>>> m
Matrix([
[1, 0],
[0, 1]])
>>> m.is_lower
True
>>> m = Matrix(4, 3, [0, 0, 0, 2, 0, 0, 1, 4 , 0, 6, 6, 5])
>>> m
Matrix([
[0, 0, 0],
[2, 0, 0],
[1, 4, 0],
[6, 6, 5]])
>>> m.is_lower
True
>>> from sympy.abc import x, y
>>> m = Matrix(2, 2, [x**2 + y, y**2 + x, 0, x + y])
>>> m
Matrix([
[x**2 + y, x + y**2],
[ 0, x + y]])
>>> m.is_lower
False
See Also
========
is_upper
is_diagonal
is_lower_hessenberg
"""
return self._eval_is_lower()
@property
def is_square(self):
"""Checks if a matrix is square.
A matrix is square if the number of rows equals the number of columns.
The empty matrix is square by definition, since the number of rows and
the number of columns are both zero.
Examples
========
>>> from sympy import Matrix
>>> a = Matrix([[1, 2, 3], [4, 5, 6]])
>>> b = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> c = Matrix([])
>>> a.is_square
False
>>> b.is_square
True
>>> c.is_square
True
"""
return self.rows == self.cols
def is_symbolic(self):
"""Checks if any elements contain Symbols.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.is_symbolic()
True
"""
return self._eval_is_symbolic()
def is_symmetric(self, simplify=True):
"""Check if matrix is symmetric matrix,
that is, a square matrix that is equal to its transpose.
By default, simplifications occur before testing symmetry.
They can be skipped using 'simplify=False'; while this speeds things up
a bit, it may induce false negatives.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, [0, 1, 1, 2])
>>> m
Matrix([
[0, 1],
[1, 2]])
>>> m.is_symmetric()
True
>>> m = Matrix(2, 2, [0, 1, 2, 0])
>>> m
Matrix([
[0, 1],
[2, 0]])
>>> m.is_symmetric()
False
>>> m = Matrix(2, 3, [0, 0, 0, 0, 0, 0])
>>> m
Matrix([
[0, 0, 0],
[0, 0, 0]])
>>> m.is_symmetric()
False
>>> from sympy.abc import x, y
>>> m = Matrix(3, 3, [1, x**2 + 2*x + 1, y, (x + 1)**2 , 2, 0, y, 0, 3])
>>> m
Matrix([
[ 1, x**2 + 2*x + 1, y],
[(x + 1)**2, 2, 0],
[ y, 0, 3]])
>>> m.is_symmetric()
True
If the matrix is already simplified, you may speed-up is_symmetric()
test by using 'simplify=False'.
>>> bool(m.is_symmetric(simplify=False))
False
>>> m1 = m.expand()
>>> m1.is_symmetric(simplify=False)
True
"""
simpfunc = simplify
if not isfunction(simplify):
simpfunc = _simplify if simplify else lambda x: x
if not self.is_square:
return False
return self._eval_is_symmetric(simpfunc)
@property
def is_upper_hessenberg(self):
"""Checks if the matrix is the upper-Hessenberg form.
The upper hessenberg matrix has zero entries
below the first subdiagonal.
Examples
========
>>> from sympy.matrices import Matrix
>>> a = Matrix([[1, 4, 2, 3], [3, 4, 1, 7], [0, 2, 3, 4], [0, 0, 1, 3]])
>>> a
Matrix([
[1, 4, 2, 3],
[3, 4, 1, 7],
[0, 2, 3, 4],
[0, 0, 1, 3]])
>>> a.is_upper_hessenberg
True
See Also
========
is_lower_hessenberg
is_upper
"""
return self._eval_is_upper_hessenberg()
@property
def is_upper(self):
"""Check if matrix is an upper triangular matrix. True can be returned
even if the matrix is not square.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, [1, 0, 0, 1])
>>> m
Matrix([
[1, 0],
[0, 1]])
>>> m.is_upper
True
>>> m = Matrix(4, 3, [5, 1, 9, 0, 4 , 6, 0, 0, 5, 0, 0, 0])
>>> m
Matrix([
[5, 1, 9],
[0, 4, 6],
[0, 0, 5],
[0, 0, 0]])
>>> m.is_upper
True
>>> m = Matrix(2, 3, [4, 2, 5, 6, 1, 1])
>>> m
Matrix([
[4, 2, 5],
[6, 1, 1]])
>>> m.is_upper
False
See Also
========
is_lower
is_diagonal
is_upper_hessenberg
"""
return all(self[i, j].is_zero
for i in range(1, self.rows)
for j in range(min(i, self.cols)))
@property
def is_zero_matrix(self):
"""Checks if a matrix is a zero matrix.
A matrix is zero if every element is zero. A matrix need not be square
to be considered zero. The empty matrix is zero by the principle of
vacuous truth. For a matrix that may or may not be zero (e.g. it
contains a symbol), this will be None.
Examples
========
>>> from sympy import Matrix, zeros
>>> from sympy.abc import x
>>> a = Matrix([[0, 0], [0, 0]])
>>> b = zeros(3, 4)
>>> c = Matrix([[0, 1], [0, 0]])
>>> d = Matrix([])
>>> e = Matrix([[x, 0], [0, 0]])
>>> a.is_zero_matrix
True
>>> b.is_zero_matrix
True
>>> c.is_zero_matrix
False
>>> d.is_zero_matrix
True
>>> e.is_zero_matrix
"""
return self._eval_is_zero_matrix()
def values(self):
"""Return non-zero values of self."""
return self._eval_values()
class MatrixOperations(MatrixRequired):
"""Provides basic matrix shape and elementwise
operations. Should not be instantiated directly."""
def _eval_adjoint(self):
return self.transpose().conjugate()
def _eval_applyfunc(self, f):
out = self._new(self.rows, self.cols, [f(x) for x in self])
return out
def _eval_as_real_imag(self): # type: ignore
from sympy.functions.elementary.complexes import re, im
return (self.applyfunc(re), self.applyfunc(im))
def _eval_conjugate(self):
return self.applyfunc(lambda x: x.conjugate())
def _eval_permute_cols(self, perm):
# apply the permutation to a list
mapping = list(perm)
def entry(i, j):
return self[i, mapping[j]]
return self._new(self.rows, self.cols, entry)
def _eval_permute_rows(self, perm):
# apply the permutation to a list
mapping = list(perm)
def entry(i, j):
return self[mapping[i], j]
return self._new(self.rows, self.cols, entry)
def _eval_trace(self):
return sum(self[i, i] for i in range(self.rows))
def _eval_transpose(self):
return self._new(self.cols, self.rows, lambda i, j: self[j, i])
def adjoint(self):
"""Conjugate transpose or Hermitian conjugation."""
return self._eval_adjoint()
def applyfunc(self, f):
"""Apply a function to each element of the matrix.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, lambda i, j: i*2+j)
>>> m
Matrix([
[0, 1],
[2, 3]])
>>> m.applyfunc(lambda i: 2*i)
Matrix([
[0, 2],
[4, 6]])
"""
if not callable(f):
raise TypeError("`f` must be callable.")
return self._eval_applyfunc(f)
def as_real_imag(self, deep=True, **hints):
"""Returns a tuple containing the (real, imaginary) part of matrix."""
# XXX: Ignoring deep and hints...
return self._eval_as_real_imag()
def conjugate(self):
"""Return the by-element conjugation.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> from sympy import I
>>> a = SparseMatrix(((1, 2 + I), (3, 4), (I, -I)))
>>> a
Matrix([
[1, 2 + I],
[3, 4],
[I, -I]])
>>> a.C
Matrix([
[ 1, 2 - I],
[ 3, 4],
[-I, I]])
See Also
========
transpose: Matrix transposition
H: Hermite conjugation
sympy.matrices.matrices.MatrixBase.D: Dirac conjugation
"""
return self._eval_conjugate()
def doit(self, **kwargs):
return self.applyfunc(lambda x: x.doit())
def evalf(self, n=15, subs=None, maxn=100, chop=False, strict=False, quad=None, verbose=False):
"""Apply evalf() to each element of self."""
options = {'subs':subs, 'maxn':maxn, 'chop':chop, 'strict':strict,
'quad':quad, 'verbose':verbose}
return self.applyfunc(lambda i: i.evalf(n, **options))
def expand(self, deep=True, modulus=None, power_base=True, power_exp=True,
mul=True, log=True, multinomial=True, basic=True, **hints):
"""Apply core.function.expand to each entry of the matrix.
Examples
========
>>> from sympy.abc import x
>>> from sympy.matrices import Matrix
>>> Matrix(1, 1, [x*(x+1)])
Matrix([[x*(x + 1)]])
>>> _.expand()
Matrix([[x**2 + x]])
"""
return self.applyfunc(lambda x: x.expand(
deep, modulus, power_base, power_exp, mul, log, multinomial, basic,
**hints))
@property
def H(self):
"""Return Hermite conjugate.
Examples
========
>>> from sympy import Matrix, I
>>> m = Matrix((0, 1 + I, 2, 3))
>>> m
Matrix([
[ 0],
[1 + I],
[ 2],
[ 3]])
>>> m.H
Matrix([[0, 1 - I, 2, 3]])
See Also
========
conjugate: By-element conjugation
sympy.matrices.matrices.MatrixBase.D: Dirac conjugation
"""
return self.T.C
def permute(self, perm, orientation='rows', direction='forward'):
r"""Permute the rows or columns of a matrix by the given list of
swaps.
Parameters
==========
perm : Permutation, list, or list of lists
A representation for the permutation.
If it is ``Permutation``, it is used directly with some
resizing with respect to the matrix size.
If it is specified as a list of lists
(e.g., ``[[0, 1], [0, 2]]``), then the permutation is formed
from applying the product of cycles. The direction in which the
cyclic product is applied is described below.
If it is specified as a list, the list should represent
an array form of a permutation (e.g., ``[1, 2, 0]``), which
would form the swapping function
`0 \mapsto 1, 1 \mapsto 2, 2 \mapsto 0`.
orientation : 'rows', 'cols'
A flag to control whether to permute the rows or the columns
direction : 'forward', 'backward'
A flag to control whether to apply the permutations from
the start of the list first, or from the back of the list
first.
For example, if the permutation specification is
``[[0, 1], [0, 2]]``,
If the flag is set to ``'forward'``, the cycle would be
formed as `0 \mapsto 2, 2 \mapsto 1, 1 \mapsto 0`.
If the flag is set to ``'backward'``, the cycle would be
formed as `0 \mapsto 1, 1 \mapsto 2, 2 \mapsto 0`.
If the argument ``perm`` is not in a form of list of lists,
this flag takes no effect.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.permute([[0, 1], [0, 2]], orientation='rows', direction='forward')
Matrix([
[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.permute([[0, 1], [0, 2]], orientation='rows', direction='backward')
Matrix([
[0, 1, 0],
[0, 0, 1],
[1, 0, 0]])
Notes
=====
If a bijective function
`\sigma : \mathbb{N}_0 \rightarrow \mathbb{N}_0` denotes the
permutation.
If the matrix `A` is the matrix to permute, represented as
a horizontal or a vertical stack of vectors:
.. math::
A =
\begin{bmatrix}
a_0 \\ a_1 \\ \vdots \\ a_{n-1}
\end{bmatrix} =
\begin{bmatrix}
\alpha_0 & \alpha_1 & \cdots & \alpha_{n-1}
\end{bmatrix}
If the matrix `B` is the result, the permutation of matrix rows
is defined as:
.. math::
B := \begin{bmatrix}
a_{\sigma(0)} \\ a_{\sigma(1)} \\ \vdots \\ a_{\sigma(n-1)}
\end{bmatrix}
And the permutation of matrix columns is defined as:
.. math::
B := \begin{bmatrix}
\alpha_{\sigma(0)} & \alpha_{\sigma(1)} &
\cdots & \alpha_{\sigma(n-1)}
\end{bmatrix}
"""
from sympy.combinatorics import Permutation
# allow british variants and `columns`
if direction == 'forwards':
direction = 'forward'
if direction == 'backwards':
direction = 'backward'
if orientation == 'columns':
orientation = 'cols'
if direction not in ('forward', 'backward'):
raise TypeError("direction='{}' is an invalid kwarg. "
"Try 'forward' or 'backward'".format(direction))
if orientation not in ('rows', 'cols'):
raise TypeError("orientation='{}' is an invalid kwarg. "
"Try 'rows' or 'cols'".format(orientation))
if not isinstance(perm, (Permutation, Iterable)):
raise ValueError(
"{} must be a list, a list of lists, "
"or a SymPy permutation object.".format(perm))
# ensure all swaps are in range
max_index = self.rows if orientation == 'rows' else self.cols
if not all(0 <= t <= max_index for t in flatten(list(perm))):
raise IndexError("`swap` indices out of range.")
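# For a cycle-list specification, the order in which ``Permutation``
# composes the product of cycles is the opposite of the 'forward'
# reading documented above, so the list is reversed before building
# the Permutation when direction='forward'; for 'backward' it is used
# as given.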
if perm and not isinstance(perm, Permutation) and \
isinstance(perm[0], Iterable):
if direction == 'forward':
perm = list(reversed(perm))
perm = Permutation(perm, size=max_index+1)
else:
perm = Permutation(perm, size=max_index+1)
if orientation == 'rows':
return self._eval_permute_rows(perm)
if orientation == 'cols':
return self._eval_permute_cols(perm)
def permute_cols(self, swaps, direction='forward'):
"""Alias for
``self.permute(swaps, orientation='cols', direction=direction)``
See Also
========
permute
"""
return self.permute(swaps, orientation='cols', direction=direction)
def permute_rows(self, swaps, direction='forward'):
"""Alias for
``self.permute(swaps, orientation='rows', direction=direction)``
See Also
========
permute
"""
return self.permute(swaps, orientation='rows', direction=direction)
def refine(self, assumptions=True):
"""Apply refine to each element of the matrix.
Examples
========
>>> from sympy import Symbol, Matrix, Abs, sqrt, Q
>>> x = Symbol('x')
>>> Matrix([[Abs(x)**2, sqrt(x**2)],[sqrt(x**2), Abs(x)**2]])
Matrix([
[ Abs(x)**2, sqrt(x**2)],
[sqrt(x**2), Abs(x)**2]])
>>> _.refine(Q.real(x))
Matrix([
[ x**2, Abs(x)],
[Abs(x), x**2]])
"""
return self.applyfunc(lambda x: refine(x, assumptions))
def replace(self, F, G, map=False, simultaneous=True, exact=None):
"""Replaces Function F in Matrix entries with Function G.
Examples
========
>>> from sympy import symbols, Function, Matrix
>>> F, G = symbols('F, G', cls=Function)
>>> M = Matrix(2, 2, lambda i, j: F(i+j)) ; M
Matrix([
[F(0), F(1)],
[F(1), F(2)]])
>>> N = M.replace(F,G)
>>> N
Matrix([
[G(0), G(1)],
[G(1), G(2)]])
"""
return self.applyfunc(
lambda x: x.replace(F, G, map=map, simultaneous=simultaneous, exact=exact))
def simplify(self, **kwargs):
"""Apply simplify to each element of the matrix.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import sin, cos
>>> from sympy.matrices import SparseMatrix
>>> SparseMatrix(1, 1, [x*sin(y)**2 + x*cos(y)**2])
Matrix([[x*sin(y)**2 + x*cos(y)**2]])
>>> _.simplify()
Matrix([[x]])
"""
return self.applyfunc(lambda x: x.simplify(**kwargs))
def subs(self, *args, **kwargs): # should mirror core.basic.subs
"""Return a new matrix with subs applied to each entry.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.matrices import SparseMatrix, Matrix
>>> SparseMatrix(1, 1, [x])
Matrix([[x]])
>>> _.subs(x, y)
Matrix([[y]])
>>> Matrix(_).subs(y, x)
Matrix([[x]])
"""
return self.applyfunc(lambda x: x.subs(*args, **kwargs))
def trace(self):
"""
Returns the trace of a square matrix i.e. the sum of the
diagonal elements.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix(2, 2, [1, 2, 3, 4])
>>> A.trace()
5
"""
if self.rows != self.cols:
raise NonSquareMatrixError()
return self._eval_trace()
def transpose(self):
"""
Returns the transpose of the matrix.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix(2, 2, [1, 2, 3, 4])
>>> A.transpose()
Matrix([
[1, 3],
[2, 4]])
>>> from sympy import Matrix, I
>>> m=Matrix(((1, 2+I), (3, 4)))
>>> m
Matrix([
[1, 2 + I],
[3, 4]])
>>> m.transpose()
Matrix([
[ 1, 3],
[2 + I, 4]])
>>> m.T == m.transpose()
True
See Also
========
conjugate: By-element conjugation
"""
return self._eval_transpose()
@property
def T(self):
'''Matrix transposition'''
return self.transpose()
@property
def C(self):
'''By-element conjugation'''
return self.conjugate()
def n(self, *args, **kwargs):
"""Apply evalf() to each element of self."""
return self.evalf(*args, **kwargs)
def xreplace(self, rule): # should mirror core.basic.xreplace
"""Return a new matrix with xreplace applied to each entry.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.matrices import SparseMatrix, Matrix
>>> SparseMatrix(1, 1, [x])
Matrix([[x]])
>>> _.xreplace({x: y})
Matrix([[y]])
>>> Matrix(_).xreplace({y: x})
Matrix([[x]])
"""
return self.applyfunc(lambda x: x.xreplace(rule))
def _eval_simplify(self, **kwargs):
# XXX: We can't use self.simplify here as mutable subclasses will
# override simplify and have it return None
return MatrixOperations.simplify(self, **kwargs)
def _eval_trigsimp(self, **opts):
from sympy.simplify import trigsimp
return self.applyfunc(lambda x: trigsimp(x, **opts))
class MatrixArithmetic(MatrixRequired):
"""Provides basic matrix arithmetic operations.
Should not be instantiated directly."""
_op_priority = 10.01
def _eval_Abs(self):
return self._new(self.rows, self.cols, lambda i, j: Abs(self[i, j]))
def _eval_add(self, other):
return self._new(self.rows, self.cols,
lambda i, j: self[i, j] + other[i, j])
def _eval_matrix_mul(self, other):
def entry(i, j):
vec = [self[i,k]*other[k,j] for k in range(self.cols)]
try:
return Add(*vec)
except (TypeError, SympifyError):
# Some matrices don't work with `sum` or `Add`
# They don't work with `sum` because `sum` tries to add `0`
# Fall back to a safe way to multiply if the `Add` fails.
return reduce(lambda a, b: a + b, vec)
return self._new(self.rows, other.cols, entry)
def _eval_matrix_mul_elementwise(self, other):
return self._new(self.rows, self.cols, lambda i, j: self[i,j]*other[i,j])
def _eval_matrix_rmul(self, other):
def entry(i, j):
return sum(other[i,k]*self[k,j] for k in range(other.cols))
return self._new(other.rows, self.cols, entry)
def _eval_pow_by_recursion(self, num):
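# Exponentiation by squaring: for an odd ``num`` peel off one factor of
# ``self``; for an even ``num`` square the half power. This needs
# O(log(num)) matrix multiplications instead of num - 1.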
if num == 1:
return self
if num % 2 == 1:
a, b = self, self._eval_pow_by_recursion(num - 1)
else:
a = b = self._eval_pow_by_recursion(num // 2)
return a.multiply(b)
def _eval_pow_by_cayley(self, exp):
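# By the Cayley-Hamilton theorem a square matrix satisfies its own
# characteristic polynomial, so A**exp can be written as a linear
# combination of I, A, ..., A**(n-1). ``linrec_coeffs`` supplies the
# coefficients of that combination from the characteristic polynomial.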
from sympy.discrete.recurrences import linrec_coeffs
row = self.shape[0]
p = self.charpoly()
coeffs = (-p).all_coeffs()[1:]
coeffs = linrec_coeffs(coeffs, exp)
new_mat = self.eye(row)
ans = self.zeros(row)
for i in range(row):
ans += coeffs[i]*new_mat
new_mat *= self
return ans
def _eval_pow_by_recursion_dotprodsimp(self, num, prevsimp=None):
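# Same squaring recursion as ``_eval_pow_by_recursion``, but every entry
# of each intermediate product is passed through ``_dotprodsimp``. The
# ``prevsimp`` flags remember which entries stopped simplifying so they
# can be skipped on later passes.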
if prevsimp is None:
prevsimp = [True]*len(self)
if num == 1:
return self
if num % 2 == 1:
a, b = self, self._eval_pow_by_recursion_dotprodsimp(num - 1,
prevsimp=prevsimp)
else:
a = b = self._eval_pow_by_recursion_dotprodsimp(num // 2,
prevsimp=prevsimp)
m = a.multiply(b, dotprodsimp=False)
lenm = len(m)
elems = [None]*lenm
for i in range(lenm):
if prevsimp[i]:
elems[i], prevsimp[i] = _dotprodsimp(m[i], withsimp=True)
else:
elems[i] = m[i]
return m._new(m.rows, m.cols, elems)
def _eval_scalar_mul(self, other):
return self._new(self.rows, self.cols, lambda i, j: self[i,j]*other)
def _eval_scalar_rmul(self, other):
return self._new(self.rows, self.cols, lambda i, j: other*self[i,j])
def _eval_Mod(self, other):
from sympy import Mod
return self._new(self.rows, self.cols, lambda i, j: Mod(self[i, j], other))
# python arithmetic functions
def __abs__(self):
"""Returns a new matrix with entry-wise absolute values."""
return self._eval_Abs()
@call_highest_priority('__radd__')
def __add__(self, other):
"""Return self + other, raising ShapeError if shapes don't match."""
other = _matrixify(other)
# matrix-like objects can have shapes. This is
# our first sanity check.
if hasattr(other, 'shape'):
if self.shape != other.shape:
raise ShapeError("Matrix size mismatch: %s + %s" % (
self.shape, other.shape))
# honest sympy matrices defer to their class's routine
if getattr(other, 'is_Matrix', False):
# call the highest-priority class's _eval_add
a, b = self, other
if a.__class__ != classof(a, b):
b, a = a, b
return a._eval_add(b)
# Matrix-like objects can be passed to CommonMatrix routines directly.
if getattr(other, 'is_MatrixLike', False):
return MatrixArithmetic._eval_add(self, other)
raise TypeError('cannot add %s and %s' % (type(self), type(other)))
@call_highest_priority('__rdiv__')
def __div__(self, other):
return self * (self.one / other)
@call_highest_priority('__rmatmul__')
def __matmul__(self, other):
other = _matrixify(other)
if not getattr(other, 'is_Matrix', False) and not getattr(other, 'is_MatrixLike', False):
return NotImplemented
return self.__mul__(other)
def __mod__(self, other):
return self.applyfunc(lambda x: x % other)
@call_highest_priority('__rmul__')
def __mul__(self, other):
"""Return self*other where other is either a scalar or a matrix
of compatible dimensions.
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix([[1, 2, 3], [4, 5, 6]])
>>> 2*A == A*2 == Matrix([[2, 4, 6], [8, 10, 12]])
True
>>> B = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> A*B
Matrix([
[30, 36, 42],
[66, 81, 96]])
>>> B*A
Traceback (most recent call last):
...
ShapeError: Matrices size mismatch.
>>>
See Also
========
matrix_multiply_elementwise
"""
return self.multiply(other)
def multiply(self, other, dotprodsimp=None):
"""Same as __mul__() but with optional simplification.
Parameters
==========
dotprodsimp : bool, optional
Specifies whether intermediate term algebraic simplification is used
during matrix multiplications to control expression blowup and thus
speed up calculation. Default is off.
"""
isimpbool = _get_intermediate_simp_bool(False, dotprodsimp)
other = _matrixify(other)
# matrix-like objects can have shapes. This is
# our first sanity check.
if hasattr(other, 'shape') and len(other.shape) == 2:
if self.shape[1] != other.shape[0]:
raise ShapeError("Matrix size mismatch: %s * %s." % (
self.shape, other.shape))
# honest sympy matrices defer to their class's routine
if getattr(other, 'is_Matrix', False):
m = self._eval_matrix_mul(other)
if isimpbool:
return m._new(m.rows, m.cols, [_dotprodsimp(e) for e in m])
return m
# Matrix-like objects can be passed to CommonMatrix routines directly.
if getattr(other, 'is_MatrixLike', False):
return MatrixArithmetic._eval_matrix_mul(self, other)
# if 'other' is not iterable then scalar multiplication.
if not isinstance(other, Iterable):
try:
return self._eval_scalar_mul(other)
except TypeError:
pass
return NotImplemented
def multiply_elementwise(self, other):
"""Return the Hadamard product (elementwise product) of A and B
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix([[0, 1, 2], [3, 4, 5]])
>>> B = Matrix([[1, 10, 100], [100, 10, 1]])
>>> A.multiply_elementwise(B)
Matrix([
[ 0, 10, 200],
[300, 40, 5]])
See Also
========
sympy.matrices.matrices.MatrixBase.cross
sympy.matrices.matrices.MatrixBase.dot
multiply
"""
if self.shape != other.shape:
raise ShapeError("Matrix shapes must agree {} != {}".format(self.shape, other.shape))
return self._eval_matrix_mul_elementwise(other)
def __neg__(self):
return self._eval_scalar_mul(-1)
@call_highest_priority('__rpow__')
def __pow__(self, exp):
"""Return self**exp a scalar or symbol."""
return self.pow(exp)
def pow(self, exp, method=None):
r"""Return self**exp a scalar or symbol.
Parameters
==========
method : multiply, mulsimp, jordan, cayley
If 'multiply' then the exponentiation is done by repeated
multiplication (recursion).
If 'jordan' then Jordan form exponentiation will be used.
If 'cayley' then the exponentiation is done using the
Cayley-Hamilton theorem.
If 'mulsimp' then the exponentiation is done by repeated
multiplication with intermediate term algebraic simplification
(dotprodsimp) applied to control expression blowup and thus
speed up calculation.
If None, then the method is chosen heuristically.
"""
if method is not None and method not in ['multiply', 'mulsimp', 'jordan', 'cayley']:
raise TypeError('No such method')
if self.rows != self.cols:
raise NonSquareMatrixError()
a = self
jordan_pow = getattr(a, '_matrix_pow_by_jordan_blocks', None)
exp = sympify(exp)
if exp.is_zero:
return a._new(a.rows, a.cols, lambda i, j: int(i == j))
if exp == 1:
return a
diagonal = getattr(a, 'is_diagonal', None)
if diagonal is not None and diagonal():
return a._new(a.rows, a.cols, lambda i, j: a[i,j]**exp if i == j else 0)
if exp.is_Number and exp % 1 == 0:
if a.rows == 1:
return a._new([[a[0]**exp]])
if exp < 0:
exp = -exp
a = a.inv()
# When certain conditions are met,
# Jordan block algorithm is faster than
# computation by recursion.
if method == 'jordan':
try:
return jordan_pow(exp)
except MatrixError:
if method == 'jordan':
raise
elif method == 'cayley':
if not exp.is_Number or exp % 1 != 0:
raise ValueError("cayley method is only valid for integer powers")
return a._eval_pow_by_cayley(exp)
elif method == "mulsimp":
if not exp.is_Number or exp % 1 != 0:
raise ValueError("mulsimp method is only valid for integer powers")
return a._eval_pow_by_recursion_dotprodsimp(exp)
elif method == "multiply":
if not exp.is_Number or exp % 1 != 0:
raise ValueError("multiply method is only valid for integer powers")
return a._eval_pow_by_recursion(exp)
elif method is None and exp.is_Number and exp % 1 == 0:
# Decide heuristically which method to apply
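# 2x2 matrices with a very large exponent go through the Jordan form;
# if intermediate simplification is globally enabled, use the
# dotprodsimp recursion; otherwise very large exponents use
# Cayley-Hamilton; the plain squaring recursion handles the rest.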
if a.rows == 2 and exp > 100000:
return jordan_pow(exp)
elif _get_intermediate_simp_bool(True, None):
return a._eval_pow_by_recursion_dotprodsimp(exp)
elif exp > 10000:
return a._eval_pow_by_cayley(exp)
else:
return a._eval_pow_by_recursion(exp)
if jordan_pow:
try:
return jordan_pow(exp)
except NonInvertibleMatrixError:
# Raised by jordan_pow on zero determinant matrix unless exp is
# definitely known to be a non-negative integer.
# Here we raise if n is definitely not a non-negative integer
# but otherwise we can leave this as an unevaluated MatPow.
if exp.is_integer is False or exp.is_nonnegative is False:
raise
from sympy.matrices.expressions import MatPow
return MatPow(a, exp)
@call_highest_priority('__add__')
def __radd__(self, other):
return self + other
@call_highest_priority('__matmul__')
def __rmatmul__(self, other):
other = _matrixify(other)
if not getattr(other, 'is_Matrix', False) and not getattr(other, 'is_MatrixLike', False):
return NotImplemented
return self.__rmul__(other)
@call_highest_priority('__mul__')
def __rmul__(self, other):
other = _matrixify(other)
# matrix-like objects can have shapes. This is
# our first sanity check.
if hasattr(other, 'shape') and len(other.shape) == 2:
if self.shape[0] != other.shape[1]:
raise ShapeError("Matrix size mismatch.")
# honest sympy matrices defer to their class's routine
if getattr(other, 'is_Matrix', False):
return other._new(other.as_mutable() * self)
# Matrix-like objects can be passed to CommonMatrix routines directly.
if getattr(other, 'is_MatrixLike', False):
return MatrixArithmetic._eval_matrix_rmul(self, other)
# if 'other' is not iterable then scalar multiplication.
if not isinstance(other, Iterable):
try:
return self._eval_scalar_rmul(other)
except TypeError:
pass
return NotImplemented
@call_highest_priority('__sub__')
def __rsub__(self, a):
return (-self) + a
@call_highest_priority('__rsub__')
def __sub__(self, a):
return self + (-a)
@call_highest_priority('__rtruediv__')
def __truediv__(self, other):
return self.__div__(other)
class MatrixCommon(MatrixArithmetic, MatrixOperations, MatrixProperties,
MatrixSpecial, MatrixShaping):
"""All common matrix operations including basic arithmetic, shaping,
and special matrices like `zeros`, and `eye`."""
_diff_wrt = True # type: bool
class _MinimalMatrix(object):
"""Class providing the minimum functionality
for a matrix-like object and implementing every method
required for a `MatrixRequired`. This class does not have everything
needed to become a full-fledged SymPy object, but it will satisfy the
requirements of anything inheriting from `MatrixRequired`. If you wish
to make a specialized matrix type, make sure to implement these
methods and properties with the exception of `__init__` and `__repr__`
which are included for convenience."""
is_MatrixLike = True
_sympify = staticmethod(sympify)
_class_priority = 3
zero = S.Zero
one = S.One
is_Matrix = True
is_MatrixExpr = False
@classmethod
def _new(cls, *args, **kwargs):
return cls(*args, **kwargs)
def __init__(self, rows, cols=None, mat=None):
if isfunction(mat):
# if we passed in a function, use that to populate the indices
mat = list(mat(i, j) for i in range(rows) for j in range(cols))
if cols is None and mat is None:
mat = rows
rows, cols = getattr(mat, 'shape', (rows, cols))
try:
# if we passed in a list of lists, flatten it and set the size
if cols is None and mat is None:
mat = rows
cols = len(mat[0])
rows = len(mat)
mat = [x for l in mat for x in l]
except (IndexError, TypeError):
pass
self.mat = tuple(self._sympify(x) for x in mat)
self.rows, self.cols = rows, cols
if self.rows is None or self.cols is None:
raise NotImplementedError("Cannot initialize matrix with given parameters")
def __getitem__(self, key):
def _normalize_slices(row_slice, col_slice):
"""Ensure that row_slice and col_slice don't have
`None` in their arguments. Any integers are converted
to slices of length 1"""
if not isinstance(row_slice, slice):
row_slice = slice(row_slice, row_slice + 1, None)
row_slice = slice(*row_slice.indices(self.rows))
if not isinstance(col_slice, slice):
col_slice = slice(col_slice, col_slice + 1, None)
col_slice = slice(*col_slice.indices(self.cols))
return (row_slice, col_slice)
def _coord_to_index(i, j):
"""Return the index in _mat corresponding
to the (i,j) position in the matrix. """
return i * self.cols + j
if isinstance(key, tuple):
i, j = key
if isinstance(i, slice) or isinstance(j, slice):
# if the coordinates are not slices, make them so
# and expand the slices so they don't contain `None`
i, j = _normalize_slices(i, j)
rowsList, colsList = list(range(self.rows))[i], \
list(range(self.cols))[j]
indices = (i * self.cols + j for i in rowsList for j in
colsList)
return self._new(len(rowsList), len(colsList),
list(self.mat[i] for i in indices))
# if the key is a tuple of ints, change
# it to an array index
key = _coord_to_index(i, j)
return self.mat[key]
def __eq__(self, other):
try:
classof(self, other)
except TypeError:
return False
return (
self.shape == other.shape and list(self) == list(other))
def __len__(self):
return self.rows*self.cols
def __repr__(self):
return "_MinimalMatrix({}, {}, {})".format(self.rows, self.cols,
self.mat)
@property
def shape(self):
return (self.rows, self.cols)
class _CastableMatrix: # this is needed here ONLY FOR TESTS.
def as_mutable(self):
return self
def as_immutable(self):
return self
class _MatrixWrapper(object):
"""Wrapper class providing the minimum functionality for a matrix-like
object: .rows, .cols, .shape, indexability, and iterability. CommonMatrix
math operations should work on matrix-like objects. This one is intended for
matrix-like objects which use the same indexing format as SymPy with respect
to returning matrix elements instead of rows for non-tuple indexes.
"""
is_Matrix = False # needs to be here because of __getattr__
is_MatrixLike = True
def __init__(self, mat, shape):
self.mat = mat
self.shape = shape
self.rows, self.cols = shape
def __getitem__(self, key):
if isinstance(key, tuple):
return sympify(self.mat.__getitem__(key))
# a flat index into row-major storage maps to (key // cols, key % cols)
return sympify(self.mat.__getitem__((key // self.cols, key % self.cols)))
def __iter__(self): # supports numpy.matrix and numpy.array
mat = self.mat
cols = self.cols
return iter(sympify(mat[r, c]) for r in range(self.rows) for c in range(cols))
def _matrixify(mat):
"""If `mat` is a Matrix or is matrix-like,
return a Matrix or MatrixWrapper object. Otherwise
`mat` is passed through without modification."""
if getattr(mat, 'is_Matrix', False) or getattr(mat, 'is_MatrixLike', False):
return mat
shape = None
if hasattr(mat, 'shape'): # numpy, scipy.sparse
if len(mat.shape) == 2:
shape = mat.shape
elif hasattr(mat, 'rows') and hasattr(mat, 'cols'): # mpmath
shape = (mat.rows, mat.cols)
if shape:
return _MatrixWrapper(mat, shape)
return mat
def a2idx(j, n=None):
"""Return integer after making positive and validating against n."""
if type(j) is not int:
jindex = getattr(j, '__index__', None)
if jindex is not None:
j = jindex()
else:
raise IndexError("Invalid index a[%r]" % (j,))
if n is not None:
if j < 0:
j += n
if not (j >= 0 and j < n):
raise IndexError("Index out of range: a[%s]" % (j,))
return int(j)
def classof(A, B):
"""
Get the type of the result when combining matrices of different types.
Currently the strategy is that immutability is contagious.
Examples
========
>>> from sympy import Matrix, ImmutableMatrix
>>> from sympy.matrices.common import classof
>>> M = Matrix([[1, 2], [3, 4]]) # a Mutable Matrix
>>> IM = ImmutableMatrix([[1, 2], [3, 4]])
>>> classof(M, IM)
<class 'sympy.matrices.immutable.ImmutableDenseMatrix'>
"""
priority_A = getattr(A, '_class_priority', None)
priority_B = getattr(B, '_class_priority', None)
if None not in (priority_A, priority_B):
if A._class_priority > B._class_priority:
return A.__class__
else:
return B.__class__
try:
import numpy
except ImportError:
pass
else:
if isinstance(A, numpy.ndarray):
return B.__class__
if isinstance(B, numpy.ndarray):
return A.__class__
raise TypeError("Incompatible classes %s, %s" % (A.__class__, B.__class__))
|
513d1f5007887289acb91ae2971bd6e4b3cbb55e30e729ae583342ab2f3ed630 | from __future__ import division, print_function
import random
from sympy.core import SympifyError, Add
from sympy.core.basic import Basic
from sympy.core.compatibility import is_sequence, reduce
from sympy.core.expr import Expr
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.core.sympify import sympify
from sympy.functions.elementary.trigonometric import cos, sin
from sympy.matrices.common import \
a2idx, classof, ShapeError
from sympy.matrices.matrices import MatrixBase
from sympy.simplify.simplify import simplify as _simplify
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.misc import filldedent
from .decompositions import _cholesky, _LDLdecomposition
from .solvers import _lower_triangular_solve, _upper_triangular_solve
def _iszero(x):
"""Returns True if x is zero."""
return x.is_zero
def _compare_sequence(a, b):
"""Compares the elements of a list/tuple `a`
and a list/tuple `b`. `_compare_sequence((1,2), [1, 2])`
is True, whereas `(1,2) == [1, 2]` is False"""
if type(a) is type(b):
# if they are the same type, compare directly
return a == b
# there is no overhead for calling `tuple` on a
# tuple
return tuple(a) == tuple(b)
class DenseMatrix(MatrixBase):
is_MatrixExpr = False # type: bool
_op_priority = 10.01
_class_priority = 4
def __eq__(self, other):
other = sympify(other)
self_shape = getattr(self, 'shape', None)
other_shape = getattr(other, 'shape', None)
if None in (self_shape, other_shape):
return False
if self_shape != other_shape:
return False
if isinstance(other, Matrix):
return _compare_sequence(self._mat, other._mat)
elif isinstance(other, MatrixBase):
return _compare_sequence(self._mat, Matrix(other)._mat)
def __getitem__(self, key):
"""Return portion of self defined by key. If the key involves a slice
then a list will be returned (if key is a single slice) or a matrix
(if key was a tuple involving a slice).
Examples
========
>>> from sympy import Matrix, I
>>> m = Matrix([
... [1, 2 + I],
... [3, 4 ]])
If the key is a tuple that doesn't involve a slice then that element
is returned:
>>> m[1, 0]
3
When a tuple key involves a slice, a matrix is returned. Here, the
first column is selected (all rows, column 0):
>>> m[:, 0]
Matrix([
[1],
[3]])
If the slice is not a tuple then it selects from the underlying
list of elements that are arranged in row order and a list is
returned if a slice is involved:
>>> m[0]
1
>>> m[::2]
[1, 3]
"""
if isinstance(key, tuple):
i, j = key
try:
i, j = self.key2ij(key)
return self._mat[i*self.cols + j]
except (TypeError, IndexError):
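# Symbolic (non-number) indices cannot address a concrete entry, so
# after a bounds sanity check an unevaluated MatrixElement is
# returned instead; otherwise the slice/sequence handling below
# builds a sub-matrix with extract().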
if (isinstance(i, Expr) and not i.is_number) or (isinstance(j, Expr) and not j.is_number):
if ((j < 0) is True) or ((j >= self.shape[1]) is True) or\
((i < 0) is True) or ((i >= self.shape[0]) is True):
raise ValueError("index out of boundary")
from sympy.matrices.expressions.matexpr import MatrixElement
return MatrixElement(self, i, j)
if isinstance(i, slice):
i = range(self.rows)[i]
elif is_sequence(i):
pass
else:
i = [i]
if isinstance(j, slice):
j = range(self.cols)[j]
elif is_sequence(j):
pass
else:
j = [j]
return self.extract(i, j)
else:
# row-wise decomposition of matrix
if isinstance(key, slice):
return self._mat[key]
return self._mat[a2idx(key)]
def __setitem__(self, key, value):
raise NotImplementedError()
def _eval_add(self, other):
# we assume both arguments are dense matrices since
# sparse matrices have a higher priority
mat = [a + b for a,b in zip(self._mat, other._mat)]
return classof(self, other)._new(self.rows, self.cols, mat, copy=False)
def _eval_extract(self, rowsList, colsList):
mat = self._mat
cols = self.cols
indices = (i * cols + j for i in rowsList for j in colsList)
return self._new(len(rowsList), len(colsList),
list(mat[i] for i in indices), copy=False)
def _eval_matrix_mul(self, other):
other_len = other.rows*other.cols
new_len = self.rows*other.cols
new_mat = [self.zero]*new_len
# if we multiply an n x 0 with a 0 x m, the
# expected behavior is to produce an n x m matrix of zeros
if self.cols != 0 and other.rows != 0:
self_cols = self.cols
mat = self._mat
other_mat = other._mat
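# entry (row, col) of the product is the dot product of row ``row`` of
# self (a contiguous slice of the flat storage) with column ``col`` of
# other (a stride-``other.cols`` slice).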
for i in range(new_len):
row, col = i // other.cols, i % other.cols
row_indices = range(self_cols*row, self_cols*(row+1))
col_indices = range(col, other_len, other.cols)
vec = [mat[a]*other_mat[b] for a, b in zip(row_indices, col_indices)]
try:
new_mat[i] = Add(*vec)
except (TypeError, SympifyError):
# Some matrices don't work with `sum` or `Add`
# They don't work with `sum` because `sum` tries to add `0`
# Fall back to a safe way to multiply if the `Add` fails.
new_mat[i] = reduce(lambda a, b: a + b, vec)
return classof(self, other)._new(self.rows, other.cols, new_mat, copy=False)
def _eval_matrix_mul_elementwise(self, other):
mat = [a*b for a,b in zip(self._mat, other._mat)]
return classof(self, other)._new(self.rows, self.cols, mat, copy=False)
def _eval_inverse(self, **kwargs):
return self.inv(method=kwargs.get('method', 'GE'),
iszerofunc=kwargs.get('iszerofunc', _iszero),
try_block_diag=kwargs.get('try_block_diag', False))
def _eval_scalar_mul(self, other):
mat = [other*a for a in self._mat]
return self._new(self.rows, self.cols, mat, copy=False)
def _eval_scalar_rmul(self, other):
mat = [a*other for a in self._mat]
return self._new(self.rows, self.cols, mat, copy=False)
def _eval_tolist(self):
mat = list(self._mat)
cols = self.cols
return [mat[i*cols:(i + 1)*cols] for i in range(self.rows)]
def as_immutable(self):
"""Returns an Immutable version of this Matrix
"""
from .immutable import ImmutableDenseMatrix as cls
if self.rows and self.cols:
return cls._new(self.tolist())
return cls._new(self.rows, self.cols, [])
def as_mutable(self):
"""Returns a mutable version of this matrix
Examples
========
>>> from sympy import ImmutableMatrix
>>> X = ImmutableMatrix([[1, 2], [3, 4]])
>>> Y = X.as_mutable()
>>> Y[1, 1] = 5 # Can set values in Y
>>> Y
Matrix([
[1, 2],
[3, 5]])
"""
return Matrix(self)
def equals(self, other, failing_expression=False):
"""Applies ``equals`` to corresponding elements of the matrices,
trying to prove that the elements are equivalent, returning True
if they are, False if any pair is not, and None (or the first
failing expression if failing_expression is True) if it cannot
be decided if the expressions are equivalent or not. This is, in
general, an expensive operation.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x
>>> from sympy import cos
>>> A = Matrix([x*(x - 1), 0])
>>> B = Matrix([x**2 - x, 0])
>>> A == B
False
>>> A.simplify() == B.simplify()
True
>>> A.equals(B)
True
>>> A.equals(2)
False
See Also
========
sympy.core.expr.Expr.equals
"""
self_shape = getattr(self, 'shape', None)
other_shape = getattr(other, 'shape', None)
if None in (self_shape, other_shape):
return False
if self_shape != other_shape:
return False
rv = True
for i in range(self.rows):
for j in range(self.cols):
ans = self[i, j].equals(other[i, j], failing_expression)
if ans is False:
return False
elif ans is not True and rv is True:
rv = ans
return rv
def cholesky(self, hermitian=True):
return _cholesky(self, hermitian=hermitian)
def LDLdecomposition(self, hermitian=True):
return _LDLdecomposition(self, hermitian=hermitian)
def lower_triangular_solve(self, rhs):
return _lower_triangular_solve(self, rhs)
def upper_triangular_solve(self, rhs):
return _upper_triangular_solve(self, rhs)
cholesky.__doc__ = _cholesky.__doc__
LDLdecomposition.__doc__ = _LDLdecomposition.__doc__
lower_triangular_solve.__doc__ = _lower_triangular_solve.__doc__
upper_triangular_solve.__doc__ = _upper_triangular_solve.__doc__
def _force_mutable(x):
"""Return a matrix as a Matrix, otherwise return x."""
if getattr(x, 'is_Matrix', False):
return x.as_mutable()
elif isinstance(x, Basic):
return x
elif hasattr(x, '__array__'):
a = x.__array__()
if len(a.shape) == 0:
return sympify(a)
return Matrix(x)
return x
class MutableDenseMatrix(DenseMatrix, MatrixBase):
def __new__(cls, *args, **kwargs):
return cls._new(*args, **kwargs)
@classmethod
def _new(cls, *args, **kwargs):
# if the `copy` flag is set to False, the input
# was rows, cols, [list]. It should be used directly
# without creating a copy.
if kwargs.get('copy', True) is False:
if len(args) != 3:
raise TypeError("'copy=False' requires a matrix be initialized as rows,cols,[list]")
rows, cols, flat_list = args
else:
rows, cols, flat_list = cls._handle_creation_inputs(*args, **kwargs)
flat_list = list(flat_list) # create a shallow copy
self = object.__new__(cls)
self.rows = rows
self.cols = cols
self._mat = flat_list
return self
def __setitem__(self, key, value):
"""
Examples
========
>>> from sympy import Matrix, I, zeros, ones
>>> m = Matrix(((1, 2+I), (3, 4)))
>>> m
Matrix([
[1, 2 + I],
[3, 4]])
>>> m[1, 0] = 9
>>> m
Matrix([
[1, 2 + I],
[9, 4]])
>>> m[1, 0] = [[0, 1]]
To replace row r you assign to position r*m where m
is the number of columns:
>>> M = zeros(4)
>>> m = M.cols
>>> M[3*m] = ones(1, m)*2; M
Matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[2, 2, 2, 2]])
And to replace column c you can assign to position c:
>>> M[2] = ones(m, 1)*4; M
Matrix([
[0, 0, 4, 0],
[0, 0, 4, 0],
[0, 0, 4, 0],
[2, 2, 4, 2]])
"""
rv = self._setitem(key, value)
if rv is not None:
i, j, value = rv
self._mat[i*self.cols + j] = value
def as_mutable(self):
return self.copy()
def col_del(self, i):
"""Delete the given column.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.col_del(1)
>>> M
Matrix([
[1, 0],
[0, 0],
[0, 1]])
See Also
========
col
row_del
"""
if i < -self.cols or i >= self.cols:
raise IndexError("Index out of range: 'i=%s', valid -%s <= i < %s"
% (i, self.cols, self.cols))
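# Walk the rows from last to first so that deleting an element of the
# flat row-major storage does not shift the positions that still need
# to be deleted.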
for j in range(self.rows - 1, -1, -1):
del self._mat[i + j*self.cols]
self.cols -= 1
def col_op(self, j, f):
"""In-place operation on col j using two-arg functor whose args are
interpreted as (self[i, j], i).
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.col_op(1, lambda v, i: v + 2*M[i, 0]); M
Matrix([
[1, 2, 0],
[0, 1, 0],
[0, 0, 1]])
See Also
========
col
row_op
"""
self._mat[j::self.cols] = [f(*t) for t in list(zip(self._mat[j::self.cols], list(range(self.rows))))]
def col_swap(self, i, j):
"""Swap the two given columns of the matrix in-place.
Examples
========
>>> from sympy.matrices import Matrix
>>> M = Matrix([[1, 0], [1, 0]])
>>> M
Matrix([
[1, 0],
[1, 0]])
>>> M.col_swap(0, 1)
>>> M
Matrix([
[0, 1],
[0, 1]])
See Also
========
col
row_swap
"""
for k in range(0, self.rows):
self[k, i], self[k, j] = self[k, j], self[k, i]
def copyin_list(self, key, value):
"""Copy in elements from a list.
Parameters
==========
key : slice
The section of this matrix to replace.
value : iterable
The iterable to copy values from.
Examples
========
>>> from sympy.matrices import eye
>>> I = eye(3)
>>> I[:2, 0] = [1, 2] # col
>>> I
Matrix([
[1, 0, 0],
[2, 1, 0],
[0, 0, 1]])
>>> I[1, :2] = [[3, 4]]
>>> I
Matrix([
[1, 0, 0],
[3, 4, 0],
[0, 0, 1]])
See Also
========
copyin_matrix
"""
if not is_sequence(value):
raise TypeError("`value` must be an ordered iterable, not %s." % type(value))
return self.copyin_matrix(key, Matrix(value))
def copyin_matrix(self, key, value):
"""Copy in values from a matrix into the given bounds.
Parameters
==========
key : slice
The section of this matrix to replace.
value : Matrix
The matrix to copy values from.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> M = Matrix([[0, 1], [2, 3], [4, 5]])
>>> I = eye(3)
>>> I[:3, :2] = M
>>> I
Matrix([
[0, 1, 0],
[2, 3, 0],
[4, 5, 1]])
>>> I[0, 1] = M
>>> I
Matrix([
[0, 0, 1],
[2, 2, 3],
[4, 4, 5]])
See Also
========
copyin_list
"""
rlo, rhi, clo, chi = self.key2bounds(key)
shape = value.shape
dr, dc = rhi - rlo, chi - clo
if shape != (dr, dc):
raise ShapeError(filldedent("The Matrix `value` doesn't have the "
"same dimensions "
"as the in sub-Matrix given by `key`."))
for i in range(value.rows):
for j in range(value.cols):
self[i + rlo, j + clo] = value[i, j]
def fill(self, value):
"""Fill the matrix with the scalar value.
See Also
========
zeros
ones
"""
self._mat = [value]*len(self)
def row_del(self, i):
"""Delete the given row.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.row_del(1)
>>> M
Matrix([
[1, 0, 0],
[0, 0, 1]])
See Also
========
row
col_del
"""
if i < -self.rows or i >= self.rows:
raise IndexError("Index out of range: 'i = %s', valid -%s <= i"
" < %s" % (i, self.rows, self.rows))
if i < 0:
i += self.rows
del self._mat[i*self.cols:(i+1)*self.cols]
self.rows -= 1
def row_op(self, i, f):
"""In-place operation on row ``i`` using two-arg functor whose args are
interpreted as ``(self[i, j], j)``.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.row_op(1, lambda v, j: v + 2*M[0, j]); M
Matrix([
[1, 0, 0],
[2, 1, 0],
[0, 0, 1]])
See Also
========
row
zip_row_op
col_op
"""
i0 = i*self.cols
ri = self._mat[i0: i0 + self.cols]
self._mat[i0: i0 + self.cols] = [f(x, j) for x, j in zip(ri, list(range(self.cols)))]
def row_swap(self, i, j):
"""Swap the two given rows of the matrix in-place.
Examples
========
>>> from sympy.matrices import Matrix
>>> M = Matrix([[0, 1], [1, 0]])
>>> M
Matrix([
[0, 1],
[1, 0]])
>>> M.row_swap(0, 1)
>>> M
Matrix([
[1, 0],
[0, 1]])
See Also
========
row
col_swap
"""
for k in range(0, self.cols):
self[i, k], self[j, k] = self[j, k], self[i, k]
def simplify(self, **kwargs):
"""Applies simplify to the elements of a matrix in place.
This is a shortcut for M.applyfunc(lambda x: simplify(x, **kwargs)).
See Also
========
sympy.simplify.simplify.simplify
"""
for i in range(len(self._mat)):
self._mat[i] = _simplify(self._mat[i], **kwargs)
def zip_row_op(self, i, k, f):
"""In-place operation on row ``i`` using two-arg functor whose args are
interpreted as ``(self[i, j], self[k, j])``.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.zip_row_op(1, 0, lambda v, u: v + 2*u); M
Matrix([
[1, 0, 0],
[2, 1, 0],
[0, 0, 1]])
See Also
========
row
row_op
col_op
"""
i0 = i*self.cols
k0 = k*self.cols
ri = self._mat[i0: i0 + self.cols]
rk = self._mat[k0: k0 + self.cols]
self._mat[i0: i0 + self.cols] = [f(x, y) for x, y in zip(ri, rk)]
is_zero = False
MutableMatrix = Matrix = MutableDenseMatrix
###########
# Numpy Utility Functions:
# list2numpy, matrix2numpy, symmarray, rot_axis[123]
###########
def list2numpy(l, dtype=object): # pragma: no cover
"""Converts python list of SymPy expressions to a NumPy array.
See Also
========
matrix2numpy
"""
from numpy import empty
a = empty(len(l), dtype)
for i, s in enumerate(l):
a[i] = s
return a
def matrix2numpy(m, dtype=object): # pragma: no cover
"""Converts SymPy's matrix to a NumPy array.
See Also
========
list2numpy
"""
from numpy import empty
a = empty(m.shape, dtype)
for i in range(m.rows):
for j in range(m.cols):
a[i, j] = m[i, j]
return a
def rot_axis3(theta):
"""Returns a rotation matrix for a rotation of theta (in radians) about
the 3-axis.
Examples
========
>>> from sympy import pi
>>> from sympy.matrices import rot_axis3
A rotation of pi/3 (60 degrees):
>>> theta = pi/3
>>> rot_axis3(theta)
Matrix([
[ 1/2, sqrt(3)/2, 0],
[-sqrt(3)/2, 1/2, 0],
[ 0, 0, 1]])
If we rotate by pi/2 (90 degrees):
>>> rot_axis3(pi/2)
Matrix([
[ 0, 1, 0],
[-1, 0, 0],
[ 0, 0, 1]])
See Also
========
rot_axis1: Returns a rotation matrix for a rotation of theta (in radians)
about the 1-axis
rot_axis2: Returns a rotation matrix for a rotation of theta (in radians)
about the 2-axis
"""
ct = cos(theta)
st = sin(theta)
lil = ((ct, st, 0),
(-st, ct, 0),
(0, 0, 1))
return Matrix(lil)
def rot_axis2(theta):
"""Returns a rotation matrix for a rotation of theta (in radians) about
the 2-axis.
Examples
========
>>> from sympy import pi
>>> from sympy.matrices import rot_axis2
A rotation of pi/3 (60 degrees):
>>> theta = pi/3
>>> rot_axis2(theta)
Matrix([
[ 1/2, 0, -sqrt(3)/2],
[ 0, 1, 0],
[sqrt(3)/2, 0, 1/2]])
If we rotate by pi/2 (90 degrees):
>>> rot_axis2(pi/2)
Matrix([
[0, 0, -1],
[0, 1, 0],
[1, 0, 0]])
See Also
========
rot_axis1: Returns a rotation matrix for a rotation of theta (in radians)
about the 1-axis
rot_axis3: Returns a rotation matrix for a rotation of theta (in radians)
about the 3-axis
"""
ct = cos(theta)
st = sin(theta)
lil = ((ct, 0, -st),
(0, 1, 0),
(st, 0, ct))
return Matrix(lil)
def rot_axis1(theta):
"""Returns a rotation matrix for a rotation of theta (in radians) about
the 1-axis.
Examples
========
>>> from sympy import pi
>>> from sympy.matrices import rot_axis1
A rotation of pi/3 (60 degrees):
>>> theta = pi/3
>>> rot_axis1(theta)
Matrix([
[1, 0, 0],
[0, 1/2, sqrt(3)/2],
[0, -sqrt(3)/2, 1/2]])
If we rotate by pi/2 (90 degrees):
>>> rot_axis1(pi/2)
Matrix([
[1, 0, 0],
[0, 0, 1],
[0, -1, 0]])
See Also
========
rot_axis2: Returns a rotation matrix for a rotation of theta (in radians)
about the 2-axis
rot_axis3: Returns a rotation matrix for a rotation of theta (in radians)
about the 3-axis
"""
ct = cos(theta)
st = sin(theta)
lil = ((1, 0, 0),
(0, ct, st),
(0, -st, ct))
return Matrix(lil)
@doctest_depends_on(modules=('numpy',))
def symarray(prefix, shape, **kwargs): # pragma: no cover
r"""Create a numpy ndarray of symbols (as an object array).
The created symbols are named ``prefix_i1_i2_``... You should thus provide a
non-empty prefix if you want your symbols to be unique for different output
arrays, as SymPy symbols with identical names are the same object.
Parameters
----------
prefix : string
A prefix prepended to the name of every symbol.
shape : int or tuple
Shape of the created array. If an int, the array is one-dimensional; for
more than one dimension the shape must be a tuple.
\*\*kwargs : dict
keyword arguments passed on to Symbol
Examples
========
These doctests require numpy.
>>> from sympy import symarray
>>> symarray('', 3)
[_0 _1 _2]
If you want multiple symarrays to contain distinct symbols, you *must*
provide unique prefixes:
>>> a = symarray('', 3)
>>> b = symarray('', 3)
>>> a[0] == b[0]
True
>>> a = symarray('a', 3)
>>> b = symarray('b', 3)
>>> a[0] == b[0]
False
Creating symarrays with a prefix:
>>> symarray('a', 3)
[a_0 a_1 a_2]
For more than one dimension, the shape must be given as a tuple:
>>> symarray('a', (2, 3))
[[a_0_0 a_0_1 a_0_2]
[a_1_0 a_1_1 a_1_2]]
>>> symarray('a', (2, 3, 2))
[[[a_0_0_0 a_0_0_1]
[a_0_1_0 a_0_1_1]
[a_0_2_0 a_0_2_1]]
<BLANKLINE>
[[a_1_0_0 a_1_0_1]
[a_1_1_0 a_1_1_1]
[a_1_2_0 a_1_2_1]]]
For setting assumptions of the underlying Symbols:
>>> [s.is_real for s in symarray('a', 2, real=True)]
[True, True]
"""
from numpy import empty, ndindex
arr = empty(shape, dtype=object)
for index in ndindex(shape):
arr[index] = Symbol('%s_%s' % (prefix, '_'.join(map(str, index))),
**kwargs)
return arr
###############
# Functions
###############
def casoratian(seqs, n, zero=True):
"""Given linear difference operator L of order 'k' and homogeneous
equation Ly = 0 we want to compute kernel of L, which is a set
of 'k' sequences: a(n), b(n), ... z(n).
Solutions of L are linearly independent iff their Casoratian,
denoted as C(a, b, ..., z), do not vanish for n = 0.
Casoratian is defined by k x k determinant::
+ a(n) b(n) . . . z(n) +
| a(n+1) b(n+1) . . . z(n+1) |
| . . . . |
| . . . . |
| . . . . |
+ a(n+k-1) b(n+k-1) . . . z(n+k-1) +
It proves very useful in rsolve_hyper() where it is applied
to a generating set of a recurrence to factor out linearly
dependent solutions and return a basis:
>>> from sympy import Symbol, casoratian, factorial
>>> n = Symbol('n', integer=True)
Exponential and factorial are linearly independent:
>>> casoratian([2**n, factorial(n)], n) != 0
True
"""
seqs = list(map(sympify, seqs))
if not zero:
f = lambda i, j: seqs[j].subs(n, n + i)
else:
f = lambda i, j: seqs[j].subs(n, i)
k = len(seqs)
return Matrix(k, k, f).det()
def eye(*args, **kwargs):
"""Create square identity matrix n x n
See Also
========
diag
zeros
ones
"""
return Matrix.eye(*args, **kwargs)
def diag(*values, **kwargs):
"""Returns a matrix with the provided values placed on the
diagonal. If non-square matrices are included, they will
produce a block-diagonal matrix.
Examples
========
This version of diag is a thin wrapper to Matrix.diag that differs
in that it treats all lists like matrices -- even when a single list
is given. If this is not desired, either put a `*` before the list or
set `unpack=True`.
>>> from sympy import diag
>>> diag([1, 2, 3], unpack=True) # = diag(1,2,3) or diag(*[1,2,3])
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> diag([1, 2, 3]) # a column vector
Matrix([
[1],
[2],
[3]])
See Also
========
.common.MatrixCommon.eye
.common.MatrixCommon.diagonal - to extract a diagonal
.common.MatrixCommon.diag
.expressions.blockmatrix.BlockMatrix
"""
# Extract any setting so we don't duplicate keywords sent
# as named parameters:
kw = kwargs.copy()
strict = kw.pop('strict', True) # lists will be converted to Matrices
unpack = kw.pop('unpack', False)
return Matrix.diag(*values, strict=strict, unpack=unpack, **kw)
def GramSchmidt(vlist, orthonormal=False):
"""Apply the Gram-Schmidt process to a set of vectors.
Parameters
==========
vlist : List of Matrix
Vectors to be orthogonalized.
orthonormal : Bool, optional
If true, return an orthonormal basis.
Returns
=======
vlist : List of Matrix
Orthogonalized vectors
Notes
=====
This routine mostly duplicates ``Matrix.orthogonalize``, except
that it always raises an error when linearly dependent vectors
are found, and the keyword ``normalize`` has been renamed
``orthonormal`` in this function.
See Also
========
.matrices.MatrixSubspaces.orthogonalize
References
==========
.. [1] https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process
"""
return MutableDenseMatrix.orthogonalize(
*vlist, normalize=orthonormal, rankcheck=True
)
def hessian(f, varlist, constraints=[]):
"""Compute Hessian matrix for a function f wrt parameters in varlist
which may be given as a sequence or a row/column vector. A list of
constraints may optionally be given.
Examples
========
>>> from sympy import Function, hessian, pprint
>>> from sympy.abc import x, y
>>> f = Function('f')(x, y)
>>> g1 = Function('g')(x, y)
>>> g2 = x**2 + 3*y
>>> pprint(hessian(f, (x, y), [g1, g2]))
[ d d ]
[ 0 0 --(g(x, y)) --(g(x, y)) ]
[ dx dy ]
[ ]
[ 0 0 2*x 3 ]
[ ]
[ 2 2 ]
[d d d ]
[--(g(x, y)) 2*x ---(f(x, y)) -----(f(x, y))]
[dx 2 dy dx ]
[ dx ]
[ ]
[ 2 2 ]
[d d d ]
[--(g(x, y)) 3 -----(f(x, y)) ---(f(x, y)) ]
[dy dy dx 2 ]
[ dy ]
References
==========
https://en.wikipedia.org/wiki/Hessian_matrix
See Also
========
sympy.matrices.matrices.MatrixCalculus.jacobian
wronskian
"""
# f is the expression representing a function f, return regular matrix
if isinstance(varlist, MatrixBase):
if 1 not in varlist.shape:
raise ShapeError("`varlist` must be a column or row vector.")
if varlist.cols == 1:
varlist = varlist.T
varlist = varlist.tolist()[0]
if is_sequence(varlist):
n = len(varlist)
if not n:
raise ShapeError("`len(varlist)` must not be zero.")
else:
raise ValueError("Improper variable list in hessian function")
if not getattr(f, 'diff', None):
# check differentiability
raise ValueError("Function `f` (%s) is not differentiable" % f)
m = len(constraints)
N = m + n
out = zeros(N)
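# Bordered Hessian layout: the first m rows/columns hold the gradients
# of the constraints, the lower-right n x n block holds the second
# derivatives of f; only the upper triangle is computed and the
# symmetric entries are copied afterwards.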
for k, g in enumerate(constraints):
if not getattr(g, 'diff', None):
# check differentiability
raise ValueError("Function `g` (%s) is not differentiable" % g)
for i in range(n):
out[k, i + m] = g.diff(varlist[i])
for i in range(n):
for j in range(i, n):
out[i + m, j + m] = f.diff(varlist[i]).diff(varlist[j])
for i in range(N):
for j in range(i + 1, N):
out[j, i] = out[i, j]
return out
def jordan_cell(eigenval, n):
"""
Create a Jordan block:
Examples
========
>>> from sympy.matrices import jordan_cell
>>> from sympy.abc import x
>>> jordan_cell(x, 4)
Matrix([
[x, 1, 0, 0],
[0, x, 1, 0],
[0, 0, x, 1],
[0, 0, 0, x]])
"""
return Matrix.jordan_block(size=n, eigenvalue=eigenval)
def matrix_multiply_elementwise(A, B):
"""Return the Hadamard product (elementwise product) of A and B
>>> from sympy.matrices import matrix_multiply_elementwise
>>> from sympy.matrices import Matrix
>>> A = Matrix([[0, 1, 2], [3, 4, 5]])
>>> B = Matrix([[1, 10, 100], [100, 10, 1]])
>>> matrix_multiply_elementwise(A, B)
Matrix([
[ 0, 10, 200],
[300, 40, 5]])
See Also
========
sympy.matrices.common.MatrixCommon.__mul__
"""
return A.multiply_elementwise(B)
def ones(*args, **kwargs):
"""Returns a matrix of ones with ``rows`` rows and ``cols`` columns;
if ``cols`` is omitted a square matrix will be returned.
See Also
========
zeros
eye
diag
"""
if 'c' in kwargs:
kwargs['cols'] = kwargs.pop('c')
return Matrix.ones(*args, **kwargs)
def randMatrix(r, c=None, min=0, max=99, seed=None, symmetric=False,
percent=100, prng=None):
"""Create random matrix with dimensions ``r`` x ``c``. If ``c`` is omitted
the matrix will be square. If ``symmetric`` is True the matrix must be
square. If ``percent`` is less than 100 then only approximately the given
percentage of elements will be non-zero.
The pseudo-random number generator used to generate matrix is chosen in the
following way.
* If ``prng`` is supplied, it will be used as the random number
generator. It should be an instance of ``random.Random``, or at
least have ``randint`` and ``shuffle`` methods with the same
signatures.
* If ``prng`` is not supplied but ``seed`` is, a new
``random.Random`` with the given ``seed`` will be created;
* otherwise, a new ``random.Random`` with default seed will be used.
Examples
========
>>> from sympy.matrices import randMatrix
>>> randMatrix(3) # doctest:+SKIP
[25, 45, 27]
[44, 54, 9]
[23, 96, 46]
>>> randMatrix(3, 2) # doctest:+SKIP
[87, 29]
[23, 37]
[90, 26]
>>> randMatrix(3, 3, 0, 2) # doctest:+SKIP
[0, 2, 0]
[2, 0, 1]
[0, 0, 1]
>>> randMatrix(3, symmetric=True) # doctest:+SKIP
[85, 26, 29]
[26, 71, 43]
[29, 43, 57]
>>> A = randMatrix(3, seed=1)
>>> B = randMatrix(3, seed=2)
>>> A == B
False
>>> A == randMatrix(3, seed=1)
True
>>> randMatrix(3, symmetric=True, percent=50) # doctest:+SKIP
[77, 70, 0],
[70, 0, 0],
[ 0, 0, 88]
"""
if c is None:
c = r
# Note that ``Random()`` is equivalent to ``Random(None)``
prng = prng or random.Random(seed)
if not symmetric:
m = Matrix._new(r, c, lambda i, j: prng.randint(min, max))
if percent == 100:
return m
z = int(r*c*(100 - percent) // 100)
m._mat[:z] = [S.Zero]*z
prng.shuffle(m._mat)
return m
# Symmetric case
if r != c:
raise ValueError('For symmetric matrices, r must equal c, but %i != %i' % (r, c))
m = zeros(r)
ij = [(i, j) for i in range(r) for j in range(i, r)]
if percent != 100:
ij = prng.sample(ij, int(len(ij)*percent // 100))
for i, j in ij:
value = prng.randint(min, max)
m[i, j] = m[j, i] = value
return m
def wronskian(functions, var, method='bareiss'):
"""
Compute the Wronskian of a list of functions
::
| f1 f2 ... fn |
| f1' f2' ... fn' |
| . . . . |
W(f1, ..., fn) = | . . . . |
| . . . . |
| (n) (n) (n) |
| D (f1) D (f2) ... D (fn) |
see: https://en.wikipedia.org/wiki/Wronskian
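Examples
========
For instance, the Wronskian of the monomials ``1, x, x**2``
reduces to a constant:
>>> from sympy.matrices import wronskian
>>> from sympy.abc import x
>>> wronskian([1, x, x**2], x)
2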
See Also
========
sympy.matrices.matrices.MatrixCalculus.jacobian
hessian
"""
for index in range(0, len(functions)):
functions[index] = sympify(functions[index])
n = len(functions)
if n == 0:
return 1
W = Matrix(n, n, lambda i, j: functions[i].diff(var, j))
return W.det(method)
def zeros(*args, **kwargs):
"""Returns a matrix of zeros with ``rows`` rows and ``cols`` columns;
if ``cols`` is omitted a square matrix will be returned.
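Examples
========
For example, a 2 x 3 matrix of zeros:
>>> from sympy.matrices import zeros
>>> zeros(2, 3)
Matrix([
[0, 0, 0],
[0, 0, 0]])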
See Also
========
ones
eye
diag
"""
if 'c' in kwargs:
kwargs['cols'] = kwargs.pop('c')
return Matrix.zeros(*args, **kwargs)
|
fbba7eea468d787601e2343ffeedcdf4ea7b45911de29c4d0fb9775a72f4f155 | from __future__ import division, print_function
from sympy.core.function import expand_mul
from sympy.core.symbol import Dummy, _uniquely_named_symbol, symbols
from sympy.utilities.iterables import numbered_symbols
from .common import ShapeError, NonSquareMatrixError, NonInvertibleMatrixError
from .utilities import _get_intermediate_simp, _iszero
def _diagonal_solve(M, rhs):
"""Solves ``Ax = B`` efficiently, where A is a diagonal Matrix,
with non-zero diagonal entries.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> A = eye(2)*2
>>> B = Matrix([[1, 2], [3, 4]])
>>> A.diagonal_solve(B) == B/2
True
See Also
========
sympy.matrices.dense.DenseMatrix.lower_triangular_solve
sympy.matrices.dense.DenseMatrix.upper_triangular_solve
gauss_jordan_solve
cholesky_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
if not M.is_diagonal():
raise TypeError("Matrix should be diagonal")
if rhs.rows != M.rows:
raise TypeError("Size mis-match")
return M._new(
rhs.rows, rhs.cols, lambda i, j: rhs[i, j] / M[i, i])
def _lower_triangular_solve(M, rhs):
"""Solves ``Ax = B``, where A is a lower triangular matrix.
See Also
========
upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
from .dense import MutableDenseMatrix
if not M.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if rhs.rows != M.rows:
raise ShapeError("Matrices size mismatch.")
if not M.is_lower:
raise ValueError("Matrix must be lower triangular.")
dps = _get_intermediate_simp()
X = MutableDenseMatrix.zeros(M.rows, rhs.cols)
for j in range(rhs.cols):
for i in range(M.rows):
if M[i, i] == 0:
raise TypeError("Matrix must be non-singular.")
X[i, j] = dps((rhs[i, j] - sum(M[i, k]*X[k, j]
for k in range(i))) / M[i, i])
return M._new(X)
def _lower_triangular_solve_sparse(M, rhs):
"""Solves ``Ax = B``, where A is a lower triangular matrix.
See Also
========
upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
if not M.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if rhs.rows != M.rows:
raise ShapeError("Matrices size mismatch.")
if not M.is_lower:
raise ValueError("Matrix must be lower triangular.")
dps = _get_intermediate_simp()
rows = [[] for i in range(M.rows)]
for i, j, v in M.row_list():
if i > j:
rows[i].append((j, v))
X = rhs.as_mutable()
for j in range(rhs.cols):
for i in range(rhs.rows):
for u, v in rows[i]:
X[i, j] -= v*X[u, j]
X[i, j] = dps(X[i, j] / M[i, i])
return M._new(X)
def _upper_triangular_solve(M, rhs):
"""Solves ``Ax = B``, where A is an upper triangular matrix.
See Also
========
lower_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
from .dense import MutableDenseMatrix
if not M.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if rhs.rows != M.rows:
raise ShapeError("Matrix size mismatch.")
if not M.is_upper:
raise TypeError("Matrix is not upper triangular.")
dps = _get_intermediate_simp()
X = MutableDenseMatrix.zeros(M.rows, rhs.cols)
for j in range(rhs.cols):
for i in reversed(range(M.rows)):
if M[i, i] == 0:
raise ValueError("Matrix must be non-singular.")
X[i, j] = dps((rhs[i, j] - sum(M[i, k]*X[k, j]
for k in range(i + 1, M.rows))) / M[i, i])
return M._new(X)
def _upper_triangular_solve_sparse(M, rhs):
"""Solves ``Ax = B``, where A is an upper triangular matrix.
See Also
========
lower_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
if not M.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if rhs.rows != M.rows:
raise ShapeError("Matrix size mismatch.")
if not M.is_upper:
raise TypeError("Matrix is not upper triangular.")
dps = _get_intermediate_simp()
rows = [[] for i in range(M.rows)]
for i, j, v in M.row_list():
if i < j:
rows[i].append((j, v))
X = rhs.as_mutable()
for j in range(rhs.cols):
for i in reversed(range(rhs.rows)):
for u, v in reversed(rows[i]):
X[i, j] -= v*X[u, j]
X[i, j] = dps(X[i, j] / M[i, i])
return M._new(X)
def _cholesky_solve(M, rhs):
"""Solves ``Ax = B`` using Cholesky decomposition,
for a general square non-singular matrix.
For a non-square matrix with rows > cols,
the least squares solution is returned.
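Examples
========
For example, for a simple positive definite system:
>>> from sympy.matrices import Matrix, eye
>>> A = eye(2)*2
>>> B = Matrix([[1, 2], [3, 4]])
>>> A.cholesky_solve(B) == B/2
True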
See Also
========
sympy.matrices.dense.DenseMatrix.lower_triangular_solve
sympy.matrices.dense.DenseMatrix.upper_triangular_solve
gauss_jordan_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
if M.rows < M.cols:
raise NotImplementedError(
'Under-determined System. Try M.gauss_jordan_solve(rhs)')
hermitian = True
reform = False
if M.is_symmetric():
hermitian = False
elif not M.is_hermitian:
reform = True
if reform or M.is_positive_definite is False:
H = M.H
M = H.multiply(M)
rhs = H.multiply(rhs)
hermitian = not M.is_symmetric()
L = M.cholesky(hermitian=hermitian)
Y = L.lower_triangular_solve(rhs)
if hermitian:
return (L.H).upper_triangular_solve(Y)
else:
return (L.T).upper_triangular_solve(Y)
def _LDLsolve(M, rhs):
"""Solves ``Ax = B`` using LDL decomposition,
for a general square and non-singular matrix.
For a non-square matrix with rows > cols,
the least squares solution is returned.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> A = eye(2)*2
>>> B = Matrix([[1, 2], [3, 4]])
>>> A.LDLsolve(B) == B/2
True
See Also
========
sympy.matrices.dense.DenseMatrix.LDLdecomposition
sympy.matrices.dense.DenseMatrix.lower_triangular_solve
sympy.matrices.dense.DenseMatrix.upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LUsolve
QRsolve
pinv_solve
"""
if M.rows < M.cols:
raise NotImplementedError(
'Under-determined System. Try M.gauss_jordan_solve(rhs)')
hermitian = True
reform = False
if M.is_symmetric():
hermitian = False
elif not M.is_hermitian:
reform = True
if reform or M.is_positive_definite is False:
H = M.H
M = H.multiply(M)
rhs = H.multiply(rhs)
hermitian = not M.is_symmetric()
L, D = M.LDLdecomposition(hermitian=hermitian)
Y = L.lower_triangular_solve(rhs)
Z = D.diagonal_solve(Y)
if hermitian:
return (L.H).upper_triangular_solve(Z)
else:
return (L.T).upper_triangular_solve(Z)
def _LUsolve(M, rhs, iszerofunc=_iszero):
"""Solve the linear system ``Ax = rhs`` for ``x`` where ``A = M``.
This is for symbolic matrices, for real or complex ones use
mpmath.lu_solve or mpmath.qr_solve.
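Examples
========
For example, for a trivially invertible system:
>>> from sympy.matrices import Matrix, eye
>>> A = eye(2)*2
>>> B = Matrix([[1, 2], [3, 4]])
>>> A.LUsolve(B) == B/2
True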
See Also
========
sympy.matrices.dense.DenseMatrix.lower_triangular_solve
sympy.matrices.dense.DenseMatrix.upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
QRsolve
pinv_solve
LUdecomposition
"""
if rhs.rows != M.rows:
raise ShapeError(
"``M`` and ``rhs`` must have the same number of rows.")
m = M.rows
n = M.cols
if m < n:
raise NotImplementedError("Underdetermined systems not supported.")
try:
A, perm = M.LUdecomposition_Simple(
iszerofunc=_iszero, rankcheck=True)
except ValueError:
raise NotImplementedError("Underdetermined systems not supported.")
dps = _get_intermediate_simp()
b = rhs.permute_rows(perm).as_mutable()
# forward substitution, all diag entries are scaled to 1
for i in range(m):
for j in range(min(i, n)):
scale = A[i, j]
b.zip_row_op(i, j, lambda x, y: dps(x - y * scale))
# consistency check for overdetermined systems
if m > n:
for i in range(n, m):
for j in range(b.cols):
if not iszerofunc(b[i, j]):
raise ValueError("The system is inconsistent.")
b = b[0:n, :] # truncate zero rows if consistent
# backward substitution
for i in range(n - 1, -1, -1):
for j in range(i + 1, n):
scale = A[i, j]
b.zip_row_op(i, j, lambda x, y: dps(x - y * scale))
scale = A[i, i]
b.row_op(i, lambda x, _: dps(x / scale))
return rhs.__class__(b)
def _QRsolve(M, b):
"""Solve the linear system ``Ax = b``.
``M`` is the matrix ``A``, the method argument is the vector
``b``. The method returns the solution vector ``x``. If ``b`` is a
matrix, the system is solved for each column of ``b`` and the
return value is a matrix of the same shape as ``b``.
This method is slower (approximately by a factor of 2) but more
stable for floating-point arithmetic than the LUsolve method.
However, LUsolve usually uses exact arithmetic, so you do not need
to use QRsolve.
This is mainly for educational purposes and symbolic matrices; for
real (or complex) matrices use mpmath.qr_solve.
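Examples
========
The same trivially invertible system used for ``LUsolve`` above:
>>> from sympy.matrices import Matrix, eye
>>> A = eye(2)*2
>>> B = Matrix([[1, 2], [3, 4]])
>>> A.QRsolve(B) == B/2
True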
See Also
========
sympy.matrices.dense.DenseMatrix.lower_triangular_solve
sympy.matrices.dense.DenseMatrix.upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
pinv_solve
QRdecomposition
"""
dps = _get_intermediate_simp(expand_mul, expand_mul)
Q, R = M.QRdecomposition()
y = Q.T * b
# back substitution to solve R*x = y:
# We build up the result "backwards" in the vector 'x' and reverse it
# only in the end.
x = []
n = R.rows
for j in range(n - 1, -1, -1):
tmp = y[j, :]
for k in range(j + 1, n):
tmp -= R[j, k] * x[n - 1 - k]
tmp = dps(tmp)
x.append(tmp / R[j, j])
return M._new([row._mat for row in reversed(x)])
def _gauss_jordan_solve(M, B, freevar=False):
"""
Solves ``Ax = B`` using Gauss Jordan elimination.
There may be zero, one, or infinite solutions. If one solution
exists, it will be returned. If infinite solutions exist, it will
be returned parametrically. If no solutions exist, a ``ValueError``
will be raised.
Parameters
==========
B : Matrix
The right hand side of the equation to be solved for. Must have
the same number of rows as matrix A.
freevar : List
If the system is underdetermined (e.g. A has more columns than
rows), infinite solutions are possible, in terms of arbitrary
values of free variables. If the flag ``freevar`` is set to
``True``, the indices of the free variables in the solution
(column Matrix) are also returned.
Returns
=======
x : Matrix
The matrix that will satisfy ``Ax = B``. Will have as many rows as
matrix A has columns, and as many columns as matrix B.
params : Matrix
If the system is underdetermined (e.g. A has more columns than
rows), infinite solutions are possible, in terms of arbitrary
parameters. These arbitrary parameters are returned as params
Matrix.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix([[1, 2, 1, 1], [1, 2, 2, -1], [2, 4, 0, 6]])
>>> B = Matrix([7, 12, 4])
>>> sol, params = A.gauss_jordan_solve(B)
>>> sol
Matrix([
[-2*tau0 - 3*tau1 + 2],
[ tau0],
[ 2*tau1 + 5],
[ tau1]])
>>> params
Matrix([
[tau0],
[tau1]])
>>> taus_zeroes = { tau:0 for tau in params }
>>> sol_unique = sol.xreplace(taus_zeroes)
>>> sol_unique
Matrix([
[2],
[0],
[5],
[0]])
>>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
>>> B = Matrix([3, 6, 9])
>>> sol, params = A.gauss_jordan_solve(B)
>>> sol
Matrix([
[-1],
[ 2],
[ 0]])
>>> params
Matrix(0, 1, [])
>>> A = Matrix([[2, -7], [-1, 4]])
>>> B = Matrix([[-21, 3], [12, -2]])
>>> sol, params = A.gauss_jordan_solve(B)
>>> sol
Matrix([
[0, -2],
[3, -1]])
>>> params
Matrix(0, 2, [])
See Also
========
sympy.matrices.dense.DenseMatrix.lower_triangular_solve
sympy.matrices.dense.DenseMatrix.upper_triangular_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv
References
==========
.. [1] https://en.wikipedia.org/wiki/Gaussian_elimination
"""
from sympy.matrices import Matrix, zeros
cls = M.__class__
aug = M.hstack(M.copy(), B.copy())
B_cols = B.cols
row, col = aug[:, :-B_cols].shape
# solve by reduced row echelon form
A, pivots = aug.rref(simplify=True)
A, v = A[:, :-B_cols], A[:, -B_cols:]
pivots = list(filter(lambda p: p < col, pivots))
rank = len(pivots)
# Bring to block form
permutation = Matrix(range(col)).T
for i, c in enumerate(pivots):
permutation.col_swap(i, c)
# check for existence of solutions
# rank of aug Matrix should be equal to rank of coefficient matrix
if not v[rank:, :].is_zero_matrix:
raise ValueError("Linear system has no solution")
# Get index of free symbols (free parameters)
# non-pivots columns are free variables
free_var_index = permutation[len(pivots):]
# Free parameters
# what are current unnumbered free symbol names?
name = _uniquely_named_symbol('tau', aug,
compare=lambda i: str(i).rstrip('1234567890')).name
gen = numbered_symbols(name)
tau = Matrix([next(gen) for k in range((col - rank)*B_cols)]).reshape(
col - rank, B_cols)
# Full parametric solution
V = A[:rank, [c for c in range(A.cols) if c not in pivots]]
vt = v[:rank, :]
free_sol = tau.vstack(vt - V * tau, tau)
# Undo permutation
sol = zeros(col, B_cols)
for k in range(col):
sol[permutation[k], :] = free_sol[k,:]
sol, tau = cls(sol), cls(tau)
if freevar:
return sol, tau, free_var_index
else:
return sol, tau
def _pinv_solve(M, B, arbitrary_matrix=None):
"""Solve ``Ax = B`` using the Moore-Penrose pseudoinverse.
There may be zero, one, or infinite solutions. If one solution
exists, it will be returned. If infinite solutions exist, one will
be returned based on the value of arbitrary_matrix. If no solutions
exist, the least-squares solution is returned.
Parameters
==========
B : Matrix
The right hand side of the equation to be solved for. Must have
the same number of rows as matrix A.
arbitrary_matrix : Matrix
If the system is underdetermined (e.g. A has more columns than
rows), infinite solutions are possible, in terms of an arbitrary
matrix. This parameter may be set to a specific matrix to use
for that purpose; if so, it must be the same shape as x, with as
many rows as matrix A has columns, and as many columns as matrix
B. If left as None, an appropriate matrix containing dummy
symbols in the form of ``wn_m`` will be used, with n and m being
row and column position of each symbol.
Returns
=======
x : Matrix
The matrix that will satisfy ``Ax = B``. Will have as many rows as
matrix A has columns, and as many columns as matrix B.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix([[1, 2, 3], [4, 5, 6]])
>>> B = Matrix([7, 8])
>>> A.pinv_solve(B)
Matrix([
[ _w0_0/6 - _w1_0/3 + _w2_0/6 - 55/18],
[-_w0_0/3 + 2*_w1_0/3 - _w2_0/3 + 1/9],
[ _w0_0/6 - _w1_0/3 + _w2_0/6 + 59/18]])
>>> A.pinv_solve(B, arbitrary_matrix=Matrix([0, 0, 0]))
Matrix([
[-55/18],
[ 1/9],
[ 59/18]])
See Also
========
sympy.matrices.dense.DenseMatrix.lower_triangular_solve
sympy.matrices.dense.DenseMatrix.upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv
Notes
=====
This may return either exact solutions or least squares solutions.
To determine which, check ``A * A.pinv() * B == B``. It will be
True if exact solutions exist, and False if only a least-squares
solution exists. Be aware that the left hand side of that equation
may need to be simplified to correctly compare to the right hand
side.
References
==========
.. [1] https://en.wikipedia.org/wiki/Moore-Penrose_pseudoinverse#Obtaining_all_solutions_of_a_linear_system
"""
from sympy.matrices import eye
A = M
A_pinv = M.pinv()
if arbitrary_matrix is None:
rows, cols = A.cols, B.cols
w = symbols('w:{0}_:{1}'.format(rows, cols), cls=Dummy)
arbitrary_matrix = M.__class__(cols, rows, w).T
return A_pinv.multiply(B) + (eye(A.cols) -
A_pinv.multiply(A)).multiply(arbitrary_matrix)
def _solve(M, rhs, method='GJ'):
"""Solves linear equation where the unique solution exists.
Parameters
==========
rhs : Matrix
Vector representing the right hand side of the linear equation.
method : string, optional
If set to ``'GJ'`` or ``'GE'``, Gauss-Jordan elimination will be
used, which is implemented in the routine ``gauss_jordan_solve``.
If set to ``'LU'``, ``LUsolve`` routine will be used.
If set to ``'QR'``, ``QRsolve`` routine will be used.
If set to ``'PINV'``, ``pinv_solve`` routine will be used.
It also supports the methods available for special linear systems.
For positive definite systems:
If set to ``'CH'``, ``cholesky_solve`` routine will be used.
If set to ``'LDL'``, ``LDLsolve`` routine will be used.
To use a different method and to compute the solution via the
inverse, use a method defined in the .inv() docstring.
Returns
=======
solutions : Matrix
Vector representing the solution.
Raises
======
ValueError
If there is not a unique solution then a ``ValueError`` will be
raised.
If ``M`` is not square, a ``ValueError`` and a different routine
for solving the system will be suggested.
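Examples
========
For a square system with a unique solution:
>>> from sympy import Matrix
>>> A = Matrix([[1, 2], [3, 5]])
>>> b = Matrix([1, 2])
>>> A.solve(b)
Matrix([
[-1],
[ 1]])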
"""
if method == 'GJ' or method == 'GE':
try:
soln, param = M.gauss_jordan_solve(rhs)
if param:
raise NonInvertibleMatrixError("Matrix det == 0; not invertible. "
"Try ``M.gauss_jordan_solve(rhs)`` to obtain a parametric solution.")
except ValueError:
raise NonInvertibleMatrixError("Matrix det == 0; not invertible.")
return soln
elif method == 'LU':
return M.LUsolve(rhs)
elif method == 'CH':
return M.cholesky_solve(rhs)
elif method == 'QR':
return M.QRsolve(rhs)
elif method == 'LDL':
return M.LDLsolve(rhs)
elif method == 'PINV':
return M.pinv_solve(rhs)
else:
return M.inv(method=method).multiply(rhs)
def _solve_least_squares(M, rhs, method='CH'):
"""Return the least-square fit to the data.
Parameters
==========
rhs : Matrix
Vector representing the right hand side of the linear equation.
method : string or boolean, optional
If set to ``'CH'``, ``cholesky_solve`` routine will be used.
If set to ``'LDL'``, ``LDLsolve`` routine will be used.
If set to ``'QR'``, ``QRsolve`` routine will be used.
If set to ``'PINV'``, ``pinv_solve`` routine will be used.
Otherwise, the conjugate of ``M`` will be used to create a system
of equations that is passed to ``solve`` along with the hint
defined by ``method``.
Returns
=======
solutions : Matrix
Vector representing the solution.
Examples
========
>>> from sympy.matrices import Matrix, ones
>>> A = Matrix([1, 2, 3])
>>> B = Matrix([2, 3, 4])
>>> S = Matrix(A.row_join(B))
>>> S
Matrix([
[1, 2],
[2, 3],
[3, 4]])
If each line of S represents coefficients of Ax + By
and x and y are [2, 3] then S*xy is:
>>> r = S*Matrix([2, 3]); r
Matrix([
[ 8],
[13],
[18]])
But let's add 1 to the middle value and then solve for the
least-squares value of xy:
>>> xy = S.solve_least_squares(Matrix([8, 14, 18])); xy
Matrix([
[ 5/3],
[10/3]])
The error is given by S*xy - r:
>>> S*xy - r
Matrix([
[1/3],
[1/3],
[1/3]])
>>> _.norm().n(2)
0.58
If a different xy is used, the norm will be higher:
>>> xy += ones(2, 1)/10
>>> (S*xy - r).norm().n(2)
1.5
"""
if method == 'CH':
return M.cholesky_solve(rhs)
elif method == 'QR':
return M.QRsolve(rhs)
elif method == 'LDL':
return M.LDLsolve(rhs)
elif method == 'PINV':
return M.pinv_solve(rhs)
else:
t = M.H
return (t * M).solve(t * rhs, method=method)
|
a6d2e3d090a106194b7a136c4d5f802a9f814c0335a0d1966af6adc011218972 | from __future__ import division, print_function
from collections import defaultdict
from sympy.core import SympifyError, Add
from sympy.core.compatibility import Callable, as_int, is_sequence, reduce
from sympy.core.containers import Dict
from sympy.core.expr import Expr
from sympy.core.singleton import S
from sympy.functions import Abs
from sympy.utilities.iterables import uniq
from sympy.utilities.misc import filldedent
from .common import a2idx
from .dense import Matrix
from .matrices import MatrixBase, ShapeError
from .utilities import _iszero
from .decompositions import (
_liupc, _row_structure_symbolic_cholesky, _cholesky_sparse,
_LDLdecomposition_sparse)
from .solvers import (
_lower_triangular_solve_sparse, _upper_triangular_solve_sparse)
class SparseMatrix(MatrixBase):
"""
A sparse matrix (a matrix with a large number of zero elements).
Examples
========
>>> from sympy.matrices import SparseMatrix, ones
>>> SparseMatrix(2, 2, range(4))
Matrix([
[0, 1],
[2, 3]])
>>> SparseMatrix(2, 2, {(1, 1): 2})
Matrix([
[0, 0],
[0, 2]])
A SparseMatrix can be instantiated from a ragged list of lists:
>>> SparseMatrix([[1, 2, 3], [1, 2], [1]])
Matrix([
[1, 2, 3],
[1, 2, 0],
[1, 0, 0]])
For safety, one may include the expected size and then an error
will be raised if the indices of any element are out of range or
(for a flat list) if the total number of elements does not match
the expected shape:
>>> SparseMatrix(2, 2, [1, 2])
Traceback (most recent call last):
...
ValueError: List length (2) != rows*columns (4)
Here, an error is not raised because the list is not flat and no
element is out of range:
>>> SparseMatrix(2, 2, [[1, 2]])
Matrix([
[1, 2],
[0, 0]])
But adding another element to the first (and only) row will cause
an error to be raised:
>>> SparseMatrix(2, 2, [[1, 2, 3]])
Traceback (most recent call last):
...
ValueError: The location (0, 2) is out of designated range: (1, 1)
To autosize the matrix, pass None for rows:
>>> SparseMatrix(None, [[1, 2, 3]])
Matrix([[1, 2, 3]])
>>> SparseMatrix(None, {(1, 1): 1, (3, 3): 3})
Matrix([
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 3]])
Values that are themselves a Matrix are automatically expanded:
>>> SparseMatrix(4, 4, {(1, 1): ones(2)})
Matrix([
[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]])
A ValueError is raised if the expanding matrix tries to overwrite
a different element already present:
>>> SparseMatrix(3, 3, {(0, 0): ones(2), (1, 1): 2})
Traceback (most recent call last):
...
ValueError: collision at (1, 1)
See Also
========
DenseMatrix
MutableSparseMatrix
ImmutableSparseMatrix
"""
def __new__(cls, *args, **kwargs):
self = object.__new__(cls)
if len(args) == 1 and isinstance(args[0], SparseMatrix):
self.rows = args[0].rows
self.cols = args[0].cols
self._smat = dict(args[0]._smat)
return self
self._smat = {}
# autosizing
if len(args) == 2 and args[0] is None:
args = (None,) + args
if len(args) == 3:
r, c = args[:2]
if r is c is None:
self.rows = self.cols = None
elif None in (r, c):
raise ValueError(
'Pass rows=None and no cols for autosizing.')
else:
self.rows, self.cols = map(as_int, args[:2])
if isinstance(args[2], Callable):
op = args[2]
for i in range(self.rows):
for j in range(self.cols):
value = self._sympify(
op(self._sympify(i), self._sympify(j)))
if value:
self._smat[i, j] = value
elif isinstance(args[2], (dict, Dict)):
def update(i, j, v):
# update self._smat and make sure there are
# no collisions
if v:
if (i, j) in self._smat and v != self._smat[i, j]:
raise ValueError('collision at %s' % ((i, j),))
self._smat[i, j] = v
# manual copy, copy.deepcopy() doesn't work
for key, v in args[2].items():
r, c = key
if isinstance(v, SparseMatrix):
for (i, j), vij in v._smat.items():
update(r + i, c + j, vij)
else:
if isinstance(v, (Matrix, list, tuple)):
v = SparseMatrix(v)
for i, j in v._smat:
update(r + i, c + j, v[i, j])
else:
v = self._sympify(v)
update(r, c, self._sympify(v))
elif is_sequence(args[2]):
flat = not any(is_sequence(i) for i in args[2])
if not flat:
s = SparseMatrix(args[2])
self._smat = s._smat
else:
if len(args[2]) != self.rows*self.cols:
raise ValueError(
'List length (%s) != rows*columns (%s)' %
(len(args[2]), self.rows*self.cols))
flat_list = args[2]
for i in range(self.rows):
for j in range(self.cols):
value = self._sympify(flat_list[i*self.cols + j])
if value:
self._smat[i, j] = value
if self.rows is None: # autosizing
k = self._smat.keys()
self.rows = max([i[0] for i in k]) + 1 if k else 0
self.cols = max([i[1] for i in k]) + 1 if k else 0
else:
for i, j in self._smat.keys():
if i and i >= self.rows or j and j >= self.cols:
r, c = self.shape
raise ValueError(filldedent('''
The location %s is out of designated
range: %s''' % ((i, j), (r - 1, c - 1))))
else:
if (len(args) == 1 and isinstance(args[0], (list, tuple))):
# list of values or lists
v = args[0]
c = 0
for i, row in enumerate(v):
if not isinstance(row, (list, tuple)):
row = [row]
for j, vij in enumerate(row):
if vij:
self._smat[i, j] = self._sympify(vij)
c = max(c, len(row))
self.rows = len(v) if c else 0
self.cols = c
else:
# handle full matrix forms with _handle_creation_inputs
r, c, _list = Matrix._handle_creation_inputs(*args)
self.rows = r
self.cols = c
for i in range(self.rows):
for j in range(self.cols):
value = _list[self.cols*i + j]
if value:
self._smat[i, j] = value
return self
def __eq__(self, other):
self_shape = getattr(self, 'shape', None)
other_shape = getattr(other, 'shape', None)
if None in (self_shape, other_shape):
return False
if self_shape != other_shape:
return False
if isinstance(other, SparseMatrix):
return self._smat == other._smat
elif isinstance(other, MatrixBase):
return self._smat == MutableSparseMatrix(other)._smat
def __getitem__(self, key):
if isinstance(key, tuple):
i, j = key
try:
i, j = self.key2ij(key)
return self._smat.get((i, j), S.Zero)
except (TypeError, IndexError):
if isinstance(i, slice):
i = range(self.rows)[i]
elif is_sequence(i):
pass
elif isinstance(i, Expr) and not i.is_number:
from sympy.matrices.expressions.matexpr import MatrixElement
return MatrixElement(self, i, j)
else:
if i >= self.rows:
raise IndexError('Row index out of bounds')
i = [i]
if isinstance(j, slice):
j = range(self.cols)[j]
elif is_sequence(j):
pass
elif isinstance(j, Expr) and not j.is_number:
from sympy.matrices.expressions.matexpr import MatrixElement
return MatrixElement(self, i, j)
else:
if j >= self.cols:
raise IndexError('Col index out of bounds')
j = [j]
return self.extract(i, j)
# check for single arg, like M[:] or M[3]
if isinstance(key, slice):
lo, hi = key.indices(len(self))[:2]
L = []
for i in range(lo, hi):
m, n = divmod(i, self.cols)
L.append(self._smat.get((m, n), S.Zero))
return L
i, j = divmod(a2idx(key, len(self)), self.cols)
return self._smat.get((i, j), S.Zero)
def __setitem__(self, key, value):
raise NotImplementedError()
def _eval_inverse(self, **kwargs):
return self.inv(method=kwargs.get('method', 'LDL'),
iszerofunc=kwargs.get('iszerofunc', _iszero),
try_block_diag=kwargs.get('try_block_diag', False))
def _eval_Abs(self):
return self.applyfunc(lambda x: Abs(x))
def _eval_add(self, other):
"""If `other` is a SparseMatrix, add efficiently. Otherwise,
do standard addition."""
if not isinstance(other, SparseMatrix):
return self + self._new(other)
smat = {}
zero = self._sympify(0)
for key in set().union(self._smat.keys(), other._smat.keys()):
sum = self._smat.get(key, zero) + other._smat.get(key, zero)
if sum != 0:
smat[key] = sum
return self._new(self.rows, self.cols, smat)
def _eval_col_insert(self, icol, other):
if not isinstance(other, SparseMatrix):
other = SparseMatrix(other)
new_smat = {}
# make room for the new columns
for key, val in self._smat.items():
row, col = key
if col >= icol:
col += other.cols
new_smat[row, col] = val
# add other's keys
for key, val in other._smat.items():
row, col = key
new_smat[row, col + icol] = val
return self._new(self.rows, self.cols + other.cols, new_smat)
def _eval_conjugate(self):
smat = {key: val.conjugate() for key,val in self._smat.items()}
return self._new(self.rows, self.cols, smat)
def _eval_extract(self, rowsList, colsList):
urow = list(uniq(rowsList))
ucol = list(uniq(colsList))
smat = {}
if len(urow)*len(ucol) < len(self._smat):
# there are fewer elements requested than there are elements in the matrix
for i, r in enumerate(urow):
for j, c in enumerate(ucol):
smat[i, j] = self._smat.get((r, c), 0)
else:
# most of the request will be zeros so check all of self's entries,
# keeping only the ones that are desired
for rk, ck in self._smat:
if rk in urow and ck in ucol:
smat[urow.index(rk), ucol.index(ck)] = self._smat[rk, ck]
rv = self._new(len(urow), len(ucol), smat)
# rv is nominally correct but there might be rows/cols
# which require duplication
if len(rowsList) != len(urow):
for i, r in enumerate(rowsList):
i_previous = rowsList.index(r)
if i_previous != i:
rv = rv.row_insert(i, rv.row(i_previous))
if len(colsList) != len(ucol):
for i, c in enumerate(colsList):
i_previous = colsList.index(c)
if i_previous != i:
rv = rv.col_insert(i, rv.col(i_previous))
return rv
@classmethod
def _eval_eye(cls, rows, cols):
entries = {(i,i): S.One for i in range(min(rows, cols))}
return cls._new(rows, cols, entries)
def _eval_has(self, *patterns):
# if the matrix has any zeros, see if S.Zero
# has the pattern. If _smat is full length,
# the matrix has no zeros.
zhas = S.Zero.has(*patterns)
if len(self._smat) == self.rows*self.cols:
zhas = False
return any(self[key].has(*patterns) for key in self._smat) or zhas
def _eval_is_Identity(self):
if not all(self[i, i] == 1 for i in range(self.rows)):
return False
return len(self._smat) == self.rows
def _eval_is_symmetric(self, simpfunc):
diff = (self - self.T).applyfunc(simpfunc)
return len(diff.values()) == 0
def _eval_matrix_mul(self, other):
"""Fast multiplication exploiting the sparsity of the matrix."""
if not isinstance(other, SparseMatrix):
other = self._new(other)
# if we made it here, we're both sparse matrices
# create quick lookups for rows and cols
row_lookup = defaultdict(dict)
for (i,j), val in self._smat.items():
row_lookup[i][j] = val
col_lookup = defaultdict(dict)
for (i,j), val in other._smat.items():
col_lookup[j][i] = val
smat = {}
for row in row_lookup.keys():
for col in col_lookup.keys():
# find the common indices of non-zero entries.
# these are the only things that need to be multiplied.
indices = set(col_lookup[col].keys()) & set(row_lookup[row].keys())
if indices:
vec = [row_lookup[row][k]*col_lookup[col][k] for k in indices]
try:
smat[row, col] = Add(*vec)
except (TypeError, SympifyError):
# Some matrices don't work with `sum` or `Add`
# (e.g. `sum` tries to add the scalar `0` to a matrix entry).
# Fall back to pairwise addition of the products if `Add` fails.
smat[row, col] = reduce(lambda a, b: a + b, vec)
return self._new(self.rows, other.cols, smat)
def _eval_row_insert(self, irow, other):
if not isinstance(other, SparseMatrix):
other = SparseMatrix(other)
new_smat = {}
# make room for the new rows
for key, val in self._smat.items():
row, col = key
if row >= irow:
row += other.rows
new_smat[row, col] = val
# add other's keys
for key, val in other._smat.items():
row, col = key
new_smat[row + irow, col] = val
return self._new(self.rows + other.rows, self.cols, new_smat)
def _eval_scalar_mul(self, other):
return self.applyfunc(lambda x: x*other)
def _eval_scalar_rmul(self, other):
return self.applyfunc(lambda x: other*x)
def _eval_transpose(self):
"""Returns the transposed SparseMatrix of this SparseMatrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.T
Matrix([
[1, 3],
[2, 4]])
"""
smat = {(j,i): val for (i,j),val in self._smat.items()}
return self._new(self.cols, self.rows, smat)
def _eval_values(self):
return [v for k,v in self._smat.items() if not v.is_zero]
@classmethod
def _eval_zeros(cls, rows, cols):
return cls._new(rows, cols, {})
@property
def _mat(self):
"""Return a list of matrix elements. Some routines
in DenseMatrix use `_mat` directly to speed up operations."""
return list(self)
def applyfunc(self, f):
"""Apply a function to each element of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> m = SparseMatrix(2, 2, lambda i, j: i*2+j)
>>> m
Matrix([
[0, 1],
[2, 3]])
>>> m.applyfunc(lambda i: 2*i)
Matrix([
[0, 2],
[4, 6]])
"""
if not callable(f):
raise TypeError("`f` must be callable.")
out = self.copy()
for k, v in self._smat.items():
fv = f(v)
if fv:
out._smat[k] = fv
else:
out._smat.pop(k, None)
return out
def as_immutable(self):
"""Returns an Immutable version of this Matrix."""
from .immutable import ImmutableSparseMatrix
return ImmutableSparseMatrix(self)
def as_mutable(self):
"""Returns a mutable version of this matrix.
Examples
========
>>> from sympy import ImmutableMatrix
>>> X = ImmutableMatrix([[1, 2], [3, 4]])
>>> Y = X.as_mutable()
>>> Y[1, 1] = 5 # Can set values in Y
>>> Y
Matrix([
[1, 2],
[3, 5]])
"""
return MutableSparseMatrix(self)
def col_list(self):
"""Returns a column-sorted list of non-zero elements of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a=SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.CL
[(0, 0, 1), (1, 0, 3), (0, 1, 2), (1, 1, 4)]
See Also
========
sympy.matrices.sparse.MutableSparseMatrix.col_op
sympy.matrices.sparse.SparseMatrix.row_list
"""
return [tuple(k + (self[k],)) for k in sorted(list(self._smat.keys()), key=lambda k: list(reversed(k)))]
def copy(self):
return self._new(self.rows, self.cols, self._smat)
def nnz(self):
"""Returns the number of non-zero elements in Matrix."""
return len(self._smat)
def row_list(self):
"""Returns a row-sorted list of non-zero elements of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.RL
[(0, 0, 1), (0, 1, 2), (1, 0, 3), (1, 1, 4)]
See Also
========
sympy.matrices.sparse.MutableSparseMatrix.row_op
sympy.matrices.sparse.SparseMatrix.col_list
"""
return [tuple(k + (self[k],)) for k in
sorted(list(self._smat.keys()), key=lambda k: list(k))]
def scalar_multiply(self, scalar):
"Scalar element-wise multiplication"
M = self.zeros(*self.shape)
if scalar:
for i in self._smat:
v = scalar*self._smat[i]
if v:
M._smat[i] = v
else:
M._smat.pop(i, None)
return M
def solve_least_squares(self, rhs, method='LDL'):
"""Return the least-square fit to the data.
By default LDL decomposition is used to invert the normal equations
(method='LDL'); other methods of matrix inversion can be used. To
find out which are available, see the docstring of the .inv() method.
Examples
========
>>> from sympy.matrices import SparseMatrix, Matrix, ones
>>> A = Matrix([1, 2, 3])
>>> B = Matrix([2, 3, 4])
>>> S = SparseMatrix(A.row_join(B))
>>> S
Matrix([
[1, 2],
[2, 3],
[3, 4]])
If each line of S represents coefficients of Ax + By
and x and y are [2, 3] then S*xy is:
>>> r = S*Matrix([2, 3]); r
Matrix([
[ 8],
[13],
[18]])
But let's add 1 to the middle value and then solve for the
least-squares value of xy:
>>> xy = S.solve_least_squares(Matrix([8, 14, 18])); xy
Matrix([
[ 5/3],
[10/3]])
The error is given by S*xy - r:
>>> S*xy - r
Matrix([
[1/3],
[1/3],
[1/3]])
>>> _.norm().n(2)
0.58
If a different xy is used, the norm will be higher:
>>> xy += ones(2, 1)/10
>>> (S*xy - r).norm().n(2)
1.5
"""
t = self.T
return (t*self).inv(method=method)*t*rhs
def solve(self, rhs, method='LDL'):
"""Return solution to self*soln = rhs using given inversion method.
For a list of possible inversion methods, see the .inv() docstring.
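Examples
========
For instance, a diagonal sparse system:
>>> from sympy.matrices import SparseMatrix, Matrix
>>> A = SparseMatrix([[2, 0], [0, 4]])
>>> b = Matrix([6, 8])
>>> A.solve(b)
Matrix([
[3],
[2]])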
"""
if not self.is_square:
if self.rows < self.cols:
raise ValueError('Under-determined system.')
elif self.rows > self.cols:
raise ValueError('For over-determined system, M, having '
'more rows than columns, try M.solve_least_squares(rhs).')
else:
return self.inv(method=method).multiply(rhs)
RL = property(row_list, None, None, "Alternate faster representation")
CL = property(col_list, None, None, "Alternate faster representation")
def liupc(self):
return _liupc(self)
def row_structure_symbolic_cholesky(self):
return _row_structure_symbolic_cholesky(self)
def cholesky(self, hermitian=True):
return _cholesky_sparse(self, hermitian=hermitian)
def LDLdecomposition(self, hermitian=True):
return _LDLdecomposition_sparse(self, hermitian=hermitian)
def lower_triangular_solve(self, rhs):
return _lower_triangular_solve_sparse(self, rhs)
def upper_triangular_solve(self, rhs):
return _upper_triangular_solve_sparse(self, rhs)
liupc.__doc__ = _liupc.__doc__
row_structure_symbolic_cholesky.__doc__ = _row_structure_symbolic_cholesky.__doc__
cholesky.__doc__ = _cholesky_sparse.__doc__
LDLdecomposition.__doc__ = _LDLdecomposition_sparse.__doc__
lower_triangular_solve.__doc__ = _lower_triangular_solve_sparse.__doc__
upper_triangular_solve.__doc__ = _upper_triangular_solve_sparse.__doc__
class MutableSparseMatrix(SparseMatrix, MatrixBase):
@classmethod
def _new(cls, *args, **kwargs):
return cls(*args)
def __setitem__(self, key, value):
"""Assign value to position designated by key.
Examples
========
>>> from sympy.matrices import SparseMatrix, ones
>>> M = SparseMatrix(2, 2, {})
>>> M[1] = 1; M
Matrix([
[0, 1],
[0, 0]])
>>> M[1, 1] = 2; M
Matrix([
[0, 1],
[0, 2]])
>>> M = SparseMatrix(2, 2, {})
>>> M[:, 1] = [1, 1]; M
Matrix([
[0, 1],
[0, 1]])
>>> M = SparseMatrix(2, 2, {})
>>> M[1, :] = [[1, 1]]; M
Matrix([
[0, 0],
[1, 1]])
To replace row r you assign to position r*m where m
is the number of columns:
>>> M = SparseMatrix(4, 4, {})
>>> m = M.cols
>>> M[3*m] = ones(1, m)*2; M
Matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[2, 2, 2, 2]])
And to replace column c you can assign to position c:
>>> M[2] = ones(m, 1)*4; M
Matrix([
[0, 0, 4, 0],
[0, 0, 4, 0],
[0, 0, 4, 0],
[2, 2, 4, 2]])
"""
rv = self._setitem(key, value)
if rv is not None:
i, j, value = rv
if value:
self._smat[i, j] = value
elif (i, j) in self._smat:
del self._smat[i, j]
def as_mutable(self):
return self.copy()
__hash__ = None # type: ignore
def col_del(self, k):
"""Delete the given column of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix([[0, 0], [0, 1]])
>>> M
Matrix([
[0, 0],
[0, 1]])
>>> M.col_del(0)
>>> M
Matrix([
[0],
[1]])
See Also
========
row_del
"""
newD = {}
k = a2idx(k, self.cols)
for (i, j) in self._smat:
if j == k:
pass
elif j > k:
newD[i, j - 1] = self._smat[i, j]
else:
newD[i, j] = self._smat[i, j]
self._smat = newD
self.cols -= 1
def col_join(self, other):
"""Returns B augmented beneath A (row-wise joining)::
[A]
[B]
Examples
========
>>> from sympy import SparseMatrix, Matrix, ones
>>> A = SparseMatrix(ones(3))
>>> A
Matrix([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
>>> B = SparseMatrix.eye(3)
>>> B
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> C = A.col_join(B); C
Matrix([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> C == A.col_join(Matrix(B))
True
Joining along columns is the same as appending rows at the end
of the matrix:
>>> C == A.row_insert(A.rows, Matrix(B))
True
"""
# A null matrix can always be stacked (see #10770)
if self.rows == 0 and self.cols != other.cols:
return self._new(0, other.cols, []).col_join(other)
A, B = self, other
if not A.cols == B.cols:
raise ShapeError()
A = A.copy()
if not isinstance(B, SparseMatrix):
k = 0
b = B._mat
for i in range(B.rows):
for j in range(B.cols):
v = b[k]
if v:
A._smat[i + A.rows, j] = v
k += 1
else:
for (i, j), v in B._smat.items():
A._smat[i + A.rows, j] = v
A.rows += B.rows
return A
def col_op(self, j, f):
"""In-place operation on col j using two-arg functor whose args are
interpreted as (self[i, j], i) for i in range(self.rows).
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix.eye(3)*2
>>> M[1, 0] = -1
>>> M.col_op(1, lambda v, i: v + 2*M[i, 0]); M
Matrix([
[ 2, 4, 0],
[-1, 0, 0],
[ 0, 0, 2]])
"""
for i in range(self.rows):
v = self._smat.get((i, j), S.Zero)
fv = f(v, i)
if fv:
self._smat[i, j] = fv
elif v:
self._smat.pop((i, j))
def col_swap(self, i, j):
"""Swap, in place, columns i and j.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix.eye(3); S[2, 1] = 2
>>> S.col_swap(1, 0); S
Matrix([
[0, 1, 0],
[1, 0, 0],
[2, 0, 1]])
"""
if i > j:
i, j = j, i
rows = self.col_list()
temp = []
for ii, jj, v in rows:
if jj == i:
self._smat.pop((ii, jj))
temp.append((ii, v))
elif jj == j:
self._smat.pop((ii, jj))
self._smat[ii, i] = v
elif jj > j:
break
for k, v in temp:
self._smat[k, j] = v
def copyin_list(self, key, value):
if not is_sequence(value):
raise TypeError("`value` must be of type list or tuple.")
self.copyin_matrix(key, Matrix(value))
def copyin_matrix(self, key, value):
# include this here because it's not part of BaseMatrix
rlo, rhi, clo, chi = self.key2bounds(key)
shape = value.shape
dr, dc = rhi - rlo, chi - clo
if shape != (dr, dc):
raise ShapeError(
"The Matrix `value` doesn't have the same dimensions "
"as the in sub-Matrix given by `key`.")
if not isinstance(value, SparseMatrix):
for i in range(value.rows):
for j in range(value.cols):
self[i + rlo, j + clo] = value[i, j]
else:
if (rhi - rlo)*(chi - clo) < len(self):
for i in range(rlo, rhi):
for j in range(clo, chi):
self._smat.pop((i, j), None)
else:
for i, j, v in self.row_list():
if rlo <= i < rhi and clo <= j < chi:
self._smat.pop((i, j), None)
for k, v in value._smat.items():
i, j = k
self[i + rlo, j + clo] = value[i, j]
def fill(self, value):
"""Fill self with the given value.
Notes
=====
Unless many values are going to be deleted (i.e. set to zero)
this will create a matrix that is slower than a dense matrix in
operations.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix.zeros(3); M
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> M.fill(1); M
Matrix([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
"""
if not value:
self._smat = {}
else:
v = self._sympify(value)
self._smat = {(i, j): v
for i in range(self.rows) for j in range(self.cols)}
def row_del(self, k):
"""Delete the given row of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix([[0, 0], [0, 1]])
>>> M
Matrix([
[0, 0],
[0, 1]])
>>> M.row_del(0)
>>> M
Matrix([[0, 1]])
See Also
========
col_del
"""
newD = {}
k = a2idx(k, self.rows)
for (i, j) in self._smat:
if i == k:
pass
elif i > k:
newD[i - 1, j] = self._smat[i, j]
else:
newD[i, j] = self._smat[i, j]
self._smat = newD
self.rows -= 1
def row_join(self, other):
"""Returns B appended after A (column-wise augmenting)::
[A B]
Examples
========
>>> from sympy import SparseMatrix, Matrix
>>> A = SparseMatrix(((1, 0, 1), (0, 1, 0), (1, 1, 0)))
>>> A
Matrix([
[1, 0, 1],
[0, 1, 0],
[1, 1, 0]])
>>> B = SparseMatrix(((1, 0, 0), (0, 1, 0), (0, 0, 1)))
>>> B
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> C = A.row_join(B); C
Matrix([
[1, 0, 1, 1, 0, 0],
[0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 1]])
>>> C == A.row_join(Matrix(B))
True
Joining at row ends is the same as appending columns at the end
of the matrix:
>>> C == A.col_insert(A.cols, B)
True
"""
# A null matrix can always be stacked (see #10770)
if self.cols == 0 and self.rows != other.rows:
return self._new(other.rows, 0, []).row_join(other)
A, B = self, other
if not A.rows == B.rows:
raise ShapeError()
A = A.copy()
if not isinstance(B, SparseMatrix):
k = 0
b = B._mat
for i in range(B.rows):
for j in range(B.cols):
v = b[k]
if v:
A._smat[i, j + A.cols] = v
k += 1
else:
for (i, j), v in B._smat.items():
A._smat[i, j + A.cols] = v
A.cols += B.cols
return A
def row_op(self, i, f):
"""In-place operation on row ``i`` using two-arg functor whose args are
interpreted as ``(self[i, j], j)``.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix.eye(3)*2
>>> M[0, 1] = -1
>>> M.row_op(1, lambda v, j: v + 2*M[0, j]); M
Matrix([
[2, -1, 0],
[4, 0, 0],
[0, 0, 2]])
See Also
========
row
zip_row_op
col_op
"""
for j in range(self.cols):
v = self._smat.get((i, j), S.Zero)
fv = f(v, j)
if fv:
self._smat[i, j] = fv
elif v:
self._smat.pop((i, j))
def row_swap(self, i, j):
"""Swap, in place, columns i and j.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix.eye(3); S[2, 1] = 2
>>> S.row_swap(1, 0); S
Matrix([
[0, 1, 0],
[1, 0, 0],
[0, 2, 1]])
"""
if i > j:
i, j = j, i
rows = self.row_list()
temp = []
for ii, jj, v in rows:
if ii == i:
self._smat.pop((ii, jj))
temp.append((jj, v))
elif ii == j:
self._smat.pop((ii, jj))
self._smat[i, jj] = v
elif ii > j:
break
for k, v in temp:
self._smat[j, k] = v
def zip_row_op(self, i, k, f):
"""In-place operation on row ``i`` using two-arg functor whose args are
interpreted as ``(self[i, j], self[k, j])``.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix.eye(3)*2
>>> M[0, 1] = -1
>>> M.zip_row_op(1, 0, lambda v, u: v + 2*u); M
Matrix([
[2, -1, 0],
[4, 0, 0],
[0, 0, 2]])
See Also
========
row
row_op
col_op
"""
self.row_op(i, lambda v, j: f(v, self[k, j]))
is_zero = False
|
ed3c77a15d39b5ee5cb5e7973eeb7391413863dd0e6cb59b5969deaaf0bcc123 | from __future__ import division, print_function
from typing import Any
from sympy.core.add import Add
from sympy.core.basic import Basic
from sympy.core.compatibility import (
Callable, NotIterable, as_int, is_sequence)
from sympy.core.decorators import deprecated
from sympy.core.expr import Expr
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.core.symbol import Dummy, Symbol, _uniquely_named_symbol
from sympy.core.sympify import sympify
from sympy.functions import exp, factorial, log
from sympy.functions.elementary.miscellaneous import Max, Min, sqrt
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.polys import cancel
from sympy.printing import sstr
from sympy.simplify import simplify as _simplify
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import flatten
from sympy.utilities.misc import filldedent
from .common import (
MatrixCommon, MatrixError, NonSquareMatrixError, NonInvertibleMatrixError,
ShapeError)
from .utilities import _iszero, _is_zero_after_expand_mul
from .determinant import (
_find_reasonable_pivot, _find_reasonable_pivot_naive,
_adjugate, _charpoly, _cofactor, _cofactor_matrix,
_det, _det_bareiss, _det_berkowitz, _det_LU, _minor, _minor_submatrix)
from .reductions import _is_echelon, _echelon_form, _rank, _rref
from .subspaces import _columnspace, _nullspace, _rowspace, _orthogonalize
from .eigen import (
_eigenvals, _eigenvects, _is_diagonalizable, _diagonalize,
_eval_is_positive_definite,
_is_positive_definite, _is_positive_semidefinite,
_is_negative_definite, _is_negative_semidefinite, _is_indefinite,
_jordan_form, _left_eigenvects, _singular_values)
from .decompositions import (
_rank_decomposition, _cholesky, _LDLdecomposition,
_LUdecomposition, _LUdecomposition_Simple, _LUdecompositionFF,
_QRdecomposition)
from .solvers import (
_diagonal_solve, _lower_triangular_solve, _upper_triangular_solve,
_cholesky_solve, _LDLsolve, _LUsolve, _QRsolve, _gauss_jordan_solve,
_pinv_solve, _solve, _solve_least_squares)
from .inverse import (
_pinv, _inv_mod, _inv_ADJ, _inv_GE, _inv_LU, _inv_CH, _inv_LDL, _inv_QR,
_inv, _inv_block)
class DeferredVector(Symbol, NotIterable):
"""A vector whose components are deferred (e.g. for use with lambdify)
Examples
========
>>> from sympy import DeferredVector, lambdify
>>> X = DeferredVector( 'X' )
>>> X
X
>>> expr = (X[0] + 2, X[2] + 3)
>>> func = lambdify( X, expr)
>>> func( [1, 2, 3] )
(3, 6)
"""
def __getitem__(self, i):
if i == -0:
i = 0
if i < 0:
raise IndexError('DeferredVector index out of range')
component_name = '%s[%d]' % (self.name, i)
return Symbol(component_name)
def __str__(self):
return sstr(self)
def __repr__(self):
return "DeferredVector('%s')" % self.name
class MatrixDeterminant(MatrixCommon):
"""Provides basic matrix determinant operations. Should not be instantiated
directly. See ``determinant.py`` for their implementations."""
def _eval_det_bareiss(self, iszerofunc=_is_zero_after_expand_mul):
return _det_bareiss(self, iszerofunc=iszerofunc)
def _eval_det_berkowitz(self):
return _det_berkowitz(self)
def _eval_det_lu(self, iszerofunc=_iszero, simpfunc=None):
return _det_LU(self, iszerofunc=iszerofunc, simpfunc=simpfunc)
def _eval_determinant(self): # for expressions.determinant.Determinant
return _det(self)
def adjugate(self, method="berkowitz"):
return _adjugate(self, method=method)
def charpoly(self, x='lambda', simplify=_simplify):
return _charpoly(self, x=x, simplify=simplify)
def cofactor(self, i, j, method="berkowitz"):
return _cofactor(self, i, j, method=method)
def cofactor_matrix(self, method="berkowitz"):
return _cofactor_matrix(self, method=method)
def det(self, method="bareiss", iszerofunc=None):
return _det(self, method=method, iszerofunc=iszerofunc)
def minor(self, i, j, method="berkowitz"):
return _minor(self, i, j, method=method)
def minor_submatrix(self, i, j):
return _minor_submatrix(self, i, j)
_find_reasonable_pivot.__doc__ = _find_reasonable_pivot.__doc__
_find_reasonable_pivot_naive.__doc__ = _find_reasonable_pivot_naive.__doc__
_eval_det_bareiss.__doc__ = _det_bareiss.__doc__
_eval_det_berkowitz.__doc__ = _det_berkowitz.__doc__
_eval_det_lu.__doc__ = _det_LU.__doc__
_eval_determinant.__doc__ = _det.__doc__
adjugate.__doc__ = _adjugate.__doc__
charpoly.__doc__ = _charpoly.__doc__
cofactor.__doc__ = _cofactor.__doc__
cofactor_matrix.__doc__ = _cofactor_matrix.__doc__
det.__doc__ = _det.__doc__
minor.__doc__ = _minor.__doc__
minor_submatrix.__doc__ = _minor_submatrix.__doc__
class MatrixReductions(MatrixDeterminant):
"""Provides basic matrix row/column operations. Should not be instantiated
directly. See ``reductions.py`` for some of their implementations."""
def echelon_form(self, iszerofunc=_iszero, simplify=False, with_pivots=False):
return _echelon_form(self, iszerofunc=iszerofunc, simplify=simplify,
with_pivots=with_pivots)
@property
def is_echelon(self):
return _is_echelon(self)
def rank(self, iszerofunc=_iszero, simplify=False):
return _rank(self, iszerofunc=iszerofunc, simplify=simplify)
def rref(self, iszerofunc=_iszero, simplify=False, pivots=True,
normalize_last=True):
return _rref(self, iszerofunc=iszerofunc, simplify=simplify,
pivots=pivots, normalize_last=normalize_last)
echelon_form.__doc__ = _echelon_form.__doc__
is_echelon.__doc__ = _is_echelon.__doc__
rank.__doc__ = _rank.__doc__
rref.__doc__ = _rref.__doc__
def _normalize_op_args(self, op, col, k, col1, col2, error_str="col"):
"""Validate the arguments for a row/column operation. ``error_str``
can be one of "row" or "col" depending on the arguments being parsed."""
if op not in ["n->kn", "n<->m", "n->n+km"]:
raise ValueError("Unknown {} operation '{}'. Valid col operations "
"are 'n->kn', 'n<->m', 'n->n+km'".format(error_str, op))
# define self_col according to error_str
self_cols = self.cols if error_str == 'col' else self.rows
# normalize and validate the arguments
if op == "n->kn":
col = col if col is not None else col1
if col is None or k is None:
raise ValueError("For a {0} operation 'n->kn' you must provide the "
"kwargs `{0}` and `k`".format(error_str))
if not 0 <= col < self_cols:
raise ValueError("This matrix doesn't have a {} '{}'".format(error_str, col))
elif op == "n<->m":
# we need two cols to swap. It doesn't matter
# how they were specified, so gather them together and
# remove `None`
cols = set((col, k, col1, col2)).difference([None])
if len(cols) > 2:
# maybe the user left `k` by mistake?
cols = set((col, col1, col2)).difference([None])
if len(cols) != 2:
raise ValueError("For a {0} operation 'n<->m' you must provide the "
"kwargs `{0}1` and `{0}2`".format(error_str))
col1, col2 = cols
if not 0 <= col1 < self_cols:
raise ValueError("This matrix doesn't have a {} '{}'".format(error_str, col1))
if not 0 <= col2 < self_cols:
raise ValueError("This matrix doesn't have a {} '{}'".format(error_str, col2))
elif op == "n->n+km":
col = col1 if col is None else col
col2 = col1 if col2 is None else col2
if col is None or col2 is None or k is None:
raise ValueError("For a {0} operation 'n->n+km' you must provide the "
"kwargs `{0}`, `k`, and `{0}2`".format(error_str))
if col == col2:
raise ValueError("For a {0} operation 'n->n+km' `{0}` and `{0}2` must "
"be different.".format(error_str))
if not 0 <= col < self_cols:
raise ValueError("This matrix doesn't have a {} '{}'".format(error_str, col))
if not 0 <= col2 < self_cols:
raise ValueError("This matrix doesn't have a {} '{}'".format(error_str, col2))
else:
raise ValueError('invalid operation %s' % repr(op))
return op, col, k, col1, col2
def _eval_col_op_multiply_col_by_const(self, col, k):
def entry(i, j):
if j == col:
return k * self[i, j]
return self[i, j]
return self._new(self.rows, self.cols, entry)
def _eval_col_op_swap(self, col1, col2):
def entry(i, j):
if j == col1:
return self[i, col2]
elif j == col2:
return self[i, col1]
return self[i, j]
return self._new(self.rows, self.cols, entry)
def _eval_col_op_add_multiple_to_other_col(self, col, k, col2):
def entry(i, j):
if j == col:
return self[i, j] + k * self[i, col2]
return self[i, j]
return self._new(self.rows, self.cols, entry)
def _eval_row_op_swap(self, row1, row2):
def entry(i, j):
if i == row1:
return self[row2, j]
elif i == row2:
return self[row1, j]
return self[i, j]
return self._new(self.rows, self.cols, entry)
def _eval_row_op_multiply_row_by_const(self, row, k):
def entry(i, j):
if i == row:
return k * self[i, j]
return self[i, j]
return self._new(self.rows, self.cols, entry)
def _eval_row_op_add_multiple_to_other_row(self, row, k, row2):
def entry(i, j):
if i == row:
return self[i, j] + k * self[row2, j]
return self[i, j]
return self._new(self.rows, self.cols, entry)
def elementary_col_op(self, op="n->kn", col=None, k=None, col1=None, col2=None):
"""Performs the elementary column operation `op`.
`op` may be one of
* "n->kn" (column n goes to k*n)
* "n<->m" (swap column n and column m)
* "n->n+km" (column n goes to column n + k*column m)
Parameters
==========
op : string; the elementary column operation
col : the column to apply the column operation
k : the multiple to apply in the column operation
col1 : one column of a column swap
col2 : second column of a column swap or column "m" in the column operation
"n->n+km"
"""
op, col, k, col1, col2 = self._normalize_op_args(op, col, k, col1, col2, "col")
# now that we've validated, we're all good to dispatch
if op == "n->kn":
return self._eval_col_op_multiply_col_by_const(col, k)
if op == "n<->m":
return self._eval_col_op_swap(col1, col2)
if op == "n->n+km":
return self._eval_col_op_add_multiple_to_other_col(col, k, col2)
def elementary_row_op(self, op="n->kn", row=None, k=None, row1=None, row2=None):
"""Performs the elementary row operation `op`.
`op` may be one of
* "n->kn" (row n goes to k*n)
* "n<->m" (swap row n and row m)
* "n->n+km" (row n goes to row n + k*row m)
Parameters
==========
op : string; the elementary row operation
row : the row to apply the row operation
k : the multiple to apply in the row operation
row1 : one row of a row swap
row2 : second row of a row swap or row "m" in the row operation
"n->n+km"
"""
op, row, k, row1, row2 = self._normalize_op_args(op, row, k, row1, row2, "row")
# now that we've validated, we're all good to dispatch
if op == "n->kn":
return self._eval_row_op_multiply_row_by_const(row, k)
if op == "n<->m":
return self._eval_row_op_swap(row1, row2)
if op == "n->n+km":
return self._eval_row_op_add_multiple_to_other_row(row, k, row2)
class MatrixSubspaces(MatrixReductions):
"""Provides methods relating to the fundamental subspaces of a matrix.
Should not be instantiated directly. See ``subspaces.py`` for their
implementations."""
def columnspace(self, simplify=False):
return _columnspace(self, simplify=simplify)
def nullspace(self, simplify=False, iszerofunc=_iszero):
return _nullspace(self, simplify=simplify, iszerofunc=iszerofunc)
def rowspace(self, simplify=False):
return _rowspace(self, simplify=simplify)
# This is a classmethod but is converted to such later in order to allow
# assignment of __doc__ since that does not work for already wrapped
# classmethods in Python 3.6.
def orthogonalize(cls, *vecs, **kwargs):
return _orthogonalize(cls, *vecs, **kwargs)
columnspace.__doc__ = _columnspace.__doc__
nullspace.__doc__ = _nullspace.__doc__
rowspace.__doc__ = _rowspace.__doc__
orthogonalize.__doc__ = _orthogonalize.__doc__
orthogonalize = classmethod(orthogonalize)
class MatrixEigen(MatrixSubspaces):
"""Provides basic matrix eigenvalue/vector operations.
Should not be instantiated directly. See ``eigen.py`` for their
implementations."""
def _eval_is_positive_definite(self, method="eigen"):
return _eval_is_positive_definite(self, method=method)
def eigenvals(self, error_when_incomplete=True, **flags):
return _eigenvals(self, error_when_incomplete=error_when_incomplete, **flags)
def eigenvects(self, error_when_incomplete=True, iszerofunc=_iszero, **flags):
return _eigenvects(self, error_when_incomplete=error_when_incomplete,
iszerofunc=iszerofunc, **flags)
def is_diagonalizable(self, reals_only=False, **kwargs):
return _is_diagonalizable(self, reals_only=reals_only, **kwargs)
def diagonalize(self, reals_only=False, sort=False, normalize=False):
return _diagonalize(self, reals_only=reals_only, sort=sort,
normalize=normalize)
@property
def is_positive_definite(self):
return _is_positive_definite(self)
@property
def is_positive_semidefinite(self):
return _is_positive_semidefinite(self)
@property
def is_negative_definite(self):
return _is_negative_definite(self)
@property
def is_negative_semidefinite(self):
return _is_negative_semidefinite(self)
@property
def is_indefinite(self):
return _is_indefinite(self)
def jordan_form(self, calc_transform=True, **kwargs):
return _jordan_form(self, calc_transform=calc_transform, **kwargs)
def left_eigenvects(self, **flags):
return _left_eigenvects(self, **flags)
def singular_values(self):
return _singular_values(self)
_eval_is_positive_definite.__doc__ = _eval_is_positive_definite.__doc__
eigenvals.__doc__ = _eigenvals.__doc__
eigenvects.__doc__ = _eigenvects.__doc__
is_diagonalizable.__doc__ = _is_diagonalizable.__doc__
diagonalize.__doc__ = _diagonalize.__doc__
is_positive_definite.__doc__ = _is_positive_definite.__doc__
is_positive_semidefinite.__doc__ = _is_positive_semidefinite.__doc__
is_negative_definite.__doc__ = _is_negative_definite.__doc__
is_negative_semidefinite.__doc__ = _is_negative_semidefinite.__doc__
is_indefinite.__doc__ = _is_indefinite.__doc__
jordan_form.__doc__ = _jordan_form.__doc__
left_eigenvects.__doc__ = _left_eigenvects.__doc__
singular_values.__doc__ = _singular_values.__doc__
class MatrixCalculus(MatrixCommon):
"""Provides calculus-related matrix operations."""
def diff(self, *args, **kwargs):
"""Calculate the derivative of each element in the matrix.
``args`` will be passed to the ``diff`` function.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.diff(x)
Matrix([
[1, 0],
[0, 0]])
See Also
========
integrate
limit
"""
# XXX this should be handled here rather than in Derivative
from sympy import Derivative
kwargs.setdefault('evaluate', True)
deriv = Derivative(self, *args, **kwargs)
if not isinstance(self, Basic):
return deriv.as_mutable()
else:
return deriv
def _eval_derivative(self, arg):
return self.applyfunc(lambda x: x.diff(arg))
def _accept_eval_derivative(self, s):
return s._visit_eval_derivative_array(self)
def _visit_eval_derivative_scalar(self, base):
# Types are (base: scalar, self: matrix)
return self.applyfunc(lambda x: base.diff(x))
def _visit_eval_derivative_array(self, base):
# Types are (base: array/matrix, self: matrix)
from sympy import derive_by_array
return derive_by_array(base, self)
def integrate(self, *args, **kwargs):
"""Integrate each element of the matrix. ``args`` will
be passed to the ``integrate`` function.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.integrate((x, ))
Matrix([
[x**2/2, x*y],
[ x, 0]])
>>> M.integrate((x, 0, 2))
Matrix([
[2, 2*y],
[2, 0]])
See Also
========
limit
diff
"""
return self.applyfunc(lambda x: x.integrate(*args, **kwargs))
def jacobian(self, X):
"""Calculates the Jacobian matrix (derivative of a vector-valued function).
Parameters
==========
``self`` : vector of expressions representing functions f_i(x_1, ..., x_n).
X : set of x_i's in order, it can be a list or a Matrix
Both ``self`` and X can be a row or a column matrix in any order
(i.e., jacobian() should always work).
Examples
========
>>> from sympy import sin, cos, Matrix
>>> from sympy.abc import rho, phi
>>> X = Matrix([rho*cos(phi), rho*sin(phi), rho**2])
>>> Y = Matrix([rho, phi])
>>> X.jacobian(Y)
Matrix([
[cos(phi), -rho*sin(phi)],
[sin(phi), rho*cos(phi)],
[ 2*rho, 0]])
>>> X = Matrix([rho*cos(phi), rho*sin(phi)])
>>> X.jacobian(Y)
Matrix([
[cos(phi), -rho*sin(phi)],
[sin(phi), rho*cos(phi)]])
See Also
========
hessian
wronskian
"""
if not isinstance(X, MatrixBase):
X = self._new(X)
# Both X and ``self`` can be a row or a column matrix, so we need to make
# sure all valid combinations work, but everything else fails:
if self.shape[0] == 1:
m = self.shape[1]
elif self.shape[1] == 1:
m = self.shape[0]
else:
raise TypeError("``self`` must be a row or a column matrix")
if X.shape[0] == 1:
n = X.shape[1]
elif X.shape[1] == 1:
n = X.shape[0]
else:
raise TypeError("X must be a row or a column matrix")
# m is the number of functions and n is the number of variables
# computing the Jacobian is now easy:
return self._new(m, n, lambda j, i: self[j].diff(X[i]))
def limit(self, *args):
"""Calculate the limit of each element in the matrix.
``args`` will be passed to the ``limit`` function.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.limit(x, 2)
Matrix([
[2, y],
[1, 0]])
See Also
========
integrate
diff
"""
return self.applyfunc(lambda x: x.limit(*args))
# https://github.com/sympy/sympy/pull/12854
class MatrixDeprecated(MatrixCommon):
"""A class to house deprecated matrix methods."""
def _legacy_array_dot(self, b):
"""Compatibility function for deprecated behavior of ``matrix.dot(vector)``
"""
from .dense import Matrix
if not isinstance(b, MatrixBase):
if is_sequence(b):
if len(b) != self.cols and len(b) != self.rows:
raise ShapeError(
"Dimensions incorrect for dot product: %s, %s" % (
self.shape, len(b)))
return self.dot(Matrix(b))
else:
raise TypeError(
"`b` must be an ordered iterable or Matrix, not %s." %
type(b))
mat = self
if mat.cols == b.rows:
if b.cols != 1:
mat = mat.T
b = b.T
prod = flatten((mat * b).tolist())
return prod
if mat.cols == b.cols:
return mat.dot(b.T)
elif mat.rows == b.rows:
return mat.T.dot(b)
else:
raise ShapeError("Dimensions incorrect for dot product: %s, %s" % (
self.shape, b.shape))
def berkowitz_charpoly(self, x=Dummy('lambda'), simplify=_simplify):
return self.charpoly(x=x)
def berkowitz_det(self):
"""Computes determinant using Berkowitz method.
See Also
========
det
berkowitz
"""
return self.det(method='berkowitz')
def berkowitz_eigenvals(self, **flags):
"""Computes eigenvalues of a Matrix using Berkowitz method.
See Also
========
berkowitz
"""
return self.eigenvals(**flags)
def berkowitz_minors(self):
"""Computes principal minors using Berkowitz method.
See Also
========
berkowitz
"""
sign, minors = self.one, []
for poly in self.berkowitz():
minors.append(sign * poly[-1])
sign = -sign
return tuple(minors)
def berkowitz(self):
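# Berkowitz' division-free algorithm: build a Toeplitz-like transformation
# matrix T for each leading principal submatrix and chain their products;
# each resulting vector holds the coefficients of that submatrix's
# characteristic polynomial, returned as nested tuples.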
from sympy.matrices import zeros
berk = ((1,),)
if not self:
return berk
if not self.is_square:
raise NonSquareMatrixError()
A, N = self, self.rows
transforms = [0] * (N - 1)
for n in range(N, 1, -1):
T, k = zeros(n + 1, n), n - 1
R, C = -A[k, :k], A[:k, k]
A, a = A[:k, :k], -A[k, k]
items = [C]
for i in range(0, n - 2):
items.append(A * items[i])
for i, B in enumerate(items):
items[i] = (R * B)[0, 0]
items = [self.one, a] + items
for i in range(n):
T[i:, i] = items[:n - i + 1]
transforms[k - 1] = T
polys = [self._new([self.one, -A[0, 0]])]
for i, T in enumerate(transforms):
polys.append(T * polys[i])
return berk + tuple(map(tuple, polys))
def cofactorMatrix(self, method="berkowitz"):
return self.cofactor_matrix(method=method)
def det_bareis(self):
return _det_bareiss(self)
def det_LU_decomposition(self):
"""Compute matrix determinant using LU decomposition
Note that this method fails if the LU decomposition itself
fails. In particular, if the matrix has no inverse this method
will fail.
TODO: Implement algorithm for sparse matrices (SFF),
http://www.eecis.udel.edu/~saunders/papers/sffge/it5.ps.
See Also
========
det
det_bareiss
berkowitz_det
"""
return self.det(method='lu')
def jordan_cell(self, eigenval, n):
return self.jordan_block(size=n, eigenvalue=eigenval)
def jordan_cells(self, calc_transformation=True):
P, J = self.jordan_form()
return P, J.get_diag_blocks()
def minorEntry(self, i, j, method="berkowitz"):
return self.minor(i, j, method=method)
def minorMatrix(self, i, j):
return self.minor_submatrix(i, j)
def permuteBkwd(self, perm):
"""Permute the rows of the matrix with the given permutation in reverse."""
return self.permute_rows(perm, direction='backward')
def permuteFwd(self, perm):
"""Permute the rows of the matrix with the given permutation."""
return self.permute_rows(perm, direction='forward')
class MatrixBase(MatrixDeprecated,
MatrixCalculus,
MatrixEigen,
MatrixCommon):
"""Base class for matrix objects."""
# Added just for numpy compatibility
__array_priority__ = 11
is_Matrix = True
_class_priority = 3
_sympify = staticmethod(sympify)
zero = S.Zero
one = S.One
# Mutable:
__hash__ = None # type: ignore
# Defined here the same as on Basic.
# We don't define _repr_png_ here because it would add a large amount of
# data to any notebook containing SymPy expressions, without adding
anything useful to the notebook. It can still be enabled manually, e.g.,
# for the qtconsole, with init_printing().
def _repr_latex_(self):
"""
IPython/Jupyter LaTeX printing
To change the behavior of this (e.g., pass in some settings to LaTeX),
use init_printing(). init_printing() will also enable LaTeX printing
for built in numeric types like ints and container types that contain
SymPy objects, like lists and dictionaries of expressions.
"""
from sympy.printing.latex import latex
s = latex(self, mode='plain')
return "$\\displaystyle %s$" % s
_repr_latex_orig = _repr_latex_ # type: Any
def __array__(self, dtype=object):
from .dense import matrix2numpy
return matrix2numpy(self, dtype=dtype)
def __len__(self):
"""Return the number of elements of ``self``.
Implemented mainly so bool(Matrix()) == False.
"""
return self.rows * self.cols
def __mathml__(self):
mml = ""
for i in range(self.rows):
mml += "<matrixrow>"
for j in range(self.cols):
mml += self[i, j].__mathml__()
mml += "</matrixrow>"
return "<matrix>" + mml + "</matrix>"
def _matrix_pow_by_jordan_blocks(self, num):
from sympy.matrices import diag, MutableMatrix
from sympy import binomial
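# Helper: raise a single Jordan block ``jc`` with eigenvalue l to the
# power n in place. The first row follows binomial(n, i)*l**(n - i)
# (with special handling of l == 0), and the remaining rows copy the
# first row along the superdiagonals.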
def jordan_cell_power(jc, n):
N = jc.shape[0]
l = jc[0,0]
if l.is_zero:
if N == 1 and n.is_nonnegative:
jc[0,0] = l**n
elif not (n.is_integer and n.is_nonnegative):
raise NonInvertibleMatrixError("Non-invertible matrix can only be raised to a nonnegative integer")
else:
for i in range(N):
jc[0,i] = KroneckerDelta(i, n)
else:
for i in range(N):
bn = binomial(n, i)
if isinstance(bn, binomial):
bn = bn._eval_expand_func()
jc[0,i] = l**(n-i)*bn
for i in range(N):
for j in range(1, N-i):
jc[j,i+j] = jc[j-1,i+j-1]
P, J = self.jordan_form()
jordan_cells = J.get_diag_blocks()
# Make sure jordan_cells matrices are mutable:
jordan_cells = [MutableMatrix(j) for j in jordan_cells]
for j in jordan_cells:
jordan_cell_power(j, num)
return self._new(P.multiply(diag(*jordan_cells))
.multiply(P.inv()))
def __repr__(self):
return sstr(self)
def __str__(self):
if self.rows == 0 or self.cols == 0:
return 'Matrix(%s, %s, [])' % (self.rows, self.cols)
return "Matrix(%s)" % str(self.tolist())
def _format_str(self, printer=None):
if not printer:
from sympy.printing.str import StrPrinter
printer = StrPrinter()
# Handle zero dimensions:
if self.rows == 0 or self.cols == 0:
return 'Matrix(%s, %s, [])' % (self.rows, self.cols)
if self.rows == 1:
return "Matrix([%s])" % self.table(printer, rowsep=',\n')
return "Matrix([\n%s])" % self.table(printer, rowsep=',\n')
@classmethod
def irregular(cls, ntop, *matrices, **kwargs):
"""Return a matrix filled by the given matrices which
are listed in order of appearance from left to right, top to
bottom as they first appear in the matrix. They must fill the
matrix completely.
Examples
========
>>> from sympy import ones, Matrix
>>> Matrix.irregular(3, ones(2,1), ones(3,3)*2, ones(2,2)*3,
... ones(1,1)*4, ones(2,2)*5, ones(1,2)*6, ones(1,2)*7)
Matrix([
[1, 2, 2, 2, 3, 3],
[1, 2, 2, 2, 3, 3],
[4, 2, 2, 2, 5, 5],
[6, 6, 7, 7, 5, 5]])
"""
from sympy.core.compatibility import as_int
ntop = as_int(ntop)
# make sure we are working with explicit matrices
b = [i.as_explicit() if hasattr(i, 'as_explicit') else i
for i in matrices]
q = list(range(len(b)))
dat = [i.rows for i in b]
active = [q.pop(0) for _ in range(ntop)]
cols = sum([b[i].cols for i in active])
rows = []
while any(dat):
r = []
for a, j in enumerate(active):
r.extend(b[j][-dat[j], :])
dat[j] -= 1
if dat[j] == 0 and q:
active[a] = q.pop(0)
if len(r) != cols:
raise ValueError(filldedent('''
Matrices provided do not appear to fill
the space completely.'''))
rows.append(r)
return cls._new(rows)
@classmethod
def _handle_creation_inputs(cls, *args, **kwargs):
"""Return the number of rows, cols and flat matrix elements.
Examples
========
>>> from sympy import Matrix, I
Matrix can be constructed as follows:
* from a nested list of iterables
>>> Matrix( ((1, 2+I), (3, 4)) )
Matrix([
[1, 2 + I],
[3, 4]])
* from un-nested iterable (interpreted as a column)
>>> Matrix( [1, 2] )
Matrix([
[1],
[2]])
* from un-nested iterable with dimensions
>>> Matrix(1, 2, [1, 2] )
Matrix([[1, 2]])
* from no arguments (a 0 x 0 matrix)
>>> Matrix()
Matrix(0, 0, [])
* from a rule
>>> Matrix(2, 2, lambda i, j: i/(j + 1) )
Matrix([
[0, 0],
[1, 1/2]])
See Also
========
irregular - filling a matrix with irregular blocks
"""
from sympy.matrices.sparse import SparseMatrix
from sympy.matrices.expressions.matexpr import MatrixSymbol
from sympy.matrices.expressions.blockmatrix import BlockMatrix
from sympy.utilities.iterables import reshape
flat_list = None
if len(args) == 1:
# Matrix(SparseMatrix(...))
if isinstance(args[0], SparseMatrix):
return args[0].rows, args[0].cols, flatten(args[0].tolist())
# Matrix(Matrix(...))
elif isinstance(args[0], MatrixBase):
return args[0].rows, args[0].cols, args[0]._mat
# Matrix(MatrixSymbol('X', 2, 2))
elif isinstance(args[0], Basic) and args[0].is_Matrix:
return args[0].rows, args[0].cols, args[0].as_explicit()._mat
# Matrix(numpy.ones((2, 2)))
elif hasattr(args[0], "__array__"):
# NumPy array or matrix or some other object that implements
# __array__. So let's first use this method to get a
# numpy.array() and then make a python list out of it.
arr = args[0].__array__()
if len(arr.shape) == 2:
rows, cols = arr.shape[0], arr.shape[1]
flat_list = [cls._sympify(i) for i in arr.ravel()]
return rows, cols, flat_list
elif len(arr.shape) == 1:
rows, cols = arr.shape[0], 1
flat_list = [cls.zero] * rows
for i in range(len(arr)):
flat_list[i] = cls._sympify(arr[i])
return rows, cols, flat_list
else:
raise NotImplementedError(
"SymPy supports just 1D and 2D matrices")
# Matrix([1, 2, 3]) or Matrix([[1, 2], [3, 4]])
elif is_sequence(args[0]) \
and not isinstance(args[0], DeferredVector):
dat = list(args[0])
ismat = lambda i: isinstance(i, MatrixBase) and (
evaluate or
isinstance(i, BlockMatrix) or
isinstance(i, MatrixSymbol))
raw = lambda i: is_sequence(i) and not ismat(i)
evaluate = kwargs.get('evaluate', True)
if evaluate:
def do(x):
# make Block and Symbol explicit
if isinstance(x, (list, tuple)):
return type(x)([do(i) for i in x])
if isinstance(x, BlockMatrix) or \
isinstance(x, MatrixSymbol) and \
all(_.is_Integer for _ in x.shape):
return x.as_explicit()
return x
dat = do(dat)
if dat == [] or dat == [[]]:
rows = cols = 0
flat_list = []
elif not any(raw(i) or ismat(i) for i in dat):
# a column as a list of values
flat_list = [cls._sympify(i) for i in dat]
rows = len(flat_list)
cols = 1 if rows else 0
elif evaluate and all(ismat(i) for i in dat):
# a column as a list of matrices
ncol = set(i.cols for i in dat if any(i.shape))
if ncol:
if len(ncol) != 1:
raise ValueError('mismatched dimensions')
flat_list = [_ for i in dat for r in i.tolist() for _ in r]
cols = ncol.pop()
rows = len(flat_list)//cols
else:
rows = cols = 0
flat_list = []
elif evaluate and any(ismat(i) for i in dat):
ncol = set()
flat_list = []
for i in dat:
if ismat(i):
flat_list.extend(
[k for j in i.tolist() for k in j])
if any(i.shape):
ncol.add(i.cols)
elif raw(i):
if i:
ncol.add(len(i))
flat_list.extend(i)
else:
ncol.add(1)
flat_list.append(i)
if len(ncol) > 1:
raise ValueError('mismatched dimensions')
cols = ncol.pop()
rows = len(flat_list)//cols
else:
# list of lists; each sublist is a logical row
# which might consist of many rows if the values in
# the row are matrices
flat_list = []
ncol = set()
rows = cols = 0
for row in dat:
if not is_sequence(row) and \
not getattr(row, 'is_Matrix', False):
raise ValueError('expecting list of lists')
if not row:
continue
if evaluate and all(ismat(i) for i in row):
r, c, flatT = cls._handle_creation_inputs(
[i.T for i in row])
T = reshape(flatT, [c])
flat = [T[i][j] for j in range(c) for i in range(r)]
r, c = c, r
else:
r = 1
if getattr(row, 'is_Matrix', False):
c = 1
flat = [row]
else:
c = len(row)
flat = [cls._sympify(i) for i in row]
ncol.add(c)
if len(ncol) > 1:
raise ValueError('mismatched dimensions')
flat_list.extend(flat)
rows += r
cols = ncol.pop() if ncol else 0
elif len(args) == 3:
rows = as_int(args[0])
cols = as_int(args[1])
if rows < 0 or cols < 0:
raise ValueError("Cannot create a {} x {} matrix. "
"Both dimensions must be positive".format(rows, cols))
# Matrix(2, 2, lambda i, j: i+j)
if len(args) == 3 and isinstance(args[2], Callable):
op = args[2]
flat_list = []
for i in range(rows):
flat_list.extend(
[cls._sympify(op(cls._sympify(i), cls._sympify(j)))
for j in range(cols)])
# Matrix(2, 2, [1, 2, 3, 4])
elif len(args) == 3 and is_sequence(args[2]):
flat_list = args[2]
if len(flat_list) != rows * cols:
raise ValueError(
'List length should be equal to rows*columns')
flat_list = [cls._sympify(i) for i in flat_list]
# Matrix()
elif len(args) == 0:
# Empty Matrix
rows = cols = 0
flat_list = []
if flat_list is None:
raise TypeError(filldedent('''
Data type not understood; expecting list of lists
or lists of values.'''))
return rows, cols, flat_list
def _setitem(self, key, value):
"""Helper to set value at location given by key.
Examples
========
>>> from sympy import Matrix, I, zeros, ones
>>> m = Matrix(((1, 2+I), (3, 4)))
>>> m
Matrix([
[1, 2 + I],
[3, 4]])
>>> m[1, 0] = 9
>>> m
Matrix([
[1, 2 + I],
[9, 4]])
>>> m[1, 0] = [[0, 1]]
To replace row r you assign to position r*m where m
is the number of columns:
>>> M = zeros(4)
>>> m = M.cols
>>> M[3*m] = ones(1, m)*2; M
Matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[2, 2, 2, 2]])
And to replace column c you can assign to position c:
>>> M[2] = ones(m, 1)*4; M
Matrix([
[0, 0, 4, 0],
[0, 0, 4, 0],
[0, 0, 4, 0],
[2, 2, 4, 2]])
"""
from .dense import Matrix
is_slice = isinstance(key, slice)
i, j = key = self.key2ij(key)
is_mat = isinstance(value, MatrixBase)
if type(i) is slice or type(j) is slice:
if is_mat:
self.copyin_matrix(key, value)
return
if not isinstance(value, Expr) and is_sequence(value):
self.copyin_list(key, value)
return
raise ValueError('unexpected value: %s' % value)
else:
if (not is_mat and
not isinstance(value, Basic) and is_sequence(value)):
value = Matrix(value)
is_mat = True
if is_mat:
if is_slice:
key = (slice(*divmod(i, self.cols)),
slice(*divmod(j, self.cols)))
else:
key = (slice(i, i + value.rows),
slice(j, j + value.cols))
self.copyin_matrix(key, value)
else:
return i, j, self._sympify(value)
return
def add(self, b):
"""Return self + b """
return self + b
def condition_number(self):
"""Returns the condition number of a matrix.
This is the maximum singular value divided by the minimum singular value
Examples
========
>>> from sympy import Matrix, S
>>> A = Matrix([[1, 0, 0], [0, 10, 0], [0, 0, S.One/10]])
>>> A.condition_number()
100
See Also
========
singular_values
"""
if not self:
return self.zero
singularvalues = self.singular_values()
return Max(*singularvalues) / Min(*singularvalues)
def copy(self):
"""
Returns the copy of a matrix.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix(2, 2, [1, 2, 3, 4])
>>> A.copy()
Matrix([
[1, 2],
[3, 4]])
"""
return self._new(self.rows, self.cols, self._mat)
def cross(self, b):
r"""
Return the cross product of ``self`` and ``b`` relaxing the condition
of compatible dimensions: if each has 3 elements, a matrix of the
same type and shape as ``self`` will be returned. If ``b`` has the same
shape as ``self`` then common identities for the cross product (like
`a \times b = - b \times a`) will hold.
Parameters
==========
b : 3x1 or 1x3 Matrix
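Examples
========
The cross product of two unit vectors:
>>> from sympy import Matrix
>>> Matrix([1, 0, 0]).cross(Matrix([0, 1, 0]))
Matrix([
[0],
[0],
[1]])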
See Also
========
dot
multiply
multiply_elementwise
"""
if not is_sequence(b):
raise TypeError(
"`b` must be an ordered iterable or Matrix, not %s." %
type(b))
if not (self.rows * self.cols == b.rows * b.cols == 3):
raise ShapeError("Dimensions incorrect for cross product: %s x %s" %
((self.rows, self.cols), (b.rows, b.cols)))
else:
return self._new(self.rows, self.cols, (
(self[1] * b[2] - self[2] * b[1]),
(self[2] * b[0] - self[0] * b[2]),
(self[0] * b[1] - self[1] * b[0])))
@property
def D(self):
"""Return Dirac conjugate (if ``self.rows == 4``).
Examples
========
>>> from sympy import Matrix, I, eye
>>> m = Matrix((0, 1 + I, 2, 3))
>>> m.D
Matrix([[0, 1 - I, -2, -3]])
>>> m = (eye(4) + I*eye(4))
>>> m[0, 3] = 2
>>> m.D
Matrix([
[1 - I, 0, 0, 0],
[ 0, 1 - I, 0, 0],
[ 0, 0, -1 + I, 0],
[ 2, 0, 0, -1 + I]])
If the matrix does not have 4 rows an AttributeError will be raised
because this property is only defined for matrices with 4 rows.
>>> Matrix(eye(2)).D
Traceback (most recent call last):
...
AttributeError: Matrix has no attribute D.
See Also
========
sympy.matrices.common.MatrixCommon.conjugate: By-element conjugation
sympy.matrices.common.MatrixCommon.H: Hermite conjugation
"""
from sympy.physics.matrices import mgamma
if self.rows != 4:
# In Python 3.2, properties can only return an AttributeError
# so we can't raise a ShapeError -- see commit which added the
# first line of this inline comment. Also, there is no need
# for a message since MatrixBase will raise the AttributeError
raise AttributeError
return self.H * mgamma(0)
def dot(self, b, hermitian=None, conjugate_convention=None):
"""Return the dot or inner product of two vectors of equal length.
Here ``self`` must be a ``Matrix`` of size 1 x n or n x 1, and ``b``
must be either a matrix of size 1 x n, n x 1, or a list/tuple of length n.
A scalar is returned.
By default, ``dot`` does not conjugate ``self`` or ``b``, even if there are
complex entries. Set ``hermitian=True`` (and optionally a ``conjugate_convention``)
to compute the hermitian inner product.
Possible kwargs are ``hermitian`` and ``conjugate_convention``.
If ``conjugate_convention`` is ``"left"``, ``"math"`` or ``"maths"``,
the conjugate of the first vector (``self``) is used. If ``"right"``
or ``"physics"`` is specified, the conjugate of the second vector ``b`` is used.
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> v = Matrix([1, 1, 1])
>>> M.row(0).dot(v)
6
>>> M.col(0).dot(v)
12
>>> v = [3, 2, 1]
>>> M.row(0).dot(v)
10
>>> from sympy import I
>>> q = Matrix([1*I, 1*I, 1*I])
>>> q.dot(q, hermitian=False)
-3
>>> q.dot(q, hermitian=True)
3
>>> q1 = Matrix([1, 1, 1*I])
>>> q.dot(q1, hermitian=True, conjugate_convention="maths")
1 - 2*I
>>> q.dot(q1, hermitian=True, conjugate_convention="physics")
1 + 2*I
See Also
========
cross
multiply
multiply_elementwise
"""
from .dense import Matrix
if not isinstance(b, MatrixBase):
if is_sequence(b):
if len(b) != self.cols and len(b) != self.rows:
raise ShapeError(
"Dimensions incorrect for dot product: %s, %s" % (
self.shape, len(b)))
return self.dot(Matrix(b))
else:
raise TypeError(
"`b` must be an ordered iterable or Matrix, not %s." %
type(b))
mat = self
if (1 not in mat.shape) or (1 not in b.shape):
SymPyDeprecationWarning(
feature="Dot product of non row/column vectors",
issue=13815,
deprecated_since_version="1.2",
useinstead="* to take matrix products").warn()
return mat._legacy_array_dot(b)
if len(mat) != len(b):
raise ShapeError("Dimensions incorrect for dot product: %s, %s" % (self.shape, b.shape))
n = len(mat)
if mat.shape != (1, n):
mat = mat.reshape(1, n)
if b.shape != (n, 1):
b = b.reshape(n, 1)
# Now ``mat`` is a row vector and ``b`` is a column vector.
# If it so happens that only conjugate_convention is passed
# then automatically set hermitian to True. If only hermitian
# is True but conjugate_convention is not passed, then
# automatically set it to ``"maths"``.
if conjugate_convention is not None and hermitian is None:
hermitian = True
if hermitian and conjugate_convention is None:
conjugate_convention = "maths"
if hermitian:
if conjugate_convention in ("maths", "left", "math"):
mat = mat.conjugate()
elif conjugate_convention in ("physics", "right"):
b = b.conjugate()
else:
raise ValueError("Unknown conjugate_convention was entered."
" conjugate_convention must be one of the"
" following: math, maths, left, physics or right.")
return (mat * b)[0]
def dual(self):
"""Returns the dual of a matrix, which is:
``(1/2)*levicivita(i, j, k, l)*M(k, l)`` summed over indices `k` and `l`
Since the levicivita method is antisymmetric for any pairwise
exchange of indices, the dual of a symmetric matrix is the zero
matrix. Strictly speaking the dual defined here assumes that the
'matrix' `M` is a contravariant antisymmetric second rank tensor,
so that the dual is a covariant second rank tensor.
"""
from sympy import LeviCivita
from sympy.matrices import zeros
M, n = self[:, :], self.rows
work = zeros(n)
if self.is_symmetric():
return work
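# Entries work[i, j] with i, j >= 1 are built from the first row of M;
# the first row and column of work are then built from the remaining
# block of M, using the antisymmetry of the Levi-Civita symbol.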
for i in range(1, n):
for j in range(1, n):
acum = 0
for k in range(1, n):
acum += LeviCivita(i, j, 0, k) * M[0, k]
work[i, j] = acum
work[j, i] = -acum
for l in range(1, n):
acum = 0
for a in range(1, n):
for b in range(1, n):
acum += LeviCivita(0, l, a, b) * M[a, b]
acum /= 2
work[0, l] = -acum
work[l, 0] = acum
return work
def _eval_matrix_exp_jblock(self):
"""A helper function to compute an exponential of a Jordan block
matrix
Examples
========
>>> from sympy import Symbol, Matrix
>>> l = Symbol('lamda')
A trivial example of 1*1 Jordan block:
>>> m = Matrix.jordan_block(1, l)
>>> m._eval_matrix_exp_jblock()
Matrix([[exp(lamda)]])
An example of 3*3 Jordan block:
>>> m = Matrix.jordan_block(3, l)
>>> m._eval_matrix_exp_jblock()
Matrix([
[exp(lamda), exp(lamda), exp(lamda)/2],
[ 0, exp(lamda), exp(lamda)],
[ 0, 0, exp(lamda)]])
References
==========
.. [1] https://en.wikipedia.org/wiki/Matrix_function#Jordan_decomposition
"""
size = self.rows
l = self[0, 0]
exp_l = exp(l)
bands = {i: exp_l / factorial(i) for i in range(size)}
from .sparsetools import banded
return self.__class__(banded(size, bands))
def analytic_func(self, f, x):
"""
Computes f(A) where A is a square matrix
and f is an analytic function.
Examples
========
>>> from sympy import Symbol, Matrix, exp, S, log
>>> x = Symbol('x')
>>> m = Matrix([[S(5)/4, S(3)/4], [S(3)/4, S(5)/4]])
>>> f = log(x)
>>> m.analytic_func(f, x)
Matrix([
[ 0, log(2)],
[log(2), 0]])
Parameters
==========
f : Expr
Analytic Function
x : Symbol
parameter of f
"""
from sympy import diff
if not self.is_square:
raise NonSquareMatrixError(
"Valid only for square matrices")
if not x.is_symbol:
raise ValueError("The parameter for f should be a symbol")
if x not in f.free_symbols:
raise ValueError("x should be a parameter in Function")
if x in self.free_symbols:
raise ValueError("x should be a parameter in Matrix")
eigen = self.eigenvals()
max_mul = max(eigen.values())
derivative = {}
dd = f
for i in range(max_mul - 1):
dd = diff(dd, x)
derivative[i + 1] = dd
n = self.shape[0]
r = self.zeros(n)
f_val = self.zeros(n, 1)
row = 0
for i in eigen:
mul = eigen[i]
f_val[row] = f.subs(x, i)
if not f.subs(x, i).free_symbols and not f.subs(x, i).is_complex:
raise ValueError("Cannot Evaluate the function is not"
" analytic at some eigen value")
val = 1
for a in range(n):
r[row, a] = val
val *= i
if mul > 1:
coe = [1 for ii in range(n)]
deri = 1
while mul > 1:
row = row + 1
mul -= 1
d_i = derivative[deri].subs(x, i)
if not d_i.free_symbols and not d_i.is_complex:
raise ValueError("Cannot Evaluate the function is not"
" analytic at some eigen value")
f_val[row] = d_i
for a in range(n):
if a - deri + 1 <= 0:
r[row, a] = 0
coe[a] = 0
continue
coe[a] = coe[a]*(a - deri + 1)
r[row, a] = coe[a]*pow(i, a - deri)
deri += 1
row += 1
c = r.solve(f_val)
ans = self.zeros(n)
pre = self.eye(n)
for i in range(n):
ans = ans + c[i]*pre
pre *= self
return ans
def exp(self):
"""Return the exponential of a square matrix
Examples
========
>>> from sympy import Symbol, Matrix
>>> t = Symbol('t')
>>> m = Matrix([[0, 1], [-1, 0]]) * t
>>> m.exp()
Matrix([
[ exp(I*t)/2 + exp(-I*t)/2, -I*exp(I*t)/2 + I*exp(-I*t)/2],
[I*exp(I*t)/2 - I*exp(-I*t)/2, exp(I*t)/2 + exp(-I*t)/2]])
"""
if not self.is_square:
raise NonSquareMatrixError(
"Exponentiation is valid only for square matrices")
try:
P, J = self.jordan_form()
cells = J.get_diag_blocks()
except MatrixError:
raise NotImplementedError(
"Exponentiation is implemented only for matrices for which the Jordan normal form can be computed")
blocks = [cell._eval_matrix_exp_jblock() for cell in cells]
from sympy.matrices import diag
from sympy import re
eJ = diag(*blocks)
# n = self.rows
ret = P.multiply(eJ, dotprodsimp=True).multiply(P.inv(), dotprodsimp=True)
if all(value.is_real for value in self.values()):
return type(self)(re(ret))
else:
return type(self)(ret)
def _eval_matrix_log_jblock(self):
"""Helper function to compute logarithm of a jordan block.
Examples
========
>>> from sympy import Symbol, Matrix
>>> l = Symbol('lamda')
A trivial example of 1*1 Jordan block:
>>> m = Matrix.jordan_block(1, l)
>>> m._eval_matrix_log_jblock()
Matrix([[log(lamda)]])
An example of 3*3 Jordan block:
>>> m = Matrix.jordan_block(3, l)
>>> m._eval_matrix_log_jblock()
Matrix([
[log(lamda), 1/lamda, -1/(2*lamda**2)],
[ 0, log(lamda), 1/lamda],
[ 0, 0, log(lamda)]])
"""
size = self.rows
l = self[0, 0]
if l.is_zero:
raise MatrixError(
'Could not take logarithm or reciprocal for the given '
'eigenvalue {}'.format(l))
bands = {0: log(l)}
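# Series of log around the eigenvalue: the i-th superdiagonal band
# is (-1)**(i + 1) / (i * l**i).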
for i in range(1, size):
bands[i] = -((-l) ** -i) / i
from .sparsetools import banded
return self.__class__(banded(size, bands))
def log(self, simplify=cancel):
"""Return the logarithm of a square matrix
Parameters
==========
simplify : function, bool
The function to simplify the result with.
Default is ``cancel``, which is effective to reduce the
expression growing for taking reciprocals and inverses for
symbolic matrices.
Examples
========
>>> from sympy import S, Matrix
Examples for positive-definite matrices:
>>> m = Matrix([[1, 1], [0, 1]])
>>> m.log()
Matrix([
[0, 1],
[0, 0]])
>>> m = Matrix([[S(5)/4, S(3)/4], [S(3)/4, S(5)/4]])
>>> m.log()
Matrix([
[ 0, log(2)],
[log(2), 0]])
Examples for non positive-definite matrices:
>>> m = Matrix([[S(3)/4, S(5)/4], [S(5)/4, S(3)/4]])
>>> m.log()
Matrix([
[ I*pi/2, log(2) - I*pi/2],
[log(2) - I*pi/2, I*pi/2]])
>>> m = Matrix(
... [[0, 0, 0, 1],
... [0, 0, 1, 0],
... [0, 1, 0, 0],
... [1, 0, 0, 0]])
>>> m.log()
Matrix([
[ I*pi/2, 0, 0, -I*pi/2],
[ 0, I*pi/2, -I*pi/2, 0],
[ 0, -I*pi/2, I*pi/2, 0],
[-I*pi/2, 0, 0, I*pi/2]])
"""
if not self.is_square:
raise NonSquareMatrixError(
"Logarithm is valid only for square matrices")
try:
if simplify:
P, J = simplify(self).jordan_form()
else:
P, J = self.jordan_form()
cells = J.get_diag_blocks()
except MatrixError:
raise NotImplementedError(
"Logarithm is implemented only for matrices for which "
"the Jordan normal form can be computed")
blocks = [
cell._eval_matrix_log_jblock()
for cell in cells]
from sympy.matrices import diag
eJ = diag(*blocks)
if simplify:
ret = simplify(P * eJ * simplify(P.inv()))
ret = self.__class__(ret)
else:
ret = P * eJ * P.inv()
return ret
def is_nilpotent(self):
"""Checks if a matrix is nilpotent.
A matrix B is nilpotent if for some integer k, B**k is
a zero matrix.
Examples
========
>>> from sympy import Matrix
>>> a = Matrix([[0, 0, 0], [1, 0, 0], [1, 1, 0]])
>>> a.is_nilpotent()
True
>>> a = Matrix([[1, 0, 1], [1, 0, 0], [1, 1, 0]])
>>> a.is_nilpotent()
False
"""
if not self:
return True
if not self.is_square:
raise NonSquareMatrixError(
"Nilpotency is valid only for square matrices")
x = _uniquely_named_symbol('x', self)
p = self.charpoly(x)
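# A square matrix is nilpotent iff its characteristic polynomial is x**n.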
if p.args[0] == x ** self.rows:
return True
return False
def key2bounds(self, keys):
"""Converts a key with potentially mixed types of keys (integer and slice)
into a tuple of ranges and raises an error if any index is out of ``self``'s
range.
See Also
========
key2ij
"""
from sympy.matrices.common import a2idx as a2idx_ # Remove this line after deprecation of a2idx from matrices.py
islice, jslice = [isinstance(k, slice) for k in keys]
if islice:
if not self.rows:
rlo = rhi = 0
else:
rlo, rhi = keys[0].indices(self.rows)[:2]
else:
rlo = a2idx_(keys[0], self.rows)
rhi = rlo + 1
if jslice:
if not self.cols:
clo = chi = 0
else:
clo, chi = keys[1].indices(self.cols)[:2]
else:
clo = a2idx_(keys[1], self.cols)
chi = clo + 1
return rlo, rhi, clo, chi
def key2ij(self, key):
"""Converts key into canonical form, converting integers or indexable
items into valid integers for ``self``'s range or returning slices
unchanged.
See Also
========
key2bounds
"""
from sympy.matrices.common import a2idx as a2idx_ # Remove this line after deprecation of a2idx from matrices.py
if is_sequence(key):
if not len(key) == 2:
raise TypeError('key must be a sequence of length 2')
return [a2idx_(i, n) if not isinstance(i, slice) else i
for i, n in zip(key, self.shape)]
elif isinstance(key, slice):
return key.indices(len(self))[:2]
else:
return divmod(a2idx_(key, len(self)), self.cols)
def normalized(self, iszerofunc=_iszero):
"""Return the normalized version of ``self``.
Parameters
==========
iszerofunc : Function, optional
A function to determine whether ``self`` is a zero vector.
The default ``_iszero`` tests to see if each element is
exactly zero.
Returns
=======
Matrix
Normalized vector form of ``self``.
It has the same length as a unit vector. However, a zero vector
will be returned for a vector with norm 0.
Raises
======
ShapeError
If the matrix is not in a vector form.
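Examples
========
>>> from sympy import Matrix
>>> Matrix([3, 4]).normalized()
Matrix([
[3/5],
[4/5]])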
See Also
========
norm
"""
if self.rows != 1 and self.cols != 1:
raise ShapeError("A Matrix must be a vector to normalize.")
norm = self.norm()
if iszerofunc(norm):
out = self.zeros(self.rows, self.cols)
else:
out = self.applyfunc(lambda i: i / norm)
return out
def norm(self, ord=None):
"""Return the Norm of a Matrix or Vector.
In the simplest case this is the geometric size of the vector.
Other norms can be specified by the ``ord`` parameter.
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm - does not exist
inf maximum row sum max(abs(x))
-inf -- min(abs(x))
1 maximum column sum as below
-1 -- as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other - does not exist sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
Examples
========
>>> from sympy import Matrix, Symbol, trigsimp, cos, sin, oo
>>> x = Symbol('x', real=True)
>>> v = Matrix([cos(x), sin(x)])
>>> trigsimp( v.norm() )
1
>>> v.norm(10)
(sin(x)**10 + cos(x)**10)**(1/10)
>>> A = Matrix([[1, 1], [1, 1]])
>>> A.norm(1) # maximum sum of absolute values of A is 2
2
>>> A.norm(2) # Spectral norm (max of |Ax|/|x| under 2-vector-norm)
2
>>> A.norm(-2) # Inverse spectral norm (smallest singular value)
0
>>> A.norm() # Frobenius Norm
2
>>> A.norm(oo) # Infinity Norm
2
>>> Matrix([1, -2]).norm(oo)
2
>>> Matrix([-1, 2]).norm(-oo)
1
See Also
========
normalized
"""
# Row or Column Vector Norms
vals = list(self.values()) or [0]
if self.rows == 1 or self.cols == 1:
if ord == 2 or ord is None: # Common case sqrt(<x, x>)
return sqrt(Add(*(abs(i) ** 2 for i in vals)))
elif ord == 1: # sum(abs(x))
return Add(*(abs(i) for i in vals))
elif ord is S.Infinity: # max(abs(x))
return Max(*[abs(i) for i in vals])
elif ord is S.NegativeInfinity: # min(abs(x))
return Min(*[abs(i) for i in vals])
# Otherwise generalize the 2-norm, Sum(x_i**ord)**(1/ord)
# Note that while useful this is not mathematically a norm
try:
return Pow(Add(*(abs(i) ** ord for i in vals)), S.One / ord)
except (NotImplementedError, TypeError):
raise ValueError("Expected order to be Number, Symbol, oo")
# Matrix Norms
else:
if ord == 1: # Maximum column sum
m = self.applyfunc(abs)
return Max(*[sum(m.col(i)) for i in range(m.cols)])
elif ord == 2: # Spectral Norm
# Maximum singular value
return Max(*self.singular_values())
elif ord == -2:
# Minimum singular value
return Min(*self.singular_values())
elif ord is S.Infinity: # Infinity Norm - Maximum row sum
m = self.applyfunc(abs)
return Max(*[sum(m.row(i)) for i in range(m.rows)])
elif (ord is None or isinstance(ord,
str) and ord.lower() in
['f', 'fro', 'frobenius', 'vector']):
# Reshape as vector and send back to norm function
return self.vec().norm(ord=2)
else:
raise NotImplementedError("Matrix Norms under development")
def print_nonzero(self, symb="X"):
"""Shows location of non-zero entries for fast shape lookup.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> m = Matrix(2, 3, lambda i, j: i*3+j)
>>> m
Matrix([
[0, 1, 2],
[3, 4, 5]])
>>> m.print_nonzero()
[ XX]
[XXX]
>>> m = eye(4)
>>> m.print_nonzero("x")
[x ]
[ x ]
[ x ]
[ x]
"""
s = []
for i in range(self.rows):
line = []
for j in range(self.cols):
if self[i, j] == 0:
line.append(" ")
else:
line.append(str(symb))
s.append("[%s]" % ''.join(line))
print('\n'.join(s))
def project(self, v):
"""Return the projection of ``self`` onto the line containing ``v``.
Examples
========
>>> from sympy import Matrix, S, sqrt
>>> V = Matrix([sqrt(3)/2, S.Half])
>>> x = Matrix([[1, 0]])
>>> V.project(x)
Matrix([[sqrt(3)/2, 0]])
>>> V.project(-x)
Matrix([[sqrt(3)/2, 0]])
"""
return v * (self.dot(v) / v.dot(v))
def table(self, printer, rowstart='[', rowend=']', rowsep='\n',
colsep=', ', align='right'):
r"""
String form of Matrix as a table.
``printer`` is the printer to use on the elements (generally
something like StrPrinter())
``rowstart`` is the string used to start each row (by default '[').
``rowend`` is the string used to end each row (by default ']').
``rowsep`` is the string used to separate rows (by default a newline).
``colsep`` is the string used to separate columns (by default ', ').
``align`` defines how the elements are aligned. Must be one of 'left',
'right', or 'center'. You can also use '<', '>', and '^' to mean the
same thing, respectively.
This is used by the string printer for Matrix.
Examples
========
>>> from sympy import Matrix
>>> from sympy.printing.str import StrPrinter
>>> M = Matrix([[1, 2], [-33, 4]])
>>> printer = StrPrinter()
>>> M.table(printer)
'[ 1, 2]\n[-33, 4]'
>>> print(M.table(printer))
[ 1, 2]
[-33, 4]
>>> print(M.table(printer, rowsep=',\n'))
[ 1, 2],
[-33, 4]
>>> print('[%s]' % M.table(printer, rowsep=',\n'))
[[ 1, 2],
[-33, 4]]
>>> print(M.table(printer, colsep=' '))
[ 1 2]
[-33 4]
>>> print(M.table(printer, align='center'))
[ 1 , 2]
[-33, 4]
>>> print(M.table(printer, rowstart='{', rowend='}'))
{ 1, 2}
{-33, 4}
"""
# Handle zero dimensions:
if self.rows == 0 or self.cols == 0:
return '[]'
# Build table of string representations of the elements
res = []
# Track per-column max lengths for pretty alignment
maxlen = [0] * self.cols
for i in range(self.rows):
res.append([])
for j in range(self.cols):
s = printer._print(self[i, j])
res[-1].append(s)
maxlen[j] = max(len(s), maxlen[j])
# Patch strings together
align = {
'left': 'ljust',
'right': 'rjust',
'center': 'center',
'<': 'ljust',
'>': 'rjust',
'^': 'center',
}[align]
for i, row in enumerate(res):
for j, elem in enumerate(row):
row[j] = getattr(elem, align)(maxlen[j])
res[i] = rowstart + colsep.join(row) + rowend
return rowsep.join(res)
def vech(self, diagonal=True, check_symmetry=True):
"""Return the unique elements of a symmetric Matrix as a one column matrix
by stacking the elements in the lower triangle.
Parameters
==========
diagonal : bool, whether to include the diagonal cells of ``self``
check_symmetry : bool, check (not entirely reliably) that ``self`` is symmetric
Examples
========
>>> from sympy import Matrix
>>> m=Matrix([[1, 2], [2, 3]])
>>> m
Matrix([
[1, 2],
[2, 3]])
>>> m.vech()
Matrix([
[1],
[2],
[3]])
>>> m.vech(diagonal=False)
Matrix([[2]])
See Also
========
vec
"""
from sympy.matrices import zeros
c = self.cols
if c != self.rows:
raise ShapeError("Matrix must be square")
if check_symmetry:
self.simplify()
if self != self.transpose():
raise ValueError(
"Matrix appears to be asymmetric; consider check_symmetry=False")
count = 0
if diagonal:
v = zeros(c * (c + 1) // 2, 1)
for j in range(c):
for i in range(j, c):
v[count] = self[i, j]
count += 1
else:
v = zeros(c * (c - 1) // 2, 1)
for j in range(c):
for i in range(j + 1, c):
v[count] = self[i, j]
count += 1
return v
def rank_decomposition(self, iszerofunc=_iszero, simplify=False):
return _rank_decomposition(self, iszerofunc=iszerofunc,
simplify=simplify)
def cholesky(self, hermitian=True):
raise NotImplementedError('This function is implemented in DenseMatrix or SparseMatrix')
def LDLdecomposition(self, hermitian=True):
raise NotImplementedError('This function is implemented in DenseMatrix or SparseMatrix')
def LUdecomposition(self, iszerofunc=_iszero, simpfunc=None,
rankcheck=False):
return _LUdecomposition(self, iszerofunc=iszerofunc, simpfunc=simpfunc,
rankcheck=rankcheck)
def LUdecomposition_Simple(self, iszerofunc=_iszero, simpfunc=None,
rankcheck=False):
return _LUdecomposition_Simple(self, iszerofunc=iszerofunc,
simpfunc=simpfunc, rankcheck=rankcheck)
def LUdecompositionFF(self):
return _LUdecompositionFF(self)
def QRdecomposition(self):
return _QRdecomposition(self)
def diagonal_solve(self, rhs):
return _diagonal_solve(self, rhs)
def lower_triangular_solve(self, rhs):
raise NotImplementedError('This function is implemented in DenseMatrix or SparseMatrix')
def upper_triangular_solve(self, rhs):
raise NotImplementedError('This function is implemented in DenseMatrix or SparseMatrix')
def cholesky_solve(self, rhs):
return _cholesky_solve(self, rhs)
def LDLsolve(self, rhs):
return _LDLsolve(self, rhs)
def LUsolve(self, rhs, iszerofunc=_iszero):
return _LUsolve(self, rhs, iszerofunc=iszerofunc)
def QRsolve(self, b):
return _QRsolve(self, b)
def gauss_jordan_solve(self, B, freevar=False):
return _gauss_jordan_solve(self, B, freevar=freevar)
def pinv_solve(self, B, arbitrary_matrix=None):
return _pinv_solve(self, B, arbitrary_matrix=arbitrary_matrix)
def solve(self, rhs, method='GJ'):
return _solve(self, rhs, method=method)
def solve_least_squares(self, rhs, method='CH'):
return _solve_least_squares(self, rhs, method=method)
def pinv(self, method='RD'):
return _pinv(self, method=method)
def inv_mod(self, m):
return _inv_mod(self, m)
def inverse_ADJ(self, iszerofunc=_iszero):
return _inv_ADJ(self, iszerofunc=iszerofunc)
def inverse_BLOCK(self, iszerofunc=_iszero):
return _inv_block(self, iszerofunc=iszerofunc)
def inverse_GE(self, iszerofunc=_iszero):
return _inv_GE(self, iszerofunc=iszerofunc)
def inverse_LU(self, iszerofunc=_iszero):
return _inv_LU(self, iszerofunc=iszerofunc)
def inverse_CH(self, iszerofunc=_iszero):
return _inv_CH(self, iszerofunc=iszerofunc)
def inverse_LDL(self, iszerofunc=_iszero):
return _inv_LDL(self, iszerofunc=iszerofunc)
def inverse_QR(self, iszerofunc=_iszero):
return _inv_QR(self, iszerofunc=iszerofunc)
def inv(self, method=None, iszerofunc=_iszero, try_block_diag=False):
return _inv(self, method=method, iszerofunc=iszerofunc,
try_block_diag=try_block_diag)
rank_decomposition.__doc__ = _rank_decomposition.__doc__
cholesky.__doc__ = _cholesky.__doc__
LDLdecomposition.__doc__ = _LDLdecomposition.__doc__
LUdecomposition.__doc__ = _LUdecomposition.__doc__
LUdecomposition_Simple.__doc__ = _LUdecomposition_Simple.__doc__
LUdecompositionFF.__doc__ = _LUdecompositionFF.__doc__
QRdecomposition.__doc__ = _QRdecomposition.__doc__
diagonal_solve.__doc__ = _diagonal_solve.__doc__
lower_triangular_solve.__doc__ = _lower_triangular_solve.__doc__
upper_triangular_solve.__doc__ = _upper_triangular_solve.__doc__
cholesky_solve.__doc__ = _cholesky_solve.__doc__
LDLsolve.__doc__ = _LDLsolve.__doc__
LUsolve.__doc__ = _LUsolve.__doc__
QRsolve.__doc__ = _QRsolve.__doc__
gauss_jordan_solve.__doc__ = _gauss_jordan_solve.__doc__
pinv_solve.__doc__ = _pinv_solve.__doc__
solve.__doc__ = _solve.__doc__
solve_least_squares.__doc__ = _solve_least_squares.__doc__
pinv.__doc__ = _pinv.__doc__
inv_mod.__doc__ = _inv_mod.__doc__
inverse_ADJ.__doc__ = _inv_ADJ.__doc__
inverse_GE.__doc__ = _inv_GE.__doc__
inverse_LU.__doc__ = _inv_LU.__doc__
inverse_CH.__doc__ = _inv_CH.__doc__
inverse_LDL.__doc__ = _inv_LDL.__doc__
inverse_QR.__doc__ = _inv_QR.__doc__
inverse_BLOCK.__doc__ = _inv_block.__doc__
inv.__doc__ = _inv.__doc__
@deprecated(
issue=15109,
useinstead="from sympy.matrices.common import classof",
deprecated_since_version="1.3")
def classof(A, B):
from sympy.matrices.common import classof as classof_
return classof_(A, B)
@deprecated(
issue=15109,
deprecated_since_version="1.3",
useinstead="from sympy.matrices.common import a2idx")
def a2idx(j, n=None):
from sympy.matrices.common import a2idx as a2idx_
return a2idx_(j, n)
from __future__ import division, print_function
from types import FunctionType
from mpmath.libmp.libmpf import prec_to_dps
from sympy.core.compatibility import default_sort_key
from sympy.core.logic import fuzzy_and, fuzzy_or
from sympy.core.numbers import Float
from sympy.core.symbol import Dummy
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.polys import roots
from sympy.simplify import nsimplify, simplify as _simplify
from sympy.utilities.exceptions import SymPyDeprecationWarning
from .common import (MatrixError, NonSquareMatrixError,
NonPositiveDefiniteMatrixError)
from .utilities import _iszero
# This function is a candidate for caching if caching gets implemented for matrices.
def _eigenvals(M, error_when_incomplete=True, **flags):
r"""Return eigenvalues using the Berkowitz agorithm to compute
the characteristic polynomial.
Parameters
==========
error_when_incomplete : bool, optional
If it is set to ``True``, it will raise an error if not all
eigenvalues are computed. This is caused by ``roots`` not returning
a full list of eigenvalues.
simplify : bool or function, optional
If it is set to ``True``, it attempts to return the most
simplified form of expressions returned by applying the default
simplification method in every routine.
If it is set to ``False``, it will skip simplification in this
particular routine to save computation resources.
If a function is passed, it will attempt to apply
the particular function as the simplification method.
rational : bool, optional
If it is set to ``True``, every floating point number will be
replaced with a rational before computation. This can work around
issues where the ``roots`` routine does not handle floats well.
multiple : bool, optional
If it is set to ``True``, the result will be in the form of a
list.
If it is set to ``False``, the result will be in the form of a
dictionary.
Returns
=======
eigs : list or dict
Eigenvalues of a matrix. The return format would be specified by
the key ``multiple``.
Raises
======
MatrixError
If not all eigenvalues could be computed.
NonSquareMatrixError
If eigenvalues are requested for a non-square matrix.
Examples
========
>>> from sympy.matrices import Matrix
>>> M = Matrix(3, 3, [0, 1, 1, 1, 0, 0, 1, 1, 1])
>>> M.eigenvals()
{-1: 1, 0: 1, 2: 1}
See Also
========
MatrixDeterminant.charpoly
eigenvects
Notes
=====
Eigenvalues of a matrix `A` can be computed by solving a matrix
equation `\det(A - \lambda I) = 0`
"""
simplify = flags.get('simplify', False) # Collect the simplify flag before it is popped, to reuse it later in the routine.
multiple = flags.get('multiple', False) # Collect the multiple flag to decide whether to return a dict or a list.
rational = flags.pop('rational', True)
if not M:
return {}
if rational:
M = M.applyfunc(
lambda x: nsimplify(x, rational=True) if x.has(Float) else x)
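# Shortcut: the eigenvalues of a triangular matrix are its diagonal entries,
# so the characteristic polynomial need not be computed.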
if M.is_upper or M.is_lower:
if not M.is_square:
raise NonSquareMatrixError()
diagonal_entries = [M[i, i] for i in range(M.rows)]
if multiple:
eigs = diagonal_entries
else:
eigs = {}
for diagonal_entry in diagonal_entries:
if diagonal_entry not in eigs:
eigs[diagonal_entry] = 0
eigs[diagonal_entry] += 1
else:
flags.pop('simplify', None) # pop unsupported flag
if isinstance(simplify, FunctionType):
eigs = roots(M.charpoly(x=Dummy('x'), simplify=simplify), **flags)
else:
eigs = roots(M.charpoly(x=Dummy('x')), **flags)
# make sure the algebraic multiplicity sums to the
# size of the matrix
if error_when_incomplete and (sum(eigs.values()) if
isinstance(eigs, dict) else len(eigs)) != M.cols:
raise MatrixError("Could not compute eigenvalues for {}".format(M))
# Since 'simplify' flag is unsupported in roots()
# simplify() function will be applied once at the end of the routine.
if not simplify:
return eigs
if not isinstance(simplify, FunctionType):
simplify = _simplify
# With 'multiple' flag set true, simplify() will be mapped for the list
# Otherwise, simplify() will be mapped for the keys of the dictionary
if not multiple:
return {simplify(key): value for key, value in eigs.items()}
else:
return [simplify(value) for value in eigs]
# This function is a candidate for caching if caching gets implemented for matrices.
def _eigenvects(M, error_when_incomplete=True, iszerofunc=_iszero, **flags):
"""Return list of triples (eigenval, multiplicity, eigenspace).
Parameters
==========
error_when_incomplete : bool, optional
Raise an error when not all eigenvalues are computed. This is
caused by ``roots`` not returning a full list of eigenvalues.
iszerofunc : function, optional
Specifies a zero testing function to be used in ``rref``.
Default value is ``_iszero``, which uses SymPy's naive and fast
default assumption handler.
It can also accept any user-specified zero testing function, if it
is formatted as a function which accepts a single symbolic argument
and returns ``True`` if it is tested as zero and ``False`` if it
is tested as non-zero, and ``None`` if it is undecidable.
simplify : bool or function, optional
If ``True``, ``as_content_primitive()`` will be used to tidy up
normalization artifacts.
It will also be used by the ``nullspace`` routine.
chop : bool or positive number, optional
If the matrix contains any Floats, they will be changed to Rationals
for computation purposes, but the answers will be returned after
being evaluated with evalf. The ``chop`` flag is passed to ``evalf``.
When ``chop=True`` a default precision will be used; a number will
be interpreted as the desired level of precision.
Returns
=======
ret : [(eigenval, multiplicity, eigenspace), ...]
A ragged list containing tuples of data obtained by ``eigenvals``
and ``nullspace``.
``eigenspace`` is a list containing the ``eigenvector`` for each
eigenvalue.
``eigenvector`` is a vector in the form of a ``Matrix``. e.g.
a vector of length 3 is returned as ``Matrix([a_1, a_2, a_3])``.
Raises
======
NotImplementedError
If the nullspace for an eigenvalue cannot be computed.
Examples
========
>>> from sympy.matrices import Matrix
>>> M = Matrix(3, 3, [0, 1, 1, 1, 0, 0, 1, 1, 1])
>>> M.eigenvects()
[(-1, 1, [Matrix([
[-1],
[ 1],
[ 0]])]), (0, 1, [Matrix([
[ 0],
[-1],
[ 1]])]), (2, 1, [Matrix([
[2/3],
[1/3],
[ 1]])])]
See Also
========
eigenvals
MatrixSubspaces.nullspace
"""
def eigenspace(eigenval):
"""Get a basis for the eigenspace for a particular eigenvalue"""
m = M - M.eye(M.rows) * eigenval
ret = m.nullspace(iszerofunc=iszerofunc)
# the nullspace for a real eigenvalue should be
# non-trivial. If we didn't find an eigenvector, try once
# more a little harder
if len(ret) == 0 and simplify:
ret = m.nullspace(iszerofunc=iszerofunc, simplify=True)
if len(ret) == 0:
raise NotImplementedError(
"Can't evaluate eigenvector for eigenvalue %s" % eigenval)
return ret
simplify = flags.get('simplify', True)
# if a function was passed as ``simplify``, use it for cleanup as well
simpfunc = simplify if isinstance(simplify, FunctionType) else (_simplify if simplify else (lambda x: x))
primitive = flags.get('simplify', False)
chop = flags.pop('chop', False)
flags.pop('multiple', None) # remove this if it's there
has_floats = M.has(Float) # roots doesn't like Floats, so replace them with Rationals
if has_floats:
M = M.applyfunc(lambda x: nsimplify(x, rational=True))
eigenvals = M.eigenvals(rational=False,
error_when_incomplete=error_when_incomplete, **flags)
ret = [(val, mult, eigenspace(val)) for val, mult in
sorted(eigenvals.items(), key=default_sort_key)]
if primitive:
# if the primitive flag is set, get rid of any common
# integer denominators
def denom_clean(l):
from sympy import gcd
return [(v / gcd(list(v))).applyfunc(simpfunc) for v in l]
ret = [(val, mult, denom_clean(es)) for val, mult, es in ret]
if has_floats:
# if we had floats to start with, turn the eigenvectors to floats
ret = [(val.evalf(chop=chop), mult, [v.evalf(chop=chop) for v in es])
for val, mult, es in ret]
return ret
def _is_diagonalizable_with_eigen(M, reals_only=False):
"""See _is_diagonalizable. This function returns the bool along with the
eigenvectors to avoid calculating them again in functions like
``diagonalize``."""
if not M.is_square:
return False, []
eigenvecs = M.eigenvects(simplify=True)
for val, mult, basis in eigenvecs:
if reals_only and not val.is_real: # if we have a complex eigenvalue
return False, eigenvecs
if mult != len(basis): # if the geometric multiplicity doesn't equal the algebraic
return False, eigenvecs
return True, eigenvecs
def _is_diagonalizable(M, reals_only=False, **kwargs):
"""Returns ``True`` if a matrix is diagonalizable.
Parameters
==========
reals_only : bool, optional
If ``True``, it tests whether the matrix can be diagonalized
to contain only real numbers on the diagonal.
If ``False``, it tests whether the matrix can be diagonalized
at all, even with numbers that may not be real.
Examples
========
Example of a diagonalizable matrix:
>>> from sympy import Matrix
>>> M = Matrix([[1, 2, 0], [0, 3, 0], [2, -4, 2]])
>>> M.is_diagonalizable()
True
Example of a non-diagonalizable matrix:
>>> M = Matrix([[0, 1], [0, 0]])
>>> M.is_diagonalizable()
False
Example of a matrix that is diagonalized in terms of non-real entries:
>>> M = Matrix([[0, 1], [-1, 0]])
>>> M.is_diagonalizable(reals_only=False)
True
>>> M.is_diagonalizable(reals_only=True)
False
See Also
========
is_diagonal
diagonalize
"""
if 'clear_cache' in kwargs:
SymPyDeprecationWarning(
feature='clear_cache',
deprecated_since_version=1.4,
issue=15887
).warn()
if 'clear_subproducts' in kwargs:
SymPyDeprecationWarning(
feature='clear_subproducts',
deprecated_since_version=1.4,
issue=15887
).warn()
if not M.is_square:
return False
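# Real symmetric and complex hermitian matrices are always diagonalizable
# by the spectral theorem, so the eigenvector computation can be skipped.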
if all(e.is_real for e in M) and M.is_symmetric():
return True
if all(e.is_complex for e in M) and M.is_hermitian:
return True
return _is_diagonalizable_with_eigen(M, reals_only=reals_only)[0]
def _diagonalize(M, reals_only=False, sort=False, normalize=False):
"""
Return (P, D), where D is diagonal and
D = P^-1 * M * P
where M is current matrix.
Parameters
==========
reals_only : bool. Whether to throw an error if complex numbers are needed
to diagonalize. (Default: False)
sort : bool. Sort the eigenvalues along the diagonal. (Default: False)
normalize : bool. If True, normalize the columns of P. (Default: False)
Examples
========
>>> from sympy.matrices import Matrix
>>> M = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
>>> M
Matrix([
[1, 2, 0],
[0, 3, 0],
[2, -4, 2]])
>>> (P, D) = M.diagonalize()
>>> D
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> P
Matrix([
[-1, 0, -1],
[ 0, 0, -1],
[ 2, 1, 2]])
>>> P.inv() * M * P
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
See Also
========
is_diagonal
is_diagonalizable
"""
if not M.is_square:
raise NonSquareMatrixError()
is_diagonalizable, eigenvecs = _is_diagonalizable_with_eigen(M,
reals_only=reals_only)
if not is_diagonalizable:
raise MatrixError("Matrix is not diagonalizable")
if sort:
eigenvecs = sorted(eigenvecs, key=default_sort_key)
p_cols, diag = [], []
for val, mult, basis in eigenvecs:
diag += [val] * mult
p_cols += basis
if normalize:
p_cols = [v / v.norm() for v in p_cols]
return M.hstack(*p_cols), M.diag(*diag)
def _eval_is_positive_definite(M, method="eigen"):
"""Algorithm dump for computing positive-definiteness of a
matrix.
Parameters
==========
method : str, optional
Specifies the method for computing positive-definiteness of
a matrix.
If ``'eigen'``, it computes the full eigenvalues and decides
if the matrix is positive-definite.
If ``'CH'``, it attempts to compute the Cholesky
decomposition to detect the definiteness.
If ``'LDL'``, it attempts to compute the LDL
decomposition to detect the definiteness.
"""
if M.is_hermitian:
if method == 'eigen':
eigen = M.eigenvals()
args = [x.is_positive for x in eigen.keys()]
return fuzzy_and(args)
elif method == 'CH':
try:
M.cholesky(hermitian=True)
except NonPositiveDefiniteMatrixError:
return False
return True
elif method == 'LDL':
try:
M.LDLdecomposition(hermitian=True)
except NonPositiveDefiniteMatrixError:
return False
return True
else:
raise NotImplementedError()
elif M.is_square:
M_H = (M + M.H) / 2
return M_H._eval_is_positive_definite(method=method)
def _is_positive_definite(M):
return M._eval_is_positive_definite()
def _is_positive_semidefinite(M):
if M.is_hermitian:
eigen = M.eigenvals()
args = [x.is_nonnegative for x in eigen.keys()]
return fuzzy_and(args)
elif M.is_square:
return ((M + M.H) / 2).is_positive_semidefinite
return None
def _is_negative_definite(M):
if M.is_hermitian:
eigen = M.eigenvals()
args = [x.is_negative for x in eigen.keys()]
return fuzzy_and(args)
elif M.is_square:
return ((M + M.H) / 2).is_negative_definite
return None
def _is_negative_semidefinite(M):
if M.is_hermitian:
eigen = M.eigenvals()
args = [x.is_nonpositive for x in eigen.keys()]
return fuzzy_and(args)
elif M.is_square:
return ((M + M.H) / 2).is_negative_semidefinite
return None
def _is_indefinite(M):
if M.is_hermitian:
eigen = M.eigenvals()
args1 = [x.is_positive for x in eigen.keys()]
any_positive = fuzzy_or(args1)
args2 = [x.is_negative for x in eigen.keys()]
any_negative = fuzzy_or(args2)
return fuzzy_and([any_positive, any_negative])
elif M.is_square:
return ((M + M.H) / 2).is_indefinite
return None
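# Illustrative sketch (added comment; the concrete values assume the
# hermitian-part fallback above): for the non-symmetric matrix
# M = Matrix([[1, 2], [0, 1]]) the symmetric part (M + M.T)/2 equals
# Matrix([[1, 1], [1, 1]]) with eigenvalues 0 and 2, so one would expect
# M.is_positive_definite to be False while M.is_positive_semidefinite
# is True.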
_doc_positive_definite = \
r"""Finds out the definiteness of a matrix.
Examples
========
An example of numeric positive definite matrix:
>>> from sympy import Matrix
>>> A = Matrix([[1, -2], [-2, 6]])
>>> A.is_positive_definite
True
>>> A.is_positive_semidefinite
True
>>> A.is_negative_definite
False
>>> A.is_negative_semidefinite
False
>>> A.is_indefinite
False
An example of numeric negative definite matrix:
>>> A = Matrix([[-1, 2], [2, -6]])
>>> A.is_positive_definite
False
>>> A.is_positive_semidefinite
False
>>> A.is_negative_definite
True
>>> A.is_negative_semidefinite
True
>>> A.is_indefinite
False
An example of numeric indefinite matrix:
>>> A = Matrix([[1, 2], [2, 1]])
>>> A.is_positive_definite
False
>>> A.is_positive_semidefinite
False
>>> A.is_negative_definite
False
>>> A.is_negative_semidefinite
False
>>> A.is_indefinite
True
Notes
=====
Definiteness is not very commonly discussed for non-hermitian
matrices.
However, computing the definiteness of a matrix can be
generalized over any real matrix by taking the symmetric part:
`A_S = 1/2 (A + A^{T})`
Or over any complex matrix by taking the hermitian part:
`A_H = 1/2 (A + A^{H})`
And computing the eigenvalues.
References
==========
.. [1] https://en.wikipedia.org/wiki/Definiteness_of_a_matrix#Eigenvalues
.. [2] http://mathworld.wolfram.com/PositiveDefiniteMatrix.html
.. [3] Johnson, C. R. "Positive Definite Matrices." Amer.
Math. Monthly 77, 259-264 1970.
"""
_is_positive_definite.__doc__ = _doc_positive_definite
_is_positive_semidefinite.__doc__ = _doc_positive_definite
_is_negative_definite.__doc__ = _doc_positive_definite
_is_negative_semidefinite.__doc__ = _doc_positive_definite
_is_indefinite.__doc__ = _doc_positive_definite
def _jordan_form(M, calc_transform=True, **kwargs):
"""Return ``(P, J)`` where `J` is a Jordan block
matrix and `P` is a matrix such that
``M == P*J*P**-1``
Parameters
==========
calc_transform : bool
If ``False``, then only `J` is returned.
chop : bool
All matrices are converted to exact types when computing
eigenvalues and eigenvectors. As a result, there may be
approximation errors. If ``chop==True``, these errors
will be truncated.
Examples
========
>>> from sympy.matrices import Matrix
>>> M = Matrix([[ 6, 5, -2, -3], [-3, -1, 3, 3], [ 2, 1, -2, -3], [-1, 1, 5, 5]])
>>> P, J = M.jordan_form()
>>> J
Matrix([
[2, 1, 0, 0],
[0, 2, 0, 0],
[0, 0, 2, 1],
[0, 0, 0, 2]])
See Also
========
jordan_block
"""
if not M.is_square:
raise NonSquareMatrixError("Only square matrices have Jordan forms")
chop = kwargs.pop('chop', False)
mat = M
has_floats = M.has(Float)
if has_floats:
try:
max_prec = max(term._prec for term in M._mat if isinstance(term, Float))
except ValueError:
# if no term in the matrix is explicitly a Float, calling max()
# will throw an error, so set max_prec to the default value of 53
max_prec = 53
# setting minimum max_dps to 15 to prevent loss of precision in
# matrix containing non evaluated expressions
max_dps = max(prec_to_dps(max_prec), 15)
def restore_floats(*args):
"""If ``has_floats`` is `True`, cast all ``args`` as
matrices of floats."""
if has_floats:
args = [m.evalf(n=max_dps, chop=chop) for m in args]
if len(args) == 1:
return args[0]
return args
# cache calculations for some speedup
mat_cache = {}
def eig_mat(val, pow):
"""Cache computations of ``(M - val*I)**pow`` for quick
retrieval"""
if (val, pow) in mat_cache:
return mat_cache[(val, pow)]
if (val, pow - 1) in mat_cache:
mat_cache[(val, pow)] = mat_cache[(val, pow - 1)].multiply(
mat_cache[(val, 1)], dotprodsimp=True)
else:
mat_cache[(val, pow)] = (mat - val*M.eye(M.rows)).pow(pow)
return mat_cache[(val, pow)]
# helper functions
def nullity_chain(val, algebraic_multiplicity):
"""Calculate the sequence [0, nullity(E), nullity(E**2), ...]
until it is constant where ``E = M - val*I``"""
# mat.rank() is faster than computing the null space,
# so use the rank-nullity theorem
cols = M.cols
ret = [0]
nullity = cols - eig_mat(val, 1).rank()
i = 2
while nullity != ret[-1]:
ret.append(nullity)
if nullity == algebraic_multiplicity:
break
nullity = cols - eig_mat(val, i).rank()
i += 1
# Due to issues like #7146 and #15872, SymPy sometimes
# gives the wrong rank. In this case, raise an error
# instead of returning an incorrect matrix
if nullity < ret[-1] or nullity > algebraic_multiplicity:
raise MatrixError(
"SymPy had encountered an inconsistent "
"result while computing Jordan block: "
"{}".format(M))
return ret
def blocks_from_nullity_chain(d):
"""Return a list of the size of each Jordan block.
If d_n is the nullity of E**n, then the number
of Jordan blocks of size n is
2*d_n - d_(n-1) - d_(n+1)"""
# d[0] is always the number of columns, so skip past it
mid = [2*d[n] - d[n - 1] - d[n + 1] for n in range(1, len(d) - 1)]
# d is assumed to plateau with "d[ len(d) ] == d[-1]", so
# 2*d_n - d_(n-1) - d_(n+1) == d_n - d_(n-1)
end = [d[-1] - d[-2]] if len(d) > 1 else [d[0]]
return mid + end
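# Worked illustration (added comment): for a 4x4 nilpotent matrix with one
# Jordan block of size 3 and one of size 1, the nullity chain is
# d = [0, 2, 3, 4], so mid = [2*2 - 0 - 3, 2*3 - 2 - 4] = [1, 0] and
# end = [4 - 3] = [1], giving [1, 0, 1]: one block of size 1, none of
# size 2 and one of size 3.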
def pick_vec(small_basis, big_basis):
"""Picks a vector from big_basis that isn't in
the subspace spanned by small_basis"""
if len(small_basis) == 0:
return big_basis[0]
for v in big_basis:
_, pivots = M.hstack(*(small_basis + [v])).echelon_form(
with_pivots=True)
if pivots[-1] == len(small_basis):
return v
# roots doesn't like Floats, so replace them with Rationals
if has_floats:
mat = mat.applyfunc(lambda x: nsimplify(x, rational=True))
# first calculate the jordan block structure
eigs = mat.eigenvals()
# make sure that we found all the roots by counting
# the algebraic multiplicity
if sum(m for m in eigs.values()) != mat.cols:
raise MatrixError("Could not compute eigenvalues for {}".format(mat))
# most matrices have distinct eigenvalues
# and so are diagonalizable. In this case, don't
# do extra work!
if len(eigs.keys()) == mat.cols:
blocks = list(sorted(eigs.keys(), key=default_sort_key))
jordan_mat = mat.diag(*blocks)
if not calc_transform:
return restore_floats(jordan_mat)
jordan_basis = [eig_mat(eig, 1).nullspace()[0]
for eig in blocks]
basis_mat = mat.hstack(*jordan_basis)
return restore_floats(basis_mat, jordan_mat)
block_structure = []
for eig in sorted(eigs.keys(), key=default_sort_key):
algebraic_multiplicity = eigs[eig]
chain = nullity_chain(eig, algebraic_multiplicity)
block_sizes = blocks_from_nullity_chain(chain)
# if block_sizes == [a, b, c, ...], then the number of
# Jordan blocks of size 1 is a, of size 2 is b, etc.
# create an array that has (eig, block_size) with one
# entry for each block
size_nums = [(i+1, num) for i, num in enumerate(block_sizes)]
# we expect larger Jordan blocks to come earlier
size_nums.reverse()
block_structure.extend(
(eig, size) for size, num in size_nums for _ in range(num))
jordan_form_size = sum(size for eig, size in block_structure)
if jordan_form_size != M.rows:
raise MatrixError(
"SymPy had encountered an inconsistent result while "
"computing Jordan block. : {}".format(M))
blocks = (mat.jordan_block(size=size, eigenvalue=eig) for eig, size in block_structure)
jordan_mat = mat.diag(*blocks)
if not calc_transform:
return restore_floats(jordan_mat)
# For each generalized eigenspace, calculate a basis.
# We start by looking for a vector in null( (A - eig*I)**n )
# which isn't in null( (A - eig*I)**(n-1) ) where n is
# the size of the Jordan block
#
# Ideally we'd just loop through block_structure and
# compute each generalized eigenspace. However, this
# causes a lot of unneeded computation. Instead, we
# go through the eigenvalues separately, since we know
# their generalized eigenspaces must have bases that
# are linearly independent.
jordan_basis = []
for eig in sorted(eigs.keys(), key=default_sort_key):
eig_basis = []
for block_eig, size in block_structure:
if block_eig != eig:
continue
null_big = (eig_mat(eig, size)).nullspace()
null_small = (eig_mat(eig, size - 1)).nullspace()
# we want to pick something that is in the big basis
# and not the small, but also something that is independent
# of any other generalized eigenvectors from a different
# generalized eigenspace sharing the same eigenvalue.
vec = pick_vec(null_small + eig_basis, null_big)
new_vecs = [eig_mat(eig, i).multiply(vec, dotprodsimp=True)
for i in range(size)]
eig_basis.extend(new_vecs)
jordan_basis.extend(reversed(new_vecs))
basis_mat = mat.hstack(*jordan_basis)
return restore_floats(basis_mat, jordan_mat)
def _left_eigenvects(M, **flags):
"""Returns left eigenvectors and eigenvalues.
This function returns the list of triples (eigenval, multiplicity,
basis) for the left eigenvectors. Options are the same as for
eigenvects(), i.e. the ``**flags`` arguments get passed directly to
eigenvects().
Examples
========
>>> from sympy.matrices import Matrix
>>> M = Matrix([[0, 1, 1], [1, 0, 0], [1, 1, 1]])
>>> M.eigenvects()
[(-1, 1, [Matrix([
[-1],
[ 1],
[ 0]])]), (0, 1, [Matrix([
[ 0],
[-1],
[ 1]])]), (2, 1, [Matrix([
[2/3],
[1/3],
[ 1]])])]
>>> M.left_eigenvects()
[(-1, 1, [Matrix([[-2, 1, 1]])]), (0, 1, [Matrix([[-1, -1, 1]])]), (2,
1, [Matrix([[1, 1, 1]])])]
"""
eigs = M.transpose().eigenvects(**flags)
return [(val, mult, [l.transpose() for l in basis]) for val, mult, basis in eigs]
def _singular_values(M):
"""Compute the singular values of a Matrix
Examples
========
>>> from sympy import Matrix, Symbol
>>> x = Symbol('x', real=True)
>>> M = Matrix([[0, 1, 0], [0, x, 0], [-1, 0, 0]])
>>> M.singular_values()
[sqrt(x**2 + 1), 1, 0]
See Also
========
condition_number
"""
if M.rows >= M.cols:
valmultpairs = M.H.multiply(M).eigenvals()
else:
valmultpairs = M.multiply(M.H).eigenvals()
# Expands result from eigenvals into a simple list
vals = []
for k, v in valmultpairs.items():
vals += [sqrt(k)] * v # dangerous! same k in several spots!
# Pad with zeros if singular values are computed in reverse way,
# to give consistent format.
if len(vals) < M.cols:
vals += [M.zero] * (M.cols - len(vals))
# sort them in descending order
vals.sort(reverse=True, key=default_sort_key)
return vals
from __future__ import division, print_function
from sympy.core.compatibility import reduce
from .utilities import _iszero
def _columnspace(M, simplify=False):
"""Returns a list of vectors (Matrix objects) that span columnspace of ``M``
Examples
========
>>> from sympy.matrices import Matrix
>>> M = Matrix(3, 3, [1, 3, 0, -2, -6, 0, 3, 9, 6])
>>> M
Matrix([
[ 1, 3, 0],
[-2, -6, 0],
[ 3, 9, 6]])
>>> M.columnspace()
[Matrix([
[ 1],
[-2],
[ 3]]), Matrix([
[0],
[0],
[6]])]
See Also
========
nullspace
rowspace
"""
reduced, pivots = M.echelon_form(simplify=simplify, with_pivots=True)
return [M.col(i) for i in pivots]
def _nullspace(M, simplify=False, iszerofunc=_iszero):
"""Returns list of vectors (Matrix objects) that span nullspace of ``M``
Examples
========
>>> from sympy.matrices import Matrix
>>> M = Matrix(3, 3, [1, 3, 0, -2, -6, 0, 3, 9, 6])
>>> M
Matrix([
[ 1, 3, 0],
[-2, -6, 0],
[ 3, 9, 6]])
>>> M.nullspace()
[Matrix([
[-3],
[ 1],
[ 0]])]
See Also
========
columnspace
rowspace
"""
reduced, pivots = M.rref(iszerofunc=iszerofunc, simplify=simplify)
free_vars = [i for i in range(M.cols) if i not in pivots]
basis = []
for free_var in free_vars:
# for each free variable, we will set it to 1 and all others
# to 0. Then, we will use back substitution to solve the system
vec = [M.zero] * M.cols
vec[free_var] = M.one
for piv_row, piv_col in enumerate(pivots):
vec[piv_col] -= reduced[piv_row, free_var]
basis.append(vec)
return [M._new(M.cols, 1, b) for b in basis]
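# Worked illustration (added comment, following the docstring example):
# the rref of Matrix([[1, 3, 0], [-2, -6, 0], [3, 9, 6]]) has pivots
# (0, 2), so column 1 is free.  Starting from vec = [0, 1, 0], back
# substitution subtracts reduced[0, 1] == 3 from vec[0], giving the
# basis vector [-3, 1, 0] returned above.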
def _rowspace(M, simplify=False):
"""Returns a list of vectors that span the row space of ``M``.
Examples
========
>>> from sympy import Matrix
>>> M = Matrix(3, 3, [1, 3, 0, -2, -6, 0, 3, 9, 6])
>>> M
Matrix([
[ 1, 3, 0],
[-2, -6, 0],
[ 3, 9, 6]])
>>> M.rowspace()
[Matrix([[1, 3, 0]]), Matrix([[0, 0, 6]])]
"""
reduced, pivots = M.echelon_form(simplify=simplify, with_pivots=True)
return [reduced.row(i) for i in range(len(pivots))]
def _orthogonalize(cls, *vecs, **kwargs):
"""Apply the Gram-Schmidt orthogonalization procedure
to vectors supplied in ``vecs``.
Parameters
==========
vecs
vectors to be made orthogonal
normalize : bool
If ``True``, return an orthonormal basis.
rankcheck : bool
If ``True``, a ``ValueError`` is raised when any zero or
linearly dependent vector is encountered.
If ``False``, such vectors are silently discarded and the
orthogonalization continues without them.
Returns
=======
list
List of orthogonal (or orthonormal) basis vectors.
Examples
========
>>> from sympy import I, Matrix
>>> v = [Matrix([1, I]), Matrix([1, -I])]
>>> Matrix.orthogonalize(*v)
[Matrix([
[1],
[I]]), Matrix([
[ 1],
[-I]])]
See Also
========
MatrixBase.QRdecomposition
References
==========
.. [1] https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process
"""
normalize = kwargs.get('normalize', False)
rankcheck = kwargs.get('rankcheck', False)
def project(a, b):
return b * (a.dot(b, hermitian=True) / b.dot(b, hermitian=True))
def perp_to_subspace(vec, basis):
"""projects vec onto the subspace given
by the orthogonal basis ``basis``"""
components = [project(vec, b) for b in basis]
if len(basis) == 0:
return vec
return vec - reduce(lambda a, b: a + b, components)
ret = []
vecs = list(vecs) # make sure we start with a non-zero vector
while len(vecs) > 0 and vecs[0].is_zero_matrix:
if rankcheck is False:
del vecs[0]
else:
raise ValueError("GramSchmidt: vector set not linearly independent")
for vec in vecs:
perp = perp_to_subspace(vec, ret)
if not perp.is_zero_matrix:
ret.append(cls(perp))
elif rankcheck is True:
raise ValueError("GramSchmidt: vector set not linearly independent")
if normalize:
ret = [vec / vec.norm() for vec in ret]
return ret
from __future__ import division, print_function
from sympy.core.compatibility import as_int
from sympy.utilities.iterables import is_sequence
from sympy.utilities.misc import filldedent
from .sparse import SparseMatrix
def _doktocsr(dok):
"""Converts a sparse matrix to Compressed Sparse Row (CSR) format.
Returns
=======
A : contains non-zero elements sorted by key (row, column)
JA : JA[i] is the column corresponding to A[i]
IA : IA[i] contains the index in A for the first non-zero element
of row[i]. Thus IA[i+1] - IA[i] gives number of non-zero
elements row[i]. The length of IA is always 1 more than the
number of rows in the matrix.
"""
row, JA, A = [list(i) for i in zip(*dok.row_list())]
IA = [0]*((row[0] if row else 0) + 1)
for i, r in enumerate(row):
IA.extend([i]*(r - row[i - 1])) # if i = 0 nothing is extended
IA.extend([len(A)]*(dok.rows - len(IA) + 1))
shape = [dok.rows, dok.cols]
return [A, JA, IA, shape]
def _csrtodok(csr):
"""Converts a CSR representation to DOK representation"""
smat = {}
A, JA, IA, shape = csr
for i in range(len(IA) - 1):
indices = slice(IA[i], IA[i + 1])
for l, m in zip(A[indices], JA[indices]):
smat[i, m] = l
return SparseMatrix(*(shape + [smat]))
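# Illustrative round trip (added comment; the exact lists assume the two
# helpers behave as documented above):
#   >>> from sympy import SparseMatrix
#   >>> csr = _doktocsr(SparseMatrix([[1, 0, 0], [0, 0, 2]]))
#   >>> csr
#   [[1, 2], [0, 2], [0, 1, 2], [2, 3]]
#   >>> _csrtodok(csr) == SparseMatrix([[1, 0, 0], [0, 0, 2]])
#   True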
def banded(*args, **kwargs):
"""Returns a SparseMatrix from the given dictionary describing
the diagonals of the matrix. The keys are positive for upper
diagonals and negative for those below the main diagonal. The
values may be:
* expressions or single-argument functions,
* lists or tuples of values,
* matrices
Unless dimensions are given, the size of the returned matrix will
be large enough to contain the largest non-zero value provided.
kwargs
======
rows : rows of the resulting matrix; computed if
not given.
cols : columns of the resulting matrix; computed if
not given.
Examples
========
>>> from sympy import banded, ones, Matrix
>>> from sympy.abc import x
If explicit values are given in tuples,
the matrix will autosize to contain all values, otherwise
a single value is filled onto the entire diagonal:
>>> banded({1: (1, 2, 3), -1: (4, 5, 6), 0: x})
Matrix([
[x, 1, 0, 0],
[4, x, 2, 0],
[0, 5, x, 3],
[0, 0, 6, x]])
A function accepting a single argument can be used to fill the
diagonal as a function of diagonal index (which starts at 0).
The size (or shape) of the matrix must be given to obtain more
than a 1x1 matrix:
>>> s = lambda d: (1 + d)**2
>>> banded(5, {0: s, 2: s, -2: 2})
Matrix([
[1, 0, 1, 0, 0],
[0, 4, 0, 4, 0],
[2, 0, 9, 0, 9],
[0, 2, 0, 16, 0],
[0, 0, 2, 0, 25]])
The diagonal of matrices placed on a diagonal will coincide
with the indicated diagonal:
>>> vert = Matrix([1, 2, 3])
>>> banded({0: vert}, cols=3)
Matrix([
[1, 0, 0],
[2, 1, 0],
[3, 2, 1],
[0, 3, 2],
[0, 0, 3]])
>>> banded(4, {0: ones(2)})
Matrix([
[1, 1, 0, 0],
[1, 1, 0, 0],
[0, 0, 1, 1],
[0, 0, 1, 1]])
Errors are raised if the designated size will not hold
all values an integral number of times. Here, the rows
are designated as odd (but an even number is required to
hold the off-diagonal 2x2 ones):
>>> banded({0: 2, 1: ones(2)}, rows=5)
Traceback (most recent call last):
...
ValueError:
sequence does not fit an integral number of times in the matrix
And here, an even number of rows is given...but the square
matrix has an even number of columns, too. Since the 2x2 blocks
on the first diagonal start in the second column, an odd number
of columns is required:
>>> banded(4, {0: 2, 1: ones(2)}) # trying to make 4x4 and cols must be odd
Traceback (most recent call last):
...
ValueError:
sequence does not fit an integral number of times in the matrix
A way around having to count rows is to enclose matrix elements
in a tuple and indicate the desired number of them to the right:
>>> banded({0: 2, 2: (ones(2),)*3})
Matrix([
[2, 0, 1, 1, 0, 0, 0, 0],
[0, 2, 1, 1, 0, 0, 0, 0],
[0, 0, 2, 0, 1, 1, 0, 0],
[0, 0, 0, 2, 1, 1, 0, 0],
[0, 0, 0, 0, 2, 0, 1, 1],
[0, 0, 0, 0, 0, 2, 1, 1]])
An error will be raised if more than one value
is written to a given entry. Here, the ones overlap
with the main diagonal if they are placed on the
first diagonal:
>>> banded({0: (2,)*5, 1: (ones(2),)*3})
Traceback (most recent call last):
...
ValueError: collision at (1, 1)
By placing a 0 at the bottom left of the 2x2 matrix of
ones, the collision is avoided:
>>> u2 = Matrix([
... [1, 1],
... [0, 1]])
>>> banded({0: [2]*5, 1: [u2]*3})
Matrix([
[2, 1, 1, 0, 0, 0, 0],
[0, 2, 1, 0, 0, 0, 0],
[0, 0, 2, 1, 1, 0, 0],
[0, 0, 0, 2, 1, 0, 0],
[0, 0, 0, 0, 2, 1, 1],
[0, 0, 0, 0, 0, 0, 1]])
"""
from sympy import Dict, Dummy, SparseMatrix
try:
if len(args) not in (1, 2, 3):
raise TypeError
if not isinstance(args[-1], (dict, Dict)):
raise TypeError
if len(args) == 1:
rows = kwargs.get('rows', None)
cols = kwargs.get('cols', None)
if rows is not None:
rows = as_int(rows)
if cols is not None:
cols = as_int(cols)
elif len(args) == 2:
rows = cols = as_int(args[0])
else:
rows, cols = map(as_int, args[:2])
# fails with ValueError if any keys are not ints
_ = all(as_int(k) for k in args[-1])
except (ValueError, TypeError):
raise TypeError(filldedent(
'''unrecognized input to banded:
expecting [[row,] col,] {int: value}'''))
def rc(d):
# return row,col coord of diagonal start
r = -d if d < 0 else 0
c = 0 if r else d
return r, c
smat = {}
undone = []
tba = Dummy()
# first handle objects with size
for d, v in args[-1].items():
r, c = rc(d)
# note: only list and tuple are recognized since this
# will allow other Basic objects like Tuple
# into the matrix if so desired
if isinstance(v, (list, tuple)):
extra = 0
for i, vi in enumerate(v):
i += extra
if is_sequence(vi):
vi = SparseMatrix(vi)
smat[r + i, c + i] = vi
extra += min(vi.shape) - 1
else:
smat[r + i, c + i] = vi
elif is_sequence(v):
v = SparseMatrix(v)
rv, cv = v.shape
if rows and cols:
nr, xr = divmod(rows - r, rv)
nc, xc = divmod(cols - c, cv)
x = xr or xc
do = min(nr, nc)
elif rows:
do, x = divmod(rows - r, rv)
elif cols:
do, x = divmod(cols - c, cv)
else:
do = 1
x = 0
if x:
raise ValueError(filldedent('''
sequence does not fit an integral number of times
in the matrix'''))
j = min(v.shape)
for i in range(do):
smat[r, c] = v
r += j
c += j
elif v:
smat[r, c] = tba
undone.append((d, v))
s = SparseMatrix(None, smat) # to expand matrices
smat = s._smat
# check for dim errors here
if rows is not None and rows < s.rows:
raise ValueError('Designated rows %s < needed %s' % (rows, s.rows))
if cols is not None and cols < s.cols:
raise ValueError('Designated cols %s < needed %s' % (cols, s.cols))
if rows is cols is None:
rows = s.rows
cols = s.cols
elif rows is not None and cols is None:
cols = max(rows, s.cols)
elif cols is not None and rows is None:
rows = max(cols, s.rows)
def update(i, j, v):
# update smat and make sure there are
# no collisions
if v:
if (i, j) in smat and smat[i, j] not in (tba, v):
raise ValueError('collision at %s' % ((i, j),))
smat[i, j] = v
if undone:
for d, vi in undone:
r, c = rc(d)
v = vi if callable(vi) else lambda _: vi
i = 0
while r + i < rows and c + i < cols:
update(r + i, c + i, v(i))
i += 1
return SparseMatrix(rows, cols, smat)
from sympy.core import Basic, Integer
import operator
class OmegaPower(Basic):
"""
Represents ordinal exponential and multiplication terms, one of the
building blocks of the Ordinal class.
In OmegaPower(a, b), a represents the exponent and b the multiplicity.
"""
def __new__(cls, a, b):
if isinstance(b, int):
b = Integer(b)
if not isinstance(b, Integer) or b <= 0:
raise TypeError("multiplicity must be a positive integer")
if not isinstance(a, Ordinal):
a = Ordinal.convert(a)
return Basic.__new__(cls, a, b)
@property
def exp(self):
return self.args[0]
@property
def mult(self):
return self.args[1]
def _compare_term(self, other, op):
if self.exp == other.exp:
return op(self.mult, other.mult)
else:
return op(self.exp, other.exp)
def __eq__(self, other):
if not isinstance(other, OmegaPower):
try:
other = OmegaPower(0, other)
except TypeError:
return NotImplemented
return self.args == other.args
def __hash__(self):
return Basic.__hash__(self)
def __lt__(self, other):
if not isinstance(other, OmegaPower):
try:
other = OmegaPower(0, other)
except TypeError:
return NotImplemented
return self._compare_term(other, operator.lt)
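# Illustrative comment (added): OmegaPower(2, 3) stands for the term
# w**2 * 3.  Terms compare by exponent first and only then by
# multiplicity, so OmegaPower(1, 5) < OmegaPower(2, 1) is expected to
# be True.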
class Ordinal(Basic):
"""
Represents ordinals in Cantor normal form.
Internally, this class is just a list of instances of OmegaPower
Examples
========
>>> from sympy.sets import Ordinal, ord0, OmegaPower
>>> from sympy.sets.ordinals import omega
>>> w = omega
>>> w.is_limit_ordinal
True
>>> Ordinal(OmegaPower(w + 1 ,1), OmegaPower(3, 2))
w**(w + 1) + w**3*2
>>> 3 + w
w
>>> (w + 1) * w
w**2
References
==========
.. [1] https://en.wikipedia.org/wiki/Ordinal_arithmetic
"""
def __new__(cls, *terms):
obj = super(Ordinal, cls).__new__(cls, *terms)
powers = [i.exp for i in obj.args]
if not all(powers[i] >= powers[i+1] for i in range(len(powers) - 1)):
raise ValueError("powers must be in decreasing order")
return obj
@property
def terms(self):
return self.args
@property
def leading_term(self):
if self == ord0:
raise ValueError("ordinal zero has no leading term")
return self.terms[0]
@property
def trailing_term(self):
if self == ord0:
raise ValueError("ordinal zero has no trailing term")
return self.terms[-1]
@property
def is_successor_ordinal(self):
try:
return self.trailing_term.exp == ord0
except ValueError:
return False
@property
def is_limit_ordinal(self):
try:
return not self.trailing_term.exp == ord0
except ValueError:
return False
@property
def degree(self):
return self.leading_term.exp
@classmethod
def convert(cls, integer_value):
if integer_value == 0:
return ord0
return Ordinal(OmegaPower(0, integer_value))
def __eq__(self, other):
if not isinstance(other, Ordinal):
try:
other = Ordinal.convert(other)
except TypeError:
return NotImplemented
return self.terms == other.terms
def __hash__(self):
return hash(self.args)
def __lt__(self, other):
if not isinstance(other, Ordinal):
try:
other = Ordinal.convert(other)
except TypeError:
return NotImplemented
for term_self, term_other in zip(self.terms, other.terms):
if term_self != term_other:
return term_self < term_other
return len(self.terms) < len(other.terms)
def __le__(self, other):
return (self == other or self < other)
def __gt__(self, other):
return not self <= other
def __ge__(self, other):
return not self < other
def __str__(self):
net_str = ""
plus_count = 0
if self == ord0:
return 'ord0'
for i in self.terms:
if plus_count:
net_str += " + "
if i.exp == ord0:
net_str += str(i.mult)
elif i.exp == 1:
net_str += 'w'
elif len(i.exp.terms) > 1 or i.exp.is_limit_ordinal:
net_str += 'w**(%s)'%i.exp
else:
net_str += 'w**%s'%i.exp
if not i.mult == 1 and not i.exp == ord0:
net_str += '*%s'%i.mult
plus_count += 1
return(net_str)
__repr__ = __str__
def __add__(self, other):
if not isinstance(other, Ordinal):
try:
other = Ordinal.convert(other)
except TypeError:
return NotImplemented
if other == ord0:
return self
a_terms = list(self.terms)
b_terms = list(other.terms)
r = len(a_terms) - 1
b_exp = other.degree
while r >= 0 and a_terms[r].exp < b_exp:
r -= 1
if r < 0:
terms = b_terms
elif a_terms[r].exp == b_exp:
sum_term = OmegaPower(b_exp, a_terms[r].mult + other.leading_term.mult)
terms = a_terms[:r] + [sum_term] + b_terms[1:]
else:
terms = a_terms[:r+1] + b_terms
return Ordinal(*terms)
def __radd__(self, other):
if not isinstance(other, Ordinal):
try:
other = Ordinal.convert(other)
except TypeError:
return NotImplemented
return other + self
def __mul__(self, other):
if not isinstance(other, Ordinal):
try:
other = Ordinal.convert(other)
except TypeError:
return NotImplemented
if ord0 in (self, other):
return ord0
a_exp = self.degree
a_mult = self.leading_term.mult
sum = []
if other.is_limit_ordinal:
for arg in other.terms:
sum.append(OmegaPower(a_exp + arg.exp, arg.mult))
else:
for arg in other.terms[:-1]:
sum.append(OmegaPower(a_exp + arg.exp, arg.mult))
b_mult = other.trailing_term.mult
sum.append(OmegaPower(a_exp, a_mult*b_mult))
sum += list(self.terms[1:])
return Ordinal(*sum)
def __rmul__(self, other):
if not isinstance(other, Ordinal):
try:
other = Ordinal.convert(other)
except TypeError:
return NotImplemented
return other * self
def __pow__(self, other):
if not self == omega:
return NotImplemented
return Ordinal(OmegaPower(other, 1))
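# Illustrative comment (added; results follow from the Cantor normal form
# arithmetic implemented above): addition absorbs smaller left terms and
# merges equal leading exponents, so one would expect
#   >>> from sympy.sets.ordinals import omega
#   >>> 1 + omega
#   w
#   >>> omega*2 + omega
#   w*3
#   >>> omega**3
#   w**3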
class OrdinalZero(Ordinal):
"""The ordinal zero.
OrdinalZero can be imported as ``ord0``.
"""
pass
class OrdinalOmega(Ordinal):
The ordinal omega, which forms the base of all ordinals in Cantor normal form.
OrdinalOmega can be imported as ``omega``.
Examples
========
>>> from sympy.sets.ordinals import omega
>>> omega + omega
w*2
"""
def __new__(cls):
return Ordinal.__new__(cls)
@property
def terms(self):
return (OmegaPower(1, 1),)
ord0 = OrdinalZero()
omega = OrdinalOmega()
from __future__ import print_function, division
from functools import reduce
from sympy.core.basic import Basic
from sympy.core.containers import Tuple
from sympy.core.expr import Expr
from sympy.core.function import Lambda
from sympy.core.logic import fuzzy_not, fuzzy_or, fuzzy_and
from sympy.core.numbers import oo, Integer
from sympy.core.relational import Eq
from sympy.core.singleton import Singleton, S
from sympy.core.symbol import Dummy, symbols, Symbol
from sympy.core.sympify import _sympify, sympify, converter
from sympy.logic.boolalg import And
from sympy.sets.sets import (Set, Interval, Union, FiniteSet,
ProductSet)
from sympy.utilities.misc import filldedent
from sympy.utilities.iterables import cartes
class Rationals(Set, metaclass=Singleton):
"""
Represents the rational numbers. This set is also available as
the Singleton, S.Rationals.
Examples
========
>>> from sympy import S
>>> S.Half in S.Rationals
True
>>> iterable = iter(S.Rationals)
>>> [next(iterable) for i in range(12)]
[0, 1, -1, 1/2, 2, -1/2, -2, 1/3, 3, -1/3, -3, 2/3]
"""
is_iterable = True
_inf = S.NegativeInfinity
_sup = S.Infinity
is_empty = False
is_finite_set = False
def _contains(self, other):
if not isinstance(other, Expr):
return False
if other.is_Number:
return other.is_Rational
return other.is_rational
def __iter__(self):
from sympy.core.numbers import igcd, Rational
yield S.Zero
yield S.One
yield S.NegativeOne
d = 2
while True:
for n in range(d):
if igcd(n, d) == 1:
yield Rational(n, d)
yield Rational(d, n)
yield Rational(-n, d)
yield Rational(-d, n)
d += 1
@property
def _boundary(self):
return S.Reals
class Naturals(Set, metaclass=Singleton):
"""
Represents the natural numbers (or counting numbers) which are all
positive integers starting from 1. This set is also available as
the Singleton, S.Naturals.
Examples
========
>>> from sympy import S, Interval, pprint
>>> 5 in S.Naturals
True
>>> iterable = iter(S.Naturals)
>>> next(iterable)
1
>>> next(iterable)
2
>>> next(iterable)
3
>>> pprint(S.Naturals.intersect(Interval(0, 10)))
{1, 2, ..., 10}
See Also
========
Naturals0 : non-negative integers (i.e. includes 0, too)
Integers : also includes negative integers
"""
is_iterable = True
_inf = S.One
_sup = S.Infinity
is_empty = False
is_finite_set = False
def _contains(self, other):
if not isinstance(other, Expr):
return False
elif other.is_positive and other.is_integer:
return True
elif other.is_integer is False or other.is_positive is False:
return False
def _eval_is_subset(self, other):
return Range(1, oo).is_subset(other)
def _eval_is_superset(self, other):
return Range(1, oo).is_superset(other)
def __iter__(self):
i = self._inf
while True:
yield i
i = i + 1
@property
def _boundary(self):
return self
def as_relational(self, x):
from sympy.functions.elementary.integers import floor
return And(Eq(floor(x), x), x >= self.inf, x < oo)
class Naturals0(Naturals):
"""Represents the whole numbers which are all the non-negative integers,
inclusive of zero.
See Also
========
Naturals : positive integers; does not include 0
Integers : also includes the negative integers
"""
_inf = S.Zero
def _contains(self, other):
if not isinstance(other, Expr):
return S.false
elif other.is_integer and other.is_nonnegative:
return S.true
elif other.is_integer is False or other.is_nonnegative is False:
return S.false
def _eval_is_subset(self, other):
return Range(oo).is_subset(other)
def _eval_is_superset(self, other):
return Range(oo).is_superset(other)
class Integers(Set, metaclass=Singleton):
"""
Represents all integers: positive, negative and zero. This set is also
available as the Singleton, S.Integers.
Examples
========
>>> from sympy import S, Interval, pprint
>>> 5 in S.Naturals
True
>>> iterable = iter(S.Integers)
>>> next(iterable)
0
>>> next(iterable)
1
>>> next(iterable)
-1
>>> next(iterable)
2
>>> pprint(S.Integers.intersect(Interval(-4, 4)))
{-4, -3, ..., 4}
See Also
========
Naturals0 : non-negative integers
Integers : positive and negative integers and zero
"""
is_iterable = True
is_empty = False
is_finite_set = False
def _contains(self, other):
if not isinstance(other, Expr):
return S.false
return other.is_integer
def __iter__(self):
yield S.Zero
i = S.One
while True:
yield i
yield -i
i = i + 1
@property
def _inf(self):
return S.NegativeInfinity
@property
def _sup(self):
return S.Infinity
@property
def _boundary(self):
return self
def as_relational(self, x):
from sympy.functions.elementary.integers import floor
return And(Eq(floor(x), x), -oo < x, x < oo)
def _eval_is_subset(self, other):
return Range(-oo, oo).is_subset(other)
def _eval_is_superset(self, other):
return Range(-oo, oo).is_superset(other)
class Reals(Interval, metaclass=Singleton):
"""
Represents all real numbers
from negative infinity to positive infinity,
including all integer, rational and irrational numbers.
This set is also available as the Singleton, S.Reals.
Examples
========
>>> from sympy import S, Interval, Rational, pi, I
>>> 5 in S.Reals
True
>>> Rational(-1, 2) in S.Reals
True
>>> pi in S.Reals
True
>>> 3*I in S.Reals
False
>>> S.Reals.contains(pi)
True
See Also
========
ComplexRegion
"""
def __new__(cls):
return Interval.__new__(cls, S.NegativeInfinity, S.Infinity)
def __eq__(self, other):
return other == Interval(S.NegativeInfinity, S.Infinity)
def __hash__(self):
return hash(Interval(S.NegativeInfinity, S.Infinity))
class ImageSet(Set):
"""
Image of a set under a mathematical function. The transformation
must be given as a Lambda function which has as many arguments
as the elements of the set upon which it operates, e.g. 1 argument
when acting on the set of integers or 2 arguments when acting on
a complex region.
This function is not normally called directly, but is called
from `imageset`.
Examples
========
>>> from sympy import Symbol, S, pi, Dummy, Lambda
>>> from sympy.sets.sets import FiniteSet, Interval
>>> from sympy.sets.fancysets import ImageSet
>>> x = Symbol('x')
>>> N = S.Naturals
>>> squares = ImageSet(Lambda(x, x**2), N) # {x**2 for x in N}
>>> 4 in squares
True
>>> 5 in squares
False
>>> FiniteSet(0, 1, 2, 3, 4, 5, 6, 7, 9, 10).intersect(squares)
FiniteSet(1, 4, 9)
>>> square_iterable = iter(squares)
>>> for i in range(4):
... next(square_iterable)
1
4
9
16
If you want to get the value for a particular `x`, e.g. 2 or 1/2, check
whether the `x` value is in `base_set` before passing it as an argument:
>>> squares.lamda(2)
4
>>> squares.lamda(S(1)/2)
1/4
>>> n = Dummy('n')
>>> solutions = ImageSet(Lambda(n, n*pi), S.Integers) # solutions of sin(x) = 0
>>> dom = Interval(-1, 1)
>>> dom.intersect(solutions)
FiniteSet(0)
See Also
========
sympy.sets.sets.imageset
"""
def __new__(cls, flambda, *sets):
if not isinstance(flambda, Lambda):
raise ValueError('First argument must be a Lambda')
signature = flambda.signature
if len(signature) != len(sets):
raise ValueError('Incompatible signature')
sets = [_sympify(s) for s in sets]
if not all(isinstance(s, Set) for s in sets):
raise TypeError("Set arguments to ImageSet should of type Set")
if not all(cls._check_sig(sg, st) for sg, st in zip(signature, sets)):
raise ValueError("Signature %s does not match sets %s" % (signature, sets))
if flambda is S.IdentityFunction and len(sets) == 1:
return sets[0]
if not set(flambda.variables) & flambda.expr.free_symbols:
is_empty = fuzzy_or(s.is_empty for s in sets)
if is_empty == True:
return S.EmptySet
elif is_empty == False:
return FiniteSet(flambda.expr)
return Basic.__new__(cls, flambda, *sets)
lamda = property(lambda self: self.args[0])
base_sets = property(lambda self: self.args[1:])
@property
def base_set(self):
# XXX: Maybe deprecate this? It is poorly defined in handling
# the multivariate case...
sets = self.base_sets
if len(sets) == 1:
return sets[0]
else:
return ProductSet(*sets).flatten()
@property
def base_pset(self):
return ProductSet(*self.base_sets)
@classmethod
def _check_sig(cls, sig_i, set_i):
if sig_i.is_symbol:
return True
elif isinstance(set_i, ProductSet):
sets = set_i.sets
if len(sig_i) != len(sets):
return False
# Recurse through the signature for nested tuples:
return all(cls._check_sig(ts, ps) for ts, ps in zip(sig_i, sets))
else:
# XXX: Need a better way of checking whether a set is a set of
# Tuples or not. For example a FiniteSet can contain Tuples
# but so can an ImageSet or a ConditionSet. Others like
# Integers, Reals etc can not contain Tuples. We could just
# list the possibilities here... Current code for e.g.
# _contains probably only works for ProductSet.
return True # Give the benefit of the doubt
def __iter__(self):
already_seen = set()
for i in self.base_pset:
val = self.lamda(*i)
if val in already_seen:
continue
else:
already_seen.add(val)
yield val
def _is_multivariate(self):
return len(self.lamda.variables) > 1
def _contains(self, other):
from sympy.solvers.solveset import _solveset_multi
def get_symsetmap(signature, base_sets):
'''Attempt to get a map of symbols to base_sets'''
queue = list(zip(signature, base_sets))
symsetmap = {}
for sig, base_set in queue:
if sig.is_symbol:
symsetmap[sig] = base_set
elif base_set.is_ProductSet:
sets = base_set.sets
if len(sig) != len(sets):
raise ValueError("Incompatible signature")
# Recurse
queue.extend(zip(sig, sets))
else:
# If we get here then we have something like sig = (x, y) and
# base_set = {(1, 2), (3, 4)}. For now we give up.
return None
return symsetmap
def get_equations(expr, candidate):
'''Find the equations relating symbols in expr and candidate.'''
queue = [(expr, candidate)]
for e, c in queue:
if not isinstance(e, Tuple):
yield Eq(e, c)
elif not isinstance(c, Tuple) or len(e) != len(c):
yield False
return
else:
queue.extend(zip(e, c))
# Get the basic objects together:
other = _sympify(other)
expr = self.lamda.expr
sig = self.lamda.signature
variables = self.lamda.variables
base_sets = self.base_sets
# Use dummy symbols for ImageSet parameters so they don't match
# anything in other
rep = {v: Dummy(v.name) for v in variables}
variables = [v.subs(rep) for v in variables]
sig = sig.subs(rep)
expr = expr.subs(rep)
# Map the parts of other to those in the Lambda expr
equations = []
for eq in get_equations(expr, other):
# Unsatisfiable equation?
if eq is False:
return False
equations.append(eq)
# Map the symbols in the signature to the corresponding domains
symsetmap = get_symsetmap(sig, base_sets)
if symsetmap is None:
# Can't factor the base sets to a ProductSet
return None
# Which of the variables in the Lambda signature need to be solved for?
symss = (eq.free_symbols for eq in equations)
variables = set(variables) & reduce(set.union, symss, set())
# Use internal multivariate solveset
variables = tuple(variables)
base_sets = [symsetmap[v] for v in variables]
solnset = _solveset_multi(equations, variables, base_sets)
if solnset is None:
return None
return fuzzy_not(solnset.is_empty)
@property
def is_iterable(self):
return all(s.is_iterable for s in self.base_sets)
def doit(self, **kwargs):
from sympy.sets.setexpr import SetExpr
f = self.lamda
sig = f.signature
if len(sig) == 1 and sig[0].is_symbol and isinstance(f.expr, Expr):
base_set = self.base_sets[0]
return SetExpr(base_set)._eval_func(f).set
if all(s.is_FiniteSet for s in self.base_sets):
return FiniteSet(*(f(*a) for a in cartes(*self.base_sets)))
return self
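# Illustrative sketch (added comment; output format assumed to match the
# FiniteSet printing used in the docstrings above):
#   >>> from sympy import Lambda, FiniteSet, Symbol
#   >>> x = Symbol('x')
#   >>> ImageSet(Lambda(x, x**2), FiniteSet(1, 2, 3)).doit()
#   FiniteSet(1, 4, 9)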
class Range(Set):
"""
Represents a range of integers. Can be called as Range(stop),
Range(start, stop), or Range(start, stop, step); when step is
not given it defaults to 1.
`Range(stop)` is the same as `Range(0, stop, 1)` and the stop value
(just as for Python ranges) is not included in the Range values.
>>> from sympy import Range
>>> list(Range(3))
[0, 1, 2]
The step can also be negative:
>>> list(Range(10, 0, -2))
[10, 8, 6, 4, 2]
The stop value is made canonical so equivalent ranges always
have the same args:
>>> Range(0, 10, 3)
Range(0, 12, 3)
Infinite ranges are allowed. ``oo`` and ``-oo`` are never included in the
set (``Range`` is always a subset of ``Integers``). If the starting point
is infinite, then the final value is ``stop - step``. To iterate such a
range, it needs to be reversed:
>>> from sympy import oo
>>> r = Range(-oo, 1)
>>> r[-1]
0
>>> next(iter(r))
Traceback (most recent call last):
...
TypeError: Cannot iterate over Range with infinite start
>>> next(iter(r.reversed))
0
Although Range is a set (and supports the normal set
operations) it maintains the order of the elements and can
be used in contexts where `range` would be used.
>>> from sympy import Interval
>>> Range(0, 10, 2).intersect(Interval(3, 7))
Range(4, 8, 2)
>>> list(_)
[4, 6]
Although slicing of a Range will always return a Range -- possibly
empty -- an empty set will be returned from any intersection that
is empty:
>>> Range(3)[:0]
Range(0, 0, 1)
>>> Range(3).intersect(Interval(4, oo))
EmptySet
>>> Range(3).intersect(Range(4, oo))
EmptySet
Range will accept symbolic arguments but has very limited support
for doing anything other than displaying the Range:
>>> from sympy import Symbol, pprint
>>> from sympy.abc import i, j, k
>>> Range(i, j, k).start
i
>>> Range(i, j, k).inf
Traceback (most recent call last):
...
ValueError: invalid method for symbolic range
Better success will be had when using integer symbols:
>>> n = Symbol('n', integer=True)
>>> r = Range(n, n + 20, 3)
>>> r.inf
n
>>> pprint(r)
{n, n + 3, ..., n + 17}
"""
is_iterable = True
def __new__(cls, *args):
from sympy.functions.elementary.integers import ceiling
if len(args) == 1:
if isinstance(args[0], range):
raise TypeError(
'use sympify(%s) to convert range to Range' % args[0])
# expand range
slc = slice(*args)
if slc.step == 0:
raise ValueError("step cannot be 0")
start, stop, step = slc.start or 0, slc.stop, slc.step or 1
try:
ok = []
for w in (start, stop, step):
w = sympify(w)
if w in [S.NegativeInfinity, S.Infinity] or (
w.has(Symbol) and w.is_integer != False):
ok.append(w)
elif not w.is_Integer:
raise ValueError
else:
ok.append(w)
except ValueError:
raise ValueError(filldedent('''
Finite arguments to Range must be integers; `imageset` can define
other cases, e.g. use `imageset(i, i/10, Range(3))` to give
[0, 1/10, 1/5].'''))
start, stop, step = ok
null = False
if any(i.has(Symbol) for i in (start, stop, step)):
if start == stop:
null = True
else:
end = stop
elif start.is_infinite:
span = step*(stop - start)
if span is S.NaN or span <= 0:
null = True
elif step.is_Integer and stop.is_infinite and abs(step) != 1:
raise ValueError(filldedent('''
Step size must be %s in this case.''' % (1 if step > 0 else -1)))
else:
end = stop
else:
oostep = step.is_infinite
if oostep:
step = S.One if step > 0 else S.NegativeOne
n = ceiling((stop - start)/step)
if n <= 0:
null = True
elif oostep:
end = start + 1
step = S.One # make it a canonical single step
else:
end = start + n*step
if null:
start = end = S.Zero
step = S.One
return Basic.__new__(cls, start, end, step)
start = property(lambda self: self.args[0])
stop = property(lambda self: self.args[1])
step = property(lambda self: self.args[2])
@property
def reversed(self):
"""Return an equivalent Range in the opposite order.
Examples
========
>>> from sympy import Range
>>> Range(10).reversed
Range(9, -1, -1)
"""
if self.has(Symbol):
_ = self.size # validate
if not self:
return self
return self.func(
self.stop - self.step, self.start - self.step, -self.step)
def _contains(self, other):
if not self:
return S.false
if other.is_infinite:
return S.false
if not other.is_integer:
return other.is_integer
if self.has(Symbol):
try:
_ = self.size # validate
except ValueError:
return
if self.start.is_finite:
ref = self.start
elif self.stop.is_finite:
ref = self.stop
else: # both infinite; step is +/- 1 (enforced by __new__)
return S.true
if self.size == 1:
return Eq(other, self[0])
res = (ref - other) % self.step
if res == S.Zero:
return And(other >= self.inf, other <= self.sup)
elif res.is_Integer: # off sequence
return S.false
else: # symbolic/unsimplified residue modulo step
return None
def __iter__(self):
if self.has(Symbol):
_ = self.size # validate
if self.start in [S.NegativeInfinity, S.Infinity]:
raise TypeError("Cannot iterate over Range with infinite start")
elif self:
i = self.start
step = self.step
while True:
if (step > 0 and not (self.start <= i < self.stop)) or \
(step < 0 and not (self.stop < i <= self.start)):
break
yield i
i += step
def __len__(self):
rv = self.size
if rv is S.Infinity:
raise ValueError('Use .size to get the length of an infinite Range')
return int(rv)
@property
def size(self):
if not self:
return S.Zero
dif = self.stop - self.start
if self.has(Symbol):
if dif.has(Symbol) or self.step.has(Symbol) or (
not self.start.is_integer and not self.stop.is_integer):
raise ValueError('invalid method for symbolic range')
if dif.is_infinite:
return S.Infinity
return Integer(abs(dif//self.step))
def __nonzero__(self):
return self.start != self.stop
__bool__ = __nonzero__
def __getitem__(self, i):
from sympy.functions.elementary.integers import ceiling
ooslice = "cannot slice from the end with an infinite value"
zerostep = "slice step cannot be zero"
infinite = "slicing not possible on range with infinite start"
# if we had to take every other element in the following
# oo, ..., 6, 4, 2, 0
# we might get oo, ..., 4, 0 or oo, ..., 6, 2
ambiguous = "cannot unambiguously re-stride from the end " + \
"with an infinite value"
if isinstance(i, slice):
if self.size.is_finite: # validates, too
start, stop, step = i.indices(self.size)
n = ceiling((stop - start)/step)
if n <= 0:
return Range(0)
canonical_stop = start + n*step
end = canonical_stop - step
ss = step*self.step
return Range(self[start], self[end] + ss, ss)
else: # infinite Range
start = i.start
stop = i.stop
if i.step == 0:
raise ValueError(zerostep)
step = i.step or 1
ss = step*self.step
#---------------------
# handle infinite Range
# i.e. Range(-oo, oo) or Range(oo, -oo, -1)
# --------------------
if self.start.is_infinite and self.stop.is_infinite:
raise ValueError(infinite)
#---------------------
# handle infinite on right
# e.g. Range(0, oo) or Range(0, -oo, -1)
# --------------------
if self.stop.is_infinite:
# start and stop are not interdependent --
# they only depend on step --so we use the
# equivalent reversed values
return self.reversed[
stop if stop is None else -stop + 1:
start if start is None else -start:
step].reversed
#---------------------
# handle infinite on the left
# e.g. Range(oo, 0, -1) or Range(-oo, 0)
# --------------------
# consider combinations of
# start/stop {== None, < 0, == 0, > 0} and
# step {< 0, > 0}
if start is None:
if stop is None:
if step < 0:
return Range(self[-1], self.start, ss)
elif step > 1:
raise ValueError(ambiguous)
else: # == 1
return self
elif stop < 0:
if step < 0:
return Range(self[-1], self[stop], ss)
else: # > 0
return Range(self.start, self[stop], ss)
elif stop == 0:
if step > 0:
return Range(0)
else: # < 0
raise ValueError(ooslice)
elif stop == 1:
if step > 0:
raise ValueError(ooslice) # infinite singleton
else: # < 0
raise ValueError(ooslice)
else: # > 1
raise ValueError(ooslice)
elif start < 0:
if stop is None:
if step < 0:
return Range(self[start], self.start, ss)
else: # > 0
return Range(self[start], self.stop, ss)
elif stop < 0:
return Range(self[start], self[stop], ss)
elif stop == 0:
if step < 0:
raise ValueError(ooslice)
else: # > 0
return Range(0)
elif stop > 0:
raise ValueError(ooslice)
elif start == 0:
if stop is None:
if step < 0:
raise ValueError(ooslice) # infinite singleton
elif step > 1:
raise ValueError(ambiguous)
else: # == 1
return self
elif stop < 0:
if step > 1:
raise ValueError(ambiguous)
elif step == 1:
return Range(self.start, self[stop], ss)
else: # < 0
return Range(0)
else: # >= 0
raise ValueError(ooslice)
elif start > 0:
raise ValueError(ooslice)
else:
if not self:
raise IndexError('Range index out of range')
if i == 0:
if self.start.is_infinite:
raise ValueError(ooslice)
if self.has(Symbol):
if (self.stop > self.start) == self.step.is_positive and self.step.is_positive is not None:
pass
else:
_ = self.size # validate
return self.start
if i == -1:
if self.stop.is_infinite:
raise ValueError(ooslice)
n = self.stop - self.step
if n.is_Integer or (
n.is_integer and (
(n - self.start).is_nonnegative ==
self.step.is_positive)):
return n
_ = self.size # validate
rv = (self.stop if i < 0 else self.start) + i*self.step
if rv.is_infinite:
raise ValueError(ooslice)
if rv < self.inf or rv > self.sup:
raise IndexError("Range index out of range")
return rv
@property
def _inf(self):
if not self:
raise NotImplementedError
if self.has(Symbol):
if self.step.is_positive:
return self[0]
elif self.step.is_negative:
return self[-1]
_ = self.size # validate
if self.step > 0:
return self.start
else:
return self.stop - self.step
@property
def _sup(self):
if not self:
raise NotImplementedError
if self.has(Symbol):
if self.step.is_positive:
return self[-1]
elif self.step.is_negative:
return self[0]
_ = self.size # validate
if self.step > 0:
return self.stop - self.step
else:
return self.start
@property
def _boundary(self):
return self
def as_relational(self, x):
"""Rewrite a Range in terms of equalities and logic operators. """
from sympy.functions.elementary.integers import floor
if self.size == 1:
return Eq(x, self[0])
else:
return And(
Eq(x, floor(x)),
x >= self.inf if self.inf in self else x > self.inf,
x <= self.sup if self.sup in self else x < self.sup)
converter[range] = lambda r: Range(r.start, r.stop, r.step)
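# Illustrative comment (added): registering this converter lets sympify
# turn built-in ranges into Range objects, so sympify(range(10)) is
# expected to equal Range(0, 10, 1).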
def normalize_theta_set(theta):
"""
Normalize a Real Set `theta` in the Interval [0, 2*pi). It returns
a normalized value of theta in the Set. For Interval, a maximum of
one cycle [0, 2*pi) is returned, i.e. for theta equal to [0, 10*pi],
the returned normalized value would be [0, 2*pi). As of now, intervals
with end points that are not multiples of `pi` are not supported.
Raises
======
NotImplementedError
The algorithms for Normalizing theta Set are not yet
implemented.
ValueError
The input is not valid, i.e. the input is not a real set.
RuntimeError
It is a bug, please report to the github issue tracker.
Examples
========
>>> from sympy.sets.fancysets import normalize_theta_set
>>> from sympy import Interval, FiniteSet, pi
>>> normalize_theta_set(Interval(9*pi/2, 5*pi))
Interval(pi/2, pi)
>>> normalize_theta_set(Interval(-3*pi/2, pi/2))
Interval.Ropen(0, 2*pi)
>>> normalize_theta_set(Interval(-pi/2, pi/2))
Union(Interval(0, pi/2), Interval.Ropen(3*pi/2, 2*pi))
>>> normalize_theta_set(Interval(-4*pi, 3*pi))
Interval.Ropen(0, 2*pi)
>>> normalize_theta_set(Interval(-3*pi/2, -pi/2))
Interval(pi/2, 3*pi/2)
>>> normalize_theta_set(FiniteSet(0, pi, 3*pi))
FiniteSet(0, pi)
"""
from sympy.functions.elementary.trigonometric import _pi_coeff as coeff
if theta.is_Interval:
interval_len = theta.measure
# one complete circle
if interval_len >= 2*S.Pi:
if interval_len == 2*S.Pi and theta.left_open and theta.right_open:
k = coeff(theta.start)
return Union(Interval(0, k*S.Pi, False, True),
Interval(k*S.Pi, 2*S.Pi, True, True))
return Interval(0, 2*S.Pi, False, True)
k_start, k_end = coeff(theta.start), coeff(theta.end)
if k_start is None or k_end is None:
raise NotImplementedError("Normalizing theta without pi as coefficient is "
"not yet implemented")
new_start = k_start*S.Pi
new_end = k_end*S.Pi
if new_start > new_end:
return Union(Interval(S.Zero, new_end, False, theta.right_open),
Interval(new_start, 2*S.Pi, theta.left_open, True))
else:
return Interval(new_start, new_end, theta.left_open, theta.right_open)
elif theta.is_FiniteSet:
new_theta = []
for element in theta:
k = coeff(element)
if k is None:
raise NotImplementedError('Normalizing theta without pi as '
'coefficient is not implemented.')
else:
new_theta.append(k*S.Pi)
return FiniteSet(*new_theta)
elif theta.is_Union:
return Union(*[normalize_theta_set(interval) for interval in theta.args])
elif theta.is_subset(S.Reals):
raise NotImplementedError("Normalizing theta when, it is of type %s is not "
"implemented" % type(theta))
else:
raise ValueError(" %s is not a real set" % (theta))
class ComplexRegion(Set):
"""
Represents the Set of all Complex Numbers. It can represent a
region of Complex Plane in both the standard forms Polar and
Rectangular coordinates.
* Polar Form
Input is in the form of the ProductSet or Union of ProductSets
of the intervals of r and theta, together with the flag polar=True.
Z = {z in C | z = r*[cos(theta) + I*sin(theta)], r in [r], theta in [theta]}
* Rectangular Form
Input is in the form of the ProductSet or Union of ProductSets
of the intervals of x and y of the Complex numbers in a Plane.
The default input type is the rectangular form.
Z = {z in C | z = x + I*y, x in [Re(z)], y in [Im(z)]}
Examples
========
>>> from sympy.sets.fancysets import ComplexRegion
>>> from sympy.sets import Interval
>>> from sympy import S, I, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 6)
>>> c = Interval(1, 8)
>>> c1 = ComplexRegion(a*b) # Rectangular Form
>>> c1
CartesianComplexRegion(ProductSet(Interval(2, 3), Interval(4, 6)))
* c1 represents the rectangular region in complex plane
surrounded by the coordinates (2, 4), (3, 4), (3, 6) and
(2, 6), of the four vertices.
>>> c2 = ComplexRegion(Union(a*b, b*c))
>>> c2
CartesianComplexRegion(Union(ProductSet(Interval(2, 3), Interval(4, 6)), ProductSet(Interval(4, 6), Interval(1, 8))))
* c2 represents the Union of two rectangular regions in complex
plane. One of them surrounded by the coordinates of c1 and
other surrounded by the coordinates (4, 1), (6, 1), (6, 8) and
(4, 8).
>>> 2.5 + 4.5*I in c1
True
>>> 2.5 + 6.5*I in c1
False
>>> r = Interval(0, 1)
>>> theta = Interval(0, 2*S.Pi)
>>> c2 = ComplexRegion(r*theta, polar=True) # Polar Form
>>> c2 # unit Disk
PolarComplexRegion(ProductSet(Interval(0, 1), Interval.Ropen(0, 2*pi)))
* c2 represents the region in complex plane inside the
Unit Disk centered at the origin.
>>> 0.5 + 0.5*I in c2
True
>>> 1 + 2*I in c2
False
>>> unit_disk = ComplexRegion(Interval(0, 1)*Interval(0, 2*S.Pi), polar=True)
>>> upper_half_unit_disk = ComplexRegion(Interval(0, 1)*Interval(0, S.Pi), polar=True)
>>> intersection = unit_disk.intersect(upper_half_unit_disk)
>>> intersection
PolarComplexRegion(ProductSet(Interval(0, 1), Interval(0, pi)))
>>> intersection == upper_half_unit_disk
True
See Also
========
CartesianComplexRegion
PolarComplexRegion
Complexes
"""
is_ComplexRegion = True
def __new__(cls, sets, polar=False):
if polar is False:
return CartesianComplexRegion(sets)
elif polar is True:
return PolarComplexRegion(sets)
else:
raise ValueError("polar should be either True or False")
@property
def sets(self):
"""
Return the raw input sets of self.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.sets
ProductSet(Interval(2, 3), Interval(4, 5))
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.sets
Union(ProductSet(Interval(2, 3), Interval(4, 5)), ProductSet(Interval(4, 5), Interval(1, 7)))
"""
return self.args[0]
@property
def psets(self):
"""
Return a tuple of the input sets (ProductSets) of self.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.psets
(ProductSet(Interval(2, 3), Interval(4, 5)),)
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.psets
(ProductSet(Interval(2, 3), Interval(4, 5)), ProductSet(Interval(4, 5), Interval(1, 7)))
"""
if self.sets.is_ProductSet:
psets = ()
psets = psets + (self.sets, )
else:
psets = self.sets.args
return psets
@property
def a_interval(self):
"""
Return the union of intervals of `x` when self is in
rectangular form, or the union of intervals of `r` when
self is in polar form.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.a_interval
Interval(2, 3)
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.a_interval
Union(Interval(2, 3), Interval(4, 5))
"""
a_interval = []
for element in self.psets:
a_interval.append(element.args[0])
a_interval = Union(*a_interval)
return a_interval
@property
def b_interval(self):
"""
Return the union of intervals of `y` when self is in
rectangular form, or the union of intervals of `theta`
when self is in polar form.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.b_interval
Interval(4, 5)
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.b_interval
Interval(1, 7)
"""
b_interval = []
for element in self.psets:
b_interval.append(element.args[1])
b_interval = Union(*b_interval)
return b_interval
@property
def _measure(self):
"""
The measure of self.sets.
Examples
========
>>> from sympy import Interval, ComplexRegion, S
>>> a, b = Interval(2, 5), Interval(4, 8)
>>> c = Interval(0, 2*S.Pi)
>>> c1 = ComplexRegion(a*b)
>>> c1.measure
12
>>> c2 = ComplexRegion(a*c, polar=True)
>>> c2.measure
6*pi
"""
return self.sets._measure
@classmethod
def from_real(cls, sets):
"""
Converts given subset of real numbers to a complex region.
Examples
========
>>> from sympy import Interval, ComplexRegion
>>> unit = Interval(0,1)
>>> ComplexRegion.from_real(unit)
CartesianComplexRegion(ProductSet(Interval(0, 1), FiniteSet(0)))
"""
if not sets.is_subset(S.Reals):
raise ValueError("sets must be a subset of the real line")
return CartesianComplexRegion(sets * FiniteSet(0))
def _contains(self, other):
from sympy.functions import arg, Abs
from sympy.core.containers import Tuple
other = sympify(other)
isTuple = isinstance(other, Tuple)
if isTuple and len(other) != 2:
raise ValueError('expecting Tuple of length 2')
# If other is neither an Expr nor a Tuple
if not isinstance(other, Expr) and not isinstance(other, Tuple):
return S.false
# self in rectangular form
if not self.polar:
re, im = other if isTuple else other.as_real_imag()
return fuzzy_or(fuzzy_and([
pset.args[0]._contains(re),
pset.args[1]._contains(im)])
for pset in self.psets)
# self in polar form
elif self.polar:
if other.is_zero:
# ignore undefined complex argument
return fuzzy_or(pset.args[0]._contains(S.Zero)
for pset in self.psets)
if isTuple:
r, theta = other
else:
r, theta = Abs(other), arg(other)
if theta.is_real and theta.is_number:
# angles in psets are normalized to [0, 2pi)
theta %= 2*S.Pi
return fuzzy_or(fuzzy_and([
pset.args[0]._contains(r),
pset.args[1]._contains(theta)])
for pset in self.psets)
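# For example, with c = ComplexRegion(Interval(0, 1)*Interval(0, S.Pi),
# polar=True), the point -S.Half is contained: its modulus 1/2 lies in
# [0, 1] and its argument pi (already in the normalized range [0, 2*pi))
# lies in [0, pi].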
class CartesianComplexRegion(ComplexRegion):
"""
Set representing a rectangular region of the complex plane.
Z = {z in C | z = x + I*y, x in [Re(z)], y in [Im(z)]}
Examples
========
>>> from sympy.sets.fancysets import ComplexRegion
>>> from sympy.sets.sets import Interval
>>> from sympy import I
>>> region = ComplexRegion(Interval(1, 3) * Interval(4, 6))
>>> 2 + 5*I in region
True
>>> 5*I in region
False
See also
========
ComplexRegion
PolarComplexRegion
Complexes
"""
polar = False
variables = symbols('x, y', cls=Dummy)
def __new__(cls, sets):
if sets == S.Reals*S.Reals:
return S.Complexes
if all(_a.is_FiniteSet for _a in sets.args) and (len(sets.args) == 2):
# ** ProductSet of FiniteSets in the Complex Plane. **
# For Cases like ComplexRegion({2, 4}*{3}), It
# would return {2 + 3*I, 4 + 3*I}
# FIXME: This should probably be handled with something like:
# return ImageSet(Lambda((x, y), x+I*y), sets).rewrite(FiniteSet)
complex_num = []
for x in sets.args[0]:
for y in sets.args[1]:
complex_num.append(x + S.ImaginaryUnit*y)
return FiniteSet(*complex_num)
else:
return Set.__new__(cls, sets)
@property
def expr(self):
x, y = self.variables
return x + S.ImaginaryUnit*y
class PolarComplexRegion(ComplexRegion):
"""
Set representing a polar region of the complex plane.
Z = {z in C | z = r*[cos(theta) + I*sin(theta)], r in [r], theta in [theta]}
Examples
========
>>> from sympy.sets.fancysets import ComplexRegion, Interval
>>> from sympy import oo, pi, I
>>> rset = Interval(0, oo)
>>> thetaset = Interval(0, pi)
>>> upper_half_plane = ComplexRegion(rset * thetaset, polar=True)
>>> 1 + I in upper_half_plane
True
>>> 1 - I in upper_half_plane
False
See also
========
ComplexRegion
CartesianComplexRegion
Complexes
"""
polar = True
variables = symbols('r, theta', cls=Dummy)
def __new__(cls, sets):
new_sets = []
# sets is Union of ProductSets
if not sets.is_ProductSet:
for k in sets.args:
new_sets.append(k)
# sets is ProductSets
else:
new_sets.append(sets)
# Normalize input theta
for k, v in enumerate(new_sets):
new_sets[k] = ProductSet(v.args[0],
normalize_theta_set(v.args[1]))
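# e.g. a theta interval such as Interval(2*pi, 5*pi/2) is normalized to
# the equivalent interval Interval(0, pi/2) inside [0, 2*pi)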
sets = Union(*new_sets)
return Set.__new__(cls, sets)
@property
def expr(self):
from sympy.functions.elementary.trigonometric import sin, cos
r, theta = self.variables
return r*(cos(theta) + S.ImaginaryUnit*sin(theta))
class Complexes(CartesianComplexRegion, metaclass=Singleton):
"""
The Set of all complex numbers
Examples
========
>>> from sympy import S, I
>>> S.Complexes
Complexes
>>> 1 + I in S.Complexes
True
See also
========
Reals
ComplexRegion
"""
is_empty = False
is_finite_set = False
# Override property from superclass since Complexes has no args
sets = ProductSet(S.Reals, S.Reals)
def __new__(cls):
return Set.__new__(cls)
def __str__(self):
return "S.Complexes"
def __repr__(self):
return "S.Complexes"
|
768c5e27f79e2a674ef7831aef839bb280a75883757a192bcca87989d8ee1296 | from __future__ import print_function, division
from typing import Optional
from collections import defaultdict
import inspect
from sympy.core.basic import Basic
from sympy.core.compatibility import iterable, ordered, reduce
from sympy.core.containers import Tuple
from sympy.core.decorators import (deprecated, sympify_method_args,
sympify_return)
from sympy.core.evalf import EvalfMixin
from sympy.core.parameters import global_parameters
from sympy.core.expr import Expr
from sympy.core.logic import (FuzzyBool, fuzzy_bool, fuzzy_or, fuzzy_and,
fuzzy_not)
from sympy.core.numbers import Float
from sympy.core.operations import LatticeOp
from sympy.core.relational import Eq, Ne
from sympy.core.singleton import Singleton, S
from sympy.core.symbol import Symbol, Dummy, _uniquely_named_symbol
from sympy.core.sympify import _sympify, sympify, converter
from sympy.logic.boolalg import And, Or, Not, Xor, true, false
from sympy.sets.contains import Contains
from sympy.utilities import subsets
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import iproduct, sift, roundrobin
from sympy.utilities.misc import func_name, filldedent
from mpmath import mpi, mpf
tfn = defaultdict(lambda: None, {
True: S.true,
S.true: S.true,
False: S.false,
S.false: S.false})
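# tfn maps fuzzy three-valued results onto SymPy booleans: True and S.true
# map to S.true, False and S.false map to S.false, and anything else
# (e.g. None) maps to None.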
@sympify_method_args
class Set(Basic):
"""
The base class for any kind of set.
This is not meant to be used directly as a container of items. It does not
behave like the builtin ``set``; see :class:`FiniteSet` for that.
Real intervals are represented by the :class:`Interval` class and unions of
sets by the :class:`Union` class. The empty set is represented by the
:class:`EmptySet` class and available as a singleton as ``S.EmptySet``.
"""
is_number = False
is_iterable = False
is_interval = False
is_FiniteSet = False
is_Interval = False
is_ProductSet = False
is_Union = False
is_Intersection = None # type: Optional[bool]
is_UniversalSet = None # type: Optional[bool]
is_Complement = None # type: Optional[bool]
is_ComplexRegion = False
is_empty = None # type: FuzzyBool
is_finite_set = None # type: FuzzyBool
@property # type: ignore
@deprecated(useinstead="is S.EmptySet or is_empty",
issue=16946, deprecated_since_version="1.5")
def is_EmptySet(self):
return None
@staticmethod
def _infimum_key(expr):
"""
Return infimum (if possible) else S.Infinity.
"""
try:
infimum = expr.inf
assert infimum.is_comparable
except (NotImplementedError,
AttributeError, AssertionError, ValueError):
infimum = S.Infinity
return infimum
def union(self, other):
"""
Returns the union of 'self' and 'other'.
Examples
========
As a shortcut it is possible to use the '+' operator:
>>> from sympy import Interval, FiniteSet
>>> Interval(0, 1).union(Interval(2, 3))
Union(Interval(0, 1), Interval(2, 3))
>>> Interval(0, 1) + Interval(2, 3)
Union(Interval(0, 1), Interval(2, 3))
>>> Interval(1, 2, True, True) + FiniteSet(2, 3)
Union(FiniteSet(3), Interval.Lopen(1, 2))
Similarly it is possible to use the '-' operator for set differences:
>>> Interval(0, 2) - Interval(0, 1)
Interval.Lopen(1, 2)
>>> Interval(1, 3) - FiniteSet(2)
Union(Interval.Ropen(1, 2), Interval.Lopen(2, 3))
"""
return Union(self, other)
def intersect(self, other):
"""
Returns the intersection of 'self' and 'other'.
>>> from sympy import Interval
>>> Interval(1, 3).intersect(Interval(1, 2))
Interval(1, 2)
>>> from sympy import imageset, Lambda, symbols, S
>>> n, m = symbols('n m')
>>> a = imageset(Lambda(n, 2*n), S.Integers)
>>> a.intersect(imageset(Lambda(m, 2*m + 1), S.Integers))
EmptySet
"""
return Intersection(self, other)
def intersection(self, other):
"""
Alias for :meth:`intersect()`
"""
return self.intersect(other)
def is_disjoint(self, other):
"""
Returns True if 'self' and 'other' are disjoint
Examples
========
>>> from sympy import Interval
>>> Interval(0, 2).is_disjoint(Interval(1, 2))
False
>>> Interval(0, 2).is_disjoint(Interval(3, 4))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Disjoint_sets
"""
return self.intersect(other) == S.EmptySet
def isdisjoint(self, other):
"""
Alias for :meth:`is_disjoint()`
"""
return self.is_disjoint(other)
def complement(self, universe):
r"""
The complement of 'self' w.r.t the given universe.
Examples
========
>>> from sympy import Interval, S
>>> Interval(0, 1).complement(S.Reals)
Union(Interval.open(-oo, 0), Interval.open(1, oo))
>>> Interval(0, 1).complement(S.UniversalSet)
Complement(UniversalSet, Interval(0, 1))
"""
return Complement(universe, self)
def _complement(self, other):
# this behaves as other - self
if isinstance(self, ProductSet) and isinstance(other, ProductSet):
# If self and other are disjoint then other - self == other
if len(self.sets) != len(other.sets):
return other
# There can be other ways to represent this but this gives:
# (A x B) - (C x D) = ((A - C) x B) U (A x (B - D))
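# e.g. ([0, 2] x [0, 2]) - ([0, 1] x [0, 1])
#    = (([0, 2] - [0, 1]) x [0, 2]) U ([0, 2] x ([0, 2] - [0, 1]))
#    = ((1, 2] x [0, 2]) U ([0, 2] x (1, 2])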
overlaps = []
pairs = list(zip(self.sets, other.sets))
for n in range(len(pairs)):
sets = (o if i != n else o-s for i, (s, o) in enumerate(pairs))
overlaps.append(ProductSet(*sets))
return Union(*overlaps)
elif isinstance(other, Interval):
if isinstance(self, Interval) or isinstance(self, FiniteSet):
return Intersection(other, self.complement(S.Reals))
elif isinstance(other, Union):
return Union(*(o - self for o in other.args))
elif isinstance(other, Complement):
return Complement(other.args[0], Union(other.args[1], self), evaluate=False)
elif isinstance(other, EmptySet):
return S.EmptySet
elif isinstance(other, FiniteSet):
from sympy.utilities.iterables import sift
sifted = sift(other, lambda x: fuzzy_bool(self.contains(x)))
# ignore those that are contained in self
return Union(FiniteSet(*(sifted[False])),
Complement(FiniteSet(*(sifted[None])), self, evaluate=False)
if sifted[None] else S.EmptySet)
def symmetric_difference(self, other):
"""
Returns symmetric difference of `self` and `other`.
Examples
========
>>> from sympy import Interval, S
>>> Interval(1, 3).symmetric_difference(S.Reals)
Union(Interval.open(-oo, 1), Interval.open(3, oo))
>>> Interval(1, 10).symmetric_difference(S.Reals)
Union(Interval.open(-oo, 1), Interval.open(10, oo))
>>> from sympy import S, EmptySet
>>> S.Reals.symmetric_difference(EmptySet)
Reals
References
==========
.. [1] https://en.wikipedia.org/wiki/Symmetric_difference
"""
return SymmetricDifference(self, other)
def _symmetric_difference(self, other):
return Union(Complement(self, other), Complement(other, self))
@property
def inf(self):
"""
The infimum of 'self'
Examples
========
>>> from sympy import Interval, Union
>>> Interval(0, 1).inf
0
>>> Union(Interval(0, 1), Interval(2, 3)).inf
0
"""
return self._inf
@property
def _inf(self):
raise NotImplementedError("(%s)._inf" % self)
@property
def sup(self):
"""
The supremum of 'self'
Examples
========
>>> from sympy import Interval, Union
>>> Interval(0, 1).sup
1
>>> Union(Interval(0, 1), Interval(2, 3)).sup
3
"""
return self._sup
@property
def _sup(self):
raise NotImplementedError("(%s)._sup" % self)
def contains(self, other):
"""
Returns a SymPy value indicating whether ``other`` is contained
in ``self``: ``true`` if it is, ``false`` if it isn't, else
an unevaluated ``Contains`` expression (or, as in the case of
ConditionSet and a union of FiniteSet/Intervals, an expression
indicating the conditions for containment).
Examples
========
>>> from sympy import Interval, S
>>> from sympy.abc import x
>>> Interval(0, 1).contains(0.5)
True
As a shortcut it is possible to use the 'in' operator, but that
will raise an error unless an affirmative true or false is
obtained.
>>> Interval(0, 1).contains(x)
(0 <= x) & (x <= 1)
>>> x in Interval(0, 1)
Traceback (most recent call last):
...
TypeError: did not evaluate to a bool: None
The result of 'in' is a bool, not a SymPy value
>>> 1 in Interval(0, 2)
True
>>> _ is S.true
False
"""
other = sympify(other, strict=True)
c = self._contains(other)
if c is None:
return Contains(other, self, evaluate=False)
b = tfn[c]
if b is None:
return c
return b
def _contains(self, other):
raise NotImplementedError(filldedent('''
(%s)._contains(%s) is not defined. This method, when
defined, will receive a sympified object. The method
should return True, False, None or something that
expresses what must be true for the containment of that
object in self to be evaluated. If None is returned
then a generic Contains object will be returned
by the ``contains`` method.''' % (self, other)))
def is_subset(self, other):
"""
Returns True if 'self' is a subset of 'other'.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 0.5).is_subset(Interval(0, 1))
True
>>> Interval(0, 1).is_subset(Interval(0, 1, left_open=True))
False
"""
if not isinstance(other, Set):
raise ValueError("Unknown argument '%s'" % other)
# Handle the trivial cases
if self == other:
return True
is_empty = self.is_empty
if is_empty is True:
return True
elif fuzzy_not(is_empty) and other.is_empty:
return False
if self.is_finite_set is False and other.is_finite_set:
return False
# Dispatch on subclass rules
ret = self._eval_is_subset(other)
if ret is not None:
return ret
ret = other._eval_is_superset(self)
if ret is not None:
return ret
# Use pairwise rules from multiple dispatch
from sympy.sets.handlers.issubset import is_subset_sets
ret = is_subset_sets(self, other)
if ret is not None:
return ret
# Fall back on computing the intersection
# XXX: We shouldn't do this. A query like this should be handled
# without evaluating new Set objects. It should be the other way round
# so that the intersect method uses is_subset for evaluation.
if self.intersect(other) == self:
return True
def _eval_is_subset(self, other):
'''Returns a fuzzy bool for whether self is a subset of other.'''
return None
def _eval_is_superset(self, other):
'''Returns a fuzzy bool for whether self is a superset of other.'''
return None
# This should be deprecated:
def issubset(self, other):
"""
Alias for :meth:`is_subset()`
"""
return self.is_subset(other)
def is_proper_subset(self, other):
"""
Returns True if 'self' is a proper subset of 'other'.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 0.5).is_proper_subset(Interval(0, 1))
True
>>> Interval(0, 1).is_proper_subset(Interval(0, 1))
False
"""
if isinstance(other, Set):
return self != other and self.is_subset(other)
else:
raise ValueError("Unknown argument '%s'" % other)
def is_superset(self, other):
"""
Returns True if 'self' is a superset of 'other'.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 0.5).is_superset(Interval(0, 1))
False
>>> Interval(0, 1).is_superset(Interval(0, 1, left_open=True))
True
"""
if isinstance(other, Set):
return other.is_subset(self)
else:
raise ValueError("Unknown argument '%s'" % other)
# This should be deprecated:
def issuperset(self, other):
"""
Alias for :meth:`is_superset()`
"""
return self.is_superset(other)
def is_proper_superset(self, other):
"""
Returns True if 'self' is a proper superset of 'other'.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).is_proper_superset(Interval(0, 0.5))
True
>>> Interval(0, 1).is_proper_superset(Interval(0, 1))
False
"""
if isinstance(other, Set):
return self != other and self.is_superset(other)
else:
raise ValueError("Unknown argument '%s'" % other)
def _eval_powerset(self):
from .powerset import PowerSet
return PowerSet(self)
def powerset(self):
"""
Find the Power set of 'self'.
Examples
========
>>> from sympy import EmptySet, FiniteSet, Interval, PowerSet
A power set of an empty set:
>>> from sympy import FiniteSet, EmptySet
>>> A = EmptySet
>>> A.powerset()
FiniteSet(EmptySet)
A power set of a finite set:
>>> A = FiniteSet(1, 2)
>>> a, b, c = FiniteSet(1), FiniteSet(2), FiniteSet(1, 2)
>>> A.powerset() == FiniteSet(a, b, c, EmptySet)
True
A power set of an interval:
>>> Interval(1, 2).powerset()
PowerSet(Interval(1, 2))
References
==========
.. [1] https://en.wikipedia.org/wiki/Power_set
"""
return self._eval_powerset()
@property
def measure(self):
"""
The (Lebesgue) measure of 'self'
Examples
========
>>> from sympy import Interval, Union
>>> Interval(0, 1).measure
1
>>> Union(Interval(0, 1), Interval(2, 3)).measure
2
"""
return self._measure
@property
def boundary(self):
"""
The boundary or frontier of a set
A point x is on the boundary of a set S if
1. x is in the closure of S.
I.e. Every neighborhood of x contains a point in S.
2. x is not in the interior of S.
I.e. There does not exist an open set centered on x contained
entirely within S.
These are the points on the outer rim of S. If S is open then these
points need not actually be contained within S.
For example, the boundary of an interval is its start and end points.
This is true regardless of whether or not the interval is open.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).boundary
FiniteSet(0, 1)
>>> Interval(0, 1, True, False).boundary
FiniteSet(0, 1)
"""
return self._boundary
@property
def is_open(self):
"""
Property method to check whether a set is open.
A set is open if and only if it has an empty intersection with its
boundary. In particular, a subset A of the reals is open if and only
if each one of its points is contained in an open interval that is a
subset of A.
Examples
========
>>> from sympy import S
>>> S.Reals.is_open
True
>>> S.Rationals.is_open
False
"""
return Intersection(self, self.boundary).is_empty
@property
def is_closed(self):
"""
A property method to check whether a set is closed.
A set is closed if its complement is an open set. The closedness of a
subset of the reals is determined with respect to R and its standard
topology.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).is_closed
True
"""
return self.boundary.is_subset(self)
@property
def closure(self):
"""
Property method which returns the closure of a set.
The closure is defined as the union of the set itself and its
boundary.
Examples
========
>>> from sympy import S, Interval
>>> S.Reals.closure
Reals
>>> Interval(0, 1).closure
Interval(0, 1)
"""
return self + self.boundary
@property
def interior(self):
"""
Property method which returns the interior of a set.
The interior of a set S consists of all points of S that do not
belong to the boundary of S.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).interior
Interval.open(0, 1)
>>> Interval(0, 1).boundary.interior
EmptySet
"""
return self - self.boundary
@property
def _boundary(self):
raise NotImplementedError()
@property
def _measure(self):
raise NotImplementedError("(%s)._measure" % self)
@sympify_return([('other', 'Set')], NotImplemented)
def __add__(self, other):
return self.union(other)
@sympify_return([('other', 'Set')], NotImplemented)
def __or__(self, other):
return self.union(other)
@sympify_return([('other', 'Set')], NotImplemented)
def __and__(self, other):
return self.intersect(other)
@sympify_return([('other', 'Set')], NotImplemented)
def __mul__(self, other):
return ProductSet(self, other)
@sympify_return([('other', 'Set')], NotImplemented)
def __xor__(self, other):
return SymmetricDifference(self, other)
@sympify_return([('exp', Expr)], NotImplemented)
def __pow__(self, exp):
if not (exp.is_Integer and exp >= 0):
raise ValueError("%s: Exponent must be a non-negative Integer" % exp)
return ProductSet(*[self]*exp)
@sympify_return([('other', 'Set')], NotImplemented)
def __sub__(self, other):
return Complement(self, other)
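# The dunder methods above give operator shorthands for the set operations,
# e.g.:
#   Interval(0, 2) & Interval(1, 3)   ->  Interval(1, 2)
#   Interval(0, 1) | Interval(2, 3)   ->  Union(Interval(0, 1), Interval(2, 3))
#   Interval(0, 1) ^ Interval(0, 2)   ->  Interval.Lopen(1, 2)
#   Interval(0, 1) - Interval(0, 1)   ->  EmptySet
#   FiniteSet(1, 2)**2                ->  ProductSet(FiniteSet(1, 2), FiniteSet(1, 2))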
def __contains__(self, other):
other = _sympify(other)
c = self._contains(other)
b = tfn[c]
if b is None:
raise TypeError('did not evaluate to a bool: %r' % c)
return b
class ProductSet(Set):
"""
Represents a Cartesian Product of Sets.
Returns a Cartesian product given several sets as either an iterable
or individual arguments.
Can use '*' operator on any sets for convenient shorthand.
Examples
========
>>> from sympy import Interval, FiniteSet, ProductSet
>>> I = Interval(0, 5); S = FiniteSet(1, 2, 3)
>>> ProductSet(I, S)
ProductSet(Interval(0, 5), FiniteSet(1, 2, 3))
>>> (2, 2) in ProductSet(I, S)
True
>>> Interval(0, 1) * Interval(0, 1) # The unit square
ProductSet(Interval(0, 1), Interval(0, 1))
>>> coin = FiniteSet('H', 'T')
>>> set(coin**2)
{(H, H), (H, T), (T, H), (T, T)}
The Cartesian product is not commutative or associative, e.g.:
>>> I*S == S*I
False
>>> (I*I)*I == I*(I*I)
False
Notes
=====
- Passes most operations down to the argument sets
References
==========
.. [1] https://en.wikipedia.org/wiki/Cartesian_product
"""
is_ProductSet = True
def __new__(cls, *sets, **assumptions):
if len(sets) == 1 and iterable(sets[0]) and not isinstance(sets[0], (Set, set)):
SymPyDeprecationWarning(
feature="ProductSet(iterable)",
useinstead="ProductSet(*iterable)",
issue=17557,
deprecated_since_version="1.5"
).warn()
sets = tuple(sets[0])
sets = [sympify(s) for s in sets]
if not all(isinstance(s, Set) for s in sets):
raise TypeError("Arguments to ProductSet should be of type Set")
# Nullary product of sets is *not* the empty set
if len(sets) == 0:
return FiniteSet(())
if S.EmptySet in sets:
return S.EmptySet
return Basic.__new__(cls, *sets, **assumptions)
@property
def sets(self):
return self.args
def flatten(self):
def _flatten(sets):
for s in sets:
if s.is_ProductSet:
for s2 in _flatten(s.sets):
yield s2
else:
yield s
return ProductSet(*_flatten(self.sets))
def _eval_Eq(self, other):
if not other.is_ProductSet:
return
if len(self.sets) != len(other.sets):
return false
eqs = (Eq(x, y) for x, y in zip(self.sets, other.sets))
return tfn[fuzzy_and(map(fuzzy_bool, eqs))]
def _contains(self, element):
"""
'in' operator for ProductSets
Examples
========
>>> from sympy import Interval
>>> (2, 3) in Interval(0, 5) * Interval(0, 5)
True
>>> (10, 10) in Interval(0, 5) * Interval(0, 5)
False
Passes operation on to constituent sets
"""
if element.is_Symbol:
return None
if not isinstance(element, Tuple) or len(element) != len(self.sets):
return False
return fuzzy_and(s._contains(e) for s, e in zip(self.sets, element))
def as_relational(self, *symbols):
symbols = [_sympify(s) for s in symbols]
if len(symbols) != len(self.sets) or not all(
i.is_Symbol for i in symbols):
raise ValueError(
'number of symbols must match the number of sets')
return And(*[s.as_relational(i) for s, i in zip(self.sets, symbols)])
@property
def _boundary(self):
return Union(*(ProductSet(*(b + b.boundary if i != j else b.boundary
for j, b in enumerate(self.sets)))
for i, a in enumerate(self.sets)))
@property
def is_iterable(self):
"""
A property method which tests whether a set is iterable or not.
Returns True if set is iterable, otherwise returns False.
Examples
========
>>> from sympy import FiniteSet, Interval, ProductSet
>>> I = Interval(0, 1)
>>> A = FiniteSet(1, 2, 3, 4, 5)
>>> I.is_iterable
False
>>> A.is_iterable
True
"""
return all(set.is_iterable for set in self.sets)
def __iter__(self):
"""
A method which implements is_iterable property method.
If self.is_iterable returns True (all constituent sets are iterable),
then return the Cartesian Product. Otherwise, raise TypeError.
"""
return iproduct(*self.sets)
@property
def is_empty(self):
return fuzzy_or(s.is_empty for s in self.sets)
@property
def is_finite_set(self):
all_finite = fuzzy_and(s.is_finite_set for s in self.sets)
return fuzzy_or([self.is_empty, all_finite])
@property
def _measure(self):
measure = 1
for s in self.sets:
measure *= s.measure
return measure
def __len__(self):
return reduce(lambda a, b: a*b, (len(s) for s in self.args))
def __bool__(self):
return all([bool(s) for s in self.sets])
__nonzero__ = __bool__
class Interval(Set, EvalfMixin):
"""
Represents a real interval as a Set.
Usage:
Returns an interval with end points "start" and "end".
For left_open=True (default left_open is False) the interval
will be open on the left. Similarly, for right_open=True the interval
will be open on the right.
Examples
========
>>> from sympy import Symbol, Interval
>>> Interval(0, 1)
Interval(0, 1)
>>> Interval.Ropen(0, 1)
Interval.Ropen(0, 1)
>>> Interval.Lopen(0, 1)
Interval.Lopen(0, 1)
>>> Interval.open(0, 1)
Interval.open(0, 1)
>>> a = Symbol('a', real=True)
>>> Interval(0, a)
Interval(0, a)
Notes
=====
- Only real end points are supported
- Interval(a, b) with a > b will return the empty set
- Use the evalf() method to turn an Interval into an mpmath
'mpi' interval instance
References
==========
.. [1] https://en.wikipedia.org/wiki/Interval_%28mathematics%29
"""
is_Interval = True
def __new__(cls, start, end, left_open=False, right_open=False):
start = _sympify(start)
end = _sympify(end)
left_open = _sympify(left_open)
right_open = _sympify(right_open)
if not all(isinstance(a, (type(true), type(false)))
for a in [left_open, right_open]):
raise NotImplementedError(
"left_open and right_open can have only true/false values, "
"got %s and %s" % (left_open, right_open))
# Only allow real intervals
if fuzzy_not(fuzzy_and(i.is_extended_real for i in (start, end, end-start))):
raise ValueError("Non-real intervals are not supported")
# evaluate if possible
if (end < start) == True:
return S.EmptySet
elif (end - start).is_negative:
return S.EmptySet
if end == start and (left_open or right_open):
return S.EmptySet
if end == start and not (left_open or right_open):
if start is S.Infinity or start is S.NegativeInfinity:
return S.EmptySet
return FiniteSet(end)
# Make sure infinite interval end points are open.
if start is S.NegativeInfinity:
left_open = true
if end is S.Infinity:
right_open = true
if start == S.Infinity or end == S.NegativeInfinity:
return S.EmptySet
return Basic.__new__(cls, start, end, left_open, right_open)
@property
def start(self):
"""
The left end point of 'self'.
This property takes the same value as the 'inf' property.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).start
0
"""
return self._args[0]
_inf = left = start
@classmethod
def open(cls, a, b):
"""Return an interval including neither boundary."""
return cls(a, b, True, True)
@classmethod
def Lopen(cls, a, b):
"""Return an interval not including the left boundary."""
return cls(a, b, True, False)
@classmethod
def Ropen(cls, a, b):
"""Return an interval not including the right boundary."""
return cls(a, b, False, True)
@property
def end(self):
"""
The right end point of 'self'.
This property takes the same value as the 'sup' property.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).end
1
"""
return self._args[1]
_sup = right = end
@property
def left_open(self):
"""
True if 'self' is left-open.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1, left_open=True).left_open
True
>>> Interval(0, 1, left_open=False).left_open
False
"""
return self._args[2]
@property
def right_open(self):
"""
True if 'self' is right-open.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1, right_open=True).right_open
True
>>> Interval(0, 1, right_open=False).right_open
False
"""
return self._args[3]
@property
def is_empty(self):
if self.left_open or self.right_open:
cond = self.start >= self.end # One/both bounds open
else:
cond = self.start > self.end # Both bounds closed
return fuzzy_bool(cond)
@property
def is_finite_set(self):
return self.measure.is_zero
def _complement(self, other):
if other == S.Reals:
a = Interval(S.NegativeInfinity, self.start,
True, not self.left_open)
b = Interval(self.end, S.Infinity, not self.right_open, True)
return Union(a, b)
if isinstance(other, FiniteSet):
nums = [m for m in other.args if m.is_number]
if nums == []:
return None
return Set._complement(self, other)
@property
def _boundary(self):
finite_points = [p for p in (self.start, self.end)
if abs(p) != S.Infinity]
return FiniteSet(*finite_points)
def _contains(self, other):
if (not isinstance(other, Expr) or other is S.NaN
or other.is_real is False):
return false
if self.start is S.NegativeInfinity and self.end is S.Infinity:
if other.is_real is not None:
return other.is_real
d = Dummy()
return self.as_relational(d).subs(d, other)
def as_relational(self, x):
"""Rewrite an interval in terms of inequalities and logic operators."""
x = sympify(x)
if self.right_open:
right = x < self.end
else:
right = x <= self.end
if self.left_open:
left = self.start < x
else:
left = self.start <= x
return And(left, right)
@property
def _measure(self):
return self.end - self.start
def to_mpi(self, prec=53):
return mpi(mpf(self.start._eval_evalf(prec)),
mpf(self.end._eval_evalf(prec)))
def _eval_evalf(self, prec):
return Interval(self.left._evalf(prec), self.right._evalf(prec),
left_open=self.left_open, right_open=self.right_open)
def _is_comparable(self, other):
is_comparable = self.start.is_comparable
is_comparable &= self.end.is_comparable
is_comparable &= other.start.is_comparable
is_comparable &= other.end.is_comparable
return is_comparable
@property
def is_left_unbounded(self):
"""Return ``True`` if the left endpoint is negative infinity. """
return self.left is S.NegativeInfinity or self.left == Float("-inf")
@property
def is_right_unbounded(self):
"""Return ``True`` if the right endpoint is positive infinity. """
return self.right is S.Infinity or self.right == Float("+inf")
def _eval_Eq(self, other):
if not isinstance(other, Interval):
if isinstance(other, FiniteSet):
return false
elif isinstance(other, Set):
return None
return false
return And(Eq(self.left, other.left),
Eq(self.right, other.right),
self.left_open == other.left_open,
self.right_open == other.right_open)
class Union(Set, LatticeOp, EvalfMixin):
"""
Represents a union of sets as a :class:`Set`.
Examples
========
>>> from sympy import Union, Interval
>>> Union(Interval(1, 2), Interval(3, 4))
Union(Interval(1, 2), Interval(3, 4))
The Union constructor will always try to merge overlapping intervals,
if possible. For example:
>>> Union(Interval(1, 2), Interval(2, 3))
Interval(1, 3)
See Also
========
Intersection
References
==========
.. [1] https://en.wikipedia.org/wiki/Union_%28set_theory%29
"""
is_Union = True
@property
def identity(self):
return S.EmptySet
@property
def zero(self):
return S.UniversalSet
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', global_parameters.evaluate)
# flatten inputs to merge unions and iterables
args = _sympify(args)
# Reduce sets using known rules
if evaluate:
args = list(cls._new_args_filter(args))
return simplify_union(args)
args = list(ordered(args, Set._infimum_key))
obj = Basic.__new__(cls, *args)
obj._argset = frozenset(args)
return obj
@property
def args(self):
return self._args
def _complement(self, universe):
# DeMorgan's Law
return Intersection(s.complement(universe) for s in self.args)
@property
def _inf(self):
# We use Min so that inf is meaningful in combination with symbolic
# interval end points.
from sympy.functions.elementary.miscellaneous import Min
return Min(*[set.inf for set in self.args])
@property
def _sup(self):
# We use Max so that sup is meaningful in combination with symbolic
# end points.
from sympy.functions.elementary.miscellaneous import Max
return Max(*[set.sup for set in self.args])
@property
def is_empty(self):
return fuzzy_and(set.is_empty for set in self.args)
@property
def is_finite_set(self):
return fuzzy_and(set.is_finite_set for set in self.args)
@property
def _measure(self):
# Measure of a union is the sum of the measures of the sets minus
# the sum of their pairwise intersections plus the sum of their
# triple-wise intersections minus ... etc...
# Sets is a collection of intersections and a set of elementary
# sets which make up those intersections (called "sos" for set of sets)
# An example element of this list might be:
# ( {A,B,C}, A.intersect(B).intersect(C) )
# Start with just elementary sets ( ({A}, A), ({B}, B), ... )
# Then get and subtract ( ({A,B}, (A int B), ... ) while non-zero
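# For example, Union(Interval(0, 2), Interval(1, 3)).measure is computed
# as measure([0, 2]) + measure([1, 3]) - measure([1, 2]) = 2 + 2 - 1 = 3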
sets = [(FiniteSet(s), s) for s in self.args]
measure = 0
parity = 1
while sets:
# Add up the measure of these sets and add or subtract it to total
measure += parity * sum(inter.measure for sos, inter in sets)
# For each intersection in sets, compute the intersection with every
# other set not already part of the intersection.
sets = ((sos + FiniteSet(newset), newset.intersect(intersection))
for sos, intersection in sets for newset in self.args
if newset not in sos)
# Clear out sets with no measure
sets = [(sos, inter) for sos, inter in sets if inter.measure != 0]
# Clear out duplicates
sos_list = []
sets_list = []
for set in sets:
if set[0] in sos_list:
continue
else:
sos_list.append(set[0])
sets_list.append(set)
sets = sets_list
# Flip Parity - next time subtract/add if we added/subtracted here
parity *= -1
return measure
@property
def _boundary(self):
def boundary_of_set(i):
""" The boundary of set i minus interior of all other sets """
b = self.args[i].boundary
for j, a in enumerate(self.args):
if j != i:
b = b - a.interior
return b
return Union(*map(boundary_of_set, range(len(self.args))))
def _contains(self, other):
return Or(*[s.contains(other) for s in self.args])
def is_subset(self, other):
return fuzzy_and(s.is_subset(other) for s in self.args)
def as_relational(self, symbol):
"""Rewrite a Union in terms of equalities and logic operators. """
if all(isinstance(i, (FiniteSet, Interval)) for i in self.args):
if len(self.args) == 2:
a, b = self.args
if (a.sup == b.inf and a.inf is S.NegativeInfinity
and b.sup is S.Infinity):
return And(Ne(symbol, a.sup), symbol < b.sup, symbol > a.inf)
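# e.g. Union(Interval.open(-oo, 1), Interval.open(1, oo)).as_relational(x)
# gives Ne(x, 1) & (x < oo) & (x > -oo), i.e. the whole line minus a point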
return Or(*[set.as_relational(symbol) for set in self.args])
raise NotImplementedError('relational of Union with non-Intervals')
@property
def is_iterable(self):
return all(arg.is_iterable for arg in self.args)
def _eval_evalf(self, prec):
try:
return Union(*(set._eval_evalf(prec) for set in self.args))
except (TypeError, ValueError, NotImplementedError):
import sys
raise TypeError("Not all sets are evalf-able").with_traceback(
sys.exc_info()[2])
def __iter__(self):
return roundrobin(*(iter(arg) for arg in self.args))
class Intersection(Set, LatticeOp):
"""
Represents an intersection of sets as a :class:`Set`.
Examples
========
>>> from sympy import Intersection, Interval
>>> Intersection(Interval(1, 3), Interval(2, 4))
Interval(2, 3)
We often use the .intersect method
>>> Interval(1,3).intersect(Interval(2,4))
Interval(2, 3)
See Also
========
Union
References
==========
.. [1] https://en.wikipedia.org/wiki/Intersection_%28set_theory%29
"""
is_Intersection = True
@property
def identity(self):
return S.UniversalSet
@property
def zero(self):
return S.EmptySet
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', global_parameters.evaluate)
# flatten inputs to merge intersections and iterables
args = list(ordered(set(_sympify(args))))
# Reduce sets using known rules
if evaluate:
args = list(cls._new_args_filter(args))
return simplify_intersection(args)
args = list(ordered(args, Set._infimum_key))
obj = Basic.__new__(cls, *args)
obj._argset = frozenset(args)
return obj
@property
def args(self):
return self._args
@property
def is_iterable(self):
return any(arg.is_iterable for arg in self.args)
@property
def is_finite_set(self):
if fuzzy_or(arg.is_finite_set for arg in self.args):
return True
@property
def _inf(self):
raise NotImplementedError()
@property
def _sup(self):
raise NotImplementedError()
def _contains(self, other):
return And(*[set.contains(other) for set in self.args])
def __iter__(self):
sets_sift = sift(self.args, lambda x: x.is_iterable)
completed = False
candidates = sets_sift[True] + sets_sift[None]
finite_candidates, others = [], []
for candidate in candidates:
length = None
try:
length = len(candidate)
except TypeError:
others.append(candidate)
if length is not None:
finite_candidates.append(candidate)
finite_candidates.sort(key=len)
for s in finite_candidates + others:
other_sets = set(self.args) - set((s,))
other = Intersection(*other_sets, evaluate=False)
completed = True
for x in s:
try:
if x in other:
yield x
except TypeError:
completed = False
if completed:
return
if not completed:
if not candidates:
raise TypeError("None of the constituent sets are iterable")
raise TypeError(
"The computation could not be completed because "
"undecidable set membership was found in every candidate.")
@staticmethod
def _handle_finite_sets(args):
'''Simplify intersection of one or more FiniteSets and other sets'''
# First separate the FiniteSets from the others
fs_args, others = sift(args, lambda x: x.is_FiniteSet, binary=True)
# Let the caller handle intersection of non-FiniteSets
if not fs_args:
return
# Convert to Python sets and build the set of all elements
fs_sets = [set(fs) for fs in fs_args]
all_elements = reduce(lambda a, b: a | b, fs_sets, set())
# Extract elements that are definitely in or definitely not in the
# intersection. Here we check contains for all of args.
definite = set()
for e in all_elements:
inall = fuzzy_and(s.contains(e) for s in args)
if inall is True:
definite.add(e)
if inall is not None:
for s in fs_sets:
s.discard(e)
# At this point all elements in all of fs_sets are possibly in the
# intersection. In some cases this is because they are definitely in
# the intersection of the finite sets but it's not clear if they are
# members of others. We might have {m, n}, {m}, and Reals where we
# don't know if m or n is real. We want to remove n here but it is
# possibly in because it might be equal to m. So what we do now is
# extract the elements that are definitely in the remaining finite
# sets iteratively until we end up with {n}, {}. At that point if we
# get any empty set all remaining elements are discarded.
fs_elements = reduce(lambda a, b: a | b, fs_sets, set())
# Need fuzzy containment testing
fs_symsets = [FiniteSet(*s) for s in fs_sets]
while fs_elements:
for e in fs_elements:
infs = fuzzy_and(s.contains(e) for s in fs_symsets)
if infs is True:
definite.add(e)
if infs is not None:
for n, s in enumerate(fs_sets):
# Update Python set and FiniteSet
if e in s:
s.remove(e)
fs_symsets[n] = FiniteSet(*s)
fs_elements.remove(e)
break
# If we completed the for loop without removing anything we are
# done so quit the outer while loop
else:
break
# If any of the sets of remainder elements is empty then we discard
# all of them for the intersection.
if not all(fs_sets):
fs_sets = [set()]
# Here we fold back the definitely included elements into each fs.
# Since they are definitely included they must have been members of
# each FiniteSet to begin with. We could instead fold these in with a
# Union at the end to get e.g. {3}|({x}&{y}) rather than {3,x}&{3,y}.
if definite:
fs_sets = [fs | definite for fs in fs_sets]
if fs_sets == [set()]:
return S.EmptySet
sets = [FiniteSet(*s) for s in fs_sets]
# Any set in others is redundant if it contains all the elements that
# are in the finite sets so we don't need it in the Intersection
all_elements = reduce(lambda a, b: a | b, fs_sets, set())
is_redundant = lambda o: all(fuzzy_bool(o.contains(e)) for e in all_elements)
others = [o for o in others if not is_redundant(o)]
if others:
rest = Intersection(*others)
# XXX: Maybe this shortcut should be at the beginning. For large
# FiniteSets it could be much more efficient to process the other
# sets first...
if rest is S.EmptySet:
return S.EmptySet
# Flatten the Intersection
if rest.is_Intersection:
sets.extend(rest.args)
else:
sets.append(rest)
if len(sets) == 1:
return sets[0]
else:
return Intersection(*sets, evaluate=False)
def as_relational(self, symbol):
"""Rewrite an Intersection in terms of equalities and logic operators"""
return And(*[set.as_relational(symbol) for set in self.args])
class Complement(Set, EvalfMixin):
r"""Represents the set difference or relative complement of a set with
another set.
`A - B = \{x \in A \mid x \notin B\}`
Examples
========
>>> from sympy import Complement, FiniteSet
>>> Complement(FiniteSet(0, 1, 2), FiniteSet(1))
FiniteSet(0, 2)
See Also
========
Intersection, Union
References
==========
.. [1] http://mathworld.wolfram.com/ComplementSet.html
"""
is_Complement = True
def __new__(cls, a, b, evaluate=True):
if evaluate:
return Complement.reduce(a, b)
return Basic.__new__(cls, a, b)
@staticmethod
def reduce(A, B):
"""
Simplify a :class:`Complement`.
"""
if B == S.UniversalSet or A.is_subset(B):
return S.EmptySet
if isinstance(B, Union):
return Intersection(*(s.complement(A) for s in B.args))
result = B._complement(A)
if result is not None:
return result
else:
return Complement(A, B, evaluate=False)
def _contains(self, other):
A = self.args[0]
B = self.args[1]
return And(A.contains(other), Not(B.contains(other)))
def as_relational(self, symbol):
"""Rewrite a complement in terms of equalities and logic
operators"""
A, B = self.args
A_rel = A.as_relational(symbol)
B_rel = Not(B.as_relational(symbol))
return And(A_rel, B_rel)
@property
def is_iterable(self):
if self.args[0].is_iterable:
return True
@property
def is_finite_set(self):
A, B = self.args
a_finite = A.is_finite_set
if a_finite is True:
return True
elif a_finite is False and B.is_finite_set:
return False
def __iter__(self):
A, B = self.args
for a in A:
if a not in B:
yield a
class EmptySet(Set, metaclass=Singleton):
"""
Represents the empty set. The empty set is available as a singleton
as S.EmptySet.
Examples
========
>>> from sympy import S, Interval
>>> S.EmptySet
EmptySet
>>> Interval(1, 2).intersect(S.EmptySet)
EmptySet
See Also
========
UniversalSet
References
==========
.. [1] https://en.wikipedia.org/wiki/Empty_set
"""
is_empty = True
is_finite_set = True
is_FiniteSet = True
@property # type: ignore
@deprecated(useinstead="is S.EmptySet or is_empty",
issue=16946, deprecated_since_version="1.5")
def is_EmptySet(self):
return True
@property
def _measure(self):
return 0
def _contains(self, other):
return false
def as_relational(self, symbol):
return false
def __len__(self):
return 0
def __iter__(self):
return iter([])
def _eval_powerset(self):
return FiniteSet(self)
@property
def _boundary(self):
return self
def _complement(self, other):
return other
def _symmetric_difference(self, other):
return other
class UniversalSet(Set, metaclass=Singleton):
"""
Represents the set of all things.
The universal set is available as a singleton as S.UniversalSet
Examples
========
>>> from sympy import S, Interval
>>> S.UniversalSet
UniversalSet
>>> Interval(1, 2).intersect(S.UniversalSet)
Interval(1, 2)
See Also
========
EmptySet
References
==========
.. [1] https://en.wikipedia.org/wiki/Universal_set
"""
is_UniversalSet = True
is_empty = False
is_finite_set = False
def _complement(self, other):
return S.EmptySet
def _symmetric_difference(self, other):
return other
@property
def _measure(self):
return S.Infinity
def _contains(self, other):
return true
def as_relational(self, symbol):
return true
@property
def _boundary(self):
return S.EmptySet
class FiniteSet(Set, EvalfMixin):
"""
Represents a finite set of discrete numbers
Examples
========
>>> from sympy import FiniteSet
>>> FiniteSet(1, 2, 3, 4)
FiniteSet(1, 2, 3, 4)
>>> 3 in FiniteSet(1, 2, 3, 4)
True
>>> members = [1, 2, 3, 4]
>>> f = FiniteSet(*members)
>>> f
FiniteSet(1, 2, 3, 4)
>>> f - FiniteSet(2)
FiniteSet(1, 3, 4)
>>> f + FiniteSet(2, 5)
FiniteSet(1, 2, 3, 4, 5)
References
==========
.. [1] https://en.wikipedia.org/wiki/Finite_set
"""
is_FiniteSet = True
is_iterable = True
is_empty = False
is_finite_set = True
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', global_parameters.evaluate)
if evaluate:
args = list(map(sympify, args))
if len(args) == 0:
return S.EmptySet
else:
args = list(map(sympify, args))
_args_set = set(args)
args = list(ordered(_args_set, Set._infimum_key))
obj = Basic.__new__(cls, *args)
obj._args_set = _args_set
return obj
def _eval_Eq(self, other):
if not isinstance(other, FiniteSet):
# XXX: If Interval(x, x, evaluate=False) worked then the line
# below would mean that
# FiniteSet(x) & Interval(x, x, evaluate=False) -> false
if isinstance(other, Interval):
return false
elif isinstance(other, Set):
return None
return false
def all_in_both():
s_set = set(self.args)
o_set = set(other.args)
yield fuzzy_and(self._contains(e) for e in o_set - s_set)
yield fuzzy_and(other._contains(e) for e in s_set - o_set)
return tfn[fuzzy_and(all_in_both())]
def __iter__(self):
return iter(self.args)
def _complement(self, other):
if isinstance(other, Interval):
# Splitting in sub-intervals is only done for S.Reals;
# other cases that need splitting will first pass through
# Set._complement().
nums, syms = [], []
for m in self.args:
if m.is_number and m.is_real:
nums.append(m)
elif m.is_real == False:
pass # drop non-reals
else:
syms.append(m) # various symbolic expressions
if other == S.Reals and nums != []:
nums.sort()
intervals = [] # Build up a list of intervals between the elements
intervals += [Interval(S.NegativeInfinity, nums[0], True, True)]
for a, b in zip(nums[:-1], nums[1:]):
intervals.append(Interval(a, b, True, True)) # both open
intervals.append(Interval(nums[-1], S.Infinity, True, True))
if syms != []:
return Complement(Union(*intervals, evaluate=False),
FiniteSet(*syms), evaluate=False)
else:
return Union(*intervals, evaluate=False)
elif nums == []: # no splitting necessary or possible:
if syms:
return Complement(other, FiniteSet(*syms), evaluate=False)
else:
return other
elif isinstance(other, FiniteSet):
unk = []
for i in self:
c = sympify(other.contains(i))
if c is not S.true and c is not S.false:
unk.append(i)
unk = FiniteSet(*unk)
if unk == self:
return
not_true = []
for i in other:
c = sympify(self.contains(i))
if c is not S.true:
not_true.append(i)
return Complement(FiniteSet(*not_true), unk)
return Set._complement(self, other)
def _contains(self, other):
"""
Tests whether an element, other, is in the set.
The actual test is for mathematical equality (as opposed to
syntactical equality). In the worst case all elements of the
set must be checked.
Examples
========
>>> from sympy import FiniteSet
>>> 1 in FiniteSet(1, 2)
True
>>> 5 in FiniteSet(1, 2)
False
"""
if other in self._args_set:
return True
else:
# evaluate=True is needed to override evaluate=False context;
# we need Eq to do the evaluation
return fuzzy_or(fuzzy_bool(Eq(e, other, evaluate=True))
for e in self.args)
def _eval_is_subset(self, other):
return fuzzy_and(other._contains(e) for e in self.args)
@property
def _boundary(self):
return self
@property
def _inf(self):
from sympy.functions.elementary.miscellaneous import Min
return Min(*self)
@property
def _sup(self):
from sympy.functions.elementary.miscellaneous import Max
return Max(*self)
@property
def measure(self):
return 0
def __len__(self):
return len(self.args)
def as_relational(self, symbol):
"""Rewrite a FiniteSet in terms of equalities and logic operators. """
from sympy.core.relational import Eq
return Or(*[Eq(symbol, elem) for elem in self])
def compare(self, other):
return (hash(self) - hash(other))
def _eval_evalf(self, prec):
return FiniteSet(*[elem._evalf(prec) for elem in self])
@property
def _sorted_args(self):
return self.args
def _eval_powerset(self):
return self.func(*[self.func(*s) for s in subsets(self.args)])
def _eval_rewrite_as_PowerSet(self, *args, **kwargs):
"""Rewriting method for a finite set to a power set."""
from .powerset import PowerSet
is2pow = lambda n: bool(n and not n & (n - 1))
if not is2pow(len(self)):
return None
fs_test = lambda arg: isinstance(arg, Set) and arg.is_FiniteSet
if not all((fs_test(arg) for arg in args)):
return None
biggest = max(args, key=len)
for arg in subsets(biggest.args):
arg_set = FiniteSet(*arg)
if arg_set not in args:
return None
return PowerSet(biggest)
def __ge__(self, other):
if not isinstance(other, Set):
raise TypeError("Invalid comparison of set with %s" % func_name(other))
return other.is_subset(self)
def __gt__(self, other):
if not isinstance(other, Set):
raise TypeError("Invalid comparison of set with %s" % func_name(other))
return self.is_proper_superset(other)
def __le__(self, other):
if not isinstance(other, Set):
raise TypeError("Invalid comparison of set with %s" % func_name(other))
return self.is_subset(other)
def __lt__(self, other):
if not isinstance(other, Set):
raise TypeError("Invalid comparison of set with %s" % func_name(other))
return self.is_proper_subset(other)
converter[set] = lambda x: FiniteSet(*x)
converter[frozenset] = lambda x: FiniteSet(*x)
class SymmetricDifference(Set):
"""Represents the set of elements which are in either of the
sets and not in their intersection.
Examples
========
>>> from sympy import SymmetricDifference, FiniteSet
>>> SymmetricDifference(FiniteSet(1, 2, 3), FiniteSet(3, 4, 5))
FiniteSet(1, 2, 4, 5)
See Also
========
Complement, Union
References
==========
.. [1] https://en.wikipedia.org/wiki/Symmetric_difference
"""
is_SymmetricDifference = True
def __new__(cls, a, b, evaluate=True):
if evaluate:
return SymmetricDifference.reduce(a, b)
return Basic.__new__(cls, a, b)
@staticmethod
def reduce(A, B):
result = B._symmetric_difference(A)
if result is not None:
return result
else:
return SymmetricDifference(A, B, evaluate=False)
def as_relational(self, symbol):
"""Rewrite a symmetric_difference in terms of equalities and
logic operators"""
A, B = self.args
A_rel = A.as_relational(symbol)
B_rel = B.as_relational(symbol)
return Xor(A_rel, B_rel)
@property
def is_iterable(self):
if all(arg.is_iterable for arg in self.args):
return True
def __iter__(self):
args = self.args
union = roundrobin(*(iter(arg) for arg in args))
for item in union:
count = 0
for s in args:
if item in s:
count += 1
if count % 2 == 1:
yield item
def imageset(*args):
r"""
Return an image of the set under transformation ``f``.
If this function can't compute the image, it returns an
unevaluated ImageSet object.
.. math::
\{ f(x) \mid x \in \mathrm{self} \}
Examples
========
>>> from sympy import S, Interval, Symbol, imageset, sin, Lambda
>>> from sympy.abc import x, y
>>> imageset(x, 2*x, Interval(0, 2))
Interval(0, 4)
>>> imageset(lambda x: 2*x, Interval(0, 2))
Interval(0, 4)
>>> imageset(Lambda(x, sin(x)), Interval(-2, 1))
ImageSet(Lambda(x, sin(x)), Interval(-2, 1))
>>> imageset(sin, Interval(-2, 1))
ImageSet(Lambda(x, sin(x)), Interval(-2, 1))
>>> imageset(lambda y: x + y, Interval(-2, 1))
ImageSet(Lambda(y, x + y), Interval(-2, 1))
Expressions applied to the set of Integers are simplified
to show as few negatives as possible and linear expressions
are converted to a canonical form. If this is not desirable
then the unevaluated ImageSet should be used.
>>> imageset(x, -2*x + 5, S.Integers)
ImageSet(Lambda(x, 2*x + 1), Integers)
See Also
========
sympy.sets.fancysets.ImageSet
"""
from sympy.core import Lambda
from sympy.sets.fancysets import ImageSet
from sympy.sets.setexpr import set_function
if len(args) < 2:
raise ValueError('imageset expects at least 2 args, got: %s' % len(args))
if isinstance(args[0], (Symbol, tuple)) and len(args) > 2:
f = Lambda(args[0], args[1])
set_list = args[2:]
else:
f = args[0]
set_list = args[1:]
if isinstance(f, Lambda):
pass
elif callable(f):
nargs = getattr(f, 'nargs', {})
if nargs:
if len(nargs) != 1:
raise NotImplementedError(filldedent('''
This function can take more than 1 arg
but the potentially complicated set input
has not been analyzed at this point to
know its dimensions. TODO
'''))
N = nargs.args[0]
if N == 1:
s = 'x'
else:
s = [Symbol('x%i' % i) for i in range(1, N + 1)]
else:
s = inspect.signature(f).parameters
dexpr = _sympify(f(*[Dummy() for i in s]))
var = tuple(_uniquely_named_symbol(Symbol(i), dexpr) for i in s)
f = Lambda(var, f(*var))
else:
raise TypeError(filldedent('''
expecting lambda, Lambda, or FunctionClass,
not \'%s\'.''' % func_name(f)))
if any(not isinstance(s, Set) for s in set_list):
name = [func_name(s) for s in set_list]
raise ValueError(
'arguments after mapping should be sets, not %s' % name)
if len(set_list) == 1:
set = set_list[0]
try:
# TypeError if arg count != set dimensions
r = set_function(f, set)
if r is None:
raise TypeError
if not r:
return r
except TypeError:
r = ImageSet(f, set)
if isinstance(r, ImageSet):
f, set = r.args
if f.variables[0] == f.expr:
return set
if isinstance(set, ImageSet):
# XXX: Maybe this should just be:
# f2 = set.lamda
# fun = Lambda(f2.signature, f(*f2.expr))
# return imageset(fun, *set.base_sets)
if len(set.lamda.variables) == 1 and len(f.variables) == 1:
x = set.lamda.variables[0]
y = f.variables[0]
return imageset(
Lambda(x, f.expr.subs(y, set.lamda.expr)), *set.base_sets)
if r is not None:
return r
return ImageSet(f, *set_list)
def is_function_invertible_in_set(func, setv):
"""
Checks whether function ``func`` is invertible when the domain is
restricted to set ``setv``.
"""
from sympy import exp, log
# Functions known to always be invertible:
if func in (exp, log):
return True
u = Dummy("u")
fdiff = func(u).diff(u)
# monotonic functions:
# TODO: check subsets (`func` in `setv`)
if (fdiff > 0) == True or (fdiff < 0) == True:
return True
# TODO: support more
return None
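# For example, is_function_invertible_in_set(exp, Interval(0, 1)) returns True
# through the known-invertible shortcut, while a non-monotonic function such
# as sin over S.Reals falls through to the final ``return None``.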
def simplify_union(args):
"""
Simplify a :class:`Union` using known rules
We first start with global rules like 'Merge all FiniteSets'
Then we iterate through all pairs and ask the constituent sets if they
can simplify themselves with any other constituent. This process depends
on ``union_sets(a, b)`` functions.
"""
from sympy.sets.handlers.union import union_sets
# ===== Global Rules =====
if not args:
return S.EmptySet
for arg in args:
if not isinstance(arg, Set):
raise TypeError("Input args to Union must be Sets")
# Merge all finite sets
finite_sets = [x for x in args if x.is_FiniteSet]
if len(finite_sets) > 1:
a = (x for set in finite_sets for x in set)
finite_set = FiniteSet(*a)
args = [finite_set] + [x for x in args if not x.is_FiniteSet]
# ===== Pair-wise Rules =====
# Here we depend on rules built into the constituent sets
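# e.g. union_sets(Interval(1, 2), Interval(2, 3)) returns Interval(1, 3), so
# the pair is replaced by the merged interval and the scan starts over.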
args = set(args)
new_args = True
while new_args:
for s in args:
new_args = False
for t in args - set((s,)):
new_set = union_sets(s, t)
# This returns None if s does not know how to unify
# with t. Returns the new union otherwise
if new_set is not None:
if not isinstance(new_set, set):
new_set = set((new_set, ))
new_args = (args - set((s, t))).union(new_set)
break
if new_args:
args = new_args
break
if len(args) == 1:
return args.pop()
else:
return Union(*args, evaluate=False)
def simplify_intersection(args):
"""
Simplify an intersection using known rules
We first start with global rules like
'if any empty sets return empty set' and 'distribute any unions'
Then we iterate through all pairs and ask the constituent sets if they
can simplify themselves with any other constituent
"""
# ===== Global Rules =====
if not args:
return S.UniversalSet
for arg in args:
if not isinstance(arg, Set):
raise TypeError("Input args to Union must be Sets")
# If any EmptySets return EmptySet
if S.EmptySet in args:
return S.EmptySet
# Handle Finite sets
rv = Intersection._handle_finite_sets(args)
if rv is not None:
return rv
# If any of the sets are unions, return a Union of Intersections
for s in args:
if s.is_Union:
other_sets = set(args) - set((s,))
if len(other_sets) > 0:
other = Intersection(*other_sets)
return Union(*(Intersection(arg, other) for arg in s.args))
else:
return Union(*[arg for arg in s.args])
for s in args:
if s.is_Complement:
args.remove(s)
other_sets = args + [s.args[0]]
return Complement(Intersection(*other_sets), s.args[1])
from sympy.sets.handlers.intersection import intersection_sets
# At this stage we are guaranteed not to have any
# EmptySets, FiniteSets, or Unions in the intersection
# ===== Pair-wise Rules =====
# Here we depend on rules built into the constituent sets
args = set(args)
new_args = True
while new_args:
for s in args:
new_args = False
for t in args - set((s,)):
new_set = intersection_sets(s, t)
# This returns None if s does not know how to intersect
# with t. Returns the newly intersected set otherwise
if new_set is not None:
new_args = (args - set((s, t))).union(set((new_set, )))
break
if new_args:
args = new_args
break
if len(args) == 1:
return args.pop()
else:
return Intersection(*args, evaluate=False)
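# Analogous hedged sketch for intersections; the pairwise work is delegated
# to ``intersection_sets``:
#
#     >>> from sympy import Interval
#     >>> simplify_intersection([Interval(0, 2), Interval(1, 3)])
#     Interval(1, 2)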
def _handle_finite_sets(op, x, y, commutative):
# Handle finite sets:
fs_args, other = sift([x, y], lambda x: isinstance(x, FiniteSet), binary=True)
if len(fs_args) == 2:
return FiniteSet(*[op(i, j) for i in fs_args[0] for j in fs_args[1]])
elif len(fs_args) == 1:
sets = [_apply_operation(op, other[0], i, commutative) for i in fs_args[0]]
return Union(*sets)
else:
return None
def _apply_operation(op, x, y, commutative):
from sympy.sets import ImageSet
from sympy import symbols, Lambda
d = Dummy('d')
out = _handle_finite_sets(op, x, y, commutative)
if out is None:
out = op(x, y)
if out is None and commutative:
out = op(y, x)
if out is None:
_x, _y = symbols("x y")
if isinstance(x, Set) and not isinstance(y, Set):
out = ImageSet(Lambda(d, op(d, y)), x).doit()
elif not isinstance(x, Set) and isinstance(y, Set):
out = ImageSet(Lambda(d, op(x, d)), y).doit()
else:
out = ImageSet(Lambda((_x, _y), op(_x, _y)), x, y)
return out
def set_add(x, y):
from sympy.sets.handlers.add import _set_add
return _apply_operation(_set_add, x, y, commutative=True)
def set_sub(x, y):
from sympy.sets.handlers.add import _set_sub
return _apply_operation(_set_sub, x, y, commutative=False)
def set_mul(x, y):
from sympy.sets.handlers.mul import _set_mul
return _apply_operation(_set_mul, x, y, commutative=True)
def set_div(x, y):
from sympy.sets.handlers.mul import _set_div
return _apply_operation(_set_div, x, y, commutative=False)
def set_pow(x, y):
from sympy.sets.handlers.power import _set_pow
return _apply_operation(_set_pow, x, y, commutative=False)
def set_function(f, x):
from sympy.sets.handlers.functions import _set_function
return _set_function(f, x)
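# A hedged illustration of how the arithmetic helpers above behave: with two
# FiniteSets the result comes from ``_handle_finite_sets``, element by element
# (exact output depends on the dispatch handlers in ``sympy.sets.handlers``):
#
#     from sympy import FiniteSet
#     set_add(FiniteSet(1, 2), FiniteSet(10))    # expected: {11, 12}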
|
19522341ae28760b75bce272897d9e5eac10a5917b907fdbeab2d721b66ce093 | """Plotting module for Sympy.
A plot is represented by the ``Plot`` class that contains a reference to the
backend and a list of the data series to be plotted. The data series are
instances of classes meant to simplify getting points and meshes from sympy
expressions. ``plot_backends`` is a dictionary with all the backends.
This module provides only the essentials. For anything fancier, use the
backend directly: the backend wrapper for every plot is available through the
``_backend`` attribute, and the data series classes have useful methods such
as ``get_points``, ``get_segments`` and ``get_meshes`` that can help if you
wish to use another plotting library.
In particular, if you need publication-ready graphs and this module is not
enough for you, just get the ``_backend`` attribute and add whatever you want
directly to it. In the case of matplotlib (the most common way to plot data in
Python), copy ``_backend.fig``, which is the figure, and ``_backend.ax``,
which is the axes, and work on them as you would on any other matplotlib
objects.
Simplicity of code is valued much more highly than performance, so do not use
this module if you care about performance: a new backend instance is
initialized every time you call ``show()`` and the old one is left to the
garbage collector.
"""
from __future__ import print_function, division
import warnings
from sympy import sympify, Expr, Tuple, Dummy, Symbol
from sympy.external import import_module
from sympy.core.function import arity
from sympy.core.compatibility import Callable
from sympy.utilities.iterables import is_sequence
from .experimental_lambdify import (vectorized_lambdify, lambdify)
# N.B.
# When changing the minimum module version for matplotlib, please change
# the same in the `SymPyDocTestFinder`` in `sympy/testing/runtests.py`
# Backend specific imports - textplot
from sympy.plotting.textplot import textplot
# Global variable
# Set to False when running tests / doctests so that the plots don't show.
_show = True
def unset_show():
"""
Disable show(). For use in the tests.
"""
global _show
_show = False
##############################################################################
# The public interface
##############################################################################
class Plot(object):
"""The central class of the plotting module.
For interactive work the function ``plot`` is better suited.
This class permits the plotting of sympy expressions using numerous
backends (matplotlib, textplot, the old pyglet module for sympy, Google
charts api, etc).
The figure can contain an arbitrary number of plots of sympy expressions,
lists of coordinates of points, etc. Plot has a private attribute _series that
contains all data series to be plotted (expressions for lines or surfaces,
lists of points, etc (all subclasses of BaseSeries)). Those data series are
instances of classes not imported by ``from sympy import *``.
The customization of the figure happens on two levels: global options that
concern the figure as a whole (e.g. title, xlabel, scale, etc.), and
per-data-series options (e.g. name) and aesthetics (e.g. color, point shape,
line type, etc.).
The difference between options and aesthetics is that an aesthetic can be
a function of the coordinates (or parameters in a parametric plot). The
supported values for an aesthetic are:
- None (the backend uses default values)
- a constant
- a function of one variable (the first coordinate or parameter)
- a function of two variables (the first and second coordinate or
parameters)
- a function of three variables (only in nonparametric 3D plots)
Their implementation depends on the backend so they may not work in some
backends.
If the plot is parametric and the arity of the aesthetic function permits
it the aesthetic is calculated over parameters and not over coordinates.
If the arity does not permit calculation over parameters the calculation is
done over coordinates.
Only cartesian coordinates are supported for the moment, but you can use
the parametric plots to plot in polar, spherical and cylindrical
coordinates.
The arguments for the constructor Plot must be subclasses of BaseSeries.
Any global option can be specified as a keyword argument.
The global options for a figure are:
- title : str
- xlabel : str
- ylabel : str
- legend : bool
- xscale : {'linear', 'log'}
- yscale : {'linear', 'log'}
- axis : bool
- axis_center : tuple of two floats or {'center', 'auto'}
- xlim : tuple of two floats
- ylim : tuple of two floats
- aspect_ratio : tuple of two floats or {'auto'}
- autoscale : bool
- margin : float in [0, 1]
- backend : {'default', 'matplotlib', 'text'}
The per data series options and aesthetics are:
There are none in the base series. See below for options for subclasses.
Some data series support additional aesthetics or options:
ListSeries, LineOver1DRangeSeries, Parametric2DLineSeries,
Parametric3DLineSeries support the following:
Aesthetics:
- line_color : function which returns a float.
options:
- label : str
- steps : bool
- integers_only : bool
SurfaceOver2DRangeSeries, ParametricSurfaceSeries support the following:
aesthetics:
- surface_color : function which returns a float.
"""
def __init__(self, *args, **kwargs):
super(Plot, self).__init__()
# Options for the graph as a whole.
# The possible values for each option are described in the docstring of
# Plot. They are based purely on convention, no checking is done.
self.title = None
self.xlabel = None
self.ylabel = None
self.aspect_ratio = 'auto'
self.xlim = None
self.ylim = None
self.axis_center = 'auto'
self.axis = True
self.xscale = 'linear'
self.yscale = 'linear'
self.legend = False
self.autoscale = True
self.margin = 0
self.annotations = None
self.markers = None
self.rectangles = None
self.fill = None
# Contains the data objects to be plotted. The backend should be smart
# enough to iterate over this list.
self._series = []
self._series.extend(args)
# The backend type. On every show() a new backend instance is created
# in self._backend which is tightly coupled to the Plot instance
# (thanks to the parent attribute of the backend).
self.backend = plot_backends[kwargs.pop('backend', 'default')]
# The keyword arguments should only contain options for the plot.
for key, val in kwargs.items():
if hasattr(self, key):
setattr(self, key, val)
def show(self):
# TODO move this to the backend (also for save)
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.show()
def save(self, path):
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.save(path)
def __str__(self):
series_strs = [('[%d]: ' % i) + str(s)
for i, s in enumerate(self._series)]
return 'Plot object containing:\n' + '\n'.join(series_strs)
def __getitem__(self, index):
return self._series[index]
def __setitem__(self, index, *args):
if len(args) == 1 and isinstance(args[0], BaseSeries):
self._series[index] = args[0]
def __delitem__(self, index):
del self._series[index]
def append(self, arg):
"""Adds an element from a plot's series to an existing plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot's first series object to the first, use the
``append`` method, like so:
.. plot::
:format: doctest
:include-source: True
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x, show=False)
>>> p2 = plot(x, show=False)
>>> p1.append(p2[0])
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
>>> p1.show()
See Also
========
extend
"""
if isinstance(arg, BaseSeries):
self._series.append(arg)
else:
raise TypeError('Must specify element of plot to append.')
def extend(self, arg):
"""Adds all series from another plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot to the first, use the ``extend`` method, like so:
.. plot::
:format: doctest
:include-source: True
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x**2, show=False)
>>> p2 = plot(x, -x, show=False)
>>> p1.extend(p2)
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
[2]: cartesian line: -x for x over (-10.0, 10.0)
>>> p1.show()
"""
if isinstance(arg, Plot):
self._series.extend(arg._series)
elif is_sequence(arg):
self._series.extend(arg)
else:
raise TypeError('Expecting Plot or sequence of BaseSeries')
class PlotGrid(object):
"""This class helps to plot subplots from already created sympy plots
in a single figure.
Examples
========
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> from sympy import symbols
>>> from sympy.plotting import plot, plot3d, PlotGrid
>>> x, y = symbols('x, y')
>>> p1 = plot(x, x**2, x**3, (x, -5, 5))
>>> p2 = plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))
>>> p3 = plot(x**3, (x, -5, 5))
>>> p4 = plot3d(x*y, (x, -5, 5), (y, -5, 5))
Plotting vertically in a single line:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> PlotGrid(2, 1 , p1, p2)
PlotGrid object containing:
Plot[0]:Plot object containing:
[0]: cartesian line: x for x over (-5.0, 5.0)
[1]: cartesian line: x**2 for x over (-5.0, 5.0)
[2]: cartesian line: x**3 for x over (-5.0, 5.0)
Plot[1]:Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
Plotting horizontally in a single line:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> PlotGrid(1, 3 , p2, p3, p4)
PlotGrid object containing:
Plot[0]:Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
Plot[1]:Plot object containing:
[0]: cartesian line: x**3 for x over (-5.0, 5.0)
Plot[2]:Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Plotting in a grid form:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> PlotGrid(2, 2, p1, p2 ,p3, p4)
PlotGrid object containing:
Plot[0]:Plot object containing:
[0]: cartesian line: x for x over (-5.0, 5.0)
[1]: cartesian line: x**2 for x over (-5.0, 5.0)
[2]: cartesian line: x**3 for x over (-5.0, 5.0)
Plot[1]:Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
Plot[2]:Plot object containing:
[0]: cartesian line: x**3 for x over (-5.0, 5.0)
Plot[3]:Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
"""
def __init__(self, nrows, ncolumns, *args, **kwargs):
"""
Parameters
==========
nrows : The number of rows that should be in the grid of the
required subplot
ncolumns : The number of columns that should be in the grid
of the required subplot
nrows and ncolumns together define the required grid
Arguments
=========
A list of predefined plot objects entered in a row-wise sequence
i.e. plot objects which are to be in the top row of the required
grid are written first, then the second row objects and so on
Keyword arguments
=================
show : Boolean
The default value is set to ``True``. Set show to ``False`` and
the function will not display the subplot. The returned instance
of the ``PlotGrid`` class can then be used to save or display the
plot by calling the ``save()`` and ``show()`` methods
respectively.
"""
self.nrows = nrows
self.ncolumns = ncolumns
self._series = []
self.args = args
for arg in args:
self._series.append(arg._series)
self.backend = DefaultBackend
show = kwargs.pop('show', True)
if show:
self.show()
def show(self):
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.show()
def save(self, path):
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.save(path)
def __str__(self):
plot_strs = [('Plot[%d]:' % i) + str(plot)
for i, plot in enumerate(self.args)]
return 'PlotGrid object containing:\n' + '\n'.join(plot_strs)
##############################################################################
# Data Series
##############################################################################
#TODO more general way to calculate aesthetics (see get_color_array)
### The base class for all series
class BaseSeries(object):
"""Base class for the data objects containing stuff to be plotted.
The backend should check if it supports the data series that it's given.
(eg TextBackend supports only LineOver1DRange).
It's the backend responsibility to know how to use the class of
data series that it's given.
Some data series classes are grouped (using a class attribute like is_2Dline)
according to the api they present (based only on convention). The backend is
not obliged to use that api (eg. The LineOver1DRange belongs to the
is_2Dline group and presents the get_points method, but the
TextBackend does not use the get_points method).
"""
# Some flags follow. The rationale for using flags instead of checking base
# classes is that setting multiple flags is simpler than multiple
# inheritance.
is_2Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y, list_z
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dsurface = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_contour = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_implicit = False
# Some of the backends expect:
# - get_meshes returning mesh_x (1D array), mesh_y (1D array),
# mesh_z (2D np.array)
# - get_points an alias for get_meshes
# Different from is_contour as the colormap in backend will be
# different
is_parametric = False
# The calculation of aesthetics expects:
# - get_parameter_points returning one or two np.arrays (1D or 2D)
# used for calculation aesthetics
def __init__(self):
super(BaseSeries, self).__init__()
@property
def is_3D(self):
flags3D = [
self.is_3Dline,
self.is_3Dsurface
]
return any(flags3D)
@property
def is_line(self):
flagslines = [
self.is_2Dline,
self.is_3Dline
]
return any(flagslines)
### 2D lines
class Line2DBaseSeries(BaseSeries):
"""A base class for 2D lines.
- adding the label, steps and only_integers options
- making is_2Dline true
- defining get_segments and get_color_array
"""
is_2Dline = True
_dim = 2
def __init__(self):
super(Line2DBaseSeries, self).__init__()
self.label = None
self.steps = False
self.only_integers = False
self.line_color = None
def get_segments(self):
np = import_module('numpy')
points = self.get_points()
if self.steps is True:
x = np.array((points[0], points[0])).T.flatten()[1:]
y = np.array((points[1], points[1])).T.flatten()[:-1]
points = (x, y)
points = np.ma.array(points).T.reshape(-1, 1, self._dim)
return np.ma.concatenate([points[:-1], points[1:]], axis=1)
def get_color_array(self):
np = import_module('numpy')
c = self.line_color
if hasattr(c, '__call__'):
f = np.vectorize(c)
nargs = arity(c)
if nargs == 1 and self.is_parametric:
x = self.get_parameter_points()
return f(centers_of_segments(x))
else:
variables = list(map(centers_of_segments, self.get_points()))
if nargs == 1:
return f(variables[0])
elif nargs == 2:
return f(*variables[:2])
else: # only if the line is 3D (otherwise raises an error)
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class List2DSeries(Line2DBaseSeries):
"""Representation for a line consisting of list of points."""
def __init__(self, list_x, list_y):
np = import_module('numpy')
super(List2DSeries, self).__init__()
self.list_x = np.array(list_x)
self.list_y = np.array(list_y)
self.label = 'list'
def __str__(self):
return 'list plot'
def get_points(self):
return (self.list_x, self.list_y)
class LineOver1DRangeSeries(Line2DBaseSeries):
"""Representation for a line consisting of a SymPy expression over a range."""
def __init__(self, expr, var_start_end, **kwargs):
super(LineOver1DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.label = str(self.expr)
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
self.xscale = kwargs.get('xscale', 'linear')
def __str__(self):
return 'cartesian line: %s for %s over %s' % (
str(self.expr), str(self.var), str((self.start, self.end)))
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
.. [1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if self.only_integers or not self.adaptive:
return super(LineOver1DRangeSeries, self).get_segments()
else:
f = lambdify([self.var], self.expr)
list_segments = []
np = import_module('numpy')
def sample(p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
# Randomly sample to avoid aliasing.
random = 0.45 + np.random.rand() * 0.1
if self.xscale == 'log':
xnew = 10**(np.log10(p[0]) + random * (np.log10(q[0]) -
np.log10(p[0])))
else:
xnew = p[0] + random * (q[0] - p[0])
ynew = f(xnew)
new_point = np.array([xnew, ynew])
# Maximum depth
if depth > self.depth:
list_segments.append([p, q])
# Sample irrespective of whether the line is flat till the
# depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
# Sample ten points if complex values are encountered
# at both ends. If there is a real value in between, then
# sample those points further.
elif p[1] is None and q[1] is None:
if self.xscale == 'log':
xarray = np.logspace(p[0], q[0], 10)
else:
xarray = np.linspace(p[0], q[0], 10)
yarray = list(map(f, xarray))
if any(y is not None for y in yarray):
for i in range(len(yarray) - 1):
if yarray[i] is not None or yarray[i + 1] is not None:
sample([xarray[i], yarray[i]],
[xarray[i + 1], yarray[i + 1]], depth + 1)
# Sample further if one of the end points is None (i.e. a
# complex value) or the three points are not almost collinear.
elif (p[1] is None or q[1] is None or new_point[1] is None
or not flat(p, new_point, q)):
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start = f(self.start)
f_end = f(self.end)
sample(np.array([self.start, f_start]),
np.array([self.end, f_end]), 0)
return list_segments
def get_points(self):
np = import_module('numpy')
if self.only_integers is True:
if self.xscale == 'log':
list_x = np.logspace(int(self.start), int(self.end),
num=int(self.end) - int(self.start) + 1)
else:
list_x = np.linspace(int(self.start), int(self.end),
num=int(self.end) - int(self.start) + 1)
else:
if self.xscale == 'log':
list_x = np.logspace(self.start, self.end, num=self.nb_of_points)
else:
list_x = np.linspace(self.start, self.end, num=self.nb_of_points)
f = vectorized_lambdify([self.var], self.expr)
list_y = f(list_x)
return (list_x, list_y)
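# A hedged sketch of using the series above directly, bypassing ``plot``
# (requires numpy; the exact array dtype may vary):
#
#     from sympy import Symbol
#     x = Symbol('x')
#     s = LineOver1DRangeSeries(x**2, (x, -2, 2), adaptive=False, nb_of_points=5)
#     xs, ys = s.get_points()    # xs = [-2, -1, 0, 1, 2], ys = [4, 1, 0, 1, 4]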
class Parametric2DLineSeries(Line2DBaseSeries):
"""Representation for a line consisting of two parametric sympy expressions
over a range."""
is_parametric = True
def __init__(self, expr_x, expr_y, var_start_end, **kwargs):
super(Parametric2DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'parametric cartesian line: (%s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.var),
str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
list_x = fx(param)
list_y = fy(param)
return (list_x, list_y)
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
.. [1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if not self.adaptive:
return super(Parametric2DLineSeries, self).get_segments()
f_x = lambdify([self.var], self.expr_x)
f_y = lambdify([self.var], self.expr_y)
list_segments = []
def sample(param_p, param_q, p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
# Randomly sample to avoid aliasing.
np = import_module('numpy')
random = 0.45 + np.random.rand() * 0.1
param_new = param_p + random * (param_q - param_p)
xnew = f_x(param_new)
ynew = f_y(param_new)
new_point = np.array([xnew, ynew])
# Maximum depth
if depth > self.depth:
list_segments.append([p, q])
# Sample irrespective of whether the line is flat till the
# depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
# Sample ten points if complex values are encountered
# at both ends. If there is a real value in between, then
# sample those points further.
elif ((p[0] is None and q[0] is None) or
(p[1] is None and q[1] is None)):
param_array = np.linspace(param_p, param_q, 10)
x_array = list(map(f_x, param_array))
y_array = list(map(f_y, param_array))
if any(x is not None and y is not None
for x, y in zip(x_array, y_array)):
for i in range(len(y_array) - 1):
if ((x_array[i] is not None and y_array[i] is not None) or
(x_array[i + 1] is not None and y_array[i + 1] is not None)):
point_a = [x_array[i], y_array[i]]
point_b = [x_array[i + 1], y_array[i + 1]]
sample(param_array[i], param_array[i + 1], point_a,
point_b, depth + 1)
# Sample further if one of the end points is None (i.e. a complex
# value) or the three points are not almost collinear.
elif (p[0] is None or p[1] is None
or q[1] is None or q[0] is None
or not flat(p, new_point, q)):
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start_x = f_x(self.start)
f_start_y = f_y(self.start)
start = [f_start_x, f_start_y]
f_end_x = f_x(self.end)
f_end_y = f_y(self.end)
end = [f_end_x, f_end_y]
sample(self.start, self.end, start, end, 0)
return list_segments
### 3D lines
class Line3DBaseSeries(Line2DBaseSeries):
"""A base class for 3D lines.
Most of the stuff is derived from Line2DBaseSeries."""
is_2Dline = False
is_3Dline = True
_dim = 3
def __init__(self):
super(Line3DBaseSeries, self).__init__()
class Parametric3DLineSeries(Line3DBaseSeries):
"""Representation for a 3D line consisting of two parametric sympy
expressions and a range."""
def __init__(self, expr_x, expr_y, expr_z, var_start_end, **kwargs):
super(Parametric3DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return '3D parametric cartesian line: (%s, %s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.expr_z),
str(self.var), str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
fz = vectorized_lambdify([self.var], self.expr_z)
list_x = fx(param)
list_y = fy(param)
list_z = fz(param)
return (list_x, list_y, list_z)
### Surfaces
class SurfaceBaseSeries(BaseSeries):
"""A base class for 3D surfaces."""
is_3Dsurface = True
def __init__(self):
super(SurfaceBaseSeries, self).__init__()
self.surface_color = None
def get_color_array(self):
np = import_module('numpy')
c = self.surface_color
if isinstance(c, Callable):
f = np.vectorize(c)
nargs = arity(c)
if self.is_parametric:
variables = list(map(centers_of_faces, self.get_parameter_meshes()))
if nargs == 1:
return f(variables[0])
elif nargs == 2:
return f(*variables)
variables = list(map(centers_of_faces, self.get_meshes()))
if nargs == 1:
return f(variables[0])
elif nargs == 2:
return f(*variables[:2])
else:
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class SurfaceOver2DRangeSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of a sympy expression and 2D
range."""
def __init__(self, expr, var_start_end_x, var_start_end_y, **kwargs):
super(SurfaceOver2DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.nb_of_points_x = kwargs.get('nb_of_points_x', 50)
self.nb_of_points_y = kwargs.get('nb_of_points_y', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('cartesian surface: %s for'
' %s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
class ParametricSurfaceSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of three parametric sympy
expressions and a range."""
is_parametric = True
def __init__(
self, expr_x, expr_y, expr_z, var_start_end_u, var_start_end_v,
**kwargs):
super(ParametricSurfaceSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.var_u = sympify(var_start_end_u[0])
self.start_u = float(var_start_end_u[1])
self.end_u = float(var_start_end_u[2])
self.var_v = sympify(var_start_end_v[0])
self.start_v = float(var_start_end_v[1])
self.end_v = float(var_start_end_v[2])
self.nb_of_points_u = kwargs.get('nb_of_points_u', 50)
self.nb_of_points_v = kwargs.get('nb_of_points_v', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('parametric cartesian surface: (%s, %s, %s) for'
' %s over %s and %s over %s') % (
str(self.expr_x),
str(self.expr_y),
str(self.expr_z),
str(self.var_u),
str((self.start_u, self.end_u)),
str(self.var_v),
str((self.start_v, self.end_v)))
def get_parameter_meshes(self):
np = import_module('numpy')
return np.meshgrid(np.linspace(self.start_u, self.end_u,
num=self.nb_of_points_u),
np.linspace(self.start_v, self.end_v,
num=self.nb_of_points_v))
def get_meshes(self):
mesh_u, mesh_v = self.get_parameter_meshes()
fx = vectorized_lambdify((self.var_u, self.var_v), self.expr_x)
fy = vectorized_lambdify((self.var_u, self.var_v), self.expr_y)
fz = vectorized_lambdify((self.var_u, self.var_v), self.expr_z)
return (fx(mesh_u, mesh_v), fy(mesh_u, mesh_v), fz(mesh_u, mesh_v))
### Contours
class ContourSeries(BaseSeries):
"""Representation for a contour plot."""
# The code is mostly repetition of SurfaceOver2DRange.
# Presently used in contour_plot function
is_contour = True
def __init__(self, expr, var_start_end_x, var_start_end_y):
super(ContourSeries, self).__init__()
self.nb_of_points_x = 50
self.nb_of_points_y = 50
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_meshes
def __str__(self):
return ('contour: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
##############################################################################
# Backends
##############################################################################
class BaseBackend(object):
def __init__(self, parent):
super(BaseBackend, self).__init__()
self.parent = parent
# Don't have to check for the success of importing matplotlib in each case;
# we will only be using this backend if we can successfully import matplotlib
class MatplotlibBackend(BaseBackend):
def __init__(self, parent):
super(MatplotlibBackend, self).__init__(parent)
self.matplotlib = import_module('matplotlib',
import_kwargs={'fromlist': ['pyplot', 'cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
self.plt = self.matplotlib.pyplot
self.cm = self.matplotlib.cm
self.LineCollection = self.matplotlib.collections.LineCollection
aspect = getattr(self.parent, 'aspect_ratio', 'auto')
if aspect != 'auto':
aspect = float(aspect[1]) / aspect[0]
if isinstance(self.parent, Plot):
nrows, ncolumns = 1, 1
series_list = [self.parent._series]
elif isinstance(self.parent, PlotGrid):
nrows, ncolumns = self.parent.nrows, self.parent.ncolumns
series_list = self.parent._series
self.ax = []
self.fig = self.plt.figure()
for i, series in enumerate(series_list):
are_3D = [s.is_3D for s in series]
if any(are_3D) and not all(are_3D):
raise ValueError('The matplotlib backend can not mix 2D and 3D.')
elif all(are_3D):
# mpl_toolkits.mplot3d is necessary for
# projection='3d'
mpl_toolkits = import_module('mpl_toolkits', # noqa
import_kwargs={'fromlist': ['mplot3d']})
self.ax.append(self.fig.add_subplot(nrows, ncolumns, i + 1, projection='3d', aspect=aspect))
elif not any(are_3D):
self.ax.append(self.fig.add_subplot(nrows, ncolumns, i + 1, aspect=aspect))
self.ax[i].spines['left'].set_position('zero')
self.ax[i].spines['right'].set_color('none')
self.ax[i].spines['bottom'].set_position('zero')
self.ax[i].spines['top'].set_color('none')
self.ax[i].spines['left'].set_smart_bounds(True)
self.ax[i].spines['bottom'].set_smart_bounds(False)
self.ax[i].xaxis.set_ticks_position('bottom')
self.ax[i].yaxis.set_ticks_position('left')
def _process_series(self, series, ax, parent):
for s in series:
# Create the collections
if s.is_2Dline:
collection = self.LineCollection(s.get_segments())
ax.add_collection(collection)
elif s.is_contour:
ax.contour(*s.get_meshes())
elif s.is_3Dline:
# TODO too complicated, I blame matplotlib
mpl_toolkits = import_module('mpl_toolkits',
import_kwargs={'fromlist': ['mplot3d']})
art3d = mpl_toolkits.mplot3d.art3d
collection = art3d.Line3DCollection(s.get_segments())
ax.add_collection(collection)
x, y, z = s.get_points()
ax.set_xlim((min(x), max(x)))
ax.set_ylim((min(y), max(y)))
ax.set_zlim((min(z), max(z)))
elif s.is_3Dsurface:
x, y, z = s.get_meshes()
collection = ax.plot_surface(x, y, z,
cmap=getattr(self.cm, 'viridis', self.cm.jet),
rstride=1, cstride=1, linewidth=0.1)
elif s.is_implicit:
# Smart bounds have to be set to False for implicit plots.
ax.spines['left'].set_smart_bounds(False)
ax.spines['bottom'].set_smart_bounds(False)
points = s.get_raster()
if len(points) == 2:
# interval math plotting
x, y = _matplotlib_list(points[0])
ax.fill(x, y, facecolor=s.line_color, edgecolor='None')
else:
# use contourf or contour depending on whether it is
# an inequality or equality.
# XXX: ``contour`` plots multiple lines. Should be fixed.
ListedColormap = self.matplotlib.colors.ListedColormap
colormap = ListedColormap(["white", s.line_color])
xarray, yarray, zarray, plot_type = points
if plot_type == 'contour':
ax.contour(xarray, yarray, zarray, cmap=colormap)
else:
ax.contourf(xarray, yarray, zarray, cmap=colormap)
else:
raise ValueError('The matplotlib backend supports only '
'is_2Dline, is_3Dline, is_3Dsurface and '
'is_contour objects.')
# Customise the collections with the corresponding per-series
# options.
if hasattr(s, 'label'):
collection.set_label(s.label)
if s.is_line and s.line_color:
if isinstance(s.line_color, (float, int)) or isinstance(s.line_color, Callable):
color_array = s.get_color_array()
collection.set_array(color_array)
else:
collection.set_color(s.line_color)
if s.is_3Dsurface and s.surface_color:
if self.matplotlib.__version__ < "1.2.0": # TODO in the distant future remove this check
warnings.warn('The version of matplotlib is too old to use surface coloring.')
elif isinstance(s.surface_color, (float, int)) or isinstance(s.surface_color, Callable):
color_array = s.get_color_array()
color_array = color_array.reshape(color_array.size)
collection.set_array(color_array)
else:
collection.set_color(s.surface_color)
# Set global options.
# TODO The 3D stuff
# XXX The order of those is important.
mpl_toolkits = import_module('mpl_toolkits',
import_kwargs={'fromlist': ['mplot3d']})
Axes3D = mpl_toolkits.mplot3d.Axes3D
if parent.xscale and not isinstance(ax, Axes3D):
ax.set_xscale(parent.xscale)
if parent.yscale and not isinstance(ax, Axes3D):
ax.set_yscale(parent.yscale)
if not isinstance(ax, Axes3D) or self.matplotlib.__version__ >= '1.2.0': # XXX in the distant future remove this check
ax.set_autoscale_on(parent.autoscale)
if parent.axis_center:
val = parent.axis_center
if isinstance(ax, Axes3D):
pass
elif val == 'center':
ax.spines['left'].set_position('center')
ax.spines['bottom'].set_position('center')
elif val == 'auto':
xl, xh = ax.get_xlim()
yl, yh = ax.get_ylim()
pos_left = ('data', 0) if xl*xh <= 0 else 'center'
pos_bottom = ('data', 0) if yl*yh <= 0 else 'center'
ax.spines['left'].set_position(pos_left)
ax.spines['bottom'].set_position(pos_bottom)
else:
ax.spines['left'].set_position(('data', val[0]))
ax.spines['bottom'].set_position(('data', val[1]))
if not parent.axis:
ax.set_axis_off()
if parent.legend:
if ax.legend():
ax.legend_.set_visible(parent.legend)
if parent.margin:
ax.set_xmargin(parent.margin)
ax.set_ymargin(parent.margin)
if parent.title:
ax.set_title(parent.title)
if parent.xlabel:
ax.set_xlabel(parent.xlabel, position=(1, 0))
if parent.ylabel:
ax.set_ylabel(parent.ylabel, position=(0, 1))
if parent.annotations:
for a in parent.annotations:
ax.annotate(**a)
if parent.markers:
for marker in parent.markers:
# make a copy of the marker dictionary
# so that it doesn't get altered
m = marker.copy()
args = m.pop('args')
ax.plot(*args, **m)
if parent.rectangles:
for r in parent.rectangles:
rect = self.matplotlib.patches.Rectangle(**r)
ax.add_patch(rect)
if parent.fill:
ax.fill_between(**parent.fill)
# xlim and ylim should always be set last so that the plot limits
# don't get altered during the process.
if parent.xlim:
from sympy.core.basic import Basic
xlim = parent.xlim
if any(isinstance(i, Basic) and not i.is_real for i in xlim):
raise ValueError(
"All numbers from xlim={} must be real".format(xlim))
if any(isinstance(i, Basic) and not i.is_finite for i in xlim):
raise ValueError(
"All numbers from xlim={} must be finite".format(xlim))
xlim = (float(i) for i in xlim)
ax.set_xlim(xlim)
else:
if parent._series and all(isinstance(s, LineOver1DRangeSeries) for s in parent._series):
starts = [s.start for s in parent._series]
ends = [s.end for s in parent._series]
ax.set_xlim(min(starts), max(ends))
if parent.ylim:
from sympy.core.basic import Basic
ylim = parent.ylim
if any(isinstance(i,Basic) and not i.is_real for i in ylim):
raise ValueError(
"All numbers from ylim={} must be real".format(ylim))
if any(isinstance(i,Basic) and not i.is_finite for i in ylim):
raise ValueError(
"All numbers from ylim={} must be finite".format(ylim))
ylim = (float(i) for i in ylim)
ax.set_ylim(ylim)
def process_series(self):
"""
Iterates over every ``Plot`` object and further calls
_process_series()
"""
parent = self.parent
if isinstance(parent, Plot):
series_list = [parent._series]
else:
series_list = parent._series
for i, (series, ax) in enumerate(zip(series_list, self.ax)):
if isinstance(self.parent, PlotGrid):
parent = self.parent.args[i]
self._process_series(series, ax, parent)
def show(self):
self.process_series()
#TODO after fixing https://github.com/ipython/ipython/issues/1255
# you can uncomment the next line and remove the pyplot.show() call
#self.fig.show()
if _show:
self.fig.tight_layout()
self.plt.show()
else:
self.close()
def save(self, path):
self.process_series()
self.fig.savefig(path)
def close(self):
self.plt.close(self.fig)
class TextBackend(BaseBackend):
def __init__(self, parent):
super(TextBackend, self).__init__(parent)
def show(self):
if not _show:
return
if len(self.parent._series) != 1:
raise ValueError(
'The TextBackend supports only one graph per Plot.')
elif not isinstance(self.parent._series[0], LineOver1DRangeSeries):
raise ValueError(
'The TextBackend supports only expressions over a 1D range')
else:
ser = self.parent._series[0]
textplot(ser.expr, ser.start, ser.end)
def close(self):
pass
class DefaultBackend(BaseBackend):
def __new__(cls, parent):
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
return MatplotlibBackend(parent)
else:
return TextBackend(parent)
plot_backends = {
'matplotlib': MatplotlibBackend,
'text': TextBackend,
'default': DefaultBackend
}
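# Hedged example of selecting a backend explicitly; the 'text' backend only
# needs sympy's own textplot and no matplotlib:
#
#     from sympy import Symbol
#     from sympy.plotting import plot
#     x = Symbol('x')
#     plot(x**2, (x, -3, 3), backend='text')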
##############################################################################
# Finding the centers of line segments or mesh faces
##############################################################################
def centers_of_segments(array):
np = import_module('numpy')
return np.mean(np.vstack((array[:-1], array[1:])), 0)
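# Hedged example for the helper above:
#     centers_of_segments(np.array([0., 1., 2.]))   # -> array([0.5, 1.5])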
def centers_of_faces(array):
np = import_module('numpy')
return np.mean(np.dstack((array[:-1, :-1],
array[1:, :-1],
array[:-1, 1:],
array[1:, 1:],
)), 2)
def flat(x, y, z, eps=1e-3):
"""Checks whether three points are almost collinear"""
np = import_module('numpy')
# Workaround for plotting Piecewise (#8577):
# `lambdify` from `.experimental_lambdify` fails to return numerical
# values in some cases. A lower-level fix in `lambdify` is possible.
vector_a = (x - y).astype(np.float)
vector_b = (z - y).astype(np.float)
dot_product = np.dot(vector_a, vector_b)
vector_a_norm = np.linalg.norm(vector_a)
vector_b_norm = np.linalg.norm(vector_b)
cos_theta = dot_product / (vector_a_norm * vector_b_norm)
return abs(cos_theta + 1) < eps
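# Hedged examples for the collinearity test above (numpy arrays of floats):
#     flat(np.array([0., 0.]), np.array([1., 1.]), np.array([2., 2.]))   # True
#     flat(np.array([0., 0.]), np.array([1., 2.]), np.array([2., 0.]))   # False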
def _matplotlib_list(interval_list):
"""
Returns lists for matplotlib ``fill`` command from a list of bounding
rectangular intervals
"""
xlist = []
ylist = []
if len(interval_list):
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
xlist.extend([intervalx.start, intervalx.start,
intervalx.end, intervalx.end, None])
ylist.extend([intervaly.start, intervaly.end,
intervaly.end, intervaly.start, None])
else:
#XXX Ugly hack. Matplotlib does not accept empty lists for ``fill``
xlist.extend([None, None, None, None])
ylist.extend([None, None, None, None])
return xlist, ylist
####New API for plotting module ####
# TODO: Add color arrays for plots.
# TODO: Add more plotting options for 3d plots.
# TODO: Adaptive sampling for 3D plots.
def plot(*args, **kwargs):
"""
Plots a function of a single variable and returns an instance of
the ``Plot`` class (also, see the description of the
``show`` keyword argument below).
The plotting uses an adaptive algorithm which samples recursively to
accurately render the function. The adaptive algorithm uses a random point
near the midpoint of two points that has to be further sampled. Hence,
plots of the same expression can appear slightly different between runs.
Usage
=====
Single Plot
``plot(expr, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot(expr1, expr2, ..., range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot((expr1, range), (expr2, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function of single variable
``range``: (x, 0, 5), A 3-tuple denoting the range of the free variable.
Keyword Arguments
=================
Arguments for ``plot`` function:
``show``: Boolean. The default value is set to ``True``. Set show to
``False`` and the function will not display the plot. The returned
instance of the ``Plot`` class can then be used to save or display
the plot by calling the ``save()`` and ``show()`` methods
respectively.
Arguments for :obj:`LineOver1DRangeSeries` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to False and
specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of value ``n``
samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when ``adaptive`` is set to False. The function
is uniformly sampled at ``nb_of_points`` points.
Aesthetics options:
``line_color``: float. Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot. It is set to the latex representation of
the expression, if the plot has only one expression.
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center or
{'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
``annotations``: list. A list of dictionaries specifying the type of
annotation required. The keys in the dictionary should be equivalent
to the arguments of the matplotlib's annotate() function.
``markers``: list. A list of dictionaries specifying the type of
markers required. The keys in the dictionary should be equivalent
to the arguments of the matplotlib's plot() function along with the
marker-related keyword arguments.
``rectangles``: list. A list of dictionaries specifying the dimensions
of the rectangles to be plotted. The keys in the dictionary should be
equivalent to the arguments of the matplotlib's patches.Rectangle class.
``fill``: dict. A dictionary specifying the type of color filling
required in the plot. The keys in the dictionary should be equivalent
to the arguments of the matplotlib's fill_between() function.
Examples
========
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
Single Plot
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot(x**2, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x**2 for x over (-5.0, 5.0)
Multiple plots with single range.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot(x, x**2, x**3, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x for x over (-5.0, 5.0)
[1]: cartesian line: x**2 for x over (-5.0, 5.0)
[2]: cartesian line: x**3 for x over (-5.0, 5.0)
Multiple plots with different ranges.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))
Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
No adaptive sampling.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot(x**2, adaptive=False, nb_of_points=400)
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
See Also
========
Plot, LineOver1DRangeSeries
"""
args = list(map(sympify, args))
free = set()
for a in args:
if isinstance(a, Expr):
free |= a.free_symbols
if len(free) > 1:
raise ValueError(
'The same variable should be used in all '
'univariate expressions being plotted.')
x = free.pop() if free else Symbol('x')
kwargs.setdefault('xlabel', x.name)
kwargs.setdefault('ylabel', 'f(%s)' % x.name)
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 1)
series = [LineOver1DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def plot_parametric(*args, **kwargs):
"""
Plots a 2D parametric plot.
The plotting uses an adaptive algorithm which samples recursively to
accurately render the functions. The adaptive algorithm uses a random point
near the midpoint of two points that has to be further sampled. Hence,
plots of the same expressions can appear slightly different between runs.
Usage
=====
Single plot.
``plot_parametric(expr_x, expr_y, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot_parametric((expr1_x, expr1_y), (expr2_x, expr2_y), range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot_parametric((expr_x, expr_y, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``range``: (u, 0, 5), A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric2DLineSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to
False and specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of
value ``n`` samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when ``adaptive`` is set to False. The
function is uniformly sampled at ``nb_of_points`` points.
Aesthetics
----------
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same Series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center
or {'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
Examples
========
.. plot::
:context: reset
:format: doctest
:include-source: True
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot_parametric
>>> u = symbols('u')
Single Parametric plot
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot_parametric(cos(u), sin(u), (u, -5, 5))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
Multiple parametric plot with single range.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot_parametric((cos(u), sin(u)), (u, cos(u)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-10.0, 10.0)
[1]: parametric cartesian line: (u, cos(u)) for u over (-10.0, 10.0)
Multiple parametric plots.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot_parametric((cos(u), sin(u), (u, -5, 5)),
... (cos(u), u, (u, -5, 5)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
[1]: parametric cartesian line: (cos(u), u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric2DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 2, 1)
series = [Parametric2DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def plot3d_parametric_line(*args, **kwargs):
"""
Plots a 3D parametric line plot.
Usage
=====
Single plot:
``plot3d_parametric_line(expr_x, expr_y, expr_z, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_line((expr_x, expr_y, expr_z, range), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``expr_z`` : Expression representing the function along z.
``range``: ``(u, 0, 5)``, A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric3DLineSeries`` class.
``nb_of_points``: The range is uniformly sampled at ``nb_of_points``
number of points.
Aesthetics:
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class.
``title`` : str. Title of the plot.
Examples
========
.. plot::
:context: reset
:format: doctest
:include-source: True
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_line
>>> u = symbols('u')
Single plot.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot3d_parametric_line(cos(u), sin(u), u, (u, -5, 5))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
Multiple plots.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot3d_parametric_line((cos(u), sin(u), u, (u, -5, 5)),
... (sin(u), u**2, u, (u, -5, 5)))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
[1]: 3D parametric cartesian line: (sin(u), u**2, u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric3DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 1)
series = [Parametric3DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def plot3d(*args, **kwargs):
"""
Plots a 3D surface plot.
Usage
=====
Single plot
``plot3d(expr, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plot with the same range.
``plot3d(expr1, expr2, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot3d((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function along x.
``range_x``: (x, 0, 5), A 3-tuple denoting the range of the x
variable.
``range_y``: (y, 0, 5), A 3-tuple denoting the range of the y
variable.
Keyword Arguments
=================
Arguments for ``SurfaceOver2DRangeSeries`` class:
``nb_of_points_x``: int. The x range is sampled uniformly at
``nb_of_points_x`` points.
``nb_of_points_y``: int. The y range is sampled uniformly at
``nb_of_points_y`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
.. plot::
:context: reset
:format: doctest
:include-source: True
>>> from sympy import symbols
>>> from sympy.plotting import plot3d
>>> x, y = symbols('x y')
Single plot
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot3d(x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with same range
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot3d(x*y, -x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: -x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with different ranges.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot3d((x**2 + y**2, (x, -5, 5), (y, -5, 5)),
... (x*y, (x, -3, 3), (y, -3, 3)))
Plot object containing:
[0]: cartesian surface: x**2 + y**2 for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: x*y for x over (-3.0, 3.0) and y over (-3.0, 3.0)
See Also
========
Plot, SurfaceOver2DRangeSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 2)
series = [SurfaceOver2DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def plot3d_parametric_surface(*args, **kwargs):
"""
Plots a 3D parametric surface plot.
Usage
=====
Single plot.
``plot3d_parametric_surface(expr_x, expr_y, expr_z, range_u, range_v, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_surface((expr_x, expr_y, expr_z, range_u, range_v), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x``: Expression representing the function along ``x``.
``expr_y``: Expression representing the function along ``y``.
``expr_z``: Expression representing the function along ``z``.
``range_u``: ``(u, 0, 5)``, A 3-tuple denoting the range of the ``u``
variable.
``range_v``: ``(v, 0, 5)``, A 3-tuple denoting the range of the ``v``
variable.
Keyword Arguments
=================
Arguments for ``ParametricSurfaceSeries`` class:
``nb_of_points_u``: int. The ``u`` range is sampled uniformly at
``nb_of_points_u`` points.
``nb_of_points_v``: int. The ``v`` range is sampled uniformly at
``nb_of_points_v`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
.. plot::
:context: reset
:format: doctest
:include-source: True
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_surface
>>> u, v = symbols('u v')
Single plot.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot3d_parametric_surface(cos(u + v), sin(u - v), u - v,
... (u, -5, 5), (v, -5, 5))
Plot object containing:
[0]: parametric cartesian surface: (cos(u + v), sin(u - v), u - v) for u over (-5.0, 5.0) and v over (-5.0, 5.0)
See Also
========
Plot, ParametricSurfaceSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 2)
series = [ParametricSurfaceSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def plot_contour(*args, **kwargs):
"""
Draws a contour plot of a function.
Usage
=====
Single plot
``plot_contour(expr, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plot with the same range.
``plot_contour(expr1, expr2, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot_contour((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function of ``x`` and ``y`` whose contours are drawn.
``range_x``: (x, 0, 5), A 3-tuple denoting the range of the x
variable.
``range_y``: (y, 0, 5), A 3-tuple denoting the range of the y
variable.
Keyword Arguments
=================
Arguments for ``ContourSeries`` class:
``nb_of_points_x``: int. The x range is sampled uniformly at
``nb_of_points_x`` points.
``nb_of_points_y``: int. The y range is sampled uniformly at
``nb_of_points_y`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
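Examples
========
A minimal usage sketch (an assumed illustration, not from the original
docstring); passing ``show=False`` keeps a window from opening:
>>> from sympy import symbols
>>> from sympy.plotting.plot import plot_contour
>>> x, y = symbols('x y')
>>> p = plot_contour(x**2 + y**2, (x, -5, 5), (y, -5, 5), show=False)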
See Also
========
Plot, ContourSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
plot_expr = check_arguments(args, 1, 2)
series = [ContourSeries(*arg) for arg in plot_expr]
plot_contours = Plot(*series, **kwargs)
if len(plot_expr[0].free_symbols) > 2:
raise ValueError('Contour plots cannot handle more than two variables.')
if show:
plot_contours.show()
return plot_contours
def check_arguments(args, expr_len, nb_of_free_symbols):
"""
Checks the arguments and converts them into tuples of the
form (exprs, ranges).
Examples
========
.. plot::
:context: reset
:format: doctest
:include-source: True
>>> from sympy import plot, cos, sin, symbols
>>> from sympy.plotting.plot import check_arguments
>>> x = symbols('x')
>>> check_arguments([cos(x), sin(x)], 2, 1)
[(cos(x), sin(x), (x, -10, 10))]
>>> check_arguments([x, x**2], 1, 1)
[(x, (x, -10, 10)), (x**2, (x, -10, 10))]
"""
if not args:
return []
if expr_len > 1 and isinstance(args[0], Expr):
# Multiple expressions same range.
# The arguments are tuples when the expression length is
# greater than 1.
if len(args) < expr_len:
raise ValueError("len(args) should not be less than expr_len")
for i in range(len(args)):
if isinstance(args[i], Tuple):
break
else:
i = len(args) + 1
exprs = Tuple(*args[:i])
free_symbols = list(set().union(*[e.free_symbols for e in exprs]))
if len(args) == expr_len + nb_of_free_symbols:
#Ranges given
plots = [exprs + Tuple(*args[expr_len:])]
else:
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
plots = [exprs + Tuple(*ranges)]
return plots
if isinstance(args[0], Expr) or (isinstance(args[0], Tuple) and
len(args[0]) == expr_len and
expr_len != 3):
# Cannot handle the case of three expressions, because it is not
# possible to distinguish the expressions from the ranges there.
# Series of plots with the same range.
for i in range(len(args)):
if isinstance(args[i], Tuple) and len(args[i]) != expr_len:
break
if not isinstance(args[i], Tuple):
args[i] = Tuple(args[i])
else:
i = len(args) + 1
exprs = args[:i]
assert all(isinstance(e, Expr) for expr in exprs for e in expr)
free_symbols = list(set().union(*[e.free_symbols for expr in exprs
for e in expr]))
if len(free_symbols) > nb_of_free_symbols:
raise ValueError("The number of free_symbols in the expression "
"is greater than %d" % nb_of_free_symbols)
if len(args) == i + nb_of_free_symbols and isinstance(args[i], Tuple):
ranges = Tuple(*[range_expr for range_expr in args[
i:i + nb_of_free_symbols]])
plots = [expr + ranges for expr in exprs]
return plots
else:
# Use default ranges.
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(nb_of_free_symbols - len(free_symbols)):
ranges.append(Tuple(Dummy()) + default_range)
ranges = Tuple(*ranges)
plots = [expr + ranges for expr in exprs]
return plots
elif isinstance(args[0], Tuple) and len(args[0]) == expr_len + nb_of_free_symbols:
# Multiple plots with different ranges.
for arg in args:
for i in range(expr_len):
if not isinstance(arg[i], Expr):
raise ValueError("Expected an expression, given %s" %
str(arg[i]))
for i in range(nb_of_free_symbols):
if not len(arg[i + expr_len]) == 3:
raise ValueError("The ranges should be a tuple of "
"length 3, got %s" % str(arg[i + expr_len]))
return args
|
82bd1368f69672a4779f1ddd7a665f8991554bc85a0815d03eedeb44c249bcfe | from __future__ import print_function, division
from sympy.core.numbers import Float
from sympy.core.symbol import Dummy
from sympy.utilities.lambdify import lambdify
import math
def is_valid(x):
"""Check if a floating point number is valid"""
if x is None:
return False
if isinstance(x, complex):
return False
return not math.isinf(x) and not math.isnan(x)
def rescale(y, W, H, mi, ma):
"""Rescale the given array `y` to fit into the integer values
between `0` and `H-1` for the values between ``mi`` and ``ma``.
"""
y_new = list()
norm = ma - mi
offset = (ma + mi) / 2
for x in range(W):
if is_valid(y[x]):
normalized = (y[x] - offset) / norm
if not is_valid(normalized):
y_new.append(None)
else:
# XXX There are some test failures because of the
# difference between Python 2 and Python 3 rounding.
rescaled = Float((normalized*H + H/2) * (H-1)/H).round()
rescaled = int(rescaled)
y_new.append(rescaled)
else:
y_new.append(None)
return y_new
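# A short derivation of the mapping above (added comment, following directly
# from the code): for valid y in [mi, ma], normalized = (y - (ma+mi)/2)/(ma-mi)
# lies in [-1/2, 1/2], so normalized*H + H/2 lies in [0, H]; the final factor
# (H-1)/H squeezes that into [0, H-1], which after rounding gives a valid row
# index for the H-row ASCII canvas.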
def linspace(start, stop, num):
return [start + (stop - start) * x / (num-1) for x in range(num)]
def textplot_str(expr, a, b, W=55, H=18):
"""Generator for the lines of the plot"""
free = expr.free_symbols
if len(free) > 1:
raise ValueError(
"The expression must have a single variable. (Got {})"
.format(free))
x = free.pop() if free else Dummy()
f = lambdify([x], expr)
a = float(a)
b = float(b)
# Calculate function values
x = linspace(a, b, W)
y = list()
for val in x:
try:
y.append(f(val))
# Not sure what exceptions to catch here or why...
except (ValueError, TypeError, ZeroDivisionError):
y.append(None)
# Normalize height to screen space
y_valid = list(filter(is_valid, y))
if y_valid:
ma = max(y_valid)
mi = min(y_valid)
if ma == mi:
if ma:
mi, ma = sorted([0, 2*ma])
else:
mi, ma = -1, 1
else:
mi, ma = -1, 1
y = rescale(y, W, H, mi, ma)
y_bins = linspace(mi, ma, H)
# Draw plot
margin = 7
for h in range(H - 1, -1, -1):
s = [' '] * W
for i in range(W):
if y[i] == h:
if (i == 0 or y[i - 1] == h - 1) and (i == W - 1 or y[i + 1] == h + 1):
s[i] = '/'
elif (i == 0 or y[i - 1] == h + 1) and (i == W - 1 or y[i + 1] == h - 1):
s[i] = '\\'
else:
s[i] = '.'
# Print y values
if h in (0, H//2, H - 1):
prefix = ("%g" % y_bins[h]).rjust(margin)[:margin]
else:
prefix = " "*margin
s = "".join(s)
if h == H//2:
s = s.replace(" ", "-")
yield prefix + " | " + s
# Print x values
bottom = " " * (margin + 3)
bottom += ("%g" % x[0]).ljust(W//2)
if W % 2 == 1:
bottom += ("%g" % x[W//2]).ljust(W//2)
else:
bottom += ("%g" % x[W//2]).ljust(W//2-1)
bottom += "%g" % x[-1]
yield bottom
def textplot(expr, a, b, W=55, H=18):
r"""
Print a crude ASCII art plot of the SymPy expression 'expr' (which
should contain at most one free symbol, e.g. x) over the
interval [a, b].
Examples
========
>>> from sympy import Symbol, sin
>>> from sympy.plotting import textplot
>>> t = Symbol('t')
>>> textplot(sin(t)*t, 0, 15)
14.1605 | ...
| .
| .
| . .
| ..
| / .. .
| / .
| /
2.30284 | ------...---------------/--------.------------.--------
| .... ... /
| .. \ / . .
| .. / .
| .. / .
| ... .
| .
| .
| \ .
-11.037 | ...
0 7.5 15
"""
for line in textplot_str(expr, a, b, W, H):
print(line)
|
3cc267ec41f230dfe79c865647f57c0056dae13ad826d0eea1e85dbfcac789ce | """ rewrite of lambdify - This stuff is not stable at all.
It is for internal use in the new plotting module.
It may (will! see the Q'n'A in the source) be rewritten.
It's completely self-contained. In particular, it does not use lambdarepr.
It does not aim to replace the current lambdify. Most importantly, it will never
ever support anything other than sympy expressions (no Matrices, dictionaries
and so on).
"""
from __future__ import print_function, division
import re
from sympy import Symbol, NumberSymbol, I, zoo, oo
from sympy.core.compatibility import exec_
from sympy.utilities.iterables import numbered_symbols
# We parse the expression string into a tree that identifies functions. Then
# we translate the names of the functions and we translate also some strings
# that are not names of functions (all this according to translation
# dictionaries).
# If the translation goes to another module (like numpy) the
# module is imported and 'func' is translated to 'module.func'.
# If a function cannot be translated, the inner nodes of that part of the
# tree are not translated. So if we have Integral(sqrt(x)), sqrt is not
# translated to np.sqrt and the Integral does not crash.
# A namespace for all this is generated by crawling the (func, args) tree of
# the expression. The creation of this namespace involves many ugly
# workarounds.
# The namespace consists of all the names needed for the sympy expression and
# all the name of modules used for translation. Those modules are imported only
# as a name (import numpy as np) in order to keep the namespace small and
# manageable.
# Please, if there is a bug, do not try to fix it here! Rewrite this by using
# the method proposed in the last Q'n'A below. That way the new function will
# work just as well, be just as simple, but it won't need any new workarounds.
# If you insist on fixing it here, look at the workarounds in the function
# sympy_expression_namespace and in lambdify.
# Q: Why are you not using python abstract syntax tree?
# A: Because it is more complicated and not much more powerful in this case.
# Q: What if I have Symbol('sin') or g=Function('f')?
# A: You will break the algorithm. We should use srepr to defend against this?
# The problem with Symbol('sin') is that it will be printed as 'sin'. The
# parser will distinguish it from the function 'sin' because functions are
# detected thanks to the opening parenthesis, but the lambda expression won't
# understand the difference if we have also the sin function.
# The solution (complicated) is to use srepr and maybe ast.
# The problem with the g=Function('f') is that it will be printed as 'f' but in
# the global namespace we have only 'g'. But as the same printer is used in the
# constructor of the namespace there will be no problem.
# Q: What if some of the printers are not printing as expected?
# A: The algorithm won't work. You must use srepr for those cases. But even
# srepr may not print well. All problems with printers should be considered
# bugs.
# Q: What about _imp_ functions?
# A: Those are taken care of by evalf. A special case treatment will work
# faster but it's not worth the code complexity.
# Q: Will ast fix all possible problems?
# A: No. You will always have to use some printer. Even srepr may not work in
# some cases. But if the printer does not work, that should be considered a
# bug.
# Q: Is there some way to fix all possible problems?
# A: Probably by constructing our strings ourself by traversing the (func,
# args) tree and creating the namespace at the same time. That actually sounds
# good.
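# A hedged, illustrative sketch (not part of the original module) of what the
# machinery below produces; names and output are indicative only:
#
#     from sympy.abc import x
#     from sympy import sin
#     f = experimental_lambdify([x], sin(x), use_np=True)
#     # the expression string 'sin(x0)' is parsed to ('', ('sin(', 'x0'), ')'),
#     # 'sin' is looked up in the numpy translation dict, and the generated
#     # source is roughly: lambda x0 : ( np.sin(x0) )
#     f(0.0)   # evaluates through the generated lambda, giving 0.0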
from sympy.external import import_module
import warnings
#TODO debugging output
class vectorized_lambdify(object):
""" Return a sufficiently smart, vectorized and lambdified function.
Returns only reals.
This function uses experimental_lambdify to create a lambdified
expression ready to be used with numpy. Many of the functions in sympy
are not implemented in numpy so in some cases we resort to python cmath or
even to evalf.
The following translations are tried:
only numpy complex
- on errors raised by sympy trying to work with ndarray:
only python cmath and then vectorize complex128
When using python cmath there is no need for evalf or float/complex
because python cmath calls those.
This function never tries to mix numpy directly with evalf because numpy
does not understand sympy Float. If this is needed one can use the
float_wrap_evalf/complex_wrap_evalf options of experimental_lambdify or
better one can be explicit about the dtypes that numpy works with.
Check numpy bug http://projects.scipy.org/numpy/ticket/1013 to know what
types of errors to expect.
"""
def __init__(self, args, expr):
self.args = args
self.expr = expr
self.lambda_func = experimental_lambdify(args, expr, use_np=True)
self.vector_func = self.lambda_func
self.failure = False
def __call__(self, *args):
np = import_module('numpy')
np_old_err = np.seterr(invalid='raise')
try:
temp_args = (np.array(a, dtype=np.complex) for a in args)
results = self.vector_func(*temp_args)
results = np.ma.masked_where(
np.abs(results.imag) > 1e-7 * np.abs(results),
results.real, copy=False)
except Exception as e:
#DEBUG: print 'Error', type(e), e
if ((isinstance(e, TypeError)
and 'unhashable type: \'numpy.ndarray\'' in str(e))
or
(isinstance(e, ValueError)
and ('Invalid limits given:' in str(e)
or 'negative dimensions are not allowed' in str(e) # XXX
or 'sequence too large; must be smaller than 32' in str(e)))): # XXX
# Almost all functions were translated to numpy, but some were
# left as sympy functions. They received an ndarray as an
# argument and failed.
# sin(ndarray(...)) raises "unhashable type"
# Integral(x, (x, 0, ndarray(...))) raises "Invalid limits"
# other ugly exceptions that are not well understood (marked with XXX)
# TODO: Cleanup the ugly special cases marked with xxx above.
# Solution: use cmath and vectorize the final lambda.
self.lambda_func = experimental_lambdify(
self.args, self.expr, use_python_cmath=True)
self.vector_func = np.vectorize(
self.lambda_func, otypes=[np.complex])
results = self.vector_func(*args)
results = np.ma.masked_where(
np.abs(results.imag) > 1e-7 * np.abs(results),
results.real, copy=False)
else:
# Complete failure. One last try with no translations, only
# wrapping in complex((...).evalf()) and returning the real
# part.
if self.failure:
raise e
else:
self.failure = True
self.lambda_func = experimental_lambdify(
self.args, self.expr, use_evalf=True,
complex_wrap_evalf=True)
self.vector_func = np.vectorize(
self.lambda_func, otypes=[np.complex])
results = self.vector_func(*args)
results = np.ma.masked_where(
np.abs(results.imag) > 1e-7 * np.abs(results),
results.real, copy=False)
warnings.warn('The evaluation of the expression is'
' problematic. We are trying a fallback method'
' that may still work. Please report this as a bug.')
finally:
np.seterr(**np_old_err)
return results
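# A hedged usage sketch for the class above (not part of the original module);
# assumes numpy is installed and the expression has a single variable:
#
#     from sympy.abc import x
#     from sympy import sin
#     f = vectorized_lambdify([x], sin(x))
#     f([0.0, 1.0, 2.0])   # masked array holding the real parts of sin(values)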
class lambdify(object):
"""Returns the lambdified function.
This function uses experimental_lambdify to create a lambdified
expression. It uses cmath to evaluate the expression. Functions that are
not implemented in python cmath are wrapped with evalf instead.
"""
def __init__(self, args, expr):
self.args = args
self.expr = expr
self.lambda_func = experimental_lambdify(args, expr, use_evalf=True,
use_python_cmath=True)
self.failure = False
def __call__(self, args, kwargs = {}):
if not self.lambda_func.use_python_math:
args = complex(args)
try:
#The result can be sympy.Float. Hence wrap it with complex type.
result = complex(self.lambda_func(args))
if abs(result.imag) > 1e-7 * abs(result):
return None
else:
return result.real
except Exception as e:
# The exceptions raised by sympy, cmath are not consistent and
# hence it is not possible to specify all the exceptions that
# are to be caught. Presently there are no cases for which the code
# reaches this block other than ZeroDivisionError and complex
# comparison. Also the exception is caught only once. If the
# exception repeats itself,
# then it is not caught and the corresponding error is raised.
# XXX: Remove catching all exceptions once the plotting module
# is heavily tested.
if isinstance(e, ZeroDivisionError):
return None
elif isinstance(e, TypeError) and ('no ordering relation is'
' defined for complex numbers'
in str(e) or 'unorderable '
'types' in str(e) or "not "
"supported between instances of"
in str(e)):
self.lambda_func = experimental_lambdify(self.args, self.expr,
use_evalf=True,
use_python_math=True)
result = self.lambda_func(args.real)
return result
else:
if self.failure:
raise e
#Failure
#Try wrapping it with complex(..).evalf()
self.failure = True
self.lambda_func = experimental_lambdify(self.args, self.expr,
use_evalf=True,
complex_wrap_evalf=True)
result = self.lambda_func(args)
warnings.warn('The evaluation of the expression is'
' problematic. We are trying a fallback method'
' that may still work. Please report this as a bug.')
if abs(result.imag) > 1e-7 * abs(result):
return None
else:
return result.real
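# A hedged usage sketch for the class above (not part of the original module);
# note this is the plotting-internal lambdify, not sympy.utilities.lambdify:
#
#     from sympy.abc import x
#     from sympy import sin
#     g = lambdify([x], sin(x))
#     g(0.0)   # -> 0.0 (the real part), or None when the value is genuinely complex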
def experimental_lambdify(*args, **kwargs):
l = Lambdifier(*args, **kwargs)
return l
class Lambdifier(object):
def __init__(self, args, expr, print_lambda=False, use_evalf=False,
float_wrap_evalf=False, complex_wrap_evalf=False,
use_np=False, use_python_math=False, use_python_cmath=False,
use_interval=False):
self.print_lambda = print_lambda
self.use_evalf = use_evalf
self.float_wrap_evalf = float_wrap_evalf
self.complex_wrap_evalf = complex_wrap_evalf
self.use_np = use_np
self.use_python_math = use_python_math
self.use_python_cmath = use_python_cmath
self.use_interval = use_interval
# Constructing the argument string
# - check
if not all([isinstance(a, Symbol) for a in args]):
raise ValueError('The arguments must be Symbols.')
# - use numbered symbols
syms = numbered_symbols(exclude=expr.free_symbols)
newargs = [next(syms) for _ in args]
expr = expr.xreplace(dict(zip(args, newargs)))
argstr = ', '.join([str(a) for a in newargs])
del syms, newargs, args
# Constructing the translation dictionaries and making the translation
self.dict_str = self.get_dict_str()
self.dict_fun = self.get_dict_fun()
exprstr = str(expr)
newexpr = self.tree2str_translate(self.str2tree(exprstr))
# Constructing the namespaces
namespace = {}
namespace.update(self.sympy_atoms_namespace(expr))
namespace.update(self.sympy_expression_namespace(expr))
# XXX Workaround
# Ugly workaround because Pow(a,Half) prints as sqrt(a)
# and sympy_expression_namespace can not catch it.
from sympy import sqrt
namespace.update({'sqrt': sqrt})
namespace.update({'Eq': lambda x, y: x == y})
namespace.update({'Ne': lambda x, y: x != y})
# End workaround.
if use_python_math:
namespace.update({'math': __import__('math')})
if use_python_cmath:
namespace.update({'cmath': __import__('cmath')})
if use_np:
try:
namespace.update({'np': __import__('numpy')})
except ImportError:
raise ImportError(
'experimental_lambdify failed to import numpy.')
if use_interval:
namespace.update({'imath': __import__(
'sympy.plotting.intervalmath', fromlist=['intervalmath'])})
namespace.update({'math': __import__('math')})
# Construct the lambda
if self.print_lambda:
print(newexpr)
eval_str = 'lambda %s : ( %s )' % (argstr, newexpr)
self.eval_str = eval_str
exec_("from __future__ import division; MYNEWLAMBDA = %s" % eval_str, namespace)
self.lambda_func = namespace['MYNEWLAMBDA']
def __call__(self, *args, **kwargs):
return self.lambda_func(*args, **kwargs)
##############################################################################
# Dicts for translating from sympy to other modules
##############################################################################
###
# builtins
###
# Functions with different names in builtins
builtin_functions_different = {
'Min': 'min',
'Max': 'max',
'Abs': 'abs',
}
# Strings that should be translated
builtin_not_functions = {
'I': '1j',
# 'oo': '1e400',
}
###
# numpy
###
# Functions that are the same in numpy
numpy_functions_same = [
'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'exp', 'log',
'sqrt', 'floor', 'conjugate',
]
# Functions with different names in numpy
numpy_functions_different = {
"acos": "arccos",
"acosh": "arccosh",
"arg": "angle",
"asin": "arcsin",
"asinh": "arcsinh",
"atan": "arctan",
"atan2": "arctan2",
"atanh": "arctanh",
"ceiling": "ceil",
"im": "imag",
"ln": "log",
"Max": "amax",
"Min": "amin",
"re": "real",
"Abs": "abs",
}
# Strings that should be translated
numpy_not_functions = {
'pi': 'np.pi',
'oo': 'np.inf',
'E': 'np.e',
}
###
# python math
###
# Functions that are the same in math
math_functions_same = [
'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'atan2',
'sinh', 'cosh', 'tanh', 'asinh', 'acosh', 'atanh',
'exp', 'log', 'erf', 'sqrt', 'floor', 'factorial', 'gamma',
]
# Functions with different names in math
math_functions_different = {
'ceiling': 'ceil',
'ln': 'log',
'loggamma': 'lgamma'
}
# Strings that should be translated
math_not_functions = {
'pi': 'math.pi',
'E': 'math.e',
}
###
# python cmath
###
# Functions that are the same in cmath
cmath_functions_same = [
'sin', 'cos', 'tan', 'asin', 'acos', 'atan',
'sinh', 'cosh', 'tanh', 'asinh', 'acosh', 'atanh',
'exp', 'log', 'sqrt',
]
# Functions with different names in cmath
cmath_functions_different = {
'ln': 'log',
'arg': 'phase',
}
# Strings that should be translated
cmath_not_functions = {
'pi': 'cmath.pi',
'E': 'cmath.e',
}
###
# intervalmath
###
interval_not_functions = {
'pi': 'math.pi',
'E': 'math.e'
}
interval_functions_same = [
'sin', 'cos', 'exp', 'tan', 'atan', 'log',
'sqrt', 'cosh', 'sinh', 'tanh', 'floor',
'acos', 'asin', 'acosh', 'asinh', 'atanh',
'Abs', 'And', 'Or'
]
interval_functions_different = {
'Min': 'imin',
'Max': 'imax',
'ceiling': 'ceil',
}
###
# mpmath, etc
###
#TODO
###
# Create the final ordered tuples of dictionaries
###
# For strings
def get_dict_str(self):
dict_str = dict(self.builtin_not_functions)
if self.use_np:
dict_str.update(self.numpy_not_functions)
if self.use_python_math:
dict_str.update(self.math_not_functions)
if self.use_python_cmath:
dict_str.update(self.cmath_not_functions)
if self.use_interval:
dict_str.update(self.interval_not_functions)
return dict_str
# For functions
def get_dict_fun(self):
dict_fun = dict(self.builtin_functions_different)
if self.use_np:
for s in self.numpy_functions_same:
dict_fun[s] = 'np.' + s
for k, v in self.numpy_functions_different.items():
dict_fun[k] = 'np.' + v
if self.use_python_math:
for s in self.math_functions_same:
dict_fun[s] = 'math.' + s
for k, v in self.math_functions_different.items():
dict_fun[k] = 'math.' + v
if self.use_python_cmath:
for s in self.cmath_functions_same:
dict_fun[s] = 'cmath.' + s
for k, v in self.cmath_functions_different.items():
dict_fun[k] = 'cmath.' + v
if self.use_interval:
for s in self.interval_functions_same:
dict_fun[s] = 'imath.' + s
for k, v in self.interval_functions_different.items():
dict_fun[k] = 'imath.' + v
return dict_fun
##############################################################################
# The translator functions, tree parsers, etc.
##############################################################################
def str2tree(self, exprstr):
"""Converts an expression string to a tree.
Functions are represented by ('func_name(', tree_of_arguments).
Other expressions are (head_string, mid_tree, tail_str).
Expressions that do not contain functions are directly returned.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy import Integral, sin
>>> from sympy.plotting.experimental_lambdify import Lambdifier
>>> str2tree = Lambdifier([x], x).str2tree
>>> str2tree(str(Integral(x, (x, 1, y))))
('', ('Integral(', 'x, (x, 1, y)'), ')')
>>> str2tree(str(x+y))
'x + y'
>>> str2tree(str(x+y*sin(z)+1))
('x + y*', ('sin(', 'z'), ') + 1')
>>> str2tree('sin(y*(y + 1.1) + (sin(y)))')
('', ('sin(', ('y*(y + 1.1) + (', ('sin(', 'y'), '))')), ')')
"""
#matches the first 'function_name('
first_par = re.search(r'(\w+\()', exprstr)
if first_par is None:
return exprstr
else:
start = first_par.start()
end = first_par.end()
head = exprstr[:start]
func = exprstr[start:end]
tail = exprstr[end:]
count = 0
for i, c in enumerate(tail):
if c == '(':
count += 1
elif c == ')':
count -= 1
if count == -1:
break
func_tail = self.str2tree(tail[:i])
tail = self.str2tree(tail[i:])
return (head, (func, func_tail), tail)
@classmethod
def tree2str(cls, tree):
"""Converts a tree to string without translations.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy import Integral, sin
>>> from sympy.plotting.experimental_lambdify import Lambdifier
>>> str2tree = Lambdifier([x], x).str2tree
>>> tree2str = Lambdifier([x], x).tree2str
>>> tree2str(str2tree(str(x+y*sin(z)+1)))
'x + y*sin(z) + 1'
"""
if isinstance(tree, str):
return tree
else:
return ''.join(map(cls.tree2str, tree))
def tree2str_translate(self, tree):
"""Converts a tree to string with translations.
Function names are translated by translate_func.
Other strings are translated by translate_str.
"""
if isinstance(tree, str):
return self.translate_str(tree)
elif isinstance(tree, tuple) and len(tree) == 2:
return self.translate_func(tree[0][:-1], tree[1])
else:
return ''.join([self.tree2str_translate(t) for t in tree])
def translate_str(self, estr):
"""Translate substrings of estr using in order the dictionaries in
dict_tuple_str."""
for pattern, repl in self.dict_str.items():
estr = re.sub(pattern, repl, estr)
return estr
def translate_func(self, func_name, argtree):
"""Translate function names and the tree of arguments.
If the function name is not in the dictionaries of dict_tuple_fun then the
function is surrounded by a float((...).evalf()).
The use of float is necessary as np.<function>(sympy.Float(..)) raises an
error."""
if func_name in self.dict_fun:
new_name = self.dict_fun[func_name]
argstr = self.tree2str_translate(argtree)
return new_name + '(' + argstr
elif func_name in ['Eq', 'Ne']:
op = {'Eq': '==', 'Ne': '!='}
return "(lambda x, y: x {} y)({}".format(op[func_name], self.tree2str_translate(argtree))
else:
template = '(%s(%s)).evalf(' if self.use_evalf else '%s(%s'
if self.float_wrap_evalf:
template = 'float(%s)' % template
elif self.complex_wrap_evalf:
template = 'complex(%s)' % template
# Wrapping should only happen on the outermost expression, which
# is the only thing we know will be a number.
float_wrap_evalf = self.float_wrap_evalf
complex_wrap_evalf = self.complex_wrap_evalf
self.float_wrap_evalf = False
self.complex_wrap_evalf = False
ret = template % (func_name, self.tree2str_translate(argtree))
self.float_wrap_evalf = float_wrap_evalf
self.complex_wrap_evalf = complex_wrap_evalf
return ret
##############################################################################
# The namespace constructors
##############################################################################
@classmethod
def sympy_expression_namespace(cls, expr):
"""Traverses the (func, args) tree of an expression and creates a sympy
namespace. All other modules are imported only as a module name. That way
the namespace is not polluted and stays quite small. It probably causes many
more variable lookups and so takes more time, but there are no tests on
that for the moment."""
if expr is None:
return {}
else:
funcname = str(expr.func)
# XXX Workaround
# Here we add an ugly workaround because str(func(x))
# is not always the same as str(func). Eg
# >>> str(Integral(x))
# "Integral(x)"
# >>> str(Integral)
# "<class 'sympy.integrals.integrals.Integral'>"
# >>> str(sqrt(x))
# "sqrt(x)"
# >>> str(sqrt)
# "<function sqrt at 0x3d92de8>"
# >>> str(sin(x))
# "sin(x)"
# >>> str(sin)
# "sin"
# Either one of those can be used but not all at the same time.
# The code considers the sin example as the right one.
regexlist = [
r'<class \'sympy[\w.]*?.([\w]*)\'>$',
# the example Integral
r'<function ([\w]*) at 0x[\w]*>$', # the example sqrt
]
for r in regexlist:
m = re.match(r, funcname)
if m is not None:
funcname = m.groups()[0]
# End of the workaround
# XXX debug: print funcname
args_dict = {}
for a in expr.args:
if (isinstance(a, Symbol) or
isinstance(a, NumberSymbol) or
a in [I, zoo, oo]):
continue
else:
args_dict.update(cls.sympy_expression_namespace(a))
args_dict.update({funcname: expr.func})
return args_dict
@staticmethod
def sympy_atoms_namespace(expr):
"""For no real reason this function is separated from
sympy_expression_namespace. It can be moved to it."""
atoms = expr.atoms(Symbol, NumberSymbol, I, zoo, oo)
d = {}
for a in atoms:
# XXX debug: print 'atom:' + str(a)
d[str(a)] = a
return d
|
6af8688add823501486dd497c4d47a1d36cb15a7a1bb030e93fbafb480bd1b38 | from __future__ import unicode_literals
from sympy import (S, Symbol, Interval, exp,
symbols, Eq, cos, And, Tuple, integrate, oo, sin, Sum, Basic,
DiracDelta, Lambda, log, pi, FallingFactorial, Rational)
from sympy.stats import (Die, Normal, Exponential, FiniteRV, P, E, H, variance,
density, given, independent, dependent, where, pspace,
random_symbols, sample, Geometric, factorial_moment, Binomial, Hypergeometric,
DiscreteUniform, Poisson, characteristic_function, moment_generating_function)
from sympy.stats.rv import (IndependentProductPSpace, rs_swap, Density, NamedArgsMixin,
RandomSymbol, sample_iter, PSpace)
from sympy.testing.pytest import raises
from sympy.core.numbers import comp
from sympy.stats.frv_types import BernoulliDistribution
def test_where():
X, Y = Die('X'), Die('Y')
Z = Normal('Z', 0, 1)
assert where(Z**2 <= 1).set == Interval(-1, 1)
assert where(Z**2 <= 1).as_boolean() == Interval(-1, 1).as_relational(Z.symbol)
assert where(And(X > Y, Y > 4)).as_boolean() == And(
Eq(X.symbol, 6), Eq(Y.symbol, 5))
assert len(where(X < 3).set) == 2
assert 1 in where(X < 3).set
X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
assert where(And(X**2 <= 1, X >= 0)).set == Interval(0, 1)
XX = given(X, And(X**2 <= 1, X >= 0))
assert XX.pspace.domain.set == Interval(0, 1)
assert XX.pspace.domain.as_boolean() == \
And(0 <= X.symbol, X.symbol**2 <= 1, -oo < X.symbol, X.symbol < oo)
with raises(TypeError):
XX = given(X, X + 3)
def test_random_symbols():
X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
assert set(random_symbols(2*X + 1)) == set((X,))
assert set(random_symbols(2*X + Y)) == set((X, Y))
assert set(random_symbols(2*X + Y.symbol)) == set((X,))
assert set(random_symbols(2)) == set()
def test_characteristic_function():
# Imports I from sympy
from sympy import I
X = Normal('X',0,1)
Y = DiscreteUniform('Y', [1,2,7])
Z = Poisson('Z', 2)
t = symbols('_t')
P = Lambda(t, exp(-t**2/2))
Q = Lambda(t, exp(7*t*I)/3 + exp(2*t*I)/3 + exp(t*I)/3)
R = Lambda(t, exp(2 * exp(t*I) - 2))
assert characteristic_function(X) == P
assert characteristic_function(Y) == Q
assert characteristic_function(Z) == R
def test_moment_generating_function():
X = Normal('X',0,1)
Y = DiscreteUniform('Y', [1,2,7])
Z = Poisson('Z', 2)
t = symbols('_t')
P = Lambda(t, exp(t**2/2))
Q = Lambda(t, (exp(7*t)/3 + exp(2*t)/3 + exp(t)/3))
R = Lambda(t, exp(2 * exp(t) - 2))
assert moment_generating_function(X) == P
assert moment_generating_function(Y) == Q
assert moment_generating_function(Z) == R
def test_sample_iter():
X = Normal('X',0,1)
Y = DiscreteUniform('Y', [1,2,7])
Z = Poisson('Z', 2)
expr = X**2 + 3
iterator = sample_iter(expr)
expr2 = Y**2 + 5*Y + 4
iterator2 = sample_iter(expr2)
expr3 = Z**3 + 4
iterator3 = sample_iter(expr3)
def is_iterator(obj):
if (
hasattr(obj, '__iter__') and
(hasattr(obj, 'next') or
hasattr(obj, '__next__')) and
callable(obj.__iter__) and
obj.__iter__() is obj
):
return True
else:
return False
assert is_iterator(iterator)
assert is_iterator(iterator2)
assert is_iterator(iterator3)
def test_pspace():
X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
x = Symbol('x')
raises(ValueError, lambda: pspace(5 + 3))
raises(ValueError, lambda: pspace(x < 1))
assert pspace(X) == X.pspace
assert pspace(2*X + 1) == X.pspace
assert pspace(2*X + Y) == IndependentProductPSpace(Y.pspace, X.pspace)
def test_rs_swap():
X = Normal('x', 0, 1)
Y = Exponential('y', 1)
XX = Normal('x', 0, 2)
YY = Normal('y', 0, 3)
expr = 2*X + Y
assert expr.subs(rs_swap((X, Y), (YY, XX))) == 2*XX + YY
def test_RandomSymbol():
X = Normal('x', 0, 1)
Y = Normal('x', 0, 2)
assert X.symbol == Y.symbol
assert X != Y
assert X.name == X.symbol.name
X = Normal('lambda', 0, 1) # make sure we can use protected terms
X = Normal('Lambda', 0, 1) # make sure we can use SymPy terms
def test_RandomSymbol_diff():
X = Normal('x', 0, 1)
assert (2*X).diff(X)
def test_random_symbol_no_pspace():
x = RandomSymbol(Symbol('x'))
assert x.pspace == PSpace()
def test_overlap():
X = Normal('x', 0, 1)
Y = Normal('x', 0, 2)
raises(ValueError, lambda: P(X > Y))
def test_IndependentProductPSpace():
X = Normal('X', 0, 1)
Y = Normal('Y', 0, 1)
px = X.pspace
py = Y.pspace
assert pspace(X + Y) == IndependentProductPSpace(px, py)
assert pspace(X + Y) == IndependentProductPSpace(py, px)
def test_E():
assert E(5) == 5
def test_H():
X = Normal('X', 0, 1)
D = Die('D', sides = 4)
G = Geometric('G', 0.5)
assert H(X, X > 0) == -log(2)/2 + S.Half + log(pi)/2
assert H(D, D > 2) == log(2)
assert comp(H(G).evalf().round(2), 1.39)
def test_Sample():
X = Die('X', 6)
Y = Normal('Y', 0, 1)
z = Symbol('z')
assert sample(X) in [1, 2, 3, 4, 5, 6]
assert sample(X + Y).is_Float
P(X + Y > 0, Y < 0, numsamples=10).is_number
assert E(X + Y, numsamples=10).is_number
assert variance(X + Y, numsamples=10).is_number
raises(ValueError, lambda: P(Y > z, numsamples=5))
assert P(sin(Y) <= 1, numsamples=10) == 1
assert P(sin(Y) <= 1, cos(Y) < 1, numsamples=10) == 1
# Make sure this doesn't raise an error
E(Sum(1/z**Y, (z, 1, oo)), Y > 2, numsamples=3)
assert all(i in range(1, 7) for i in density(X, numsamples=10))
assert all(i in range(4, 7) for i in density(X, X>3, numsamples=10))
def test_given():
X = Normal('X', 0, 1)
Y = Normal('Y', 0, 1)
A = given(X, True)
B = given(X, Y > 2)
assert X == A == B
def test_factorial_moment():
X = Poisson('X', 2)
Y = Binomial('Y', 2, S.Half)
Z = Hypergeometric('Z', 4, 2, 2)
assert factorial_moment(X, 2) == 4
assert factorial_moment(Y, 2) == S.Half
assert factorial_moment(Z, 2) == Rational(1, 3)
x, y, z, l = symbols('x y z l')
Y = Binomial('Y', 2, y)
Z = Hypergeometric('Z', 10, 2, 3)
assert factorial_moment(Y, l) == y**2*FallingFactorial(
2, l) + 2*y*(1 - y)*FallingFactorial(1, l) + (1 - y)**2*\
FallingFactorial(0, l)
assert factorial_moment(Z, l) == 7*FallingFactorial(0, l)/\
15 + 7*FallingFactorial(1, l)/15 + FallingFactorial(2, l)/15
def test_dependence():
X, Y = Die('X'), Die('Y')
assert independent(X, 2*Y)
assert not dependent(X, 2*Y)
X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
assert independent(X, Y)
assert dependent(X, 2*X)
# Create a dependency
XX, YY = given(Tuple(X, Y), Eq(X + Y, 3))
assert dependent(XX, YY)
def test_dependent_finite():
X, Y = Die('X'), Die('Y')
# Dependence testing requires symbolic conditions which currently break
# finite random variables
assert dependent(X, Y + X)
XX, YY = given(Tuple(X, Y), X + Y > 5) # Create a dependency
assert dependent(XX, YY)
def test_normality():
X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
x = Symbol('x', real=True, finite=True)
z = Symbol('z', real=True, finite=True)
dens = density(X - Y, Eq(X + Y, z))
assert integrate(dens(x), (x, -oo, oo)) == 1
def test_Density():
X = Die('X', 6)
d = Density(X)
assert d.doit() == density(X)
def test_NamedArgsMixin():
class Foo(Basic, NamedArgsMixin):
_argnames = 'foo', 'bar'
a = Foo(1, 2)
assert a.foo == 1
assert a.bar == 2
raises(AttributeError, lambda: a.baz)
class Bar(Basic, NamedArgsMixin):
pass
raises(AttributeError, lambda: Bar(1, 2).foo)
def test_density_constant():
assert density(3)(2) == 0
assert density(3)(3) == DiracDelta(0)
def test_real():
x = Normal('x', 0, 1)
assert x.is_real
def test_issue_10052():
X = Exponential('X', 3)
assert P(X < oo) == 1
assert P(X > oo) == 0
assert P(X < 2, X > oo) == 0
assert P(X < oo, X > oo) == 0
assert P(X < oo, X > 2) == 1
assert P(X < 3, X == 2) == 0
raises(ValueError, lambda: P(1))
raises(ValueError, lambda: P(X < 1, 2))
def test_issue_11934():
density = {0: .5, 1: .5}
X = FiniteRV('X', density)
assert E(X) == 0.5
assert P( X>= 2) == 0
def test_issue_8129():
X = Exponential('X', 4)
assert P(X >= X) == 1
assert P(X > X) == 0
assert P(X > X+1) == 0
def test_issue_12237():
X = Normal('X', 0, 1)
Y = Normal('Y', 0, 1)
U = P(X > 0, X)
V = P(Y < 0, X)
W = P(X + Y > 0, X)
assert W == P(X + Y > 0, X)
assert U == BernoulliDistribution(S.Half, S.Zero, S.One)
assert V == S.Half
|
745b1bca727fb88c8ecab296f7f0691a3eff48ae5e888a55b7f78a7ab3ddf1f2 | from sympy import (FiniteSet, S, Symbol, sqrt, nan, beta, Rational, symbols,
simplify, Eq, cos, And, Tuple, Or, Dict, sympify, binomial,
cancel, exp, I, Piecewise, Sum, Dummy)
from sympy.external import import_module
from sympy.matrices import Matrix
from sympy.stats import (DiscreteUniform, Die, Bernoulli, Coin, Binomial, BetaBinomial,
Hypergeometric, Rademacher, P, E, variance, covariance, skewness,
sample, density, where, FiniteRV, pspace, cdf, correlation, moment,
cmoment, smoment, characteristic_function, moment_generating_function,
quantile, kurtosis, median)
from sympy.stats.frv_types import DieDistribution, BinomialDistribution, \
HypergeometricDistribution
from sympy.stats.rv import Density
from sympy.testing.pytest import raises, skip
def BayesTest(A, B):
assert P(A, B) == P(And(A, B)) / P(B)
assert P(A, B) == P(B, A) * P(A) / P(B)
def test_discreteuniform():
# Symbolic
a, b, c, t = symbols('a b c t')
X = DiscreteUniform('X', [a, b, c])
assert E(X) == (a + b + c)/3
assert simplify(variance(X)
- ((a**2 + b**2 + c**2)/3 - (a/3 + b/3 + c/3)**2)) == 0
assert P(Eq(X, a)) == P(Eq(X, b)) == P(Eq(X, c)) == S('1/3')
Y = DiscreteUniform('Y', range(-5, 5))
# Numeric
assert E(Y) == S('-1/2')
assert variance(Y) == S('33/4')
assert median(Y) == FiniteSet(-1, 0)
for x in range(-5, 5):
assert P(Eq(Y, x)) == S('1/10')
assert P(Y <= x) == S(x + 6)/10
assert P(Y >= x) == S(5 - x)/10
assert dict(density(Die('D', 6)).items()) == \
dict(density(DiscreteUniform('U', range(1, 7))).items())
assert characteristic_function(X)(t) == exp(I*a*t)/3 + exp(I*b*t)/3 + exp(I*c*t)/3
assert moment_generating_function(X)(t) == exp(a*t)/3 + exp(b*t)/3 + exp(c*t)/3
# issue 18611
raises(ValueError, lambda: DiscreteUniform('Z', [a, a, a, b, b, c]))
def test_dice():
# TODO: Make iid method!
X, Y, Z = Die('X', 6), Die('Y', 6), Die('Z', 6)
a, b, t, p = symbols('a b t p')
assert E(X) == 3 + S.Half
assert variance(X) == Rational(35, 12)
assert E(X + Y) == 7
assert E(X + X) == 7
assert E(a*X + b) == a*E(X) + b
assert variance(X + Y) == variance(X) + variance(Y) == cmoment(X + Y, 2)
assert variance(X + X) == 4 * variance(X) == cmoment(X + X, 2)
assert cmoment(X, 0) == 1
assert cmoment(4*X, 3) == 64*cmoment(X, 3)
assert covariance(X, Y) is S.Zero
assert covariance(X, X + Y) == variance(X)
assert density(Eq(cos(X*S.Pi), 1))[True] == S.Half
assert correlation(X, Y) == 0
assert correlation(X, Y) == correlation(Y, X)
assert smoment(X + Y, 3) == skewness(X + Y)
assert smoment(X + Y, 4) == kurtosis(X + Y)
assert smoment(X, 0) == 1
assert P(X > 3) == S.Half
assert P(2*X > 6) == S.Half
assert P(X > Y) == Rational(5, 12)
assert P(Eq(X, Y)) == P(Eq(X, 1))
assert E(X, X > 3) == 5 == moment(X, 1, 0, X > 3)
assert E(X, Y > 3) == E(X) == moment(X, 1, 0, Y > 3)
assert E(X + Y, Eq(X, Y)) == E(2*X)
assert moment(X, 0) == 1
assert moment(5*X, 2) == 25*moment(X, 2)
assert quantile(X)(p) == Piecewise((nan, (p > 1) | (p < 0)),\
(S.One, p <= Rational(1, 6)), (S(2), p <= Rational(1, 3)), (S(3), p <= S.Half),\
(S(4), p <= Rational(2, 3)), (S(5), p <= Rational(5, 6)), (S(6), p <= 1))
assert P(X > 3, X > 3) is S.One
assert P(X > Y, Eq(Y, 6)) is S.Zero
assert P(Eq(X + Y, 12)) == Rational(1, 36)
assert P(Eq(X + Y, 12), Eq(X, 6)) == Rational(1, 6)
assert density(X + Y) == density(Y + Z) != density(X + X)
d = density(2*X + Y**Z)
assert d[S(22)] == Rational(1, 108) and d[S(4100)] == Rational(1, 216) and S(3130) not in d
assert pspace(X).domain.as_boolean() == Or(
*[Eq(X.symbol, i) for i in [1, 2, 3, 4, 5, 6]])
assert where(X > 3).set == FiniteSet(4, 5, 6)
assert characteristic_function(X)(t) == exp(6*I*t)/6 + exp(5*I*t)/6 + exp(4*I*t)/6 + exp(3*I*t)/6 + exp(2*I*t)/6 + exp(I*t)/6
assert moment_generating_function(X)(t) == exp(6*t)/6 + exp(5*t)/6 + exp(4*t)/6 + exp(3*t)/6 + exp(2*t)/6 + exp(t)/6
assert median(X) == FiniteSet(3, 4)
D = Die('D', 7)
assert median(D) == FiniteSet(4)
# Bayes test for die
BayesTest(X > 3, X + Y < 5)
BayesTest(Eq(X - Y, Z), Z > Y)
BayesTest(X > 3, X > 2)
# arg test for die
raises(ValueError, lambda: Die('X', -1)) # issue 8105: negative sides.
raises(ValueError, lambda: Die('X', 0))
raises(ValueError, lambda: Die('X', 1.5)) # issue 8103: non integer sides.
# symbolic test for die
n, k = symbols('n, k', positive=True)
D = Die('D', n)
dens = density(D).dict
assert dens == Density(DieDistribution(n))
assert set(dens.subs(n, 4).doit().keys()) == set([1, 2, 3, 4])
assert set(dens.subs(n, 4).doit().values()) == set([Rational(1, 4)])
k = Dummy('k', integer=True)
assert E(D).dummy_eq(
Sum(Piecewise((k/n, k <= n), (0, True)), (k, 1, n)))
assert variance(D).subs(n, 6).doit() == Rational(35, 12)
ki = Dummy('ki')
cumuf = cdf(D)(k)
assert cumuf.dummy_eq(
Sum(Piecewise((1/n, (ki >= 1) & (ki <= n)), (0, True)), (ki, 1, k)))
assert cumuf.subs({n: 6, k: 2}).doit() == Rational(1, 3)
t = Dummy('t')
cf = characteristic_function(D)(t)
assert cf.dummy_eq(
Sum(Piecewise((exp(ki*I*t)/n, (ki >= 1) & (ki <= n)), (0, True)), (ki, 1, n)))
assert cf.subs(n, 3).doit() == exp(3*I*t)/3 + exp(2*I*t)/3 + exp(I*t)/3
mgf = moment_generating_function(D)(t)
assert mgf.dummy_eq(
Sum(Piecewise((exp(ki*t)/n, (ki >= 1) & (ki <= n)), (0, True)), (ki, 1, n)))
assert mgf.subs(n, 3).doit() == exp(3*t)/3 + exp(2*t)/3 + exp(t)/3
def test_given():
X = Die('X', 6)
assert density(X, X > 5) == {S(6): S.One}
assert where(X > 2, X > 5).as_boolean() == Eq(X.symbol, 6)
assert sample(X, X > 5) == 6
def test_domains():
X, Y = Die('x', 6), Die('y', 6)
x, y = X.symbol, Y.symbol
# Domains
d = where(X > Y)
assert d.condition == (x > y)
d = where(And(X > Y, Y > 3))
assert d.as_boolean() == Or(And(Eq(x, 5), Eq(y, 4)), And(Eq(x, 6),
Eq(y, 5)), And(Eq(x, 6), Eq(y, 4)))
assert len(d.elements) == 3
assert len(pspace(X + Y).domain.elements) == 36
Z = Die('x', 4)
raises(ValueError, lambda: P(X > Z)) # Two domains with same internal symbol
assert pspace(X + Y).domain.set == FiniteSet(1, 2, 3, 4, 5, 6)**2
assert where(X > 3).set == FiniteSet(4, 5, 6)
assert X.pspace.domain.dict == FiniteSet(
*[Dict({X.symbol: i}) for i in range(1, 7)])
assert where(X > Y).dict == FiniteSet(*[Dict({X.symbol: i, Y.symbol: j})
for i in range(1, 7) for j in range(1, 7) if i > j])
def test_bernoulli():
p, a, b, t = symbols('p a b t')
X = Bernoulli('B', p, a, b)
assert E(X) == a*p + b*(-p + 1)
assert density(X)[a] == p
assert density(X)[b] == 1 - p
assert characteristic_function(X)(t) == p * exp(I * a * t) + (-p + 1) * exp(I * b * t)
assert moment_generating_function(X)(t) == p * exp(a * t) + (-p + 1) * exp(b * t)
X = Bernoulli('B', p, 1, 0)
z = Symbol("z")
assert E(X) == p
assert simplify(variance(X)) == p*(1 - p)
assert E(a*X + b) == a*E(X) + b
assert simplify(variance(a*X + b)) == simplify(a**2 * variance(X))
assert quantile(X)(z) == Piecewise((nan, (z > 1) | (z < 0)), (0, z <= 1 - p), (1, z <= 1))
Y = Bernoulli('Y', Rational(1, 2))
assert median(Y) == FiniteSet(0, 1)
Z = Bernoulli('Z', Rational(2, 3))
assert median(Z) == FiniteSet(1)
raises(ValueError, lambda: Bernoulli('B', 1.5))
raises(ValueError, lambda: Bernoulli('B', -0.5))
#issue 8248
assert X.pspace.compute_expectation(1) == 1
def test_cdf():
D = Die('D', 6)
o = S.One
assert cdf(
D) == sympify({1: o/6, 2: o/3, 3: o/2, 4: 2*o/3, 5: 5*o/6, 6: o})
def test_coins():
C, D = Coin('C'), Coin('D')
H, T = symbols('H, T')
assert P(Eq(C, D)) == S.Half
assert density(Tuple(C, D)) == {(H, H): Rational(1, 4), (H, T): Rational(1, 4),
(T, H): Rational(1, 4), (T, T): Rational(1, 4)}
assert dict(density(C).items()) == {H: S.Half, T: S.Half}
F = Coin('F', Rational(1, 10))
assert P(Eq(F, H)) == Rational(1, 10)
d = pspace(C).domain
assert d.as_boolean() == Or(Eq(C.symbol, H), Eq(C.symbol, T))
raises(ValueError, lambda: P(C > D)) # Can't intelligently compare H to T
def test_binomial_verify_parameters():
raises(ValueError, lambda: Binomial('b', .2, .5))
raises(ValueError, lambda: Binomial('b', 3, 1.5))
def test_binomial_numeric():
nvals = range(5)
pvals = [0, Rational(1, 4), S.Half, Rational(3, 4), 1]
for n in nvals:
for p in pvals:
X = Binomial('X', n, p)
assert E(X) == n*p
assert variance(X) == n*p*(1 - p)
if n > 0 and 0 < p < 1:
assert skewness(X) == (1 - 2*p)/sqrt(n*p*(1 - p))
assert kurtosis(X) == 3 + (1 - 6*p*(1 - p))/(n*p*(1 - p))
for k in range(n + 1):
assert P(Eq(X, k)) == binomial(n, k)*p**k*(1 - p)**(n - k)
def test_binomial_quantile():
X = Binomial('X', 50, S.Half)
assert quantile(X)(0.95) == S(31)
assert median(X) == FiniteSet(25)
X = Binomial('X', 5, S.Half)
p = Symbol("p", positive=True)
assert quantile(X)(p) == Piecewise((nan, p > S.One), (S.Zero, p <= Rational(1, 32)),\
(S.One, p <= Rational(3, 16)), (S(2), p <= S.Half), (S(3), p <= Rational(13, 16)),\
(S(4), p <= Rational(31, 32)), (S(5), p <= S.One))
assert median(X) == FiniteSet(2, 3)
def test_binomial_symbolic():
n = 2
p = symbols('p', positive=True)
X = Binomial('X', n, p)
t = Symbol('t')
assert simplify(E(X)) == n*p == simplify(moment(X, 1))
assert simplify(variance(X)) == n*p*(1 - p) == simplify(cmoment(X, 2))
assert cancel((skewness(X) - (1 - 2*p)/sqrt(n*p*(1 - p)))) == 0
assert cancel((kurtosis(X)) - (3 + (1 - 6*p*(1 - p))/(n*p*(1 - p)))) == 0
assert characteristic_function(X)(t) == p ** 2 * exp(2 * I * t) + 2 * p * (-p + 1) * exp(I * t) + (-p + 1) ** 2
assert moment_generating_function(X)(t) == p ** 2 * exp(2 * t) + 2 * p * (-p + 1) * exp(t) + (-p + 1) ** 2
# Test ability to change success/failure winnings
H, T = symbols('H T')
Y = Binomial('Y', n, p, succ=H, fail=T)
assert simplify(E(Y) - (n*(H*p + T*(1 - p)))) == 0
# test symbolic dimensions
n = symbols('n')
B = Binomial('B', n, p)
raises(NotImplementedError, lambda: P(B > 2))
assert density(B).dict == Density(BinomialDistribution(n, p, 1, 0))
assert set(density(B).dict.subs(n, 4).doit().keys()) == \
set([S.Zero, S.One, S(2), S(3), S(4)])
assert set(density(B).dict.subs(n, 4).doit().values()) == \
set([(1 - p)**4, 4*p*(1 - p)**3, 6*p**2*(1 - p)**2, 4*p**3*(1 - p), p**4])
k = Dummy('k', integer=True)
assert E(B > 2).dummy_eq(
Sum(Piecewise((k*p**k*(1 - p)**(-k + n)*binomial(n, k), (k >= 0)
& (k <= n) & (k > 2)), (0, True)), (k, 0, n)))
def test_beta_binomial():
# verify parameters
raises(ValueError, lambda: BetaBinomial('b', .2, 1, 2))
raises(ValueError, lambda: BetaBinomial('b', 2, -1, 2))
raises(ValueError, lambda: BetaBinomial('b', 2, 1, -2))
assert BetaBinomial('b', 2, 1, 1)
# test numeric values
nvals = range(1,5)
alphavals = [Rational(1, 4), S.Half, Rational(3, 4), 1, 10]
betavals = [Rational(1, 4), S.Half, Rational(3, 4), 1, 10]
for n in nvals:
for a in alphavals:
for b in betavals:
X = BetaBinomial('X', n, a, b)
assert E(X) == moment(X, 1)
assert variance(X) == cmoment(X, 2)
# test symbolic
n, a, b = symbols('a b n')
assert BetaBinomial('x', n, a, b)
n = 2 # Because we're using for loops, can't do symbolic n
a, b = symbols('a b', positive=True)
X = BetaBinomial('X', n, a, b)
t = Symbol('t')
assert E(X).expand() == moment(X, 1).expand()
assert variance(X).expand() == cmoment(X, 2).expand()
assert skewness(X) == smoment(X, 3)
assert characteristic_function(X)(t) == exp(2*I*t)*beta(a + 2, b)/beta(a, b) +\
2*exp(I*t)*beta(a + 1, b + 1)/beta(a, b) + beta(a, b + 2)/beta(a, b)
assert moment_generating_function(X)(t) == exp(2*t)*beta(a + 2, b)/beta(a, b) +\
2*exp(t)*beta(a + 1, b + 1)/beta(a, b) + beta(a, b + 2)/beta(a, b)
def test_hypergeometric_numeric():
for N in range(1, 5):
for m in range(0, N + 1):
for n in range(1, N + 1):
X = Hypergeometric('X', N, m, n)
N, m, n = map(sympify, (N, m, n))
assert sum(density(X).values()) == 1
assert E(X) == n * m / N
if N > 1:
assert variance(X) == n*(m/N)*(N - m)/N*(N - n)/(N - 1)
# Only test for skewness when defined
if N > 2 and 0 < m < N and n < N:
assert skewness(X) == simplify((N - 2*m)*sqrt(N - 1)*(N - 2*n)
/ (sqrt(n*m*(N - m)*(N - n))*(N - 2)))
def test_hypergeometric_symbolic():
N, m, n = symbols('N, m, n')
H = Hypergeometric('H', N, m, n)
dens = density(H).dict
expec = E(H > 2)
assert dens == Density(HypergeometricDistribution(N, m, n))
assert dens.subs(N, 5).doit() == Density(HypergeometricDistribution(5, m, n))
assert set(dens.subs({N: 3, m: 2, n: 1}).doit().keys()) == set([S.Zero, S.One])
assert set(dens.subs({N: 3, m: 2, n: 1}).doit().values()) == set([Rational(1, 3), Rational(2, 3)])
k = Dummy('k', integer=True)
assert expec.dummy_eq(
Sum(Piecewise((k*binomial(m, k)*binomial(N - m, -k + n)
/binomial(N, n), k > 2), (0, True)), (k, 0, n)))
def test_rademacher():
X = Rademacher('X')
t = Symbol('t')
assert E(X) == 0
assert variance(X) == 1
assert density(X)[-1] == S.Half
assert density(X)[1] == S.Half
assert characteristic_function(X)(t) == exp(I*t)/2 + exp(-I*t)/2
assert moment_generating_function(X)(t) == exp(t) / 2 + exp(-t) / 2
def test_FiniteRV():
F = FiniteRV('F', {1: S.Half, 2: Rational(1, 4), 3: Rational(1, 4)})
p = Symbol("p", positive=True)
assert dict(density(F).items()) == {S.One: S.Half, S(2): Rational(1, 4), S(3): Rational(1, 4)}
assert P(F >= 2) == S.Half
assert quantile(F)(p) == Piecewise((nan, p > S.One), (S.One, p <= S.Half),\
(S(2), p <= Rational(3, 4)),(S(3), True))
assert pspace(F).domain.as_boolean() == Or(
*[Eq(F.symbol, i) for i in [1, 2, 3]])
raises(ValueError, lambda: FiniteRV('F', {1: S.Half, 2: S.Half, 3: S.Half}))
raises(ValueError, lambda: FiniteRV('F', {1: S.Half, 2: Rational(-1, 2), 3: S.One}))
raises(ValueError, lambda: FiniteRV('F', {1: S.One, 2: Rational(3, 2), 3: S.Zero,\
4: Rational(-1, 2), 5: Rational(-3, 4), 6: Rational(-1, 4)}))
def test_density_call():
from sympy.abc import p
x = Bernoulli('x', p)
d = density(x)
assert d(0) == 1 - p
assert d(S.Zero) == 1 - p
assert d(5) == 0
assert 0 in d
assert 5 not in d
assert d(S.Zero) == d[S.Zero]
def test_DieDistribution():
from sympy.abc import x
X = DieDistribution(6)
assert X.pmf(S.Half) is S.Zero
assert X.pmf(x).subs({x: 1}).doit() == Rational(1, 6)
assert X.pmf(x).subs({x: 7}).doit() == 0
assert X.pmf(x).subs({x: -1}).doit() == 0
assert X.pmf(x).subs({x: Rational(1, 3)}).doit() == 0
raises(ValueError, lambda: X.pmf(Matrix([0, 0])))
raises(ValueError, lambda: X.pmf(x**2 - 1))
def test_FinitePSpace():
X = Die('X', 6)
space = pspace(X)
assert space.density == DieDistribution(6)
def test_symbolic_conditions():
B = Bernoulli('B', Rational(1, 4))
D = Die('D', 4)
b, n = symbols('b, n')
Y = P(Eq(B, b))
Z = E(D > n)
assert Y == \
Piecewise((Rational(1, 4), Eq(b, 1)), (0, True)) + \
Piecewise((Rational(3, 4), Eq(b, 0)), (0, True))
assert Z == \
Piecewise((Rational(1, 4), n < 1), (0, True)) + Piecewise((S.Half, n < 2), (0, True)) + \
Piecewise((Rational(3, 4), n < 3), (0, True)) + Piecewise((S.One, n < 4), (0, True))
def test_sampling_methods():
distribs_random = [DiscreteUniform("D", list(range(5)))]
distribs_scipy = [Hypergeometric("H", 1, 1, 1)]
distribs_pymc3 = [BetaBinomial("B", 1, 1, 1)]
size = 5
for X in distribs_random:
sam = X.pspace.distribution._sample_random(size)
for i in range(size):
assert sam[i] in X.pspace.domain.set
scipy = import_module('scipy')
if not scipy:
skip('Scipy not installed. Abort tests for _sample_scipy.')
else:
for X in distribs_scipy:
sam = X.pspace.distribution._sample_scipy(size)
for i in range(size):
assert sam[i] in X.pspace.domain.set
pymc3 = import_module('pymc3')
if not pymc3:
skip('PyMC3 not installed. Abort tests for _sample_pymc3.')
else:
for X in distribs_pymc3:
sam = X.pspace.distribution._sample_pymc3(size)
for i in range(size):
assert sam[i] in X.pspace.domain.set
|